Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from David Miller:

 1) Validate tunnel options length in act_tunnel_key, from Xin Long.

 2) Fix DMA sync bug in gve driver, from Adi Suresh.

 3) TSO kills performance on some r8169 chips due to HW issues, disable
    by default in that case, from Corinna Vinschen.

 4) Fix clock disable mismatch in fec driver, from Chuhong Yuan.

 5) Fix interrupt status bits define in hns3 driver, from Huazhong Tan.

 6) Fix workqueue deadlocks in qeth driver, from Julian Wiedmann.

 7) Don't napi_disable() twice in r8152 driver, from Hayes Wang.

 8) Fix SKB extension memory leak, from Florian Westphal.

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (54 commits)
  r8152: avoid to call napi_disable twice
  MAINTAINERS: Add myself as maintainer of virtio-vsock
  udp: drop skb extensions before marking skb stateless
  net: rtnetlink: prevent underflows in do_setvfinfo()
  can: m_can_platform: remove unnecessary m_can_class_resume() call
  can: m_can_platform: set net_device structure as driver data
  hv_netvsc: Fix send_table offset in case of a host bug
  hv_netvsc: Fix offset usage in netvsc_send_table()
  net-ipv6: IPV6_TRANSPARENT - check NET_RAW prior to NET_ADMIN
  sfc: Only cancel the PPS workqueue if it exists
  nfc: port100: handle command failure cleanly
  net-sysfs: fix netdev_queue_add_kobject() breakage
  r8152: Re-order napi_disable in rtl8152_close
  net: qca_spi: Move reset_count to struct qcaspi
  net: qca_spi: fix receive buffer size check
  net/ibmvnic: Ignore H_FUNCTION return from H_EOI to tolerate XIVE mode
  Revert "net/ibmvnic: Fix EOI when running in XIVE mode"
  net/mlxfw: Verify FSM error code translation doesn't exceed array size
  net/mlx5: Update the list of the PCI supported devices
  net/mlx5: Fix auto group size calculation
  ...
Linus Torvalds 2019-11-22 14:28:14 -08:00
commit 34c36f4564
50 changed files with 356 additions and 178 deletions


@@ -643,7 +643,7 @@ F: drivers/net/ethernet/alacritech/*
 
 FORCEDETH GIGABIT ETHERNET DRIVER
 M: Rain River <rain.1986.08.12@gmail.com>
-M: Zhu Yanjun <yanjun.zhu@oracle.com>
+M: Zhu Yanjun <zyjzyj2000@gmail.com>
 L: netdev@vger.kernel.org
 S: Maintained
 F: drivers/net/ethernet/nvidia/*
@@ -17215,6 +17215,7 @@ F: virt/lib/
 
 VIRTIO AND VHOST VSOCK DRIVER
 M: Stefan Hajnoczi <stefanha@redhat.com>
+M: Stefano Garzarella <sgarzare@redhat.com>
 L: kvm@vger.kernel.org
 L: virtualization@lists.linux-foundation.org
 L: netdev@vger.kernel.org


@@ -107,7 +107,7 @@ static int m_can_plat_probe(struct platform_device *pdev)
 
     mcan_class->is_peripheral = false;
 
-    platform_set_drvdata(pdev, mcan_class->dev);
+    platform_set_drvdata(pdev, mcan_class->net);
 
     m_can_init_ram(mcan_class);
 
@@ -166,8 +166,6 @@ static int __maybe_unused m_can_runtime_resume(struct device *dev)
     if (err)
         clk_disable_unprepare(mcan_class->hclk);
 
-    m_can_class_resume(dev);
-
     return err;
 }


@@ -3636,6 +3636,11 @@ fec_drv_remove(struct platform_device *pdev)
     struct net_device *ndev = platform_get_drvdata(pdev);
     struct fec_enet_private *fep = netdev_priv(ndev);
     struct device_node *np = pdev->dev.of_node;
+    int ret;
+
+    ret = pm_runtime_get_sync(&pdev->dev);
+    if (ret < 0)
+        return ret;
 
     cancel_work_sync(&fep->tx_timeout_work);
     fec_ptp_stop(pdev);
@@ -3643,15 +3648,17 @@ fec_drv_remove(struct platform_device *pdev)
     fec_enet_mii_remove(fep);
     if (fep->reg_phy)
         regulator_disable(fep->reg_phy);
-    pm_runtime_put(&pdev->dev);
-    pm_runtime_disable(&pdev->dev);
-    clk_disable_unprepare(fep->clk_ahb);
-    clk_disable_unprepare(fep->clk_ipg);
+
     if (of_phy_is_fixed_link(np))
         of_phy_deregister_fixed_link(np);
     of_node_put(fep->phy_node);
     free_netdev(ndev);
 
+    clk_disable_unprepare(fep->clk_ahb);
+    clk_disable_unprepare(fep->clk_ipg);
+    pm_runtime_put_noidle(&pdev->dev);
+    pm_runtime_disable(&pdev->dev);
+
     return 0;
 }


@@ -393,12 +393,13 @@ static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
 static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses,
                                     u64 iov_offset, u64 iov_len)
 {
+    u64 last_page = (iov_offset + iov_len - 1) / PAGE_SIZE;
+    u64 first_page = iov_offset / PAGE_SIZE;
     dma_addr_t dma;
-    u64 addr;
+    u64 page;
 
-    for (addr = iov_offset; addr < iov_offset + iov_len;
-         addr += PAGE_SIZE) {
-        dma = page_buses[addr / PAGE_SIZE];
+    for (page = first_page; page <= last_page; page++) {
+        dma = page_buses[page];
         dma_sync_single_for_device(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
     }
 }
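
To see the off-by-one the hunk above fixes, here is a minimal userspace sketch (illustrative only; the 4 KiB page size and the example offsets are assumptions, not values from the driver):

#include <stdio.h>

#define PAGE_SIZE 4096ULL   /* assumed page size for the demo */

int main(void)
{
    /* a 200-byte fragment starting near the end of page 0 */
    unsigned long long iov_offset = 4000, iov_len = 200;
    unsigned long long addr, old_pages = 0;

    /* old loop: steps the address by PAGE_SIZE from the (unaligned)
     * offset, so it visits page 0 only and never syncs page 1
     */
    for (addr = iov_offset; addr < iov_offset + iov_len; addr += PAGE_SIZE)
        old_pages++;

    /* fixed loop: walks the inclusive page range [first_page, last_page] */
    unsigned long long first_page = iov_offset / PAGE_SIZE;
    unsigned long long last_page = (iov_offset + iov_len - 1) / PAGE_SIZE;

    printf("old loop touches %llu page(s), fixed loop touches %llu\n",
           old_pages, last_page - first_page + 1);   /* prints 1 vs. 2 */
    return 0;
}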


@@ -166,7 +166,7 @@ enum HLCGE_PORT_TYPE {
 #define HCLGE_GLOBAL_RESET_BIT      0
 #define HCLGE_CORE_RESET_BIT        1
 #define HCLGE_IMP_RESET_BIT         2
-#define HCLGE_RESET_INT_M           GENMASK(2, 0)
+#define HCLGE_RESET_INT_M           GENMASK(7, 5)
 #define HCLGE_FUN_RST_ING           0x20C00
 #define HCLGE_FUN_RST_ING_B         0


@@ -2878,10 +2878,15 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
 
     if (test_bit(0, &adapter->resetting) &&
         adapter->reset_reason == VNIC_RESET_MOBILITY) {
-        struct irq_desc *desc = irq_to_desc(scrq->irq);
-        struct irq_chip *chip = irq_desc_get_chip(desc);
+        u64 val = (0xff000000) | scrq->hw_irq;
 
-        chip->irq_eoi(&desc->irq_data);
+        rc = plpar_hcall_norets(H_EOI, val);
+        /* H_EOI would fail with rc = H_FUNCTION when running
+         * in XIVE mode which is expected, but not an error.
+         */
+        if (rc && (rc != H_FUNCTION))
+            dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
+                val, rc);
     }
 
     rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,


@@ -1745,6 +1745,7 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
         err = mlx4_en_get_flow(dev, cmd, cmd->fs.location);
         break;
     case ETHTOOL_GRXCLSRLALL:
+        cmd->data = MAX_NUM_OF_FS_RULES;
         while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) {
             err = mlx4_en_get_flow(dev, cmd, i);
             if (!err)
@@ -1811,6 +1812,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
     struct mlx4_en_dev *mdev = priv->mdev;
     struct mlx4_en_port_profile new_prof;
     struct mlx4_en_priv *tmp;
+    int total_tx_count;
     int port_up = 0;
     int xdp_count;
     int err = 0;
@@ -1825,13 +1827,12 @@ static int mlx4_en_set_channels(struct net_device *dev,
     mutex_lock(&mdev->state_lock);
     xdp_count = priv->tx_ring_num[TX_XDP] ? channel->rx_count : 0;
-    if (channel->tx_count * priv->prof->num_up + xdp_count >
-        priv->mdev->profile.max_num_tx_rings_p_up * priv->prof->num_up) {
+    total_tx_count = channel->tx_count * priv->prof->num_up + xdp_count;
+    if (total_tx_count > MAX_TX_RINGS) {
         err = -EINVAL;
         en_err(priv,
                "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
-               channel->tx_count * priv->prof->num_up + xdp_count,
-               MAX_TX_RINGS);
+               total_tx_count, MAX_TX_RINGS);
         goto out;
     }


@@ -91,6 +91,7 @@ int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc)
     struct mlx4_en_dev *mdev = priv->mdev;
     struct mlx4_en_port_profile new_prof;
     struct mlx4_en_priv *tmp;
+    int total_count;
     int port_up = 0;
     int err = 0;
 
@@ -104,6 +105,14 @@ int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc)
                       MLX4_EN_NUM_UP_HIGH;
     new_prof.tx_ring_num[TX] = new_prof.num_tx_rings_p_up *
                    new_prof.num_up;
+    total_count = new_prof.tx_ring_num[TX] + new_prof.tx_ring_num[TX_XDP];
+    if (total_count > MAX_TX_RINGS) {
+        err = -EINVAL;
+        en_err(priv,
+               "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
+               total_count, MAX_TX_RINGS);
+        goto out;
+    }
     err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
     if (err)
         goto out;


@@ -239,12 +239,15 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
     if (max_encap_size < ipv4_encap_size) {
         mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
                    ipv4_encap_size, max_encap_size);
-        return -EOPNOTSUPP;
+        err = -EOPNOTSUPP;
+        goto out;
     }
 
     encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
-    if (!encap_header)
-        return -ENOMEM;
+    if (!encap_header) {
+        err = -ENOMEM;
+        goto out;
+    }
 
     /* used by mlx5e_detach_encap to lookup a neigh hash table
      * entry in the neigh hash table when a user deletes a rule
@@ -355,12 +358,15 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
     if (max_encap_size < ipv6_encap_size) {
         mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
                    ipv6_encap_size, max_encap_size);
-        return -EOPNOTSUPP;
+        err = -EOPNOTSUPP;
+        goto out;
     }
 
     encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
-    if (!encap_header)
-        return -ENOMEM;
+    if (!encap_header) {
+        err = -ENOMEM;
+        goto out;
+    }
 
     /* used by mlx5e_detach_encap to lookup a neigh hash table
      * entry in the neigh hash table when a user deletes a rule


@@ -708,9 +708,9 @@ static int get_fec_supported_advertised(struct mlx5_core_dev *dev,
 
 static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings *link_ksettings,
                            u32 eth_proto_cap,
-                           u8 connector_type)
+                           u8 connector_type, bool ext)
 {
-    if (!connector_type || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) {
+    if ((!connector_type && !ext) || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) {
         if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
                    | MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
                    | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
@@ -842,9 +842,9 @@ static int ptys2connector_type[MLX5E_CONNECTOR_TYPE_NUMBER] = {
         [MLX5E_PORT_OTHER] = PORT_OTHER,
     };
 
-static u8 get_connector_port(u32 eth_proto, u8 connector_type)
+static u8 get_connector_port(u32 eth_proto, u8 connector_type, bool ext)
 {
-    if (connector_type && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER)
+    if ((connector_type || ext) && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER)
         return ptys2connector_type[connector_type];
 
     if (eth_proto &
@@ -945,9 +945,9 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
     eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
 
     link_ksettings->base.port = get_connector_port(eth_proto_oper,
-                               connector_type);
+                               connector_type, ext);
     ptys2ethtool_supported_advertised_port(link_ksettings, eth_proto_admin,
-                           connector_type);
+                           connector_type, ext);
     get_lp_advertising(mdev, eth_proto_lp, link_ksettings);
 
     if (an_status == MLX5_AN_COMPLETE)


@@ -4252,9 +4252,12 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
 
     switch (proto) {
     case IPPROTO_GRE:
+        return features;
     case IPPROTO_IPIP:
     case IPPROTO_IPV6:
-        return features;
+        if (mlx5e_tunnel_proto_supported(priv->mdev, IPPROTO_IPIP))
+            return features;
+        break;
     case IPPROTO_UDP:
         udph = udp_hdr(skb);
         port = be16_to_cpu(udph->dest);


@@ -3268,7 +3268,20 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
             action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
                   MLX5_FLOW_CONTEXT_ACTION_COUNT;
 
-            if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
+            if (encap) {
+                parse_attr->mirred_ifindex[attr->out_count] =
+                    out_dev->ifindex;
+                parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
+                if (!parse_attr->tun_info[attr->out_count])
+                    return -ENOMEM;
+                encap = false;
+                attr->dests[attr->out_count].flags |=
+                    MLX5_ESW_DEST_ENCAP;
+                attr->out_count++;
+                /* attr->dests[].rep is resolved when we
+                 * handle encap
+                 */
+            } else if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
                 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
                 struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
                 struct net_device *uplink_upper;
@@ -3310,19 +3323,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
                 attr->dests[attr->out_count].rep = rpriv->rep;
                 attr->dests[attr->out_count].mdev = out_priv->mdev;
                 attr->out_count++;
-            } else if (encap) {
-                parse_attr->mirred_ifindex[attr->out_count] =
-                    out_dev->ifindex;
-                parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
-                if (!parse_attr->tun_info[attr->out_count])
-                    return -ENOMEM;
-                encap = false;
-                attr->dests[attr->out_count].flags |=
-                    MLX5_ESW_DEST_ENCAP;
-                attr->out_count++;
-                /* attr->dests[].rep is resolved when we
-                 * handle encap
-                 */
             } else if (parse_attr->filter_dev != priv->netdev) {
                 /* All mlx5 devices are called to configure
                  * high level device filters. Therefore, the
@@ -4000,9 +4000,8 @@ int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
                   struct tc_cls_matchall_offload *ma)
 {
     struct netlink_ext_ack *extack = ma->common.extack;
-    int prio = TC_H_MAJ(ma->common.prio) >> 16;
 
-    if (prio != 1) {
+    if (ma->common.prio != 1) {
         NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
         return -EINVAL;
     }


@@ -2117,7 +2117,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
 
 unlock:
     mutex_unlock(&esw->state_lock);
-    return 0;
+    return err;
 }
 
 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,


@@ -579,7 +579,7 @@ static void del_sw_flow_group(struct fs_node *node)
     rhashtable_destroy(&fg->ftes_hash);
     ida_destroy(&fg->fte_allocator);
-    if (ft->autogroup.active)
+    if (ft->autogroup.active && fg->max_ftes == ft->autogroup.group_size)
         ft->autogroup.num_groups--;
     err = rhltable_remove(&ft->fgs_hash,
                   &fg->hash,
@@ -1126,6 +1126,8 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
 
     ft->autogroup.active = true;
     ft->autogroup.required_groups = max_num_groups;
+    /* We save place for flow groups in addition to max types */
+    ft->autogroup.group_size = ft->max_fte / (max_num_groups + 1);
 
     return ft;
 }
@@ -1328,8 +1330,7 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft
         return ERR_PTR(-ENOENT);
 
     if (ft->autogroup.num_groups < ft->autogroup.required_groups)
-        /* We save place for flow groups in addition to max types */
-        group_size = ft->max_fte / (ft->autogroup.required_groups + 1);
+        group_size = ft->autogroup.group_size;
 
     /*  ft->max_fte == ft->autogroup.max_types */
     if (group_size == 0)
@@ -1356,7 +1357,8 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft
     if (IS_ERR(fg))
         goto out;
 
-    ft->autogroup.num_groups++;
+    if (group_size == ft->autogroup.group_size)
+        ft->autogroup.num_groups++;
 
 out:
     return fg;


@@ -162,6 +162,7 @@ struct mlx5_flow_table {
     struct {
         bool            active;
         unsigned int        required_groups;
+        unsigned int        group_size;
         unsigned int        num_groups;
     } autogroup;
     /* Protect fwd_rules */


@@ -1566,6 +1566,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
     { PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF},  /* ConnectX-6 VF */
     { PCI_VDEVICE(MELLANOX, 0x101d) },                     /* ConnectX-6 Dx */
     { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF},  /* ConnectX Family mlx5Gen Virtual Function */
+    { PCI_VDEVICE(MELLANOX, 0x101f) },                     /* ConnectX-6 LX */
     { PCI_VDEVICE(MELLANOX, 0xa2d2) },                     /* BlueField integrated ConnectX-5 network controller */
     { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF},  /* BlueField integrated ConnectX-5 network controller VF */
     { PCI_VDEVICE(MELLANOX, 0xa2d6) },                     /* BlueField-2 integrated ConnectX-6 Dx network controller */


@@ -595,6 +595,18 @@ static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule,
         }
     }
 }
 
+static u16 dr_get_bits_per_mask(u16 byte_mask)
+{
+    u16 bits = 0;
+
+    while (byte_mask) {
+        byte_mask = byte_mask & (byte_mask - 1);
+        bits++;
+    }
+
+    return bits;
+}
+
 static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
                       struct mlx5dr_domain *dmn,
                       struct mlx5dr_domain_rx_tx *nic_dmn)
@@ -607,6 +619,9 @@ static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
     if (!ctrl->may_grow)
         return false;
 
+    if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk_size)
+        return false;
+
     if (ctrl->num_of_collisions >= ctrl->increase_threshold &&
         (ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= ctrl->increase_threshold)
         return true;
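
The helper added above is Kernighan's population count: each iteration clears the lowest set bit of the byte mask, so the loop count equals the number of bytes that participate in the match, and the new check refuses to grow a hash table beyond the entropy the mask can supply. A standalone userspace sketch of the counting trick (illustrative only):

#include <stdio.h>

static unsigned short get_bits_per_mask(unsigned short byte_mask)
{
    unsigned short bits = 0;

    while (byte_mask) {
        byte_mask &= byte_mask - 1;   /* clear the lowest set bit */
        bits++;
    }
    return bits;
}

int main(void)
{
    /* 0x0035 = 0b0011'0101: four set bits, so four participating bytes */
    printf("bits set in 0x0035: %u\n", get_bits_per_mask(0x0035));
    return 0;
}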


@@ -700,6 +700,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
     unsigned int irqn;
     void *cqc, *in;
     __be64 *pas;
+    int vector;
     u32 i;
 
     cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@@ -728,7 +729,8 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
     if (!in)
         goto err_cqwq;
 
-    err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
+    vector = smp_processor_id() % mlx5_comp_vectors_count(mdev);
+    err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn);
     if (err) {
         kvfree(in);
         goto err_cqwq;


@@ -560,18 +560,6 @@ bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste)
     return !refcount_read(&ste->refcount);
 }
 
-static u16 get_bits_per_mask(u16 byte_mask)
-{
-    u16 bits = 0;
-
-    while (byte_mask) {
-        byte_mask = byte_mask & (byte_mask - 1);
-        bits++;
-    }
-
-    return bits;
-}
-
 /* Init one ste as a pattern for ste data array */
 void mlx5dr_ste_set_formatted_ste(u16 gvmi,
                   struct mlx5dr_domain_rx_tx *nic_dmn,
@@ -620,20 +608,12 @@ int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
     struct mlx5dr_ste_htbl *next_htbl;
 
     if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
-        u32 bits_in_mask;
         u8 next_lu_type;
         u16 byte_mask;
 
         next_lu_type = MLX5_GET(ste_general, hw_ste, next_lu_type);
         byte_mask = MLX5_GET(ste_general, hw_ste, byte_mask);
 
-        /* Don't allocate table more than required,
-         * the size of the table defined via the byte_mask, so no need
-         * to allocate more than that.
-         */
-        bits_in_mask = get_bits_per_mask(byte_mask) * BITS_PER_BYTE;
-        log_table_size = min(log_table_size, bits_in_mask);
-
         next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
                           log_table_size,
                           next_lu_type,
@@ -671,7 +651,7 @@ static void dr_ste_set_ctrl(struct mlx5dr_ste_htbl *htbl)
 
     htbl->ctrl.may_grow = true;
 
-    if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1)
+    if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask)
         htbl->ctrl.may_grow = false;
 
     /* Threshold is 50%, one is added to table of size 1 */


@@ -66,6 +66,8 @@ static int mlxfw_fsm_state_wait(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
         return err;
 
     if (fsm_state_err != MLXFW_FSM_STATE_ERR_OK) {
+        fsm_state_err = min_t(enum mlxfw_fsm_state_err,
+                      fsm_state_err, MLXFW_FSM_STATE_ERR_MAX);
         pr_err("Firmware flash failed: %s\n",
                mlxfw_fsm_state_err_str[fsm_state_err]);
         NL_SET_ERR_MSG_MOD(extack, "Firmware flash failed");
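
The min_t() clamp above bounds a firmware-reported error code before it indexes the message table. A minimal sketch of the pattern (the enum and strings here are made-up stand-ins, not the mlxfw definitions):

#include <stdio.h>

enum fsm_err { FSM_ERR_OK, FSM_ERR_REJECTED, FSM_ERR_MAX };   /* assumed layout */

static const char * const fsm_err_str[] = {
    [FSM_ERR_OK]       = "ok",
    [FSM_ERR_REJECTED] = "rejected",
    [FSM_ERR_MAX]      = "unknown error",   /* catch-all sentinel */
};

int main(void)
{
    unsigned int code = 7;   /* device reports a code newer than the driver */
    unsigned int idx = code < FSM_ERR_MAX ? code : FSM_ERR_MAX;

    /* without the clamp, fsm_err_str[7] would read past the array */
    printf("firmware flash failed: %s\n", fsm_err_str[idx]);
    return 0;
}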


@@ -994,7 +994,7 @@ u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
     if (d)
         return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
     else
-        return l3mdev_fib_table(ol_dev) ? : RT_TABLE_MAIN;
+        return RT_TABLE_MAIN;
 }
 
 static struct mlxsw_sp_rif *
@@ -1598,27 +1598,10 @@ static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
 {
     struct mlxsw_sp_ipip_entry *ipip_entry =
         mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
-    enum mlxsw_sp_l3proto ul_proto;
-    union mlxsw_sp_l3addr saddr;
-    u32 ul_tb_id;
 
     if (!ipip_entry)
         return 0;
 
-    /* For flat configuration cases, moving overlay to a different VRF might
-     * cause local address conflict, and the conflicting tunnels need to be
-     * demoted.
-     */
-    ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
-    ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
-    saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
-    if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
-                         saddr, ul_tb_id,
-                         ipip_entry)) {
-        mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
-        return 0;
-    }
-
     return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
                            true, false, false, extack);
 }


@@ -363,7 +363,7 @@ qcaspi_receive(struct qcaspi *qca)
     netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %08x\n",
            available);
 
-    if (available > QCASPI_HW_BUF_LEN) {
+    if (available > QCASPI_HW_BUF_LEN + QCASPI_HW_PKT_LEN) {
         /* This could only happen by interferences on the SPI line.
          * So retry later ...
          */
@@ -496,7 +496,6 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event)
     u16 signature = 0;
     u16 spi_config;
     u16 wrbuf_space = 0;
-    static u16 reset_count;
 
     if (event == QCASPI_EVENT_CPUON) {
         /* Read signature twice, if not valid
@@ -549,13 +548,13 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event)
 
         qca->sync = QCASPI_SYNC_RESET;
         qca->stats.trig_reset++;
-        reset_count = 0;
+        qca->reset_count = 0;
         break;
     case QCASPI_SYNC_RESET:
-        reset_count++;
+        qca->reset_count++;
         netdev_dbg(qca->net_dev, "sync: waiting for CPU on, count %u.\n",
-               reset_count);
-        if (reset_count >= QCASPI_RESET_TIMEOUT) {
+               qca->reset_count);
+        if (qca->reset_count >= QCASPI_RESET_TIMEOUT) {
             /* reset did not seem to take place, try again */
             qca->sync = QCASPI_SYNC_UNKNOWN;
             qca->stats.reset_timeout++;


@@ -94,6 +94,7 @@ struct qcaspi {
     unsigned int intr_req;
     unsigned int intr_svc;
+    u16 reset_count;
 
 #ifdef CONFIG_DEBUG_FS
     struct dentry *device_root;


@@ -7179,8 +7179,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
         dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1;
     }
 
-    /* RTL8168e-vl has a HW issue with TSO */
-    if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
+    /* RTL8168e-vl and one RTL8168c variant are known to have a
+     * HW issue with TSO.
+     */
+    if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
+        tp->mac_version == RTL_GIGA_MAC_VER_22) {
         dev->vlan_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
         dev->hw_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
         dev->features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);


@@ -1531,7 +1531,8 @@ void efx_ptp_remove(struct efx_nic *efx)
     (void)efx_ptp_disable(efx);
 
     cancel_work_sync(&efx->ptp_data->work);
-    cancel_work_sync(&efx->ptp_data->pps_work);
+    if (efx->ptp_data->pps_workwq)
+        cancel_work_sync(&efx->ptp_data->pps_work);
 
     skb_queue_purge(&efx->ptp_data->rxq);
     skb_queue_purge(&efx->ptp_data->txq);


@@ -609,7 +609,8 @@ struct nvsp_5_send_indirect_table {
     /* The number of entries in the send indirection table */
     u32 count;
 
-    /* The offset of the send indirection table from top of this struct.
+    /* The offset of the send indirection table from the beginning of
+     * struct nvsp_message.
      * The send indirection table tells which channel to put the send
      * traffic on. Each entry is a channel number.
      */


@@ -1178,20 +1178,39 @@ static int netvsc_receive(struct net_device *ndev,
 }
 
 static void netvsc_send_table(struct net_device *ndev,
-                  const struct nvsp_message *nvmsg)
+                  struct netvsc_device *nvscdev,
+                  const struct nvsp_message *nvmsg,
+                  u32 msglen)
 {
     struct net_device_context *net_device_ctx = netdev_priv(ndev);
-    u32 count, *tab;
+    u32 count, offset, *tab;
     int i;
 
     count = nvmsg->msg.v5_msg.send_table.count;
+    offset = nvmsg->msg.v5_msg.send_table.offset;
+
     if (count != VRSS_SEND_TAB_SIZE) {
         netdev_err(ndev, "Received wrong send-table size:%u\n", count);
         return;
     }
 
-    tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
-              nvmsg->msg.v5_msg.send_table.offset);
+    /* If negotiated version <= NVSP_PROTOCOL_VERSION_6, the offset may be
+     * wrong due to a host bug. So fix the offset here.
+     */
+    if (nvscdev->nvsp_version <= NVSP_PROTOCOL_VERSION_6 &&
+        msglen >= sizeof(struct nvsp_message_header) +
+        sizeof(union nvsp_6_message_uber) + count * sizeof(u32))
+        offset = sizeof(struct nvsp_message_header) +
+             sizeof(union nvsp_6_message_uber);
+
+    /* Boundary check for all versions */
+    if (offset > msglen - count * sizeof(u32)) {
+        netdev_err(ndev, "Received send-table offset too big:%u\n",
+               offset);
+        return;
+    }
+
+    tab = (void *)nvmsg + offset;
 
     for (i = 0; i < count; i++)
         net_device_ctx->tx_table[i] = tab[i];
@@ -1209,12 +1228,14 @@ static void netvsc_send_vf(struct net_device *ndev,
             net_device_ctx->vf_alloc ? "added" : "removed");
 }
 
 static void netvsc_receive_inband(struct net_device *ndev,
-                  const struct nvsp_message *nvmsg)
+                  struct netvsc_device *nvscdev,
+                  const struct nvsp_message *nvmsg,
+                  u32 msglen)
 {
     switch (nvmsg->hdr.msg_type) {
     case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
-        netvsc_send_table(ndev, nvmsg);
+        netvsc_send_table(ndev, nvscdev, nvmsg, msglen);
         break;
 
     case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
@@ -1232,6 +1253,7 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
 {
     struct vmbus_channel *channel = nvchan->channel;
     const struct nvsp_message *nvmsg = hv_pkt_data(desc);
+    u32 msglen = hv_pkt_datalen(desc);
 
     trace_nvsp_recv(ndev, channel, nvmsg);
 
@@ -1247,7 +1269,7 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
         break;
 
     case VM_PKT_DATA_INBAND:
-        netvsc_receive_inband(ndev, nvmsg);
+        netvsc_receive_inband(ndev, net_device, nvmsg, msglen);
         break;
 
     default:
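
The boundary check added in netvsc_send_table() above guarantees that the count u32 table entries end inside the received message. With concrete (made-up) sizes, the arithmetic looks like this:

#include <stdint.h>
#include <stdio.h>

/* sketch of the check; assumes msglen >= count * sizeof(u32) */
static int send_table_offset_ok(uint32_t offset, uint32_t msglen, uint32_t count)
{
    return offset <= msglen - count * (uint32_t)sizeof(uint32_t);
}

int main(void)
{
    uint32_t msglen = 200, count = 16;   /* table body needs 64 bytes */

    printf("offset 100 -> %s\n",
           send_table_offset_ok(100, msglen, count) ? "accepted" : "rejected");
    printf("offset 180 -> %s\n",         /* 180 + 64 > 200: overruns the message */
           send_table_offset_ok(180, msglen, count) ? "accepted" : "rejected");
    return 0;
}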


@@ -145,8 +145,11 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
 static int sun4i_mdio_remove(struct platform_device *pdev)
 {
     struct mii_bus *bus = platform_get_drvdata(pdev);
+    struct sun4i_mdio_data *data = bus->priv;
 
     mdiobus_unregister(bus);
+    if (data->regulator)
+        regulator_disable(data->regulator);
     mdiobus_free(bus);
 
     return 0;


@@ -601,6 +601,8 @@ static int phylink_register_sfp(struct phylink *pl,
  * Create a new phylink instance, and parse the link parameters found in @np.
  * This will parse in-band modes, fixed-link or SFP configuration.
  *
+ * Note: the rtnl lock must not be held when calling this function.
+ *
  * Returns a pointer to a &struct phylink, or an error-pointer value. Users
  * must use IS_ERR() to check for errors from this function.
  */
@@ -678,6 +680,8 @@ EXPORT_SYMBOL_GPL(phylink_create);
  *
  * Destroy a phylink instance. Any PHY that has been attached must have been
  * cleaned up via phylink_disconnect_phy() prior to calling this function.
+ *
+ * Note: the rtnl lock must not be held when calling this function.
  */
 void phylink_destroy(struct phylink *pl)
 {
@@ -1254,7 +1258,13 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
     pl->link_config.duplex = our_kset.base.duplex;
     pl->link_config.an_enabled = our_kset.base.autoneg != AUTONEG_DISABLE;
 
-    if (!test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) {
+    /* If we have a PHY, phylib will call our link state function if the
+     * mode has changed, which will trigger a resolve and update the MAC
+     * configuration. For a fixed link, this isn't able to change any
+     * parameters, which just leaves inband mode.
+     */
+    if (pl->link_an_mode == MLO_AN_INBAND &&
+        !test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) {
         phylink_mac_config(pl, &pl->link_config);
         phylink_mac_an_restart(pl);
     }
@@ -1334,15 +1344,16 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
     if (pause->tx_pause)
         config->pause |= MLO_PAUSE_TX;
 
-    if (!test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) {
+    /* If we have a PHY, phylib will call our link state function if the
+     * mode has changed, which will trigger a resolve and update the MAC
+     * configuration.
+     */
+    if (pl->phydev) {
+        phy_set_asym_pause(pl->phydev, pause->rx_pause,
+                   pause->tx_pause);
+    } else if (!test_bit(PHYLINK_DISABLE_STOPPED,
+                 &pl->phylink_disable_state)) {
         switch (pl->link_an_mode) {
-        case MLO_AN_PHY:
-            /* Silently mark the carrier down, and then trigger a resolve */
-            if (pl->netdev)
-                netif_carrier_off(pl->netdev);
-            phylink_run_resolve(pl);
-            break;
-
         case MLO_AN_FIXED:
             /* Should we allow fixed links to change against the config? */
             phylink_resolve_flow(pl, config);


@@ -4283,10 +4283,10 @@ static int rtl8152_close(struct net_device *netdev)
     unregister_pm_notifier(&tp->pm_notifier);
 #endif
     tasklet_disable(&tp->tx_tl);
-    napi_disable(&tp->napi);
     clear_bit(WORK_ENABLE, &tp->flags);
     usb_kill_urb(tp->intr_urb);
     cancel_delayed_work_sync(&tp->schedule);
+    napi_disable(&tp->napi);
     netif_stop_queue(netdev);
 
     res = usb_autopm_get_interface(tp->intf);
@@ -4552,10 +4552,10 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
 
     netif_stop_queue(netdev);
     tasklet_disable(&tp->tx_tl);
-    napi_disable(&tp->napi);
     clear_bit(WORK_ENABLE, &tp->flags);
     usb_kill_urb(tp->intr_urb);
     cancel_delayed_work_sync(&tp->schedule);
+    napi_disable(&tp->napi);
     if (netif_carrier_ok(netdev)) {
         mutex_lock(&tp->control);
         tp->rtl_ops.disable(tp);
@@ -4673,7 +4673,7 @@ static int rtl8152_system_resume(struct r8152 *tp)
 
     netif_device_attach(netdev);
 
-    if (netif_running(netdev) && netdev->flags & IFF_UP) {
+    if (netif_running(netdev) && (netdev->flags & IFF_UP)) {
         tp->rtl_ops.up(tp);
         netif_carrier_off(netdev);
         set_bit(WORK_ENABLE, &tp->flags);
@@ -5244,9 +5244,15 @@ static int rtl8152_set_tunable(struct net_device *netdev,
         }
 
         if (tp->rx_copybreak != val) {
-            napi_disable(&tp->napi);
-            tp->rx_copybreak = val;
-            napi_enable(&tp->napi);
+            if (netdev->flags & IFF_UP) {
+                mutex_lock(&tp->control);
+                napi_disable(&tp->napi);
+                tp->rx_copybreak = val;
+                napi_enable(&tp->napi);
+                mutex_unlock(&tp->control);
+            } else {
+                tp->rx_copybreak = val;
+            }
         }
         break;
     default:
@@ -5274,9 +5280,15 @@ static int rtl8152_set_ringparam(struct net_device *netdev,
         return -EINVAL;
 
     if (tp->rx_pending != ring->rx_pending) {
-        napi_disable(&tp->napi);
-        tp->rx_pending = ring->rx_pending;
-        napi_enable(&tp->napi);
+        if (netdev->flags & IFF_UP) {
+            mutex_lock(&tp->control);
+            napi_disable(&tp->napi);
+            tp->rx_pending = ring->rx_pending;
+            napi_enable(&tp->napi);
+            mutex_unlock(&tp->control);
+        } else {
+            tp->rx_pending = ring->rx_pending;
+        }
     }
 
     return 0;


@@ -783,7 +783,7 @@ static int port100_send_frame_async(struct port100 *dev, struct sk_buff *out,
 
     rc = port100_submit_urb_for_ack(dev, GFP_KERNEL);
     if (rc)
-        usb_unlink_urb(dev->out_urb);
+        usb_kill_urb(dev->out_urb);
 
 exit:
     mutex_unlock(&dev->out_urb_lock);


@@ -839,6 +839,7 @@ struct qeth_card {
     struct service_level qeth_service_level;
     struct qdio_ssqd_desc ssqd;
     debug_info_t *debug;
+    struct mutex sbp_lock;
     struct mutex conf_mutex;
    struct mutex discipline_mutex;
     struct napi_struct napi;


@@ -901,30 +901,30 @@ static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
                 CCW_DEVID(cdev), dstat, cstat);
         print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
                 16, 1, irb, 64, 1);
-        return 1;
+        return -EIO;
     }
 
     if (dstat & DEV_STAT_UNIT_CHECK) {
         if (sense[SENSE_RESETTING_EVENT_BYTE] &
             SENSE_RESETTING_EVENT_FLAG) {
             QETH_CARD_TEXT(card, 2, "REVIND");
-            return 1;
+            return -EIO;
         }
         if (sense[SENSE_COMMAND_REJECT_BYTE] &
             SENSE_COMMAND_REJECT_FLAG) {
             QETH_CARD_TEXT(card, 2, "CMDREJi");
-            return 1;
+            return -EIO;
         }
         if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
             QETH_CARD_TEXT(card, 2, "AFFE");
-            return 1;
+            return -EIO;
         }
         if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
             QETH_CARD_TEXT(card, 2, "ZEROSEN");
             return 0;
         }
         QETH_CARD_TEXT(card, 2, "DGENCHK");
-        return 1;
+        return -EIO;
     }
     return 0;
 }


@@ -467,10 +467,14 @@ static void qeth_l2_set_promisc_mode(struct qeth_card *card)
     if (card->info.promisc_mode == enable)
         return;
 
-    if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
+    if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) {
         qeth_setadp_promisc_mode(card, enable);
-    else if (card->options.sbp.reflect_promisc)
-        qeth_l2_promisc_to_bridge(card, enable);
+    } else {
+        mutex_lock(&card->sbp_lock);
+        if (card->options.sbp.reflect_promisc)
+            qeth_l2_promisc_to_bridge(card, enable);
+        mutex_unlock(&card->sbp_lock);
+    }
 }
 
 /* New MAC address is added to the hash table and marked to be written on card
@@ -631,6 +635,7 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
     int rc;
 
     qeth_l2_vnicc_set_defaults(card);
+    mutex_init(&card->sbp_lock);
 
     if (gdev->dev.type == &qeth_generic_devtype) {
         rc = qeth_l2_create_device_attributes(&gdev->dev);
@@ -804,10 +809,12 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev)
     } else
         card->info.hwtrap = 0;
 
+    mutex_lock(&card->sbp_lock);
     qeth_bridgeport_query_support(card);
     if (card->options.sbp.supported_funcs)
         dev_info(&card->gdev->dev,
              "The device represents a Bridge Capable Port\n");
+    mutex_unlock(&card->sbp_lock);
 
     qeth_l2_register_dev_addr(card);
 
@@ -1162,9 +1169,9 @@ static void qeth_bridge_state_change_worker(struct work_struct *work)
 
     /* Role should not change by itself, but if it did, */
     /* information from the hardware is authoritative.  */
-    mutex_lock(&data->card->conf_mutex);
+    mutex_lock(&data->card->sbp_lock);
     data->card->options.sbp.role = entry->role;
-    mutex_unlock(&data->card->conf_mutex);
+    mutex_unlock(&data->card->sbp_lock);
 
     snprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange");
     snprintf(env_role, sizeof(env_role), "ROLE=%s",
@@ -1230,9 +1237,9 @@ static void qeth_bridge_host_event_worker(struct work_struct *work)
             : (data->hostevs.lost_event_mask == 0x02)
             ? "Bridge port state change"
             : "Unknown reason");
-        mutex_lock(&data->card->conf_mutex);
+        mutex_lock(&data->card->sbp_lock);
         data->card->options.sbp.hostnotification = 0;
-        mutex_unlock(&data->card->conf_mutex);
+        mutex_unlock(&data->card->sbp_lock);
         qeth_bridge_emit_host_event(data->card, anev_abort,
                         0, NULL, NULL);
     } else


@@ -24,6 +24,7 @@ static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
     if (qeth_l2_vnicc_is_in_use(card))
         return sprintf(buf, "n/a (VNIC characteristics)\n");
 
+    mutex_lock(&card->sbp_lock);
     if (qeth_card_hw_is_reachable(card) &&
         card->options.sbp.supported_funcs)
         rc = qeth_bridgeport_query_ports(card,
@@ -57,6 +58,7 @@ static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
         else
             rc = sprintf(buf, "%s\n", word);
     }
+    mutex_unlock(&card->sbp_lock);
 
     return rc;
 }
@@ -91,6 +93,7 @@ static ssize_t qeth_bridge_port_role_store(struct device *dev,
         return -EINVAL;
 
     mutex_lock(&card->conf_mutex);
+    mutex_lock(&card->sbp_lock);
 
     if (qeth_l2_vnicc_is_in_use(card))
         rc = -EBUSY;
@@ -104,6 +107,7 @@ static ssize_t qeth_bridge_port_role_store(struct device *dev,
     } else
         card->options.sbp.role = role;
 
+    mutex_unlock(&card->sbp_lock);
     mutex_unlock(&card->conf_mutex);
 
     return rc ? rc : count;
@@ -158,6 +162,7 @@ static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev,
         return rc;
 
     mutex_lock(&card->conf_mutex);
+    mutex_lock(&card->sbp_lock);
 
     if (qeth_l2_vnicc_is_in_use(card))
         rc = -EBUSY;
@@ -168,6 +173,7 @@ static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev,
     } else
         card->options.sbp.hostnotification = enable;
 
+    mutex_unlock(&card->sbp_lock);
     mutex_unlock(&card->conf_mutex);
 
     return rc ? rc : count;
@@ -223,6 +229,7 @@ static ssize_t qeth_bridgeport_reflect_store(struct device *dev,
         return -EINVAL;
 
     mutex_lock(&card->conf_mutex);
+    mutex_lock(&card->sbp_lock);
 
     if (qeth_l2_vnicc_is_in_use(card))
         rc = -EBUSY;
@@ -234,6 +241,7 @@ static ssize_t qeth_bridgeport_reflect_store(struct device *dev,
         rc = 0;
     }
 
+    mutex_unlock(&card->sbp_lock);
     mutex_unlock(&card->conf_mutex);
 
     return rc ? rc : count;
@@ -269,6 +277,8 @@ void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
         return;
     if (!card->options.sbp.supported_funcs)
         return;
+
+    mutex_lock(&card->sbp_lock);
     if (card->options.sbp.role != QETH_SBP_ROLE_NONE) {
         /* Conditional to avoid spurious error messages */
         qeth_bridgeport_setrole(card, card->options.sbp.role);
@@ -280,8 +290,10 @@ void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
         rc = qeth_bridgeport_an_set(card, 1);
         if (rc)
             card->options.sbp.hostnotification = 0;
-    } else
+    } else {
         qeth_bridgeport_an_set(card, 0);
+    }
+    mutex_unlock(&card->sbp_lock);
 }
 
 /* VNIC CHARS support */


@@ -4169,12 +4169,18 @@ static inline void skb_ext_reset(struct sk_buff *skb)
         skb->active_extensions = 0;
     }
 }
+
+static inline bool skb_has_extensions(struct sk_buff *skb)
+{
+    return unlikely(skb->active_extensions);
+}
 #else
 static inline void skb_ext_put(struct sk_buff *skb) {}
 static inline void skb_ext_reset(struct sk_buff *skb) {}
 static inline void skb_ext_del(struct sk_buff *skb, int unused) {}
 static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
 static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
+static inline bool skb_has_extensions(struct sk_buff *skb) { return false; }
 #endif /* CONFIG_SKB_EXTENSIONS */
 
 static inline void nf_reset_ct(struct sk_buff *skb)


@@ -356,6 +356,8 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
 void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
 void tls_sw_strparser_done(struct tls_context *tls_ctx);
 int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
+int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
+               int offset, size_t size, int flags);
 int tls_sw_sendpage(struct sock *sk, struct page *page,
             int offset, size_t size, int flags);
 void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);


@@ -678,8 +678,10 @@ bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv)
     down_write(&bpf_devs_lock);
     if (!offdevs_inited) {
         err = rhashtable_init(&offdevs, &offdevs_params);
-        if (err)
+        if (err) {
+            up_write(&bpf_devs_lock);
             return ERR_PTR(err);
+        }
         offdevs_inited = true;
     }
     up_write(&bpf_devs_lock);


@@ -89,7 +89,7 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
     struct clip_vcc **walk;
 
     if (!entry) {
-        pr_crit("!clip_vcc->entry (clip_vcc %p)\n", clip_vcc);
+        pr_err("!clip_vcc->entry (clip_vcc %p)\n", clip_vcc);
         return;
     }
     netif_tx_lock_bh(entry->neigh->dev);    /* block clip_start_xmit() */
@@ -109,10 +109,10 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
         error = neigh_update(entry->neigh, NULL, NUD_NONE,
                      NEIGH_UPDATE_F_ADMIN, 0);
         if (error)
-            pr_crit("neigh_update failed with %d\n", error);
+            pr_err("neigh_update failed with %d\n", error);
         goto out;
     }
-    pr_crit("ATMARP: failed (entry %p, vcc 0x%p)\n", entry, clip_vcc);
+    pr_err("ATMARP: failed (entry %p, vcc 0x%p)\n", entry, clip_vcc);
 out:
     netif_tx_unlock_bh(entry->neigh->dev);
 }


@@ -923,21 +923,23 @@ static int rx_queue_add_kobject(struct net_device *dev, int index)
     error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
                      "rx-%u", index);
     if (error)
-        return error;
+        goto err;
 
     dev_hold(queue->dev);
 
     if (dev->sysfs_rx_queue_group) {
         error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
-        if (error) {
-            kobject_put(kobj);
-            return error;
-        }
+        if (error)
+            goto err;
     }
 
     kobject_uevent(kobj, KOBJ_ADD);
 
     return error;
+
+err:
+    kobject_put(kobj);
+    return error;
 }
 #endif /* CONFIG_SYSFS */
 
@@ -1461,21 +1463,22 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
     error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
                      "tx-%u", index);
     if (error)
-        return error;
+        goto err;
 
     dev_hold(queue->dev);
 
 #ifdef CONFIG_BQL
     error = sysfs_create_group(kobj, &dql_group);
-    if (error) {
-        kobject_put(kobj);
-        return error;
-    }
+    if (error)
+        goto err;
 #endif
 
     kobject_uevent(kobj, KOBJ_ADD);
 
     return 0;
+
+err:
+    kobject_put(kobj);
+    return error;
 }
 #endif /* CONFIG_SYSFS */


@ -2195,6 +2195,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
if (tb[IFLA_VF_MAC]) { if (tb[IFLA_VF_MAC]) {
struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]); struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
if (ivm->vf >= INT_MAX)
return -EINVAL;
err = -EOPNOTSUPP; err = -EOPNOTSUPP;
if (ops->ndo_set_vf_mac) if (ops->ndo_set_vf_mac)
 			err = ops->ndo_set_vf_mac(dev, ivm->vf,
@@ -2206,6 +2208,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
 	if (tb[IFLA_VF_VLAN]) {
 		struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
+		if (ivv->vf >= INT_MAX)
+			return -EINVAL;
 		err = -EOPNOTSUPP;
 		if (ops->ndo_set_vf_vlan)
 			err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
@@ -2238,6 +2242,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
 		if (len == 0)
 			return -EINVAL;
+		if (ivvl[0]->vf >= INT_MAX)
+			return -EINVAL;
 		err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
 					   ivvl[0]->qos, ivvl[0]->vlan_proto);
 		if (err < 0)
@@ -2248,6 +2254,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
 		struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
 		struct ifla_vf_info ivf;
+		if (ivt->vf >= INT_MAX)
+			return -EINVAL;
 		err = -EOPNOTSUPP;
 		if (ops->ndo_get_vf_config)
 			err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
@@ -2266,6 +2274,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
 	if (tb[IFLA_VF_RATE]) {
 		struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
+		if (ivt->vf >= INT_MAX)
+			return -EINVAL;
 		err = -EOPNOTSUPP;
 		if (ops->ndo_set_vf_rate)
 			err = ops->ndo_set_vf_rate(dev, ivt->vf,
@@ -2278,6 +2288,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
 	if (tb[IFLA_VF_SPOOFCHK]) {
 		struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
+		if (ivs->vf >= INT_MAX)
+			return -EINVAL;
 		err = -EOPNOTSUPP;
 		if (ops->ndo_set_vf_spoofchk)
 			err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
@@ -2289,6 +2301,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
 	if (tb[IFLA_VF_LINK_STATE]) {
 		struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
+		if (ivl->vf >= INT_MAX)
+			return -EINVAL;
 		err = -EOPNOTSUPP;
 		if (ops->ndo_set_vf_link_state)
 			err = ops->ndo_set_vf_link_state(dev, ivl->vf,
@@ -2302,6 +2316,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
 		err = -EOPNOTSUPP;
 		ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
+		if (ivrssq_en->vf >= INT_MAX)
+			return -EINVAL;
 		if (ops->ndo_set_vf_rss_query_en)
 			err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
 							   ivrssq_en->setting);
@@ -2312,6 +2328,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
 	if (tb[IFLA_VF_TRUST]) {
 		struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);
+		if (ivt->vf >= INT_MAX)
+			return -EINVAL;
 		err = -EOPNOTSUPP;
 		if (ops->ndo_set_vf_trust)
 			err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
@@ -2322,15 +2340,18 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
 	if (tb[IFLA_VF_IB_NODE_GUID]) {
 		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);
+		if (ivt->vf >= INT_MAX)
+			return -EINVAL;
 		if (!ops->ndo_set_vf_guid)
 			return -EOPNOTSUPP;
 		return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
 	}
 	if (tb[IFLA_VF_IB_PORT_GUID]) {
 		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);
+		if (ivt->vf >= INT_MAX)
+			return -EINVAL;
 		if (!ops->ndo_set_vf_guid)
 			return -EOPNOTSUPP;
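
The repeated `vf >= INT_MAX` guards above all close the same hole: netlink carries the VF index as a `__u32`, while the `ndo_set_vf_*` driver callbacks take a plain `int`, so any value at or above INT_MAX turns negative in the implicit conversion and can underflow driver-side table lookups. A standalone sketch of that failure mode (not kernel code; fake_ndo_set_vf_trust is a made-up stand-in for a driver callback):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for a driver's ndo_set_vf_* hook, which takes
 * the VF index as a plain int. */
static int fake_ndo_set_vf_trust(int vf, int setting)
{
	if (vf < 0)		/* a negative index would underflow a VF table */
		return -22;	/* -EINVAL */
	printf("set vf %d trust=%d\n", vf, setting);
	return 0;
}

int main(void)
{
	uint32_t vf = 0x80000000u;	/* attacker-chosen netlink value */

	/* The implicit __u32 -> int conversion wraps to a negative. */
	printf("__u32 %u becomes int %d\n", (unsigned)vf, (int)vf);

	if (vf >= INT_MAX)	/* the guard do_setvfinfo() now applies */
		puts("rejected: -EINVAL");
	else
		fake_ndo_set_vf_trust((int)vf, 1);
	return 0;
}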


@@ -1037,7 +1037,7 @@ static struct ctl_table ipv4_net_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_fib_multipath_hash_policy,
 		.extra1		= SYSCTL_ZERO,
-		.extra2		= SYSCTL_ONE,
+		.extra2		= &two,
 	},
 #endif
 	{
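
The proc handler clamps writes to the [*extra1, *extra2] range, so widening extra2 from `SYSCTL_ONE` to `&two` is what lets the third multipath hash policy (value 2) be written at all. A minimal sketch of that kind of min/max clamp (illustrative names, not the kernel's handler):

#include <stdio.h>

/* Illustrative min/max clamp in the style of proc_dointvec_minmax();
 * not the kernel's implementation. */
static int range_check(int val, int min, int max)
{
	return (val < min || val > max) ? -1 /* -EINVAL */ : 0;
}

int main(void)
{
	int zero = 0, one = 1, two = 2;

	/* Old bounds [0, 1] reject a write of 2; [0, 2] accepts it. */
	printf("write 2, max &one: %d\n", range_check(2, zero, one));
	printf("write 2, max &two: %d\n", range_check(2, zero, two));
	return 0;
}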


@@ -1297,6 +1297,27 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
 #define UDP_SKB_IS_STATELESS 0x80000000
 
+/* all head states (dst, sk, nf conntrack) except skb extensions are
+ * cleared by udp_rcv().
+ *
+ * We need to preserve secpath, if present, to eventually process
+ * IP_CMSG_PASSSEC at recvmsg() time.
+ *
+ * Other extensions can be cleared.
+ */
+static bool udp_try_make_stateless(struct sk_buff *skb)
+{
+	if (!skb_has_extensions(skb))
+		return true;
+
+	if (!secpath_exists(skb)) {
+		skb_ext_reset(skb);
+		return true;
+	}
+
+	return false;
+}
+
 static void udp_set_dev_scratch(struct sk_buff *skb)
 {
 	struct udp_dev_scratch *scratch = udp_skb_scratch(skb);
@@ -1308,11 +1329,7 @@ static void udp_set_dev_scratch(struct sk_buff *skb)
 	scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
 	scratch->is_linear = !skb_is_nonlinear(skb);
 #endif
-	/* all head states execept sp (dst, sk, nf) are always cleared by
-	 * udp_rcv() and we need to preserve secpath, if present, to eventually
-	 * process IP_CMSG_PASSSEC at recvmsg() time
-	 */
-	if (likely(!skb_sec_path(skb)))
+	if (udp_try_make_stateless(skb))
 		scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
 }
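
The leak being fixed: the old test only looked for a secpath, so an skb carrying any other extension was still flagged UDP_SKB_IS_STATELESS and its extension references were never released. The new helper drops extensions explicitly when no secpath needs preserving, and refuses the stateless fast path otherwise. A standalone sketch of that decision logic with stubbed predicates (not kernel code):

#include <stdbool.h>
#include <stdio.h>

struct fake_skb {
	bool has_extensions;	/* stands in for skb_has_extensions() */
	bool has_secpath;	/* stands in for secpath_exists() */
};

static bool try_make_stateless(struct fake_skb *skb)
{
	if (!skb->has_extensions)
		return true;			/* nothing to preserve */
	if (!skb->has_secpath) {
		skb->has_extensions = false;	/* like skb_ext_reset() */
		return true;			/* dropped, now stateless */
	}
	return false;	/* keep secpath for IP_CMSG_PASSSEC at recvmsg() */
}

int main(void)
{
	struct fake_skb plain = { false, false };
	struct fake_skb ext_only = { true, false };
	struct fake_skb ipsec = { true, true };

	printf("plain: %d, ext-only: %d, ipsec: %d\n",
	       try_make_stateless(&plain),
	       try_make_stateless(&ext_only),
	       try_make_stateless(&ipsec));
	return 0;
}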


@@ -363,8 +363,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 		break;
 
 	case IPV6_TRANSPARENT:
-		if (valbool && !ns_capable(net->user_ns, CAP_NET_ADMIN) &&
-		    !ns_capable(net->user_ns, CAP_NET_RAW)) {
+		if (valbool && !ns_capable(net->user_ns, CAP_NET_RAW) &&
+		    !ns_capable(net->user_ns, CAP_NET_ADMIN)) {
 			retv = -EPERM;
 			break;
 		}
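
Both orders enforce the same policy, since either capability suffices, but ns_capable() has a side effect: a failed check can emit an LSM/audit denial. Checking CAP_NET_RAW first means a caller holding only NET_RAW no longer trips a spurious NET_ADMIN denial on every such setsockopt. A short-circuit sketch of why the order matters (the cap_check helper is illustrative, not a kernel API):

#include <stdbool.h>
#include <stdio.h>

static bool cap_check(const char *cap, bool held)
{
	if (!held)
		printf("audit: denied %s\n", cap);	/* noisy side effect */
	return held;
}

int main(void)
{
	bool has_net_raw = true, has_net_admin = false;

	/* Old order: logs a NET_ADMIN denial although access is granted. */
	if (!cap_check("CAP_NET_ADMIN", has_net_admin) &&
	    !cap_check("CAP_NET_RAW", has_net_raw))
		puts("EPERM");

	/* New order: NET_RAW succeeds first, nothing is logged. */
	if (!cap_check("CAP_NET_RAW", has_net_raw) &&
	    !cap_check("CAP_NET_ADMIN", has_net_admin))
		puts("EPERM");
	return 0;
}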


@@ -634,7 +634,7 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
 	 * Router Reachability Probe MUST be rate-limited
 	 * to no more than one per minute.
 	 */
-	if (fib6_nh->fib_nh_gw_family)
+	if (!fib6_nh->fib_nh_gw_family)
 		return;
 
 	nh_gw = &fib6_nh->fib_nh_gw6;


@@ -43,7 +43,7 @@ static struct tcf_pedit_key_ex *tcf_pedit_keys_ex_parse(struct nlattr *nla,
 	int err = -EINVAL;
 	int rem;
 
-	if (!nla || !n)
+	if (!nla)
 		return NULL;
 
 	keys_ex = kcalloc(n, sizeof(*k), GFP_KERNEL);
@@ -170,6 +170,10 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
 	}
 
 	parm = nla_data(pattr);
+	if (!parm->nkeys) {
+		NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed");
+		return -EINVAL;
+	}
 	ksize = parm->nkeys * sizeof(struct tc_pedit_key);
 	if (nla_len(pattr) < sizeof(*parm) + ksize) {
 		NL_SET_ERR_MSG_ATTR(extack, pattr, "Length of TCA_PEDIT_PARMS or TCA_PEDIT_PARMS_EX pedit attribute is invalid");
@@ -183,12 +187,6 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
 	index = parm->index;
 	err = tcf_idr_check_alloc(tn, &index, a, bind);
 	if (!err) {
-		if (!parm->nkeys) {
-			tcf_idr_cleanup(tn, index);
-			NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed");
-			ret = -EINVAL;
-			goto out_free;
-		}
 		ret = tcf_idr_create(tn, index, est, a,
 				     &act_pedit_ops, bind, false);
 		if (ret) {
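
Moving the `!parm->nkeys` test in front of tcf_idr_check_alloc() rejects the zero-key case before any index is reserved, deleting the error path that had to call tcf_idr_cleanup() and leaving the `!n` test in the key parser unreachable. A sketch of this validate-before-allocate pattern (illustrative, not the kernel code):

#include <stdio.h>
#include <stdlib.h>

struct slot { int index; };

static struct slot *idr_alloc_slot(int index)
{
	struct slot *s = malloc(sizeof(*s));

	if (s)
		s->index = index;
	return s;
}

static int init_action(int nkeys, int index)
{
	struct slot *s;

	/* Validate first: nothing to unwind if the request is bogus. */
	if (!nkeys) {
		fprintf(stderr, "Pedit requires keys to be passed\n");
		return -1;	/* -EINVAL */
	}

	s = idr_alloc_slot(index);
	if (!s)
		return -2;	/* -ENOMEM */
	/* ... set up keys; no error path has to release the slot ... */
	free(s);
	return 0;
}

int main(void)
{
	printf("nkeys=0: %d\n", init_action(0, 1));
	printf("nkeys=2: %d\n", init_action(2, 1));
	return 0;
}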


@@ -135,6 +135,10 @@ static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst,
 		if (opt_len < 0)
 			return opt_len;
 		opts_len += opt_len;
+		if (opts_len > IP_TUNNEL_OPTS_MAX) {
+			NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
+			return -EINVAL;
+		}
 		if (dst) {
 			dst_len -= opt_len;
 			dst += opt_len;
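
The check sits inside the per-option loop and tests the running total before anything more is copied, so a sequence of individually valid options can no longer push the accumulated length past what the code downstream can describe. A standalone sketch of the running-total bound (the 255-byte cap stands in for IP_TUNNEL_OPTS_MAX and is illustrative):

#include <stdio.h>

#define OPTS_MAX 255	/* stand-in for IP_TUNNEL_OPTS_MAX */

static int copy_opts(const int *opt_lens, int n)
{
	int opts_len = 0, i;

	for (i = 0; i < n; i++) {
		opts_len += opt_lens[i];
		/* Bound the running total before copying anything more. */
		if (opts_len > OPTS_MAX) {
			fprintf(stderr, "Tunnel options exceeds max size\n");
			return -1;	/* -EINVAL */
		}
	}
	return opts_len;
}

int main(void)
{
	int ok[] = { 100, 100 }, bad[] = { 200, 200 };

	printf("ok:  %d\n", copy_opts(ok, 2));
	printf("bad: %d\n", copy_opts(bad, 2));
	return 0;
}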


@@ -922,7 +922,7 @@ static int taprio_parse_mqprio_opt(struct net_device *dev,
 	}
 
 	/* Verify priority mapping uses valid tcs */
-	for (i = 0; i < TC_BITMASK + 1; i++) {
+	for (i = 0; i <= TC_BITMASK; i++) {
 		if (qopt->prio_tc_map[i] >= qopt->num_tc) {
 			NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping");
 			return -EINVAL;
@@ -1347,6 +1347,26 @@ static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
 	return err;
 }
 
+static int taprio_mqprio_cmp(const struct net_device *dev,
+			     const struct tc_mqprio_qopt *mqprio)
+{
+	int i;
+
+	if (!mqprio || mqprio->num_tc != dev->num_tc)
+		return -1;
+
+	for (i = 0; i < mqprio->num_tc; i++)
+		if (dev->tc_to_txq[i].count != mqprio->count[i] ||
+		    dev->tc_to_txq[i].offset != mqprio->offset[i])
+			return -1;
+
+	for (i = 0; i <= TC_BITMASK; i++)
+		if (dev->prio_tc_map[i] != mqprio->prio_tc_map[i])
+			return -1;
+
+	return 0;
+}
+
 static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 			 struct netlink_ext_ack *extack)
 {
@@ -1398,6 +1418,10 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 	admin = rcu_dereference(q->admin_sched);
 	rcu_read_unlock();
 
+	/* no changes - no new mqprio settings */
+	if (!taprio_mqprio_cmp(dev, mqprio))
+		mqprio = NULL;
+
 	if (mqprio && (oper || admin)) {
 		NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported");
 		err = -ENOTSUPP;
@@ -1455,7 +1479,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 					    mqprio->offset[i]);
 
 		/* Always use supplied priority mappings */
-		for (i = 0; i < TC_BITMASK + 1; i++)
+		for (i = 0; i <= TC_BITMASK; i++)
 			netdev_set_prio_tc_map(dev, i,
 					       mqprio->prio_tc_map[i]);
 	}
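
taprio_mqprio_cmp() reports 0 only when the request matches the device's current tc count, every per-tc queue count and offset, and the full priority map; taprio_change() then treats such a request as carrying no mqprio change at all, so re-sending an identical configuration to a running schedule no longer fails with -ENOTSUPP. A standalone sketch of that memberwise comparison (simplified stand-in structures, not the kernel's):

#include <stdio.h>

#define TC_BITMASK 15	/* as in the kernel's pkt_sched.h */

struct txq_range { int count, offset; };

struct fake_dev {
	int num_tc;
	struct txq_range tc_to_txq[4];
	int prio_tc_map[TC_BITMASK + 1];
};

struct fake_mqprio {
	int num_tc;
	int count[4], offset[4];
	int prio_tc_map[TC_BITMASK + 1];
};

/* 0 means "identical to the current setup", so the caller can drop
 * the request instead of rejecting it. */
static int mqprio_cmp(const struct fake_dev *dev,
		      const struct fake_mqprio *m)
{
	int i;

	if (!m || m->num_tc != dev->num_tc)
		return -1;
	for (i = 0; i < m->num_tc; i++)
		if (dev->tc_to_txq[i].count != m->count[i] ||
		    dev->tc_to_txq[i].offset != m->offset[i])
			return -1;
	for (i = 0; i <= TC_BITMASK; i++)
		if (dev->prio_tc_map[i] != m->prio_tc_map[i])
			return -1;
	return 0;
}

int main(void)
{
	struct fake_dev dev = { 2, { { 1, 0 }, { 1, 1 } }, { 0 } };
	struct fake_mqprio same = { 2, { 1, 1 }, { 0, 1 }, { 0 } };

	printf("identical settings -> %d (treated as no change)\n",
	       mqprio_cmp(&dev, &same));
	same.prio_tc_map[3] = 1;
	printf("changed prio map   -> %d (real change)\n",
	       mqprio_cmp(&dev, &same));
	return 0;
}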


@@ -908,6 +908,7 @@ static int __init tls_register(void)
 {
 	tls_sw_proto_ops = inet_stream_ops;
 	tls_sw_proto_ops.splice_read = tls_sw_splice_read;
+	tls_sw_proto_ops.sendpage_locked = tls_sw_sendpage_locked,
 
 	tls_device_init();
 	tcp_register_ulp(&tcp_tls_ulp_ops);
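
tls_sw_proto_ops starts as a copy of inet_stream_ops with only the TLS-specific entry points overridden; without this assignment, callers taking the locked sendpage path never reach tls_sw_do_sendpage(). (The trailing comma on the added line, as merged, still compiles: the comma operator folds the assignment and the following call into one expression statement.) A sketch of the copy-then-override ops-table pattern (illustrative types, not the kernel's):

#include <stdio.h>

struct fake_proto_ops {
	int (*splice_read)(void);
	int (*sendpage_locked)(void);
};

static int generic_sendpage_locked(void)   { return 0; }
static int tls_aware_sendpage_locked(void) { return 1; }

int main(void)
{
	struct fake_proto_ops inet_ops = {
		.sendpage_locked = generic_sendpage_locked,
	};
	struct fake_proto_ops tls_ops = inet_ops;	/* inherit everything */

	tls_ops.sendpage_locked = tls_aware_sendpage_locked;	/* override */
	printf("TLS-aware path taken: %d\n", tls_ops.sendpage_locked());
	return 0;
}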


@@ -1204,6 +1204,17 @@ static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
 	return copied ? copied : ret;
 }
 
+int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
+			   int offset, size_t size, int flags)
+{
+	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
+		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
+		      MSG_NO_SHARED_FRAGS))
+		return -ENOTSUPP;
+
+	return tls_sw_do_sendpage(sk, page, offset, size, flags);
+}
+
 int tls_sw_sendpage(struct sock *sk, struct page *page,
 		    int offset, size_t size, int flags)
 {
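
The new entry point differs from tls_sw_sendpage() in being callable with the socket lock already held; it gates on a whitelist where any flag bit outside the supported set fails fast with -ENOTSUPP before touching the record machinery. A standalone sketch of that mask test (flag values here are illustrative, not the uapi MSG_* ones):

#include <stdio.h>

/* Illustrative flag bits; the real MSG_* values live in the uapi
 * headers and differ from these. */
#define F_MORE		0x1
#define F_DONTWAIT	0x2
#define F_NOSIGNAL	0x4
#define F_OOB		0x8	/* not in the supported set */

#define SUPPORTED	(F_MORE | F_DONTWAIT | F_NOSIGNAL)

static int check_flags(int flags)
{
	/* Any bit outside SUPPORTED rejects the whole call. */
	if (flags & ~SUPPORTED)
		return -524;	/* -ENOTSUPP */
	return 0;
}

int main(void)
{
	printf("F_MORE         -> %d\n", check_flags(F_MORE));
	printf("F_MORE | F_OOB -> %d\n", check_flags(F_MORE | F_OOB));
	return 0;
}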