This is the 5.4.138 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmEKa7oACgkQONu9yGCS
 aT4pag//XpTqY8Qv8aaYd4p88jw2rX/gV6/J3rRLFlbpWL8smmCFs83nGNo3xnJ6
 Avc8Bt/HhIPEdFbt12Og2ZlX/6zWMpa9YfaWOKNtafFUjjS+Lol+k9E7P7pOWobC
 N2Diq1PCLoSgbi0V/4bJrVyty8Y85ENoCXKNgpSyBAUqsTl3ToVNqaLAt+Z7r5W3
 JUN/khdQ8Ve/lcUUExL3ahqsjKSciDZZheC2DMjkvu0+8NXjkAcwINPSoT9oloOf
 dBiMC/iE7/CJbMdWGe/dTmjeoQfBRrwqYefm/FvDmLfriiADT0HxD6Nkda/03KgW
 eSI7dGw7jkg16KaYnSWnUZba9pr+/Dq8GmsUjKRZa+CbVmH8FBBBDuiyG4lOYB/t
 U4ZjeUR0Kaue3YTVb9WavaDLPDFwTgW7OFbdmmnPM98YDSeZwaHQKgT5Kw7M+VqD
 4i0eMhnPr5FTodQJ/uMMvKFJ9uOeoU8WjGFQeNZGa15m6fLCwDSUoVNSMwVJbHKC
 yxSQ/uEVkgapfdXnb5G8j5dzGXuvuQYyoNF5pmzJpSuTLuN646ewP+crNR33CqIT
 FRG+tEoTAqMLt6n6s5pd9G0Xc7MNTSzy4G5ijuFwiwqdog/ZtqET6mP+bRe2bgb2
 OnDPXkcdMPuNiKp341hDDDcpmJfPwS8W+hfciG3dx55Um7Ajv/A=
 =y8rJ
 -----END PGP SIGNATURE-----
gpgsig -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEdQaENiSDAlGTDEbB7G51OISzHs0FAmEL2VEACgkQ7G51OISz
 Hs1x5Q//XTVTZ1jW7VIqvkd7B3h+FUbO/GDFufvrUzokFH731ZRryHcmszbXg/Yh
 B+YSGyRA/hnikXppDgPfn2zzSzlkOWOcXk94RgdcGTMOFwZ6T+8NayC52kAlXgE4
 qQO4fqWJeDLKxE9TCpk2aMOml6HDyvyv0lEuQle2dkSSMZI4FLfCmEb6398jvi0K
 iF/JtLHTKXmEcFfwQoqqL7gGWQhAo76l+NGw91qK5whlEPEj2J8XKGwt8/0HqYLD
 A397ssNsbtWckpgn40DuxKSh2trztZcjRcjzG902wJLm7Cr3P8ns2HL+L/McN+uj
 hiKuyY7A84ZXDbbr8SjKAjeBS6zXKddRij819uNXl/xVYBIiX89XMg1tfnbGCSy5
 GR6WQXQx1J8eMXCk8wLHOxwRAlwlCoZdq3sCcW2xpaxD8N+B0ZymszxHB/TquHj6
 WuJ7HW79FNjWnK5QFoT0wKMjrotsDnn+P0MMzuR9pNs9MleLrwLNaZCKYWyRsbeX
 36OreETjhezPHv4TsfaeKK5vgsSOE6TFiclz0JzcFdxtPhMLR3bmerig8GZ22BuY
 j3Ib60Z1iO0/ZcZB1cFb6ZQO/HofCQuAWuZI06LRIGzU9uM9j7PKnL+lF5N/xm9A
 cZgbtmDKVxGQLCFOFr+vOTFiwvyM7y9n9QgHAJ0L7StMzsYfgEs=
 =q+oi
 -----END PGP SIGNATURE-----

Merge tag 'v5.4.138' into 5.4-2.3.x-imx

This is the 5.4.138 stable release

Signed-off-by: Andrey Zhizhikin <andrey.zhizhikin@leica-geosystems.com>
commit ac38cd4203
Author: Andrey Zhizhikin <andrey.zhizhikin@leica-geosystems.com>
Date: 2021-08-05 12:27:59 +00:00
41 changed files with 374 additions and 139 deletions

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
-SUBLEVEL = 137
+SUBLEVEL = 138
EXTRAVERSION =
NAME = Kleptomaniac Octopus

View File

@ -75,7 +75,7 @@
#include "../../../../drivers/pci/pci.h"
DEFINE_STATIC_KEY_FALSE(shared_processor);
-EXPORT_SYMBOL_GPL(shared_processor);
+EXPORT_SYMBOL(shared_processor);
int CMO_PrPSP = -1;
int CMO_SecPSP = -1;

View File

@ -4,6 +4,8 @@
+#include <asm/ldt.h>
+struct task_struct;
/* misc architecture specific prototypes */
void syscall_init(void);

View File

@ -91,7 +91,7 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
{
ioapic->rtc_status.pending_eoi = 0;
-bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID);
+bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID + 1);
}
static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);

View File

@ -43,13 +43,13 @@ struct kvm_vcpu;
struct dest_map {
/* vcpu bitmap where IRQ has been sent */
-DECLARE_BITMAP(map, KVM_MAX_VCPU_ID);
+DECLARE_BITMAP(map, KVM_MAX_VCPU_ID + 1);
/*
* Vector sent to a given vcpu, only valid when
* the vcpu's bit in map is set
*/
-u8 vectors[KVM_MAX_VCPU_ID];
+u8 vectors[KVM_MAX_VCPU_ID + 1];
};
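
The off-by-one being fixed is that KVM_MAX_VCPU_ID is itself a valid vCPU ID, so a map or array sized with the bare constant cannot be indexed by the highest ID. A minimal userspace illustration (the MAX_VCPU_ID value is a stand-in, not the kernel's):

#include <stdio.h>

#define MAX_VCPU_ID 1023	/* stand-in for KVM_MAX_VCPU_ID; value illustrative */

int main(void)
{
	unsigned char vectors_old[MAX_VCPU_ID];		/* valid indices 0..1022 */
	unsigned char vectors_new[MAX_VCPU_ID + 1];	/* valid indices 0..1023 */
	int id = MAX_VCPU_ID;				/* an ID the ioapic code can receive */

	vectors_new[id] = 0x20;				/* fine */
	/* vectors_old[id] = 0x20; would write one byte past the end: the bug */
	printf("old size %zu, new size %zu, highest valid ID %d\n",
	       sizeof(vectors_old), sizeof(vectors_new), id);
	return 0;
}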

View File

@ -430,13 +430,6 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
}
}
-static bool irq_is_legacy(struct acpi_resource_irq *irq)
-{
-return irq->triggering == ACPI_EDGE_SENSITIVE &&
-irq->polarity == ACPI_ACTIVE_HIGH &&
-irq->shareable == ACPI_EXCLUSIVE;
-}
/**
* acpi_dev_resource_interrupt - Extract ACPI interrupt resource information.
* @ares: Input ACPI resource object.
@ -475,7 +468,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
}
acpi_dev_get_irqresource(res, irq->interrupts[index],
irq->triggering, irq->polarity,
-irq->shareable, irq_is_legacy(irq));
+irq->shareable, true);
break;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
ext_irq = &ares->data.extended_irq;

View File

@ -3829,7 +3829,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
wacom_wac->shared->touch->product == 0xF6) {
input_dev->evbit[0] |= BIT_MASK(EV_SW);
__set_bit(SW_MUTE_DEVICE, input_dev->swbit);
-wacom_wac->shared->has_mute_touch_switch = true;
+wacom_wac->has_mute_touch_switch = true;
}
/* fall through */

View File

@ -218,7 +218,7 @@ static int hi3110_spi_trans(struct spi_device *spi, int len)
return ret;
}
-static u8 hi3110_cmd(struct spi_device *spi, u8 command)
+static int hi3110_cmd(struct spi_device *spi, u8 command)
{
struct hi3110_priv *priv = spi_get_drvdata(spi);

View File

@ -255,6 +255,8 @@ struct ems_usb {
unsigned int free_slots; /* remember number of available slots */
struct ems_cpc_msg active_params; /* active controller parameters */
void *rxbuf[MAX_RX_URBS];
+dma_addr_t rxbuf_dma[MAX_RX_URBS];
};
static void ems_usb_read_interrupt_callback(struct urb *urb)
@ -587,6 +589,7 @@ static int ems_usb_start(struct ems_usb *dev)
for (i = 0; i < MAX_RX_URBS; i++) {
struct urb *urb = NULL;
u8 *buf = NULL;
+dma_addr_t buf_dma;
/* create a URB, and a buffer for it */
urb = usb_alloc_urb(0, GFP_KERNEL);
@ -596,7 +599,7 @@ static int ems_usb_start(struct ems_usb *dev)
}
buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
-&urb->transfer_dma);
+&buf_dma);
if (!buf) {
netdev_err(netdev, "No memory left for USB buffer\n");
usb_free_urb(urb);
@ -604,6 +607,8 @@ static int ems_usb_start(struct ems_usb *dev)
break;
}
+urb->transfer_dma = buf_dma;
usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 2),
buf, RX_BUFFER_SIZE,
ems_usb_read_bulk_callback, dev);
@ -619,6 +624,9 @@ static int ems_usb_start(struct ems_usb *dev)
break;
}
dev->rxbuf[i] = buf;
+dev->rxbuf_dma[i] = buf_dma;
/* Drop reference, USB core will take care of freeing it */
usb_free_urb(urb);
}
@ -684,6 +692,10 @@ static void unlink_all_urbs(struct ems_usb *dev)
usb_kill_anchored_urbs(&dev->rx_submitted);
+for (i = 0; i < MAX_RX_URBS; ++i)
+usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
+dev->rxbuf[i], dev->rxbuf_dma[i]);
usb_kill_anchored_urbs(&dev->tx_submitted);
atomic_set(&dev->active_tx_urbs, 0);
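
The same leak pattern recurs in the esd_usb2, mcba_usb, and usb_8dev hunks below: RX buffers come from usb_alloc_coherent(), but without recording each buffer's DMA handle there is nothing to pass to usb_free_coherent() at teardown, since the URBs that held transfer_dma are long gone. A condensed sketch of the corrected lifecycle, modeled on the ems_usb code above (kernel context assumed, error unwinding trimmed):

static int rx_urbs_setup(struct ems_usb *dev)
{
	int i;

	for (i = 0; i < MAX_RX_URBS; i++) {
		struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
		dma_addr_t buf_dma;
		u8 *buf;

		if (!urb)
			return -ENOMEM;
		buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE,
					 GFP_KERNEL, &buf_dma);
		if (!buf) {
			usb_free_urb(urb);
			return -ENOMEM;
		}
		urb->transfer_dma = buf_dma; /* paired with URB_NO_TRANSFER_DMA_MAP */
		/* ... usb_fill_bulk_urb() and usb_submit_urb() as in the diff ... */
		dev->rxbuf[i] = buf;         /* remember both halves ...          */
		dev->rxbuf_dma[i] = buf_dma; /* ... so teardown can free them     */
		usb_free_urb(urb);           /* the URB itself is refcounted      */
	}
	return 0;
}

static void rx_urbs_teardown(struct ems_usb *dev)
{
	int i;

	for (i = 0; i < MAX_RX_URBS; i++)
		usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
				  dev->rxbuf[i], dev->rxbuf_dma[i]);
}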

View File

@ -195,6 +195,8 @@ struct esd_usb2 {
int net_count;
u32 version;
int rxinitdone;
+void *rxbuf[MAX_RX_URBS];
+dma_addr_t rxbuf_dma[MAX_RX_URBS];
};
struct esd_usb2_net_priv {
@ -544,6 +546,7 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
for (i = 0; i < MAX_RX_URBS; i++) {
struct urb *urb = NULL;
u8 *buf = NULL;
+dma_addr_t buf_dma;
/* create a URB, and a buffer for it */
urb = usb_alloc_urb(0, GFP_KERNEL);
@ -553,7 +556,7 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
}
buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
-&urb->transfer_dma);
+&buf_dma);
if (!buf) {
dev_warn(dev->udev->dev.parent,
"No memory left for USB buffer\n");
@ -561,6 +564,8 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
goto freeurb;
}
+urb->transfer_dma = buf_dma;
usb_fill_bulk_urb(urb, dev->udev,
usb_rcvbulkpipe(dev->udev, 1),
buf, RX_BUFFER_SIZE,
@ -573,8 +578,12 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
usb_unanchor_urb(urb);
usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
urb->transfer_dma);
+goto freeurb;
}
+dev->rxbuf[i] = buf;
+dev->rxbuf_dma[i] = buf_dma;
+freeurb:
/* Drop reference, USB core will take care of freeing it */
usb_free_urb(urb);
@ -662,6 +671,11 @@ static void unlink_all_urbs(struct esd_usb2 *dev)
int i, j;
usb_kill_anchored_urbs(&dev->rx_submitted);
+for (i = 0; i < MAX_RX_URBS; ++i)
+usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
+dev->rxbuf[i], dev->rxbuf_dma[i]);
for (i = 0; i < dev->net_count; i++) {
priv = dev->nets[i];
if (priv) {

View File

@ -653,6 +653,8 @@ static int mcba_usb_start(struct mcba_priv *priv)
break;
}
+urb->transfer_dma = buf_dma;
usb_fill_bulk_urb(urb, priv->udev,
usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_IN),
buf, MCBA_USB_RX_BUFF_SIZE,

View File

@ -137,7 +137,8 @@ struct usb_8dev_priv {
u8 *cmd_msg_buffer;
struct mutex usb_8dev_cmd_lock;
void *rxbuf[MAX_RX_URBS];
+dma_addr_t rxbuf_dma[MAX_RX_URBS];
};
/* tx frame */
@ -733,6 +734,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
for (i = 0; i < MAX_RX_URBS; i++) {
struct urb *urb = NULL;
u8 *buf;
+dma_addr_t buf_dma;
/* create a URB, and a buffer for it */
urb = usb_alloc_urb(0, GFP_KERNEL);
@ -742,7 +744,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
}
buf = usb_alloc_coherent(priv->udev, RX_BUFFER_SIZE, GFP_KERNEL,
-&urb->transfer_dma);
+&buf_dma);
if (!buf) {
netdev_err(netdev, "No memory left for USB buffer\n");
usb_free_urb(urb);
@ -750,6 +752,8 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
break;
}
+urb->transfer_dma = buf_dma;
usb_fill_bulk_urb(urb, priv->udev,
usb_rcvbulkpipe(priv->udev,
USB_8DEV_ENDP_DATA_RX),
@ -767,6 +771,9 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
break;
}
priv->rxbuf[i] = buf;
+priv->rxbuf_dma[i] = buf_dma;
/* Drop reference, USB core will take care of freeing it */
usb_free_urb(urb);
}
@ -836,6 +843,10 @@ static void unlink_all_urbs(struct usb_8dev_priv *priv)
usb_kill_anchored_urbs(&priv->rx_submitted);
+for (i = 0; i < MAX_RX_URBS; ++i)
+usb_free_coherent(priv->udev, RX_BUFFER_SIZE,
+priv->rxbuf[i], priv->rxbuf_dma[i]);
usb_kill_anchored_urbs(&priv->tx_submitted);
atomic_set(&priv->active_tx_urbs, 0);

View File

@ -367,7 +367,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
void __iomem *ioaddr;
-i = pci_enable_device(pdev);
+i = pcim_enable_device(pdev);
if (i) return i;
pci_set_master(pdev);
@ -389,7 +389,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
if (!ioaddr)
-goto err_out_free_res;
+goto err_out_netdev;
for (i = 0; i < 3; i++)
((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));
@ -468,8 +468,6 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
err_out_cleardev:
pci_iounmap(pdev, ioaddr);
-err_out_free_res:
-pci_release_regions(pdev);
err_out_netdev:
free_netdev (dev);
return -ENODEV;
@ -1535,7 +1533,6 @@ static void w840_remove1(struct pci_dev *pdev)
if (dev) {
struct netdev_private *np = netdev_priv(dev);
unregister_netdev(dev);
-pci_release_regions(pdev);
pci_iounmap(pdev, np->base_addr);
free_netdev(dev);
}
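
The move from pci_enable_device() to pcim_enable_device() is what makes dropping the pci_release_regions() calls safe: the managed (devres) variant marks the device so that subsequently requested PCI regions are released automatically on probe failure or driver detach. The sis900 hunk further down applies the same conversion. A hedged skeleton of the managed probe shape (hypothetical driver names, not winbond-840 specifics):

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	void __iomem *ioaddr;
	int err;

	err = pcim_enable_device(pdev);	/* devres: auto-disable and auto-release */
	if (err)
		return err;
	pci_set_master(pdev);

	err = pci_request_regions(pdev, "demo");	/* managed after pcim_enable_device() */
	if (err)
		return err;	/* no goto chain needed, devres unwinds */

	ioaddr = pci_iomap(pdev, 0, 0);
	if (!ioaddr)
		return -ENOMEM;	/* regions are still released automatically */
	/* ... register the netdev; unmap ioaddr manually on later failures,
	 * since plain pci_iomap() is not devres-managed ... */
	return 0;
}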

View File

@ -977,7 +977,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
default:
/* if we got here and link is up something bad is afoot */
netdev_info(netdev,
"WARNING: Link is up but PHY type 0x%x is not recognized.\n",
"WARNING: Link is up but PHY type 0x%x is not recognized, or incorrect cable is in use\n",
hw_link_info->phy_type);
}
@ -5087,6 +5087,10 @@ flags_complete:
dev_warn(&pf->pdev->dev,
"Device configuration forbids SW from starting the LLDP agent.\n");
return -EINVAL;
+case I40E_AQ_RC_EAGAIN:
+dev_warn(&pf->pdev->dev,
+"Stop FW LLDP agent command is still being processed, please try again in a second.\n");
+return -EBUSY;
default:
dev_warn(&pf->pdev->dev,
"Starting FW LLDP agent failed: error: %s, %s\n",

View File

@ -4403,11 +4403,10 @@ int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
}
/**
-* i40e_vsi_control_tx - Start or stop a VSI's rings
+* i40e_vsi_enable_tx - Start a VSI's rings
* @vsi: the VSI being configured
-* @enable: start or stop the rings
**/
-static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
+static int i40e_vsi_enable_tx(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
int i, pf_q, ret = 0;
@ -4416,7 +4415,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
ret = i40e_control_wait_tx_q(vsi->seid, pf,
pf_q,
-false /*is xdp*/, enable);
+false /*is xdp*/, true);
if (ret)
break;
@ -4425,7 +4424,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
ret = i40e_control_wait_tx_q(vsi->seid, pf,
pf_q + vsi->alloc_queue_pairs,
-true /*is xdp*/, enable);
+true /*is xdp*/, true);
if (ret)
break;
}
@ -4523,32 +4522,25 @@ int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
}
/**
-* i40e_vsi_control_rx - Start or stop a VSI's rings
+* i40e_vsi_enable_rx - Start a VSI's rings
* @vsi: the VSI being configured
-* @enable: start or stop the rings
**/
-static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
+static int i40e_vsi_enable_rx(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
int i, pf_q, ret = 0;
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
-ret = i40e_control_wait_rx_q(pf, pf_q, enable);
+ret = i40e_control_wait_rx_q(pf, pf_q, true);
if (ret) {
dev_info(&pf->pdev->dev,
-"VSI seid %d Rx ring %d %sable timeout\n",
-vsi->seid, pf_q, (enable ? "en" : "dis"));
+"VSI seid %d Rx ring %d enable timeout\n",
+vsi->seid, pf_q);
break;
}
}
-/* Due to HW errata, on Rx disable only, the register can indicate done
-* before it really is. Needs 50ms to be sure
-*/
-if (!enable)
-mdelay(50);
return ret;
}
@ -4561,29 +4553,47 @@ int i40e_vsi_start_rings(struct i40e_vsi *vsi)
int ret = 0;
/* do rx first for enable and last for disable */
-ret = i40e_vsi_control_rx(vsi, true);
+ret = i40e_vsi_enable_rx(vsi);
if (ret)
return ret;
-ret = i40e_vsi_control_tx(vsi, true);
+ret = i40e_vsi_enable_tx(vsi);
return ret;
}
+#define I40E_DISABLE_TX_GAP_MSEC 50
/**
* i40e_vsi_stop_rings - Stop a VSI's rings
* @vsi: the VSI being configured
**/
void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
{
+struct i40e_pf *pf = vsi->back;
+int pf_q, err, q_end;
/* When port TX is suspended, don't wait */
if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
return i40e_vsi_stop_rings_no_wait(vsi);
-/* do rx first for enable and last for disable
-* Ignore return value, we need to shutdown whatever we can
-*/
-i40e_vsi_control_tx(vsi, false);
-i40e_vsi_control_rx(vsi, false);
+q_end = vsi->base_queue + vsi->num_queue_pairs;
+for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
+i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false);
+for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) {
+err = i40e_control_wait_rx_q(pf, pf_q, false);
+if (err)
+dev_info(&pf->pdev->dev,
+"VSI seid %d Rx ring %d disable timeout\n",
+vsi->seid, pf_q);
+}
+msleep(I40E_DISABLE_TX_GAP_MSEC);
+pf_q = vsi->base_queue;
+for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
+wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0);
i40e_vsi_wait_queues_disabled(vsi);
}
/**
@ -6868,6 +6878,8 @@ static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
}
if (vsi->num_queue_pairs <
(mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
+dev_err(&vsi->back->pdev->dev,
+"Failed to create traffic channel, insufficient number of queues.\n");
return -EINVAL;
}
if (sum_max_rate > i40e_get_link_speed(vsi)) {
@ -12883,6 +12895,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_poll_controller = i40e_netpoll,
#endif
.ndo_setup_tc = __i40e_setup_tc,
+.ndo_select_queue = i40e_lan_select_queue,
.ndo_set_features = i40e_set_features,
.ndo_set_vf_mac = i40e_ndo_set_vf_mac,
.ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,

View File

@ -3521,6 +3521,56 @@ dma_error:
return -1;
}
+static u16 i40e_swdcb_skb_tx_hash(struct net_device *dev,
+const struct sk_buff *skb,
+u16 num_tx_queues)
+{
+u32 jhash_initval_salt = 0xd631614b;
+u32 hash;
+if (skb->sk && skb->sk->sk_hash)
+hash = skb->sk->sk_hash;
+else
+hash = (__force u16)skb->protocol ^ skb->hash;
+hash = jhash_1word(hash, jhash_initval_salt);
+return (u16)(((u64)hash * num_tx_queues) >> 32);
+}
+u16 i40e_lan_select_queue(struct net_device *netdev,
+struct sk_buff *skb,
+struct net_device __always_unused *sb_dev)
+{
+struct i40e_netdev_priv *np = netdev_priv(netdev);
+struct i40e_vsi *vsi = np->vsi;
+struct i40e_hw *hw;
+u16 qoffset;
+u16 qcount;
+u8 tclass;
+u16 hash;
+u8 prio;
+/* is DCB enabled at all? */
+if (vsi->tc_config.numtc == 1)
+return i40e_swdcb_skb_tx_hash(netdev, skb,
+netdev->real_num_tx_queues);
+prio = skb->priority;
+hw = &vsi->back->hw;
+tclass = hw->local_dcbx_config.etscfg.prioritytable[prio];
+/* sanity check */
+if (unlikely(!(vsi->tc_config.enabled_tc & BIT(tclass))))
+tclass = 0;
+/* select a queue assigned for the given TC */
+qcount = vsi->tc_config.tc_info[tclass].qcount;
+hash = i40e_swdcb_skb_tx_hash(netdev, skb, qcount);
+qoffset = vsi->tc_config.tc_info[tclass].qoffset;
+return qoffset + hash;
+}
/**
* i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
* @xdp: data to transmit
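
The select_queue helper above reduces a 32-bit hash to the range [0, num_tx_queues) with a multiply-and-shift, ((u64)hash * n) >> 32, which scales the hash linearly instead of taking a modulo; uniform hashes stay uniform and no division is needed. A standalone check of that range-reduction property (plain C; the kernel's jhash mixing step is omitted as irrelevant to the property):

#include <stdio.h>
#include <stdint.h>

/* Map a 32-bit hash uniformly onto [0, n) without a modulo. */
static uint16_t scale_hash(uint32_t hash, uint16_t n)
{
	return (uint16_t)(((uint64_t)hash * n) >> 32);
}

int main(void)
{
	uint16_t n = 48;		/* e.g. a TC's queue count */
	uint32_t buckets[64] = {0};	/* oversized on purpose */
	uint32_t h;

	for (h = 0; h < 0xFFFF0000u; h += 0x10001u)	/* coarse sweep of the hash space */
		buckets[scale_hash(h, n)]++;

	for (int i = 0; i < n; i++)
		printf("queue %2d: %u hits\n", i, (unsigned)buckets[i]);
	printf("bucket %u (must stay 0): %u\n", (unsigned)n, (unsigned)buckets[n]);
	return 0;
}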

View File

@ -481,6 +481,8 @@ static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+u16 i40e_lan_select_queue(struct net_device *netdev, struct sk_buff *skb,
+struct net_device *sb_dev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);

View File

@ -3526,6 +3526,7 @@ slave_start:
if (!SRIOV_VALID_STATE(dev->flags)) {
mlx4_err(dev, "Invalid SRIOV state\n");
+err = -EINVAL;
goto err_close;
}
}

View File

@ -444,12 +444,32 @@ static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
static
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
+struct mlx5_core_dev *mdev;
struct net_device *netdev;
struct mlx5e_priv *priv;
-netdev = __dev_get_by_index(net, ifindex);
+netdev = dev_get_by_index(net, ifindex);
if (!netdev)
return ERR_PTR(-ENODEV);
priv = netdev_priv(netdev);
-return priv->mdev;
+mdev = priv->mdev;
+dev_put(netdev);
+/* Mirred tc action holds a refcount on the ifindex net_device (see
+* net/sched/act_mirred.c:tcf_mirred_get_dev). So, it's okay to continue using mdev
+* after dev_put(netdev), while we're in the context of adding a tc flow.
+*
+* The mdev pointer corresponds to the peer/out net_device of a hairpin. It is then
+* stored in a hairpin object, which exists until all flows, that refer to it, get
+* removed.
+*
+* On the other hand, after a hairpin object has been created, the peer net_device may
+* be removed/unbound while there are still some hairpin flows that are using it. This
+* case is handled by mlx5e_tc_hairpin_update_dead_peer, which is hooked to
+* NETDEV_UNREGISTER event of the peer net_device.
+*/
+return mdev;
}
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
@ -648,6 +668,10 @@ mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params
func_mdev = priv->mdev;
peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
+if (IS_ERR(peer_mdev)) {
+err = PTR_ERR(peer_mdev);
+goto create_pair_err;
+}
pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
if (IS_ERR(pair)) {
@ -786,6 +810,11 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
int err;
peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
+if (IS_ERR(peer_mdev)) {
+NL_SET_ERR_MSG_MOD(extack, "invalid ifindex of mirred device");
+return PTR_ERR(peer_mdev);
+}
if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
return -EOPNOTSUPP;

View File

@ -968,17 +968,19 @@ static int connect_fwd_rules(struct mlx5_core_dev *dev,
static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
struct fs_prio *prio)
{
-struct mlx5_flow_table *next_ft;
+struct mlx5_flow_table *next_ft, *first_ft;
int err = 0;
/* Connect_prev_fts and update_root_ft_create are mutually exclusive */
-if (list_empty(&prio->node.children)) {
+first_ft = list_first_entry_or_null(&prio->node.children,
+struct mlx5_flow_table, node.list);
+if (!first_ft || first_ft->level > ft->level) {
err = connect_prev_fts(dev, ft, prio);
if (err)
return err;
-next_ft = find_next_chained_ft(prio);
+next_ft = first_ft ? first_ft : find_next_chained_ft(prio);
err = connect_fwd_rules(dev, ft, next_ft);
if (err)
return err;
@ -2026,7 +2028,7 @@ static int disconnect_flow_table(struct mlx5_flow_table *ft)
node.list) == ft))
return 0;
-next_ft = find_next_chained_ft(prio);
+next_ft = find_next_ft(ft);
err = connect_fwd_rules(dev, next_ft, ft);
if (err)
return err;

View File

@ -443,7 +443,7 @@ static int sis900_probe(struct pci_dev *pci_dev,
#endif
/* setup various bits in PCI command register */
-ret = pci_enable_device(pci_dev);
+ret = pcim_enable_device(pci_dev);
if(ret) return ret;
i = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
@ -469,7 +469,7 @@ static int sis900_probe(struct pci_dev *pci_dev,
ioaddr = pci_iomap(pci_dev, 0, 0);
if (!ioaddr) {
ret = -ENOMEM;
-goto err_out_cleardev;
+goto err_out;
}
sis_priv = netdev_priv(net_dev);
@ -579,8 +579,6 @@ err_unmap_tx:
sis_priv->tx_ring_dma);
err_out_unmap:
pci_iounmap(pci_dev, ioaddr);
-err_out_cleardev:
-pci_release_regions(pci_dev);
err_out:
free_netdev(net_dev);
return ret;
@ -2489,7 +2487,6 @@ static void sis900_remove(struct pci_dev *pci_dev)
sis_priv->tx_ring_dma);
pci_iounmap(pci_dev, sis_priv->ioaddr);
free_netdev(net_dev);
-pci_release_regions(pci_dev);
}
#ifdef CONFIG_PM

View File

@ -8191,8 +8191,9 @@ static int niu_pci_vpd_fetch(struct niu *np, u32 start)
err = niu_pci_vpd_scan_props(np, here, end);
if (err < 0)
return err;
+/* ret == 1 is not an error */
if (err == 1)
-return -EINVAL;
+return 0;
}
return 0;
}

View File

@ -192,8 +192,7 @@ static void nfcsim_recv_wq(struct work_struct *work)
if (!IS_ERR(skb))
dev_kfree_skb(skb);
+skb = ERR_PTR(-ENODEV);
-return;
}
dev->cb(dev->nfc_digital_dev, dev->arg, skb);

View File

@ -105,6 +105,7 @@ struct mvebu_pcie_port {
struct mvebu_pcie_window memwin;
struct mvebu_pcie_window iowin;
u32 saved_pcie_stat;
+struct resource regs;
};
static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg)
@ -149,7 +150,9 @@ static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr)
/*
* Setup PCIE BARs and Address Decode Wins:
-* BAR[0,2] -> disabled, BAR[1] -> covers all DRAM banks
+* BAR[0] -> internal registers (needed for MSI)
+* BAR[1] -> covers all DRAM banks
+* BAR[2] -> Disabled
* WIN[0-3] -> DRAM bank[0-3]
*/
static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
@ -203,6 +206,12 @@ static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
mvebu_writel(port, 0, PCIE_BAR_HI_OFF(1));
mvebu_writel(port, ((size - 1) & 0xffff0000) | 1,
PCIE_BAR_CTRL_OFF(1));
+/*
+* Point BAR[0] to the device's internal registers.
+*/
+mvebu_writel(port, round_down(port->regs.start, SZ_1M), PCIE_BAR_LO_OFF(0));
+mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0));
}
static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
@ -708,14 +717,13 @@ static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
struct device_node *np,
struct mvebu_pcie_port *port)
{
-struct resource regs;
int ret = 0;
-ret = of_address_to_resource(np, 0, &regs);
+ret = of_address_to_resource(np, 0, &port->regs);
if (ret)
return ERR_PTR(ret);
-return devm_ioremap_resource(&pdev->dev, &regs);
+return devm_ioremap_resource(&pdev->dev, &port->regs);
}
#define DT_FLAGS_TO_TYPE(flags) (((flags) >> 24) & 0x03)

View File

@ -273,7 +273,7 @@ static void end_compressed_bio_write(struct bio *bio)
cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
btrfs_writepage_endio_finish_ordered(cb->compressed_pages[0],
cb->start, cb->start + cb->len - 1,
-bio->bi_status == BLK_STS_OK);
+!cb->errors);
cb->compressed_pages[0]->mapping = NULL;
end_compressed_writeback(inode, cb);

View File

@ -1266,6 +1266,7 @@ again:
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
list_del_init(&device->dev_alloc_list);
clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
+fs_devices->rw_devices--;
}
list_del_init(&device->dev_list);
fs_devices->num_devices--;

View File

@ -1529,6 +1529,45 @@ static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
}
}
+/*
+* zero out partial blocks of one cluster.
+*
+* start: file offset where zero starts, will be made upper block aligned.
+* len: it will be trimmed to the end of current cluster if "start + len"
+* is bigger than it.
+*/
+static int ocfs2_zeroout_partial_cluster(struct inode *inode,
+u64 start, u64 len)
+{
+int ret;
+u64 start_block, end_block, nr_blocks;
+u64 p_block, offset;
+u32 cluster, p_cluster, nr_clusters;
+struct super_block *sb = inode->i_sb;
+u64 end = ocfs2_align_bytes_to_clusters(sb, start);
+if (start + len < end)
+end = start + len;
+start_block = ocfs2_blocks_for_bytes(sb, start);
+end_block = ocfs2_blocks_for_bytes(sb, end);
+nr_blocks = end_block - start_block;
+if (!nr_blocks)
+return 0;
+cluster = ocfs2_bytes_to_clusters(sb, start);
+ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
+&nr_clusters, NULL);
+if (ret)
+return ret;
+if (!p_cluster)
+return 0;
+offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
+p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
+return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
+}
static int ocfs2_zero_partial_clusters(struct inode *inode,
u64 start, u64 len)
{
@ -1538,6 +1577,7 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
unsigned int csize = osb->s_clustersize;
handle_t *handle;
+loff_t isize = i_size_read(inode);
/*
* The "start" and "end" values are NOT necessarily part of
@ -1558,6 +1598,26 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
goto out;
+/* No page cache for EOF blocks, issue zero out to disk. */
+if (end > isize) {
+/*
+* zeroout eof blocks in last cluster starting from
+* "isize" even "start" > "isize" because it is
+* complicated to zeroout just at "start" as "start"
+* may be not aligned with block size, buffer write
+* would be required to do that, but out of eof buffer
+* write is not supported.
+*/
+ret = ocfs2_zeroout_partial_cluster(inode, isize,
+end - isize);
+if (ret) {
+mlog_errno(ret);
+goto out;
+}
+if (start >= isize)
+goto out;
+end = isize;
+}
handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
@ -1855,45 +1915,6 @@ out:
return ret;
}
-/*
-* zero out partial blocks of one cluster.
-*
-* start: file offset where zero starts, will be made upper block aligned.
-* len: it will be trimmed to the end of current cluster if "start + len"
-* is bigger than it.
-*/
-static int ocfs2_zeroout_partial_cluster(struct inode *inode,
-u64 start, u64 len)
-{
-int ret;
-u64 start_block, end_block, nr_blocks;
-u64 p_block, offset;
-u32 cluster, p_cluster, nr_clusters;
-struct super_block *sb = inode->i_sb;
-u64 end = ocfs2_align_bytes_to_clusters(sb, start);
-if (start + len < end)
-end = start + len;
-start_block = ocfs2_blocks_for_bytes(sb, start);
-end_block = ocfs2_blocks_for_bytes(sb, end);
-nr_blocks = end_block - start_block;
-if (!nr_blocks)
-return 0;
-cluster = ocfs2_bytes_to_clusters(sb, start);
-ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
-&nr_clusters, NULL);
-if (ret)
-return ret;
-if (!p_cluster)
-return 0;
-offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
-p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
-return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
-}
/*
* Parts of this function taken from xfs_change_file_space()
*/
@ -1935,7 +1956,6 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
goto out_inode_unlock;
}
-orig_isize = i_size_read(inode);
switch (sr->l_whence) {
case 0: /*SEEK_SET*/
break;
@ -1943,7 +1963,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
sr->l_start += f_pos;
break;
case 2: /*SEEK_END*/
-sr->l_start += orig_isize;
+sr->l_start += i_size_read(inode);
break;
default:
ret = -EINVAL;
@ -1998,6 +2018,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
ret = -EINVAL;
}
+orig_isize = i_size_read(inode);
/* zeroout eof blocks in the cluster. */
if (!ret && change_size && orig_isize < size) {
ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,

View File

@ -15,9 +15,11 @@
#include <linux/if_ether.h>
/* Lengths of frame formats */
-#define LLC_PDU_LEN_I 4 /* header and 2 control bytes */
-#define LLC_PDU_LEN_S 4
-#define LLC_PDU_LEN_U 3 /* header and 1 control byte */
+#define LLC_PDU_LEN_I 4 /* header and 2 control bytes */
+#define LLC_PDU_LEN_S 4
+#define LLC_PDU_LEN_U 3 /* header and 1 control byte */
+/* header and 1 control byte and XID info */
+#define LLC_PDU_LEN_U_XID (LLC_PDU_LEN_U + sizeof(struct llc_xid_info))
/* Known SAP addresses */
#define LLC_GLOBAL_SAP 0xFF
#define LLC_NULL_SAP 0x00 /* not network-layer visible */
@ -50,9 +52,10 @@
#define LLC_PDU_TYPE_U_MASK 0x03 /* 8-bit control field */
#define LLC_PDU_TYPE_MASK 0x03
-#define LLC_PDU_TYPE_I 0 /* first bit */
-#define LLC_PDU_TYPE_S 1 /* first two bits */
-#define LLC_PDU_TYPE_U 3 /* first two bits */
+#define LLC_PDU_TYPE_I 0 /* first bit */
+#define LLC_PDU_TYPE_S 1 /* first two bits */
+#define LLC_PDU_TYPE_U 3 /* first two bits */
+#define LLC_PDU_TYPE_U_XID 4 /* private type for detecting XID commands */
#define LLC_PDU_TYPE_IS_I(pdu) \
((!(pdu->ctrl_1 & LLC_PDU_TYPE_I_MASK)) ? 1 : 0)
@ -230,9 +233,18 @@ static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb)
static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type,
u8 ssap, u8 dsap, u8 cr)
{
-const int hlen = type == LLC_PDU_TYPE_U ? 3 : 4;
+int hlen = 4; /* default value for I and S types */
struct llc_pdu_un *pdu;
+switch (type) {
+case LLC_PDU_TYPE_U:
+hlen = 3;
+break;
+case LLC_PDU_TYPE_U_XID:
+hlen = 6;
+break;
+}
skb_push(skb, hlen);
skb_reset_network_header(skb);
pdu = llc_pdu_un_hdr(skb);
@ -374,7 +386,10 @@ static inline void llc_pdu_init_as_xid_cmd(struct sk_buff *skb,
xid_info->fmt_id = LLC_XID_FMT_ID; /* 0x81 */
xid_info->type = svcs_supported;
xid_info->rw = rx_window << 1; /* size of receive window */
-skb_put(skb, sizeof(struct llc_xid_info));
+/* no need to push/put since llc_pdu_header_init() has already
+* pushed 3 + 3 bytes
+*/
}
/**
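
The arithmetic behind LLC_PDU_LEN_U_XID: a U-format header is 3 bytes (DSAP, SSAP, one control byte) and struct llc_xid_info adds 3 more (fmt_id, type, rw), so llc_pdu_header_init() must reserve 6 bytes for an XID frame; reserving only 3 is what let the XID fields overwrite user data. A quick userspace size check (struct fields mirror include/net/llc_pdu.h, packed layout assumed):

#include <stdio.h>
#include <stdint.h>

#define LLC_PDU_LEN_U 3	/* DSAP + SSAP + 1 control byte */

struct llc_xid_info {	/* mirrors the kernel definition */
	uint8_t fmt_id;
	uint8_t type;
	uint8_t rw;
} __attribute__((packed));

#define LLC_PDU_LEN_U_XID (LLC_PDU_LEN_U + sizeof(struct llc_xid_info))

int main(void)
{
	printf("U header %d + xid_info %zu = reserve %zu bytes\n",
	       LLC_PDU_LEN_U, sizeof(struct llc_xid_info),
	       (size_t)LLC_PDU_LEN_U_XID);
	return 0;
}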

View File

@ -1075,11 +1075,16 @@ static bool j1939_session_deactivate_locked(struct j1939_session *session)
static bool j1939_session_deactivate(struct j1939_session *session)
{
+struct j1939_priv *priv = session->priv;
bool active;
-j1939_session_list_lock(session->priv);
+j1939_session_list_lock(priv);
+/* This function should be called with a session ref-count of at
+* least 2.
+*/
+WARN_ON_ONCE(kref_read(&session->kref) < 2);
active = j1939_session_deactivate_locked(session);
-j1939_session_list_unlock(session->priv);
+j1939_session_list_unlock(priv);
return active;
}
@ -1869,7 +1874,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
if (!session->transmission)
j1939_tp_schedule_txtimer(session, 0);
} else {
-j1939_tp_set_rxtimeout(session, 250);
+j1939_tp_set_rxtimeout(session, 750);
}
session->last_cmd = 0xff;
consume_skb(se_skb);

View File

@ -548,10 +548,18 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
return -EFAULT;
}
+rtnl_lock();
lock_sock(sk);
-if (ro->bound && ro->ifindex)
+if (ro->bound && ro->ifindex) {
dev = dev_get_by_index(sock_net(sk), ro->ifindex);
+if (!dev) {
+if (count > 1)
+kfree(filter);
+err = -ENODEV;
+goto out_fil;
+}
+}
if (ro->bound) {
/* (try to) register the new filters */
@ -590,6 +598,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
dev_put(dev);
release_sock(sk);
+rtnl_unlock();
break;
@ -602,10 +611,16 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
err_mask &= CAN_ERR_MASK;
rtnl_lock();
lock_sock(sk);
-if (ro->bound && ro->ifindex)
+if (ro->bound && ro->ifindex) {
dev = dev_get_by_index(sock_net(sk), ro->ifindex);
+if (!dev) {
+err = -ENODEV;
+goto out_err;
+}
+}
/* remove current error mask */
if (ro->bound) {
@ -629,6 +644,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
dev_put(dev);
release_sock(sk);
+rtnl_unlock();
break;

View File

@ -391,7 +391,7 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
tunnel->i_seqno = ntohl(tpi->seq) + 1;
}
-skb_reset_network_header(skb);
+skb_set_network_header(skb, (tunnel->dev->type == ARPHRD_ETHER) ? ETH_HLEN : 0);
err = IP_ECN_decapsulate(iph, skb);
if (unlikely(err)) {

View File

@ -98,8 +98,16 @@ static inline u8 llc_ui_header_len(struct sock *sk, struct sockaddr_llc *addr)
{
u8 rc = LLC_PDU_LEN_U;
-if (addr->sllc_test || addr->sllc_xid)
+if (addr->sllc_test)
rc = LLC_PDU_LEN_U;
+else if (addr->sllc_xid)
+/* We need to expand header to sizeof(struct llc_xid_info)
+* since llc_pdu_init_as_xid_cmd() sets 4,5,6 bytes of LLC header
+* as XID PDU. In llc_ui_sendmsg() we reserved header size and then
+* filled all other space with user data. If we won't reserve this
+* bytes, llc_pdu_init_as_xid_cmd() will overwrite user data
+*/
+rc = LLC_PDU_LEN_U_XID;
else if (sk->sk_type == SOCK_STREAM)
rc = LLC_PDU_LEN_I;
return rc;

View File

@ -79,7 +79,7 @@ int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb)
struct llc_sap_state_ev *ev = llc_sap_ev(skb);
int rc;
-llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap,
+llc_pdu_header_init(skb, LLC_PDU_TYPE_U_XID, ev->saddr.lsap,
ev->daddr.lsap, LLC_PDU_CMD);
llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0);
rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);

View File

@ -660,8 +660,13 @@ bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
return false;
tstamp = nf_conn_tstamp_find(ct);
-if (tstamp && tstamp->stop == 0)
+if (tstamp) {
+s32 timeout = ct->timeout - nfct_time_stamp;
tstamp->stop = ktime_get_real_ns();
+if (timeout < 0)
+tstamp->stop -= jiffies_to_nsecs(-timeout);
+}
if (nf_conntrack_event_report(IPCT_DESTROY, ct,
portid, report) < 0) {

View File

@ -147,7 +147,9 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
alen = FIELD_SIZEOF(struct nf_nat_range, min_addr.ip6);
break;
default:
-return -EAFNOSUPPORT;
+if (tb[NFTA_NAT_REG_ADDR_MIN])
+return -EAFNOSUPPORT;
+break;
}
priv->family = family;

View File

@ -231,6 +231,8 @@ static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
index++;
if (index < s_i)
continue;
+if (IS_ERR(p))
+continue;
if (jiffy_since &&
time_after(jiffy_since,

View File

@ -1175,7 +1175,7 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(
if (unlikely(!af))
return NULL;
-if (af->from_addr_param(&paddr, param, peer_port, 0))
+if (!af->from_addr_param(&paddr, param, peer_port, 0))
return NULL;
return __sctp_lookup_association(net, laddr, &paddr, transportp);

View File

@ -2501,7 +2501,7 @@ static int tipc_listen(struct socket *sock, int len)
static int tipc_wait_for_accept(struct socket *sock, long timeo)
{
struct sock *sk = sock->sk;
-DEFINE_WAIT(wait);
+DEFINE_WAIT_FUNC(wait, woken_wake_function);
int err;
/* True wake-one mechanism for incoming connections: only
@ -2510,12 +2510,12 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
* anymore, the common case will execute the loop only once.
*/
for (;;) {
-prepare_to_wait_exclusive(sk_sleep(sk), &wait,
-TASK_INTERRUPTIBLE);
if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
+add_wait_queue(sk_sleep(sk), &wait);
release_sock(sk);
-timeo = schedule_timeout(timeo);
+timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
lock_sock(sk);
+remove_wait_queue(sk_sleep(sk), &wait);
}
err = 0;
if (!skb_queue_empty(&sk->sk_receive_queue))
@ -2527,7 +2527,6 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
if (signal_pending(current))
break;
}
-finish_wait(sk_sleep(sk), &wait);
return err;
}
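
The tipc change swaps the prepare_to_wait()/schedule_timeout() idiom for wait_woken(), which closes the race where a wakeup arrives between the queue-empty check and the actual sleep: wait_woken() latches the wakeup in the wait entry's WQ_FLAG_WOKEN instead of relying on task state, so a wakeup delivered while the socket lock is dropped is not lost. The generic shape of the idiom, as a kernel-context sketch (the predicate and error policy are placeholders, not tipc code):

static int wait_for_condition(struct sock *sk, long timeo)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int err = 0;

	add_wait_queue(sk_sleep(sk), &wait);
	/* placeholder predicate, e.g. !skb_queue_empty(&sk->sk_receive_queue) */
	while (!condition_met(sk)) {
		release_sock(sk);
		/* returns immediately if a wakeup already set WQ_FLAG_WOKEN */
		timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
		lock_sock(sk);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}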

View File

@ -1250,16 +1250,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
* be grouped with this beacon for updates ...
*/
if (!cfg80211_combine_bsses(rdev, new)) {
-kfree(new);
+bss_ref_put(rdev, new);
goto drop;
}
}
if (rdev->bss_entries >= bss_entries_limit &&
!cfg80211_bss_expire_oldest(rdev)) {
-if (!list_empty(&new->hidden_list))
-list_del(&new->hidden_list);
-kfree(new);
+bss_ref_put(rdev, new);
goto drop;
}
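
The cfg80211 fix illustrates a general refcounting rule: once an object carries internal references (this BSS entry may sit on a hidden_list and pin the BSS it transmits from), a bare kfree() leaks or corrupts whatever those references hold, so the error paths must go through the object's put-function. A generic userspace sketch of the rule:

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refs;
	struct obj *parent;	/* reference we hold; must drop on release */
};

static void obj_put(struct obj *o)
{
	if (o && --o->refs == 0) {
		obj_put(o->parent);	/* unwind everything we pinned */
		free(o);
	}
}

static struct obj *obj_new(struct obj *parent)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return NULL;
	o->refs = 1;
	if (parent)
		parent->refs++;		/* we now pin the parent */
	o->parent = parent;
	return o;
}

int main(void)
{
	struct obj *root = obj_new(NULL);
	struct obj *child = obj_new(root);

	/* free(child) here would leave root's extra reference dangling;
	 * the put-function releases it properly. */
	obj_put(child);
	obj_put(root);
	puts("references balanced");
	return 0;
}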

View File

@ -214,8 +214,6 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
if (!(prot & PROT_EXEC))
dso__set_loaded(dso);
}
-nsinfo__put(dso->nsinfo);
-dso->nsinfo = nsi;
dso__put(dso);
}

View File

@ -3570,6 +3570,16 @@ struct compat_kvm_dirty_log {
};
};
+struct compat_kvm_clear_dirty_log {
+__u32 slot;
+__u32 num_pages;
+__u64 first_page;
+union {
+compat_uptr_t dirty_bitmap; /* one bit per page */
+__u64 padding2;
+};
+};
static long kvm_vm_compat_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@ -3579,6 +3589,24 @@ static long kvm_vm_compat_ioctl(struct file *filp,
if (kvm->mm != current->mm)
return -EIO;
switch (ioctl) {
+#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
+case KVM_CLEAR_DIRTY_LOG: {
+struct compat_kvm_clear_dirty_log compat_log;
+struct kvm_clear_dirty_log log;
+if (copy_from_user(&compat_log, (void __user *)arg,
+sizeof(compat_log)))
+return -EFAULT;
+log.slot = compat_log.slot;
+log.num_pages = compat_log.num_pages;
+log.first_page = compat_log.first_page;
+log.padding2 = compat_log.padding2;
+log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
+r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
+break;
+}
+#endif
case KVM_GET_DIRTY_LOG: {
struct compat_kvm_dirty_log compat_log;
struct kvm_dirty_log log;