This is the 5.4.26 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl5xvRcACgkQONu9yGCS
 aT5GJhAAlahoiTrrDWQH5njl2uNVxOqZl2r6jiaX7+CrcPUg5tIznqUo9qht2s+V
 zCqUcuq9e1Q6WFeqHPx0qwaROaLppcKLuTqg4hrH2KPhuZ4yYjTfjFa8++Q7poPL
 7tQVYxC6r04IdoZnbAOrRduAoMFMgTq9WZCdOpY3860LJrelOPPUh6+N8YnzXTIs
 q0+6nBNtlMYAlQLbQI9nggOtxf6yBor4Y9mfgk1Spe8vbNyZBFCvtRTntlhJI5ME
 u+xictLY3dbvUNFeuJSlQZd22tqTvw2Gxc5Efy2GHjokNb7SKMEswnWy1K6BKP5b
 NZwLtn85mgCWbq7Eo4Bf5idA2EpsyeoJTr9bRmx5mWJ3hFcUaMj6bNEUrfybsZKg
 8/lbP5zM459NyLF+arCy5lNkNBridQt/vDciGiYDzHEVi88PJRNwL6nN01XK+ByU
 aCcQghNAzjMYdxDwotWGKKyTelC+Ck8FlJkIoIgEPYvvTRdaDk+BP/mHaH7KQNax
 AXbSND0MmtloxHgecvmJASU/628uhv8hxzjcCW+Gnzwk55nH2rwZ+VC4ciJgI/hj
 u5x1zaumj67WPqAZ7dZIBrJxpKfDMMFxTQFXLMgP+C+UjJviG4dUw1IVk0v5jTqx
 XG+Gk13ovv62QtKW7wTsuZVrjHXo1zB8OsQrDdKG+GXTkf53bP4=
 =1Qwi
 -----END PGP SIGNATURE-----

Merge tag 'v5.4.26' into 5.4-1.0.0-imx

This is the 5.4.26 stable release
Gary Bisson 2020-04-15 14:32:30 +02:00
commit fdd6c8657f
117 changed files with 1038 additions and 407 deletions


@ -850,3 +850,11 @@ business doing so.
d_alloc_pseudo() is internal-only; uses outside of alloc_file_pseudo() are
very suspect (and won't work in modules). Such uses are very likely to
be misspelled d_alloc_anon().
---
**mandatory**
[should've been added in 2016] stale comment in finish_open() notwithstanding,
failure exits in ->atomic_open() instances should *NOT* fput() the file,
no matter what. Everything is handled by the caller.
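
A minimal standalone C analogue of the rule above (not from this patch; tmpfile()/fclose() stand in for the VFS-managed struct file): the failing step only reports the error, and the single owner of the file does the cleanup, mirroring how fput() is the caller's job after a failed ->atomic_open().

#include <stdio.h>

/* The callee may fail, but it never releases the caller's file. */
static int open_step(FILE *f, int simulate_error)
{
    if (simulate_error)
        return -1;    /* report failure, do NOT fclose(f) here */
    fprintf(f, "opened\n");
    return 0;
}

int main(void)
{
    FILE *f = tmpfile();

    if (!f)
        return 1;
    if (open_step(f, 1) < 0)
        fprintf(stderr, "open failed, caller cleans up\n");
    fclose(f);    /* one owner, one release point */
    return 0;
}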


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 25
SUBLEVEL = 26
EXTRAVERSION =
NAME = Kleptomaniac Octopus


@ -29,6 +29,8 @@
.endm
#define ASM_NL ` /* use '`' to mark new line in macro */
#define __ALIGN .align 4
#define __ALIGN_STR __stringify(__ALIGN)
/* annotation for data we want in DCCM - if enabled in .config */
.macro ARCFP_DATA nm


@ -190,15 +190,12 @@ static int amd_uncore_event_init(struct perf_event *event)
/*
* NB and Last level cache counters (MSRs) are shared across all cores
* that share the same NB / Last level cache. Interrupts can be directed
* to a single target core, however, event counts generated by processes
* running on other cores cannot be masked out. So we do not support
* sampling and per-thread events.
* that share the same NB / Last level cache. On family 16h and below,
* Interrupts can be directed to a single target core, however, event
* counts generated by processes running on other cores cannot be masked
* out. So we do not support sampling and per-thread events via
* CAP_NO_INTERRUPT, and we do not enable counter overflow interrupts:
*/
if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
return -EINVAL;
/* and we do not enable counter overflow interrupts */
hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
hwc->idx = -1;
@ -306,7 +303,7 @@ static struct pmu amd_nb_pmu = {
.start = amd_uncore_start,
.stop = amd_uncore_stop,
.read = amd_uncore_read,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
};
static struct pmu amd_llc_pmu = {
@ -317,7 +314,7 @@ static struct pmu amd_llc_pmu = {
.start = amd_uncore_start,
.stop = amd_uncore_stop,
.read = amd_uncore_read,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
};
static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)


@ -489,17 +489,18 @@ static void intel_ppin_init(struct cpuinfo_x86 *c)
return;
if ((val & 3UL) == 1UL) {
/* PPIN available but disabled: */
/* PPIN locked in disabled mode */
return;
}
/* If PPIN is disabled, but not locked, try to enable: */
if (!(val & 3UL)) {
/* If PPIN is disabled, try to enable */
if (!(val & 2UL)) {
wrmsrl_safe(MSR_PPIN_CTL, val | 2UL);
rdmsrl_safe(MSR_PPIN_CTL, &val);
}
if ((val & 3UL) == 2UL)
/* Is the enable bit set? */
if (val & 2UL)
set_cpu_cap(c, X86_FEATURE_INTEL_PPIN);
}
}
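
A standalone sketch of the MSR_PPIN_CTL decision table the hunk above implements, assuming bit 0 is the lock bit and bit 1 the enable bit (sample values are invented; the real code reads the MSR):

#include <stdio.h>

static const char *ppin_state(unsigned long val)
{
    if ((val & 3UL) == 1UL)
        return "locked in disabled mode - give up";
    if (!(val & 2UL))
        return "disabled but not locked - try setting the enable bit";
    return "enable bit set - PPIN usable";
}

int main(void)
{
    unsigned long samples[] = { 0UL, 1UL, 2UL, 3UL };
    int i;

    for (i = 0; i < 4; i++)
        printf("PPIN_CTL=%lu: %s\n", samples[i], ppin_state(samples[i]));
    return 0;
}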


@ -5197,6 +5197,7 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
ctxt->fetch.ptr = ctxt->fetch.data;
ctxt->fetch.end = ctxt->fetch.data + insn_len;
ctxt->opcode_len = 1;
ctxt->intercept = x86_intercept_none;
if (insn_len > 0)
memcpy(ctxt->fetch.data, insn, insn_len);
else {


@ -223,7 +223,7 @@ static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
return;
kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
vmx->nested.hv_evmcs_vmptr = -1ull;
vmx->nested.hv_evmcs_vmptr = 0;
vmx->nested.hv_evmcs = NULL;
}
@ -1828,7 +1828,8 @@ static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa))
return 1;
if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
if (unlikely(!vmx->nested.hv_evmcs ||
evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
if (!vmx->nested.hv_evmcs)
vmx->nested.current_vmptr = -1ull;


@ -106,6 +106,19 @@ static unsigned int __ioremap_check_encrypted(struct resource *res)
return 0;
}
/*
* The EFI runtime services data area is not covered by walk_mem_res(), but must
* be mapped encrypted when SEV is active.
*/
static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc)
{
if (!sev_active())
return;
if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA)
desc->flags |= IORES_MAP_ENCRYPTED;
}
static int __ioremap_collect_map_flags(struct resource *res, void *arg)
{
struct ioremap_desc *desc = arg;
@ -124,6 +137,9 @@ static int __ioremap_collect_map_flags(struct resource *res, void *arg)
* To avoid multiple resource walks, this function walks resources marked as
* IORESOURCE_MEM and IORESOURCE_BUSY and looking for system RAM and/or a
* resource described not as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
*
* After that, deal with misc other ranges in __ioremap_check_other() which do
* not fall into the above category.
*/
static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
struct ioremap_desc *desc)
@ -135,6 +151,8 @@ static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
memset(desc, 0, sizeof(struct ioremap_desc));
walk_mem_res(start, end, desc, __ioremap_collect_map_flags);
__ioremap_check_other(addr, desc);
}
/*


@ -1318,7 +1318,7 @@ static bool iocg_is_idle(struct ioc_gq *iocg)
return false;
/* is something in flight? */
if (atomic64_read(&iocg->done_vtime) < atomic64_read(&iocg->vtime))
if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
return false;
return true;


@ -335,10 +335,10 @@ static void setup_pdev_dma_masks(struct platform_device *pdev)
{
if (!pdev->dev.coherent_dma_mask)
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
if (!pdev->dma_mask)
pdev->dma_mask = DMA_BIT_MASK(32);
if (!pdev->dev.dma_mask)
pdev->dev.dma_mask = &pdev->dma_mask;
if (!pdev->dev.dma_mask) {
pdev->platform_dma_mask = DMA_BIT_MASK(32);
pdev->dev.dma_mask = &pdev->platform_dma_mask;
}
};
/**
@ -634,20 +634,8 @@ struct platform_device *platform_device_register_full(
pdev->dev.of_node_reused = pdevinfo->of_node_reused;
if (pdevinfo->dma_mask) {
/*
* This memory isn't freed when the device is put,
* I don't have a nice idea for that though. Conceptually
* dma_mask in struct device should not be a pointer.
* See http://thread.gmane.org/gmane.linux.kernel.pci/9081
*/
pdev->dev.dma_mask =
kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
if (!pdev->dev.dma_mask)
goto err;
kmemleak_ignore(pdev->dev.dma_mask);
*pdev->dev.dma_mask = pdevinfo->dma_mask;
pdev->platform_dma_mask = pdevinfo->dma_mask;
pdev->dev.dma_mask = &pdev->platform_dma_mask;
pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
}
@ -672,7 +660,6 @@ struct platform_device *platform_device_register_full(
if (ret) {
err:
ACPI_COMPANION_SET(&pdev->dev, NULL);
kfree(pdev->dev.dma_mask);
platform_device_put(pdev);
return ERR_PTR(ret);
}
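
The hunk above replaces a separately kmalloc()ed DMA mask (leaked on some paths) with storage embedded in struct platform_device. A standalone sketch of that ownership pattern, with simplified stand-in types (fake_* names are invented):

#include <stdint.h>
#include <stdio.h>

struct fake_device {
    uint64_t *dma_mask;         /* like dev.dma_mask: just a pointer */
};

struct fake_platform_device {
    struct fake_device dev;
    uint64_t platform_dma_mask; /* storage owned by the same object */
};

int main(void)
{
    struct fake_platform_device pdev = { { 0 }, 0 };

    pdev.platform_dma_mask = (1ULL << 32) - 1;    /* DMA_BIT_MASK(32) */
    pdev.dev.dma_mask = &pdev.platform_dma_mask;  /* no allocation */

    printf("mask = %#llx\n", (unsigned long long)*pdev.dev.dma_mask);
    return 0;    /* nothing to free: the mask dies with pdev */
}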


@ -339,10 +339,12 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
if (err) {
virtqueue_kick(vblk->vqs[qid].vq);
blk_mq_stop_hw_queue(hctx);
/* Don't stop the queue if -ENOMEM: we may have failed to
* bounce the buffer due to global resource outage.
*/
if (err == -ENOSPC)
blk_mq_stop_hw_queue(hctx);
spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
/* Out of mem doesn't actually happen, since we fall back
* to direct descriptors */
if (err == -ENOMEM || err == -ENOSPC)
return BLK_STS_DEV_RESOURCE;
return BLK_STS_IOERR;


@ -194,7 +194,7 @@ static int platform_ipmi_probe(struct platform_device *pdev)
else
io.slave_addr = slave_addr;
io.irq = platform_get_irq(pdev, 0);
io.irq = platform_get_irq_optional(pdev, 0);
if (io.irq > 0)
io.irq_setup = ipmi_std_irq_setup;
else
@ -378,7 +378,7 @@ static int acpi_ipmi_probe(struct platform_device *pdev)
io.irq = tmp;
io.irq_setup = acpi_gpe_irq_setup;
} else {
int irq = platform_get_irq(pdev, 0);
int irq = platform_get_irq_optional(pdev, 0);
if (irq > 0) {
io.irq = irq;


@ -83,13 +83,16 @@ static ssize_t
efivar_attr_read(struct efivar_entry *entry, char *buf)
{
struct efi_variable *var = &entry->var;
unsigned long size = sizeof(var->Data);
char *str = buf;
int ret;
if (!entry || !buf)
return -EINVAL;
var->DataSize = 1024;
if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
var->DataSize = size;
if (ret)
return -EIO;
if (var->Attributes & EFI_VARIABLE_NON_VOLATILE)
@ -116,13 +119,16 @@ static ssize_t
efivar_size_read(struct efivar_entry *entry, char *buf)
{
struct efi_variable *var = &entry->var;
unsigned long size = sizeof(var->Data);
char *str = buf;
int ret;
if (!entry || !buf)
return -EINVAL;
var->DataSize = 1024;
if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
var->DataSize = size;
if (ret)
return -EIO;
str += sprintf(str, "0x%lx\n", var->DataSize);
@ -133,12 +139,15 @@ static ssize_t
efivar_data_read(struct efivar_entry *entry, char *buf)
{
struct efi_variable *var = &entry->var;
unsigned long size = sizeof(var->Data);
int ret;
if (!entry || !buf)
return -EINVAL;
var->DataSize = 1024;
if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
var->DataSize = size;
if (ret)
return -EIO;
memcpy(buf, var->Data, var->DataSize);
@ -199,6 +208,9 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
u8 *data;
int err;
if (!entry || !buf)
return -EINVAL;
if (in_compat_syscall()) {
struct compat_efi_variable *compat;
@ -250,14 +262,16 @@ efivar_show_raw(struct efivar_entry *entry, char *buf)
{
struct efi_variable *var = &entry->var;
struct compat_efi_variable *compat;
unsigned long datasize = sizeof(var->Data);
size_t size;
int ret;
if (!entry || !buf)
return 0;
var->DataSize = 1024;
if (efivar_entry_get(entry, &entry->var.Attributes,
&entry->var.DataSize, entry->var.Data))
ret = efivar_entry_get(entry, &var->Attributes, &datasize, var->Data);
var->DataSize = datasize;
if (ret)
return -EIO;
if (in_compat_syscall()) {


@ -365,8 +365,7 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
router.ddc_valid = false;
router.cd_valid = false;
for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
uint8_t grph_obj_type=
grph_obj_type =
uint8_t grph_obj_type =
(le16_to_cpu(path->usGraphicObjIds[j]) &
OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;


@ -439,7 +439,8 @@ eb_validate_vma(struct i915_execbuffer *eb,
if (unlikely(entry->flags & eb->invalid_flags))
return -EINVAL;
if (unlikely(entry->alignment && !is_power_of_2(entry->alignment)))
if (unlikely(entry->alignment &&
!is_power_of_2_u64(entry->alignment)))
return -EINVAL;
/*


@ -457,7 +457,8 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
/* TODO: add more platforms support */
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
IS_COFFEELAKE(dev_priv)) {
if (connected) {
vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
SFUSE_STRAP_DDID_DETECTED;


@ -272,10 +272,17 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
mutex_lock(&vgpu->vgpu_lock);
WARN(vgpu->active, "vGPU is still active!\n");
/*
* remove idr first so later clean can judge if need to stop
* service if no active vgpu.
*/
mutex_lock(&gvt->lock);
idr_remove(&gvt->vgpu_idr, vgpu->id);
mutex_unlock(&gvt->lock);
mutex_lock(&vgpu->vgpu_lock);
intel_gvt_debugfs_remove_vgpu(vgpu);
intel_vgpu_clean_sched_policy(vgpu);
intel_vgpu_clean_submission(vgpu);
@ -290,7 +297,6 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
mutex_unlock(&vgpu->vgpu_lock);
mutex_lock(&gvt->lock);
idr_remove(&gvt->vgpu_idr, vgpu->id);
if (idr_is_empty(&gvt->vgpu_idr))
intel_gvt_clean_irq(gvt);
intel_gvt_update_vgpu_types(gvt);


@ -560,19 +560,31 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
return NOTIFY_DONE;
}
static void irq_semaphore_cb(struct irq_work *wrk)
{
struct i915_request *rq =
container_of(wrk, typeof(*rq), semaphore_work);
i915_schedule_bump_priority(rq, I915_PRIORITY_NOSEMAPHORE);
i915_request_put(rq);
}
static int __i915_sw_fence_call
semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
struct i915_request *request =
container_of(fence, typeof(*request), semaphore);
struct i915_request *rq = container_of(fence, typeof(*rq), semaphore);
switch (state) {
case FENCE_COMPLETE:
i915_schedule_bump_priority(request, I915_PRIORITY_NOSEMAPHORE);
if (!(READ_ONCE(rq->sched.attr.priority) & I915_PRIORITY_NOSEMAPHORE)) {
i915_request_get(rq);
init_irq_work(&rq->semaphore_work, irq_semaphore_cb);
irq_work_queue(&rq->semaphore_work);
}
break;
case FENCE_FREE:
i915_request_put(request);
i915_request_put(rq);
break;
}
@ -1215,9 +1227,9 @@ void __i915_request_queue(struct i915_request *rq,
* decide whether to preempt the entire chain so that it is ready to
* run at the earliest possible convenience.
*/
i915_sw_fence_commit(&rq->semaphore);
if (attr && rq->engine->schedule)
rq->engine->schedule(rq, attr);
i915_sw_fence_commit(&rq->semaphore);
i915_sw_fence_commit(&rq->submit);
}


@ -26,6 +26,7 @@
#define I915_REQUEST_H
#include <linux/dma-fence.h>
#include <linux/irq_work.h>
#include <linux/lockdep.h>
#include "gt/intel_context_types.h"
@ -147,6 +148,7 @@ struct i915_request {
};
struct list_head execute_cb;
struct i915_sw_fence semaphore;
struct irq_work semaphore_work;
/*
* A list of everyone we wait upon, and everyone who waits upon us.


@ -233,6 +233,11 @@ static inline u64 ptr_to_u64(const void *ptr)
__idx; \
})
static inline bool is_power_of_2_u64(u64 n)
{
return (n != 0 && ((n & (n - 1)) == 0));
}
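
is_power_of_2_u64() exists presumably because is_power_of_2() takes an unsigned long, which is 32 bits on some builds and would truncate a 64-bit alignment. The bit trick: a power of two has exactly one set bit, so n & (n - 1) clears it to zero. A standalone check:

#include <stdint.h>
#include <stdio.h>

static int is_pow2_u64(uint64_t n)
{
    return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
    uint64_t tests[] = { 0, 1, 2, 3, 4096, 1ULL << 33, (1ULL << 33) + 1 };
    unsigned int i;

    for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
        printf("%llu -> %d\n", (unsigned long long)tests[i],
               is_pow2_u64(tests[i]));
    return 0;
}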
static inline void __list_del_many(struct list_head *head,
struct list_head *first)
{


@ -313,6 +313,7 @@ static void i2c_dw_pci_remove(struct pci_dev *pdev)
pm_runtime_get_noresume(&pdev->dev);
i2c_del_adapter(&dev->adapter);
devm_free_irq(&pdev->dev, dev->irq, dev);
pci_free_irq_vectors(pdev);
}


@ -348,7 +348,7 @@ static struct gpio_desc *i2c_gpio_get_desc(struct device *dev,
if (ret == -ENOENT)
retdesc = ERR_PTR(-EPROBE_DEFER);
if (ret != -EPROBE_DEFER)
if (PTR_ERR(retdesc) != -EPROBE_DEFER)
dev_err(dev, "error trying to get descriptor: %d\n", ret);
return retdesc;


@ -394,9 +394,17 @@ EXPORT_SYMBOL_GPL(i2c_acpi_find_adapter_by_handle);
static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev)
{
struct device *dev;
struct i2c_client *client;
dev = bus_find_device_by_acpi_dev(&i2c_bus_type, adev);
return dev ? i2c_verify_client(dev) : NULL;
if (!dev)
return NULL;
client = i2c_verify_client(dev);
if (!client)
put_device(dev);
return client;
}
static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value,


@ -4421,7 +4421,7 @@ int amd_iommu_activate_guest_mode(void *data)
entry->lo.fields_vapic.ga_tag = ir_data->ga_tag;
return modify_irte_ga(ir_data->irq_2_irte.devid,
ir_data->irq_2_irte.index, entry, NULL);
ir_data->irq_2_irte.index, entry, ir_data);
}
EXPORT_SYMBOL(amd_iommu_activate_guest_mode);
@ -4447,7 +4447,7 @@ int amd_iommu_deactivate_guest_mode(void *data)
APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
return modify_irte_ga(ir_data->irq_2_irte.devid,
ir_data->irq_2_irte.index, entry, NULL);
ir_data->irq_2_irte.index, entry, ir_data);
}
EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);


@ -176,15 +176,15 @@ static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
start -= iova_offset(iovad, start);
num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);
msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
if (!msi_page)
return -ENOMEM;
for (i = 0; i < num_pages; i++) {
msi_page[i].phys = start;
msi_page[i].iova = start;
INIT_LIST_HEAD(&msi_page[i].list);
list_add(&msi_page[i].list, &cookie->msi_page_list);
msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
if (!msi_page)
return -ENOMEM;
msi_page->phys = start;
msi_page->iova = start;
INIT_LIST_HEAD(&msi_page->list);
list_add(&msi_page->list, &cookie->msi_page_list);
start += iovad->granule;
}


@ -28,6 +28,7 @@
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/numa.h>
#include <linux/limits.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>
@ -128,6 +129,13 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
BUG_ON(dev->is_virtfn);
/*
* Ignore devices that have a domain number higher than what can
* be looked up in DMAR, e.g. VMD subdevices with domain 0x10000
*/
if (pci_domain_nr(dev->bus) > U16_MAX)
return NULL;
/* Only generate path[] for device addition event */
if (event == BUS_NOTIFY_ADD_DEVICE)
for (tmp = dev; tmp; tmp = tmp->bus->self)
@ -440,12 +448,13 @@ static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
/* Check for NUL termination within the designated length */
if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
pr_warn(FW_BUG
"Your BIOS is broken; ANDD object name is not NUL-terminated\n"
"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
dmi_get_system_info(DMI_BIOS_VENDOR),
dmi_get_system_info(DMI_BIOS_VERSION),
dmi_get_system_info(DMI_PRODUCT_VERSION));
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
return -EINVAL;
}
pr_info("ANDD device: %x name: %s\n", andd->device_number,
@ -471,14 +480,14 @@ static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
return 0;
}
}
WARN_TAINT(
1, TAINT_FIRMWARE_WORKAROUND,
pr_warn(FW_BUG
"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
drhd->reg_base_addr,
rhsa->base_address,
dmi_get_system_info(DMI_BIOS_VENDOR),
dmi_get_system_info(DMI_BIOS_VERSION),
dmi_get_system_info(DMI_PRODUCT_VERSION));
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
return 0;
}
@ -827,14 +836,14 @@ int __init dmar_table_init(void)
static void warn_invalid_dmar(u64 addr, const char *message)
{
WARN_TAINT_ONCE(
1, TAINT_FIRMWARE_WORKAROUND,
pr_warn_once(FW_BUG
"Your BIOS is broken; DMAR reported at address %llx%s!\n"
"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
addr, message,
dmi_get_system_info(DMI_BIOS_VENDOR),
dmi_get_system_info(DMI_BIOS_VERSION),
dmi_get_system_info(DMI_PRODUCT_VERSION));
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
}
static int __ref


@ -4129,10 +4129,11 @@ static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
/* we know that this iommu should be at offset 0xa000 from vtbar */
drhd = dmar_find_matched_drhd_unit(pdev);
if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
TAINT_FIRMWARE_WORKAROUND,
"BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) {
pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n");
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
}
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
@ -5023,6 +5024,7 @@ int __init intel_iommu_init(void)
init_iommu_pm_ops();
down_read(&dmar_global_lock);
for_each_active_iommu(iommu, drhd) {
iommu_device_sysfs_add(&iommu->iommu, NULL,
intel_iommu_groups,
@ -5030,6 +5032,7 @@ int __init intel_iommu_init(void)
iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
iommu_device_register(&iommu->iommu);
}
up_read(&dmar_global_lock);
bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
if (si_domain && !hw_pass_through)
@ -5040,7 +5043,6 @@ int __init intel_iommu_init(void)
down_read(&dmar_global_lock);
if (probe_acpi_namespace_devices())
pr_warn("ACPI name space devices didn't probe correctly\n");
up_read(&dmar_global_lock);
/* Finally, we enable the DMA remapping hardware. */
for_each_iommu(iommu, drhd) {
@ -5049,6 +5051,8 @@ int __init intel_iommu_init(void)
iommu_disable_protect_mem_regions(iommu);
}
up_read(&dmar_global_lock);
pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
intel_iommu_enabled = 1;
@ -5523,8 +5527,10 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
u64 phys = 0;
pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
if (pte)
phys = dma_pte_addr(pte);
if (pte && dma_pte_present(pte))
phys = dma_pte_addr(pte) +
(iova & (BIT_MASK(level_to_offset_bits(level) +
VTD_PAGE_SHIFT) - 1));
return phys;
}
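
The fix above returns the page base plus the offset of the IOVA within that (possibly large) page, instead of the bare PTE address. A standalone version of the arithmetic, with an illustrative shift value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t pte_base = 0x40000000;    /* from dma_pte_addr(), demo value */
    uint64_t iova = 0x12345678;
    unsigned int shift = 21;           /* e.g. a 2 MiB page at this level */
    uint64_t phys;

    /* keep the low IOVA bits below the page size for this level */
    phys = pte_base + (iova & ((1ULL << shift) - 1));
    printf("phys = %#llx\n", (unsigned long long)phys);
    return 0;
}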


@ -312,9 +312,16 @@ static const struct i2c_device_id wf_ad7417_id[] = {
};
MODULE_DEVICE_TABLE(i2c, wf_ad7417_id);
static const struct of_device_id wf_ad7417_of_id[] = {
{ .compatible = "ad7417", },
{ }
};
MODULE_DEVICE_TABLE(of, wf_ad7417_of_id);
static struct i2c_driver wf_ad7417_driver = {
.driver = {
.name = "wf_ad7417",
.of_match_table = wf_ad7417_of_id,
},
.probe = wf_ad7417_probe,
.remove = wf_ad7417_remove,


@ -582,9 +582,16 @@ static const struct i2c_device_id wf_fcu_id[] = {
};
MODULE_DEVICE_TABLE(i2c, wf_fcu_id);
static const struct of_device_id wf_fcu_of_id[] = {
{ .compatible = "fcu", },
{ }
};
MODULE_DEVICE_TABLE(of, wf_fcu_of_id);
static struct i2c_driver wf_fcu_driver = {
.driver = {
.name = "wf_fcu",
.of_match_table = wf_fcu_of_id,
},
.probe = wf_fcu_probe,
.remove = wf_fcu_remove,


@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/i2c.h>
#include <linux/of_device.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/io.h>
@ -91,9 +92,14 @@ static int wf_lm75_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct wf_lm75_sensor *lm;
int rc, ds1775 = id->driver_data;
int rc, ds1775;
const char *name, *loc;
if (id)
ds1775 = id->driver_data;
else
ds1775 = !!of_device_get_match_data(&client->dev);
DBG("wf_lm75: creating %s device at address 0x%02x\n",
ds1775 ? "ds1775" : "lm75", client->addr);
@ -164,9 +170,17 @@ static const struct i2c_device_id wf_lm75_id[] = {
};
MODULE_DEVICE_TABLE(i2c, wf_lm75_id);
static const struct of_device_id wf_lm75_of_id[] = {
{ .compatible = "lm75", .data = (void *)0},
{ .compatible = "ds1775", .data = (void *)1 },
{ }
};
MODULE_DEVICE_TABLE(of, wf_lm75_of_id);
static struct i2c_driver wf_lm75_driver = {
.driver = {
.name = "wf_lm75",
.of_match_table = wf_lm75_of_id,
},
.probe = wf_lm75_probe,
.remove = wf_lm75_remove,


@ -166,9 +166,16 @@ static const struct i2c_device_id wf_lm87_id[] = {
};
MODULE_DEVICE_TABLE(i2c, wf_lm87_id);
static const struct of_device_id wf_lm87_of_id[] = {
{ .compatible = "lm87cimt", },
{ }
};
MODULE_DEVICE_TABLE(of, wf_lm87_of_id);
static struct i2c_driver wf_lm87_driver = {
.driver = {
.name = "wf_lm87",
.of_match_table = wf_lm87_of_id,
},
.probe = wf_lm87_probe,
.remove = wf_lm87_remove,


@ -120,9 +120,16 @@ static const struct i2c_device_id wf_max6690_id[] = {
};
MODULE_DEVICE_TABLE(i2c, wf_max6690_id);
static const struct of_device_id wf_max6690_of_id[] = {
{ .compatible = "max6690", },
{ }
};
MODULE_DEVICE_TABLE(of, wf_max6690_of_id);
static struct i2c_driver wf_max6690_driver = {
.driver = {
.name = "wf_max6690",
.of_match_table = wf_max6690_of_id,
},
.probe = wf_max6690_probe,
.remove = wf_max6690_remove,


@ -341,9 +341,16 @@ static const struct i2c_device_id wf_sat_id[] = {
};
MODULE_DEVICE_TABLE(i2c, wf_sat_id);
static const struct of_device_id wf_sat_of_id[] = {
{ .compatible = "smu-sat", },
{ }
};
MODULE_DEVICE_TABLE(of, wf_sat_of_id);
static struct i2c_driver wf_sat_driver = {
.driver = {
.name = "wf_smu_sat",
.of_match_table = wf_sat_of_id,
},
.probe = wf_sat_probe,
.remove = wf_sat_remove,


@ -262,10 +262,26 @@ static int gl9750_execute_tuning(struct sdhci_host *host, u32 opcode)
return 0;
}
static void gli_pcie_enable_msi(struct sdhci_pci_slot *slot)
{
int ret;
ret = pci_alloc_irq_vectors(slot->chip->pdev, 1, 1,
PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (ret < 0) {
pr_warn("%s: enable PCI MSI failed, error=%d\n",
mmc_hostname(slot->host->mmc), ret);
return;
}
slot->host->irq = pci_irq_vector(slot->chip->pdev, 0);
}
static int gli_probe_slot_gl9750(struct sdhci_pci_slot *slot)
{
struct sdhci_host *host = slot->host;
gli_pcie_enable_msi(slot);
slot->host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
sdhci_enable_v4_mode(host);
@ -276,6 +292,7 @@ static int gli_probe_slot_gl9755(struct sdhci_pci_slot *slot)
{
struct sdhci_host *host = slot->host;
gli_pcie_enable_msi(slot);
slot->host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
sdhci_enable_v4_mode(host);


@ -50,11 +50,6 @@ struct arp_pkt {
};
#pragma pack()
static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
{
return (struct arp_pkt *)skb_network_header(skb);
}
/* Forward declaration */
static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
bool strict_match);
@ -553,10 +548,11 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
spin_unlock(&bond->mode_lock);
}
static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bond)
static struct slave *rlb_choose_channel(struct sk_buff *skb,
struct bonding *bond,
const struct arp_pkt *arp)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct arp_pkt *arp = arp_pkt(skb);
struct slave *assigned_slave, *curr_active_slave;
struct rlb_client_info *client_info;
u32 hash_index = 0;
@ -653,8 +649,12 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
*/
static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
{
struct arp_pkt *arp = arp_pkt(skb);
struct slave *tx_slave = NULL;
struct arp_pkt *arp;
if (!pskb_network_may_pull(skb, sizeof(*arp)))
return NULL;
arp = (struct arp_pkt *)skb_network_header(skb);
/* Don't modify or load balance ARPs that do not originate locally
* (e.g., arrive via a bridge).
@ -664,7 +664,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
if (arp->op_code == htons(ARPOP_REPLY)) {
/* the arp must be sent on the selected rx channel */
tx_slave = rlb_choose_channel(skb, bond);
tx_slave = rlb_choose_channel(skb, bond, arp);
if (tx_slave)
bond_hw_addr_copy(arp->mac_src, tx_slave->dev->dev_addr,
tx_slave->dev->addr_len);
@ -676,7 +676,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
* When the arp reply is received the entry will be updated
* with the correct unicast address of the client.
*/
tx_slave = rlb_choose_channel(skb, bond);
tx_slave = rlb_choose_channel(skb, bond, arp);
/* The ARP reply packets must be delayed so that
* they can cancel out the influence of the ARP request.
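
The bonding fix above boils down to one rule: validate that a full header is present before casting the buffer, which is what pskb_network_may_pull() guarantees. A standalone sketch with simplified, invented types:

#include <stddef.h>
#include <stdio.h>

struct demo_arp {
    unsigned short op_code;
    unsigned char mac_src[6];
};

/* like pskb_network_may_pull(): refuse to hand out a header pointer
 * unless the whole struct fits in the bytes we actually hold */
static const struct demo_arp *pull_arp(const unsigned char *buf, size_t len)
{
    if (len < sizeof(struct demo_arp))
        return NULL;
    return (const struct demo_arp *)buf;
}

int main(void)
{
    unsigned char pkt[16] = { 0 };

    printf("short buffer: %p\n", (const void *)pull_arp(pkt, 4));
    printf("full header : %p\n", (const void *)pull_arp(pkt, sizeof(pkt)));
    return 0;
}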


@ -884,6 +884,7 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
= { .len = sizeof(struct can_bittiming) },
[IFLA_CAN_DATA_BITTIMING_CONST]
= { .len = sizeof(struct can_bittiming_const) },
[IFLA_CAN_TERMINATION] = { .type = NLA_U16 },
};
static int can_validate(struct nlattr *tb[], struct nlattr *data[],


@ -1083,6 +1083,13 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
{
int err, irq, virq;
chip->g2_irq.masked = ~0;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_g2_int_mask(chip, ~chip->g2_irq.masked);
mv88e6xxx_reg_unlock(chip);
if (err)
return err;
chip->g2_irq.domain = irq_domain_add_simple(
chip->dev->of_node, 16, 0, &mv88e6xxx_g2_irq_domain_ops, chip);
if (!chip->g2_irq.domain)
@ -1092,7 +1099,6 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
irq_create_mapping(chip->g2_irq.domain, irq);
chip->g2_irq.chip = mv88e6xxx_g2_irq_chip;
chip->g2_irq.masked = ~0;
chip->device_irq = irq_find_mapping(chip->g1_irq.domain,
MV88E6XXX_G1_STS_IRQ_DEVICE);


@ -2135,7 +2135,7 @@ static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
return -ENOSPC;
index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
if (index > RXCHK_BRCM_TAG_MAX)
if (index >= RXCHK_BRCM_TAG_MAX)
return -ENOSPC;
/* Location is the classification ID, and index is the position


@ -10891,13 +10891,13 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
struct bnxt *bp = netdev_priv(dev);
if (netif_running(dev))
bnxt_close_nic(bp, false, false);
bnxt_close_nic(bp, true, false);
dev->mtu = new_mtu;
bnxt_set_ring_params(bp);
if (netif_running(dev))
return bnxt_open_nic(bp, false, false);
return bnxt_open_nic(bp, true, false);
return 0;
}


@ -2005,8 +2005,8 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_nvm_install_update_input install = {0};
const struct firmware *fw;
int rc, hwrm_err = 0;
u32 item_len;
int rc = 0;
u16 index;
bnxt_hwrm_fw_set_time(bp);
@ -2050,15 +2050,14 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
memcpy(kmem, fw->data, fw->size);
modify.host_src_addr = cpu_to_le64(dma_handle);
hwrm_err = hwrm_send_message(bp, &modify,
sizeof(modify),
FLASH_PACKAGE_TIMEOUT);
rc = hwrm_send_message(bp, &modify, sizeof(modify),
FLASH_PACKAGE_TIMEOUT);
dma_free_coherent(&bp->pdev->dev, fw->size, kmem,
dma_handle);
}
}
release_firmware(fw);
if (rc || hwrm_err)
if (rc)
goto err_exit;
if ((install_type & 0xffff) == 0)
@ -2067,20 +2066,19 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
install.install_type = cpu_to_le32(install_type);
mutex_lock(&bp->hwrm_cmd_lock);
hwrm_err = _hwrm_send_message(bp, &install, sizeof(install),
INSTALL_PACKAGE_TIMEOUT);
if (hwrm_err) {
rc = _hwrm_send_message(bp, &install, sizeof(install),
INSTALL_PACKAGE_TIMEOUT);
if (rc) {
u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;
if (resp->error_code && error_code ==
NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
install.flags |= cpu_to_le16(
NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
hwrm_err = _hwrm_send_message(bp, &install,
sizeof(install),
INSTALL_PACKAGE_TIMEOUT);
rc = _hwrm_send_message(bp, &install, sizeof(install),
INSTALL_PACKAGE_TIMEOUT);
}
if (hwrm_err)
if (rc)
goto flash_pkg_exit;
}
@ -2092,7 +2090,7 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
flash_pkg_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
err_exit:
if (hwrm_err == -EACCES)
if (rc == -EACCES)
bnxt_print_admin_err(bp);
return rc;
}


@ -2682,15 +2682,15 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
return -EINVAL;
}
cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
if (cycle > 0xFFFF) {
dev_err(dev, "Rx coalesced usec exceed hardware limitation\n");
return -EINVAL;
}
cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
if (cycle > 0xFFFF) {
dev_err(dev, "Rx coalesced usec exceed hardware limitation\n");
dev_err(dev, "Tx coalesced usec exceed hardware limitation\n");
return -EINVAL;
}


@ -2417,10 +2417,12 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
struct hclge_mac *mac = &hdev->hw.mac;
int ret;
duplex = hclge_check_speed_dup(duplex, speed);
if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
if (!mac->support_autoneg && mac->speed == speed &&
mac->duplex == duplex)
return 0;
ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);


@ -519,6 +519,7 @@ efx_copy_channel(const struct efx_channel *old_channel)
if (tx_queue->channel)
tx_queue->channel = channel;
tx_queue->buffer = NULL;
tx_queue->cb_page = NULL;
memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
}


@ -24,6 +24,7 @@
static void dwmac1000_core_init(struct mac_device_info *hw,
struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_CONTROL);
int mtu = dev->mtu;
@ -35,7 +36,7 @@ static void dwmac1000_core_init(struct mac_device_info *hw,
* Broadcom tags can look like invalid LLC/SNAP packets and cause the
* hardware to truncate packets on reception.
*/
if (netdev_uses_dsa(dev))
if (netdev_uses_dsa(dev) || !priv->plat->enh_desc)
value &= ~GMAC_CONTROL_ACS;
if (mtu > 1500)


@ -293,6 +293,7 @@ void ipvlan_process_multicast(struct work_struct *work)
}
if (dev)
dev_put(dev);
cond_resched();
}
}
@ -498,19 +499,21 @@ static int ipvlan_process_outbound(struct sk_buff *skb)
struct ethhdr *ethh = eth_hdr(skb);
int ret = NET_XMIT_DROP;
/* In this mode we dont care about multicast and broadcast traffic */
if (is_multicast_ether_addr(ethh->h_dest)) {
pr_debug_ratelimited("Dropped {multi|broad}cast of type=[%x]\n",
ntohs(skb->protocol));
kfree_skb(skb);
goto out;
}
/* The ipvlan is a pseudo-L2 device, so the packets that we receive
* will have L2; which need to discarded and processed further
* in the net-ns of the main-device.
*/
if (skb_mac_header_was_set(skb)) {
/* In this mode we dont care about
* multicast and broadcast traffic */
if (is_multicast_ether_addr(ethh->h_dest)) {
pr_debug_ratelimited(
"Dropped {multi|broad}cast of type=[%x]\n",
ntohs(skb->protocol));
kfree_skb(skb);
goto out;
}
skb_pull(skb, sizeof(*ethh));
skb->mac_header = (typeof(skb->mac_header))~0U;
skb_reset_network_header(skb);


@ -164,7 +164,6 @@ static void ipvlan_uninit(struct net_device *dev)
static int ipvlan_open(struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct net_device *phy_dev = ipvlan->phy_dev;
struct ipvl_addr *addr;
if (ipvlan->port->mode == IPVLAN_MODE_L3 ||
@ -178,7 +177,7 @@ static int ipvlan_open(struct net_device *dev)
ipvlan_ht_addr_add(ipvlan, addr);
rcu_read_unlock();
return dev_uc_add(phy_dev, phy_dev->dev_addr);
return 0;
}
static int ipvlan_stop(struct net_device *dev)
@ -190,8 +189,6 @@ static int ipvlan_stop(struct net_device *dev)
dev_uc_unsync(phy_dev, dev);
dev_mc_unsync(phy_dev, dev);
dev_uc_del(phy_dev, phy_dev->dev_addr);
rcu_read_lock();
list_for_each_entry_rcu(addr, &ipvlan->addrs, anode)
ipvlan_ht_addr_del(addr);


@ -2882,6 +2882,11 @@ static void macsec_dev_set_rx_mode(struct net_device *dev)
dev_uc_sync(real_dev, dev);
}
static sci_t dev_to_sci(struct net_device *dev, __be16 port)
{
return make_sci(dev->dev_addr, port);
}
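
make_sci() packs the IEEE 802.1AE Secure Channel Identifier: the 6-byte MAC address followed by a 16-bit port. A standalone sketch of that layout (byte-order handling simplified for the demo; the kernel keeps everything big-endian):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t demo_make_sci(const uint8_t mac[6], uint16_t port)
{
    uint64_t sci = 0;

    memcpy(&sci, mac, 6);                    /* MAC fills bytes 0..5 */
    memcpy((uint8_t *)&sci + 6, &port, 2);   /* port fills bytes 6..7 */
    return sci;
}

int main(void)
{
    uint8_t mac[6] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };

    printf("sci = %#llx\n", (unsigned long long)demo_make_sci(mac, 1));
    return 0;
}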
static int macsec_set_mac_address(struct net_device *dev, void *p)
{
struct macsec_dev *macsec = macsec_priv(dev);
@ -2903,6 +2908,7 @@ static int macsec_set_mac_address(struct net_device *dev, void *p)
out:
ether_addr_copy(dev->dev_addr, addr->sa_data);
macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);
return 0;
}
@ -2977,6 +2983,7 @@ static const struct device_type macsec_type = {
static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
[IFLA_MACSEC_PORT] = { .type = NLA_U16 },
[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
@ -3176,11 +3183,6 @@ static bool sci_exists(struct net_device *dev, sci_t sci)
return false;
}
static sci_t dev_to_sci(struct net_device *dev, __be16 port)
{
return make_sci(dev->dev_addr, port);
}
static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
struct macsec_dev *macsec = macsec_priv(dev);


@ -334,6 +334,8 @@ static void macvlan_process_broadcast(struct work_struct *w)
if (src)
dev_put(src->dev);
consume_skb(skb);
cond_resched();
}
}


@ -73,6 +73,7 @@ static struct phy_driver bcm63xx_driver[] = {
/* same phy as above, with just a different OUI */
.phy_id = 0x002bdc00,
.phy_id_mask = 0xfffffc00,
.name = "Broadcom BCM63XX (2)",
/* PHY_BASIC_FEATURES */
.flags = PHY_IS_INTERNAL,
.config_init = bcm63xx_config_init,


@ -761,7 +761,8 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
phy_trigger_machine(phydev);
}
if (phy_clear_interrupt(phydev))
/* did_interrupt() may have cleared the interrupt already */
if (!phydev->drv->did_interrupt && phy_clear_interrupt(phydev))
goto phy_err;
return IRQ_HANDLED;


@ -240,23 +240,13 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
if (!drv || !phydrv->suspend)
return false;
/* netdev is NULL has three cases:
* - phy is not found
* - phy is found, match to general phy driver
* - phy is found, match to specifical phy driver
*
* Case 1: phy is not found, cannot communicate by MDIO bus.
* Case 2: phy is found:
* if phy dev driver probe/bind err, netdev is not __open__
* status, mdio bus is unregistered.
* if phy is detached, phy had entered suspended status.
* Case 3: phy is found, phy is detached, phy had entered suspended
* status.
*
* So, in here, it shouldn't set phy to suspend by calling mdio bus.
*/
/* PHY not attached? May suspend if the PHY has not already been
* suspended as part of a prior call to phy_disconnect() ->
* phy_detach() -> phy_suspend() because the parent netdev might be the
* MDIO bus driver and clock gated at this point.
*/
if (!netdev)
return false;
goto out;
if (netdev->wol_enabled)
return false;
@ -276,7 +266,8 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
if (device_may_wakeup(&netdev->dev))
return false;
return true;
out:
return !phydev->suspended;
}
static int mdio_bus_phy_suspend(struct device *dev)
@ -294,6 +285,8 @@ static int mdio_bus_phy_suspend(struct device *dev)
if (!mdio_bus_phy_may_suspend(phydev))
return 0;
phydev->suspended_by_mdio_bus = 1;
return phy_suspend(phydev);
}
@ -302,9 +295,11 @@ static int mdio_bus_phy_resume(struct device *dev)
struct phy_device *phydev = to_phy_device(dev);
int ret;
if (!mdio_bus_phy_may_suspend(phydev))
if (!phydev->suspended_by_mdio_bus)
goto no_resume;
phydev->suspended_by_mdio_bus = 0;
ret = phy_resume(phydev);
if (ret < 0)
return ret;


@ -232,7 +232,7 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
struct cstate *cs = lcs->next;
unsigned long deltaS, deltaA;
short changes = 0;
int hlen;
int nlen, hlen;
unsigned char new_seq[16];
unsigned char *cp = new_seq;
struct iphdr *ip;
@ -248,6 +248,8 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
return isize;
ip = (struct iphdr *) icp;
if (ip->version != 4 || ip->ihl < 5)
return isize;
/* Bail if this packet isn't TCP, or is an IP fragment */
if (ip->protocol != IPPROTO_TCP || (ntohs(ip->frag_off) & 0x3fff)) {
@ -258,10 +260,14 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
comp->sls_o_tcp++;
return isize;
}
/* Extract TCP header */
nlen = ip->ihl * 4;
if (isize < nlen + sizeof(*th))
return isize;
th = (struct tcphdr *)(((unsigned char *)ip) + ip->ihl*4);
hlen = ip->ihl*4 + th->doff*4;
th = (struct tcphdr *)(icp + nlen);
if (th->doff < sizeof(struct tcphdr) / 4)
return isize;
hlen = nlen + th->doff * 4;
/* Bail if the TCP packet isn't `compressible' (i.e., ACK isn't set or
* some other control bit is set). Also uncompressible if


@ -2240,6 +2240,8 @@ team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
[TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG },
[TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8 },
[TEAM_ATTR_OPTION_DATA] = { .type = NLA_BINARY },
[TEAM_ATTR_OPTION_PORT_IFINDEX] = { .type = NLA_U32 },
[TEAM_ATTR_OPTION_ARRAY_INDEX] = { .type = NLA_U32 },
};
static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)


@ -3006,6 +3006,8 @@ static u16 r8153_phy_status(struct r8152 *tp, u16 desired)
}
msleep(20);
if (test_bit(RTL8152_UNPLUG, &tp->flags))
break;
}
return data;
@ -4419,7 +4421,10 @@ static void r8153_init(struct r8152 *tp)
if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
AUTOLOAD_DONE)
break;
msleep(20);
if (test_bit(RTL8152_UNPLUG, &tp->flags))
break;
}
data = r8153_phy_status(tp, 0);
@ -4545,7 +4550,10 @@ static void r8153b_init(struct r8152 *tp)
if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
AUTOLOAD_DONE)
break;
msleep(20);
if (test_bit(RTL8152_UNPLUG, &tp->flags))
break;
}
data = r8153_phy_status(tp, 0);


@ -309,7 +309,8 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
}
/* PHY_SKU section is mandatory in B0 */
if (!mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) {
if (mvm->trans->cfg->nvm_type == IWL_NVM_EXT &&
!mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) {
IWL_ERR(mvm,
"Can't parse phy_sku in B0, empty sections\n");
return NULL;


@ -448,10 +448,13 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
struct page *page = virt_to_head_page(data);
int offset = data - page_address(page);
struct sk_buff *skb = q->rx_head;
struct skb_shared_info *shinfo = skb_shinfo(skb);
offset += q->buf_offset;
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
q->buf_size);
if (shinfo->nr_frags < ARRAY_SIZE(shinfo->frags)) {
offset += q->buf_offset;
skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len,
q->buf_size);
}
if (more)
return;


@ -2025,7 +2025,6 @@ static int pinctrl_claim_hogs(struct pinctrl_dev *pctldev)
return PTR_ERR(pctldev->p);
}
kref_get(&pctldev->p->users);
pctldev->hog_default =
pinctrl_lookup_state(pctldev->p, PINCTRL_STATE_DEFAULT);
if (IS_ERR(pctldev->hog_default)) {


@ -147,8 +147,8 @@ static const unsigned int sdio_d0_pins[] = { GPIOX_0 };
static const unsigned int sdio_d1_pins[] = { GPIOX_1 };
static const unsigned int sdio_d2_pins[] = { GPIOX_2 };
static const unsigned int sdio_d3_pins[] = { GPIOX_3 };
static const unsigned int sdio_cmd_pins[] = { GPIOX_4 };
static const unsigned int sdio_clk_pins[] = { GPIOX_5 };
static const unsigned int sdio_clk_pins[] = { GPIOX_4 };
static const unsigned int sdio_cmd_pins[] = { GPIOX_5 };
static const unsigned int sdio_irq_pins[] = { GPIOX_7 };
static const unsigned int nand_ce0_pins[] = { BOOT_8 };


@ -451,7 +451,7 @@ static int pinctrl_falcon_probe(struct platform_device *pdev)
falcon_info.clk[*bank] = clk_get(&ppdev->dev, NULL);
if (IS_ERR(falcon_info.clk[*bank])) {
dev_err(&ppdev->dev, "failed to get clock\n");
of_node_put(np)
of_node_put(np);
return PTR_ERR(falcon_info.clk[*bank]);
}
falcon_info.membase[*bank] = devm_ioremap_resource(&pdev->dev,


@ -178,6 +178,8 @@ struct dasd_block *dasd_alloc_block(void)
(unsigned long) block);
INIT_LIST_HEAD(&block->ccw_queue);
spin_lock_init(&block->queue_lock);
INIT_LIST_HEAD(&block->format_list);
spin_lock_init(&block->format_lock);
timer_setup(&block->timer, dasd_block_timeout, 0);
spin_lock_init(&block->profile.lock);
@ -1779,20 +1781,26 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
if (dasd_ese_needs_format(cqr->block, irb)) {
if (rq_data_dir((struct request *)cqr->callback_data) == READ) {
device->discipline->ese_read(cqr);
device->discipline->ese_read(cqr, irb);
cqr->status = DASD_CQR_SUCCESS;
cqr->stopclk = now;
dasd_device_clear_timer(device);
dasd_schedule_device_bh(device);
return;
}
fcqr = device->discipline->ese_format(device, cqr);
fcqr = device->discipline->ese_format(device, cqr, irb);
if (IS_ERR(fcqr)) {
if (PTR_ERR(fcqr) == -EINVAL) {
cqr->status = DASD_CQR_ERROR;
return;
}
/*
* If we can't format now, let the request go
* one extra round. Maybe we can format later.
*/
cqr->status = DASD_CQR_QUEUED;
dasd_schedule_device_bh(device);
return;
} else {
fcqr->status = DASD_CQR_QUEUED;
cqr->status = DASD_CQR_QUEUED;
@ -2748,11 +2756,13 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
{
struct request *req;
blk_status_t error = BLK_STS_OK;
unsigned int proc_bytes;
int status;
req = (struct request *) cqr->callback_data;
dasd_profile_end(cqr->block, cqr, req);
proc_bytes = cqr->proc_bytes;
status = cqr->block->base->discipline->free_cp(cqr, req);
if (status < 0)
error = errno_to_blk_status(status);
@ -2783,7 +2793,18 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
blk_mq_end_request(req, error);
blk_mq_run_hw_queues(req->q, true);
} else {
blk_mq_complete_request(req);
/*
* Partially completed requests can happen with ESE devices.
* During read we might have gotten a NRF error and have to
* complete a request partially.
*/
if (proc_bytes) {
blk_update_request(req, BLK_STS_OK,
blk_rq_bytes(req) - proc_bytes);
blk_mq_requeue_request(req, true);
} else {
blk_mq_complete_request(req);
}
}
}


@ -207,6 +207,45 @@ static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
geo->head |= head;
}
/*
* calculate failing track from sense data depending if
* it is an EAV device or not
*/
static int dasd_eckd_track_from_irb(struct irb *irb, struct dasd_device *device,
sector_t *track)
{
struct dasd_eckd_private *private = device->private;
u8 *sense = NULL;
u32 cyl;
u8 head;
sense = dasd_get_sense(irb);
if (!sense) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"ESE error no sense data\n");
return -EINVAL;
}
if (!(sense[27] & DASD_SENSE_BIT_2)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"ESE error no valid track data\n");
return -EINVAL;
}
if (sense[27] & DASD_SENSE_BIT_3) {
/* enhanced addressing */
cyl = sense[30] << 20;
cyl |= (sense[31] & 0xF0) << 12;
cyl |= sense[28] << 8;
cyl |= sense[29];
} else {
cyl = sense[29] << 8;
cyl |= sense[30];
}
head = sense[31] & 0x0F;
*track = cyl * private->rdc_data.trk_per_cyl + head;
return 0;
}
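
A standalone decode of the sense-byte layout used above, with invented sample bytes: in the EAV (extended addressing) case the cylinder is assembled from sense[30], the high nibble of sense[31], sense[28] and sense[29]; the head is the low nibble of sense[31].

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t sense[32] = { 0 };
    uint32_t cyl;
    uint8_t head;

    sense[28] = 0x12; sense[29] = 0x34; sense[30] = 0x01; sense[31] = 0x25;

    /* EAV case, mirroring the kernel's bit assembly */
    cyl  = (uint32_t)sense[30] << 20;
    cyl |= (uint32_t)(sense[31] & 0xF0) << 12;
    cyl |= (uint32_t)sense[28] << 8;
    cyl |= sense[29];
    head = sense[31] & 0x0F;

    /* the failing track is then cyl * trk_per_cyl + head */
    printf("cyl=%u head=%u\n", cyl, head);
    return 0;
}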
static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
struct dasd_device *device)
{
@ -2986,6 +3025,37 @@ static int dasd_eckd_format_device(struct dasd_device *base,
0, NULL);
}
static bool test_and_set_format_track(struct dasd_format_entry *to_format,
struct dasd_block *block)
{
struct dasd_format_entry *format;
unsigned long flags;
bool rc = false;
spin_lock_irqsave(&block->format_lock, flags);
list_for_each_entry(format, &block->format_list, list) {
if (format->track == to_format->track) {
rc = true;
goto out;
}
}
list_add_tail(&to_format->list, &block->format_list);
out:
spin_unlock_irqrestore(&block->format_lock, flags);
return rc;
}
static void clear_format_track(struct dasd_format_entry *format,
struct dasd_block *block)
{
unsigned long flags;
spin_lock_irqsave(&block->format_lock, flags);
list_del_init(&format->list);
spin_unlock_irqrestore(&block->format_lock, flags);
}
/*
* Callback function to free ESE format requests.
*/
@ -2993,15 +3063,19 @@ static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data)
{
struct dasd_device *device = cqr->startdev;
struct dasd_eckd_private *private = device->private;
struct dasd_format_entry *format = data;
clear_format_track(format, cqr->basedev->block);
private->count--;
dasd_ffree_request(cqr, device);
}
static struct dasd_ccw_req *
dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr)
dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
struct irb *irb)
{
struct dasd_eckd_private *private;
struct dasd_format_entry *format;
struct format_data_t fdata;
unsigned int recs_per_trk;
struct dasd_ccw_req *fcqr;
@ -3011,23 +3085,39 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr)
struct request *req;
sector_t first_trk;
sector_t last_trk;
sector_t curr_trk;
int rc;
req = cqr->callback_data;
base = cqr->block->base;
block = cqr->block;
base = block->base;
private = base->private;
block = base->block;
blksize = block->bp_block;
recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
format = &startdev->format_entry;
first_trk = blk_rq_pos(req) >> block->s2b_shift;
sector_div(first_trk, recs_per_trk);
last_trk =
(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
sector_div(last_trk, recs_per_trk);
rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
if (rc)
return ERR_PTR(rc);
fdata.start_unit = first_trk;
fdata.stop_unit = last_trk;
if (curr_trk < first_trk || curr_trk > last_trk) {
DBF_DEV_EVENT(DBF_WARNING, startdev,
"ESE error track %llu not within range %llu - %llu\n",
curr_trk, first_trk, last_trk);
return ERR_PTR(-EINVAL);
}
format->track = curr_trk;
/* test if the track is already being formatted by another thread */
if (test_and_set_format_track(format, block))
return ERR_PTR(-EEXIST);
fdata.start_unit = curr_trk;
fdata.stop_unit = curr_trk;
fdata.blksize = blksize;
fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0;
@ -3044,6 +3134,7 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr)
return fcqr;
fcqr->callback = dasd_eckd_ese_format_cb;
fcqr->callback_data = (void *) format;
return fcqr;
}
@ -3051,29 +3142,87 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr)
/*
* When data is read from an unformatted area of an ESE volume, this function
* returns zeroed data and thereby mimics a read of zero data.
*
* The first unformatted track is the one that got the NRF error, the address is
* encoded in the sense data.
*
* All tracks before have returned valid data and should not be touched.
* All tracks after the unformatted track might be formatted or not. This is
* currently not known, remember the processed data and return the remainder of
* the request to the blocklayer in __dasd_cleanup_cqr().
*/
static void dasd_eckd_ese_read(struct dasd_ccw_req *cqr)
static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb)
{
struct dasd_eckd_private *private;
sector_t first_trk, last_trk;
sector_t first_blk, last_blk;
unsigned int blksize, off;
unsigned int recs_per_trk;
struct dasd_device *base;
struct req_iterator iter;
struct dasd_block *block;
unsigned int skip_block;
unsigned int blk_count;
struct request *req;
struct bio_vec bv;
sector_t curr_trk;
sector_t end_blk;
char *dst;
int rc;
req = (struct request *) cqr->callback_data;
base = cqr->block->base;
blksize = base->block->bp_block;
block = cqr->block;
private = base->private;
skip_block = 0;
blk_count = 0;
recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift;
sector_div(first_trk, recs_per_trk);
last_trk = last_blk =
(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
sector_div(last_trk, recs_per_trk);
rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
if (rc)
return rc;
/* sanity check if the current track from sense data is valid */
if (curr_trk < first_trk || curr_trk > last_trk) {
DBF_DEV_EVENT(DBF_WARNING, base,
"ESE error track %llu not within range %llu - %llu\n",
curr_trk, first_trk, last_trk);
return -EINVAL;
}
/*
* if it was not the first track that got the NRF error, we have to
* skip over valid blocks
*/
if (curr_trk != first_trk)
skip_block = curr_trk * recs_per_trk - first_blk;
/* we have no information beyond the current track */
end_blk = (curr_trk + 1) * recs_per_trk;
rq_for_each_segment(bv, req, iter) {
dst = page_address(bv.bv_page) + bv.bv_offset;
for (off = 0; off < bv.bv_len; off += blksize) {
if (dst && rq_data_dir(req) == READ) {
if (first_blk + blk_count >= end_blk) {
cqr->proc_bytes = blk_count * blksize;
return 0;
}
if (dst && !skip_block) {
dst += off;
memset(dst, 0, blksize);
} else {
skip_block--;
}
blk_count++;
}
}
return 0;
}
/*


@ -187,6 +187,7 @@ struct dasd_ccw_req {
void (*callback)(struct dasd_ccw_req *, void *data);
void *callback_data;
unsigned int proc_bytes; /* bytes for partial completion */
};
/*
@ -387,8 +388,9 @@ struct dasd_discipline {
int (*ext_pool_warn_thrshld)(struct dasd_device *);
int (*ext_pool_oos)(struct dasd_device *);
int (*ext_pool_exhaust)(struct dasd_device *, struct dasd_ccw_req *);
struct dasd_ccw_req *(*ese_format)(struct dasd_device *, struct dasd_ccw_req *);
void (*ese_read)(struct dasd_ccw_req *);
struct dasd_ccw_req *(*ese_format)(struct dasd_device *,
struct dasd_ccw_req *, struct irb *);
int (*ese_read)(struct dasd_ccw_req *, struct irb *);
};
extern struct dasd_discipline *dasd_diag_discipline_pointer;
@ -474,6 +476,11 @@ struct dasd_profile {
spinlock_t lock;
};
struct dasd_format_entry {
struct list_head list;
sector_t track;
};
struct dasd_device {
/* Block device stuff. */
struct dasd_block *block;
@ -539,6 +546,7 @@ struct dasd_device {
struct dentry *debugfs_dentry;
struct dentry *hosts_dentry;
struct dasd_profile profile;
struct dasd_format_entry format_entry;
};
struct dasd_block {
@ -564,6 +572,9 @@ struct dasd_block {
struct dentry *debugfs_dentry;
struct dasd_profile profile;
struct list_head format_list;
spinlock_t format_lock;
};
struct dasd_attention_data {


@ -958,8 +958,8 @@ out_iput:
iput(vb->vb_dev_info.inode);
out_kern_unmount:
kern_unmount(balloon_mnt);
#endif
out_del_vqs:
#endif
vdev->config->del_vqs(vdev);
out_free_vb:
kfree(vb);


@ -2203,10 +2203,10 @@ void vring_del_virtqueue(struct virtqueue *_vq)
vq->split.queue_size_in_bytes,
vq->split.vring.desc,
vq->split.queue_dma_addr);
kfree(vq->split.desc_state);
}
}
if (!vq->packed_ring)
kfree(vq->split.desc_state);
list_del(&_vq->list);
kfree(vq);
}


@ -560,7 +560,6 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
if (server->ops->close)
server->ops->close(xid, tcon, &fid);
cifs_del_pending_open(&open);
fput(file);
rc = -ENOMEM;
}


@ -578,6 +578,15 @@ int fscrypt_drop_inode(struct inode *inode)
return 0;
mk = ci->ci_master_key->payload.data[0];
/*
* With proper, non-racy use of FS_IOC_REMOVE_ENCRYPTION_KEY, all inodes
* protected by the key were cleaned by sync_filesystem(). But if
* userspace is still using the files, inodes can be dirtied between
* then and now. We mustn't lose any writes, so skip dirty inodes here.
*/
if (inode->i_state & I_DIRTY_ALL)
return 0;
/*
* Note: since we aren't holding ->mk_secret_sem, the result here can
* immediately become outdated. But there's no correctness problem with


@ -276,12 +276,10 @@ static void flush_bg_queue(struct fuse_conn *fc)
void fuse_request_end(struct fuse_conn *fc, struct fuse_req *req)
{
struct fuse_iqueue *fiq = &fc->iq;
bool async;
if (test_and_set_bit(FR_FINISHED, &req->flags))
goto put_request;
async = req->args->end;
/*
* test_and_set_bit() implies smp_mb() between bit
* changing and below intr_entry check. Pairs with
@ -324,7 +322,7 @@ void fuse_request_end(struct fuse_conn *fc, struct fuse_req *req)
wake_up(&req->waitq);
}
if (async)
if (test_bit(FR_ASYNC, &req->flags))
req->args->end(fc, req->args, req->out.h.error);
put_request:
fuse_put_request(fc, req);
@ -471,6 +469,8 @@ static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args)
req->in.h.opcode = args->opcode;
req->in.h.nodeid = args->nodeid;
req->args = args;
if (args->end)
__set_bit(FR_ASYNC, &req->flags);
}
ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)


@ -301,6 +301,7 @@ struct fuse_io_priv {
* FR_SENT: request is in userspace, waiting for an answer
* FR_FINISHED: request is finished
* FR_PRIVATE: request is on private list
* FR_ASYNC: request is asynchronous
*/
enum fuse_req_flag {
FR_ISREPLY,
@ -314,6 +315,7 @@ enum fuse_req_flag {
FR_SENT,
FR_FINISHED,
FR_PRIVATE,
FR_ASYNC,
};
/**


@ -1248,7 +1248,7 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
if (!(file->f_mode & FMODE_OPENED))
return finish_no_open(file, d);
dput(d);
return 0;
return excl && (flags & O_CREAT) ? -EEXIST : 0;
}
BUG_ON(d != NULL);


@ -860,9 +860,6 @@ cleanup_file:
* the return value of d_splice_alias(), then the caller needs to perform dput()
* on it after finish_open().
*
* On successful return @file is a fully instantiated open file. After this, if
* an error occurs in ->atomic_open(), it needs to clean up with fput().
*
* Returns zero on success or -errno if the open failed.
*/
int finish_open(struct file *file, struct dentry *dentry,


@ -62,6 +62,7 @@ struct css_task_iter {
struct list_head *mg_tasks_head;
struct list_head *dying_tasks_head;
struct list_head *cur_tasks_head;
struct css_set *cur_cset;
struct css_set *cur_dcset;
struct task_struct *cur_task;


@ -69,8 +69,9 @@ struct dmar_pci_notify_info {
extern struct rw_semaphore dmar_global_lock;
extern struct list_head dmar_drhd_units;
#define for_each_drhd_unit(drhd) \
list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)
#define for_each_drhd_unit(drhd) \
list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \
dmar_rcu_check())
#define for_each_active_drhd_unit(drhd) \
list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \
@ -81,7 +82,8 @@ extern struct list_head dmar_drhd_units;
if (i=drhd->iommu, drhd->ignored) {} else
#define for_each_iommu(i, drhd) \
list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \
list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \
dmar_rcu_check()) \
if (i=drhd->iommu, 0) {} else
static inline bool dmar_rcu_check(void)

View File

@ -2,15 +2,10 @@
#ifndef _INET_DIAG_H_
#define _INET_DIAG_H_ 1
#include <net/netlink.h>
#include <uapi/linux/inet_diag.h>
struct net;
struct sock;
struct inet_hashinfo;
struct nlattr;
struct nlmsghdr;
struct sk_buff;
struct netlink_callback;
struct inet_diag_handler {
void (*dump)(struct sk_buff *skb,
@ -62,6 +57,17 @@ int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);
static inline size_t inet_diag_msg_attrs_size(void)
{
return nla_total_size(1) /* INET_DIAG_SHUTDOWN */
+ nla_total_size(1) /* INET_DIAG_TOS */
#if IS_ENABLED(CONFIG_IPV6)
+ nla_total_size(1) /* INET_DIAG_TCLASS */
+ nla_total_size(1) /* INET_DIAG_SKV6ONLY */
#endif
+ nla_total_size(4) /* INET_DIAG_MARK */
+ nla_total_size(4); /* INET_DIAG_CLASS_ID */
}
int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
struct inet_diag_msg *r, int ext,
struct user_namespace *user_ns, bool net_admin);
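
inet_diag_msg_attrs_size() centralizes the space reservation for the common attributes: each term is nla_total_size(payload), i.e. the payload plus the 4-byte netlink attribute header, rounded up to 4-byte alignment. A self-contained sketch of that arithmetic (the macros mirror the standard netlink definitions; the attribute list is abridged and the IPv6-only terms are omitted):

#include <stdio.h>

#define NLA_ALIGNTO   4
#define NLA_ALIGN(n)  (((n) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN    ((int)NLA_ALIGN(4))

static int nla_total_size(int payload)
{
    return NLA_ALIGN(NLA_HDRLEN + payload);
}

int main(void)
{
    int size = nla_total_size(1)      /* INET_DIAG_SHUTDOWN (u8)  -> 8 */
             + nla_total_size(1)      /* INET_DIAG_TOS      (u8)  -> 8 */
             + nla_total_size(4)      /* INET_DIAG_MARK     (u32) -> 8 */
             + nla_total_size(4);     /* INET_DIAG_CLASS_ID (u32) -> 8 */
    printf("reserved bytes: %d\n", size);   /* prints 32 */
    return 0;
}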

View File

@ -339,6 +339,7 @@ struct phy_c45_device_ids {
* is_gigabit_capable: Set to true if PHY supports 1000Mbps
* has_fixups: Set to true if this phy has fixups/quirks.
* suspended: Set to true if this phy has been suspended successfully.
* suspended_by_mdio_bus: Set to true if this phy was suspended by MDIO bus.
* sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal.
* loopback_enabled: Set true if this phy has been loopbacked successfully.
* state: state of the PHY for management purposes
@ -375,6 +376,7 @@ struct phy_device {
unsigned is_gigabit_capable:1;
unsigned has_fixups:1;
unsigned suspended:1;
unsigned suspended_by_mdio_bus:1;
unsigned sysfs_links:1;
unsigned loopback_enabled:1;
@ -527,6 +529,7 @@ struct phy_driver {
/*
* Checks if the PHY generated an interrupt.
* For multi-PHY devices with a shared PHY interrupt pin,
* set interrupt bits have to be cleared.
*/
int (*did_interrupt)(struct phy_device *phydev);

View File

@ -24,7 +24,7 @@ struct platform_device {
int id;
bool id_auto;
struct device dev;
u64 dma_mask;
u64 platform_dma_mask;
u32 num_resources;
struct resource *resource;

View File

@ -108,6 +108,7 @@ struct fib_rule_notifier_info {
[FRA_OIFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, \
[FRA_PRIORITY] = { .type = NLA_U32 }, \
[FRA_FWMARK] = { .type = NLA_U32 }, \
[FRA_TUN_ID] = { .type = NLA_U64 }, \
[FRA_FWMASK] = { .type = NLA_U32 }, \
[FRA_TABLE] = { .type = NLA_U32 }, \
[FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 }, \

View File

@ -4461,12 +4461,16 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it)
}
} while (!css_set_populated(cset) && list_empty(&cset->dying_tasks));
if (!list_empty(&cset->tasks))
if (!list_empty(&cset->tasks)) {
it->task_pos = cset->tasks.next;
else if (!list_empty(&cset->mg_tasks))
it->cur_tasks_head = &cset->tasks;
} else if (!list_empty(&cset->mg_tasks)) {
it->task_pos = cset->mg_tasks.next;
else
it->cur_tasks_head = &cset->mg_tasks;
} else {
it->task_pos = cset->dying_tasks.next;
it->cur_tasks_head = &cset->dying_tasks;
}
it->tasks_head = &cset->tasks;
it->mg_tasks_head = &cset->mg_tasks;
@ -4524,10 +4528,14 @@ repeat:
else
it->task_pos = it->task_pos->next;
if (it->task_pos == it->tasks_head)
if (it->task_pos == it->tasks_head) {
it->task_pos = it->mg_tasks_head->next;
if (it->task_pos == it->mg_tasks_head)
it->cur_tasks_head = it->mg_tasks_head;
}
if (it->task_pos == it->mg_tasks_head) {
it->task_pos = it->dying_tasks_head->next;
it->cur_tasks_head = it->dying_tasks_head;
}
if (it->task_pos == it->dying_tasks_head)
css_task_iter_advance_css_set(it);
} else {
@ -4546,11 +4554,12 @@ repeat:
goto repeat;
/* and dying leaders w/o live member threads */
if (!atomic_read(&task->signal->live))
if (it->cur_tasks_head == it->dying_tasks_head &&
!atomic_read(&task->signal->live))
goto repeat;
} else {
/* skip all dying ones */
if (task->flags & PF_EXITING)
if (it->cur_tasks_head == it->dying_tasks_head)
goto repeat;
}
}
@ -4659,6 +4668,9 @@ static void *cgroup_procs_next(struct seq_file *s, void *v, loff_t *pos)
struct kernfs_open_file *of = s->private;
struct css_task_iter *it = of->priv;
if (pos)
(*pos)++;
return css_task_iter_next(it);
}
@ -4674,7 +4686,7 @@ static void *__cgroup_procs_start(struct seq_file *s, loff_t *pos,
* from position 0, so we can simply keep iterating on !0 *pos.
*/
if (!it) {
if (WARN_ON_ONCE((*pos)++))
if (WARN_ON_ONCE((*pos)))
return ERR_PTR(-EINVAL);
it = kzalloc(sizeof(*it), GFP_KERNEL);
@ -4682,10 +4694,11 @@ static void *__cgroup_procs_start(struct seq_file *s, loff_t *pos,
return ERR_PTR(-ENOMEM);
of->priv = it;
css_task_iter_start(&cgrp->self, iter_flags, it);
} else if (!(*pos)++) {
} else if (!(*pos)) {
css_task_iter_end(it);
css_task_iter_start(&cgrp->self, iter_flags, it);
}
} else
return it->cur_task;
return cgroup_procs_next(s, NULL, NULL);
}
@ -6381,6 +6394,10 @@ void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
return;
}
/* Don't associate the sock with unrelated interrupted task's cgroup. */
if (in_interrupt())
return;
rcu_read_lock();
while (true) {

View File

@ -1417,14 +1417,16 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
return;
rcu_read_lock();
retry:
if (req_cpu == WORK_CPU_UNBOUND)
cpu = wq_select_unbound_cpu(raw_smp_processor_id());
/* pwq which will be used unless @work is executing elsewhere */
if (!(wq->flags & WQ_UNBOUND))
pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
else
if (wq->flags & WQ_UNBOUND) {
if (req_cpu == WORK_CPU_UNBOUND)
cpu = wq_select_unbound_cpu(raw_smp_processor_id());
pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
} else {
if (req_cpu == WORK_CPU_UNBOUND)
cpu = raw_smp_processor_id();
pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
}
/*
* If @work was previously on a different pool, it might still be

View File

@ -6792,19 +6792,9 @@ void mem_cgroup_sk_alloc(struct sock *sk)
if (!mem_cgroup_sockets_enabled)
return;
/*
* Socket cloning can throw us here with sk_memcg already
* filled. It won't however, necessarily happen from
* process context. So the test for root memcg given
* the current task's memcg won't help us in this case.
*
* Respecting the original socket's memcg is a better
* decision in this case.
*/
if (sk->sk_memcg) {
css_get(&sk->sk_memcg->css);
/* Do not associate the sock with unrelated interrupted task's memcg. */
if (in_interrupt())
return;
}
rcu_read_lock();
memcg = mem_cgroup_from_task(current);

View File

@ -789,6 +789,10 @@ static void batadv_iv_ogm_schedule_buff(struct batadv_hard_iface *hard_iface)
lockdep_assert_held(&hard_iface->bat_iv.ogm_buff_mutex);
/* interface already disabled by batadv_iv_ogm_iface_disable */
if (!*ogm_buff)
return;
/* the interface gets activated here to avoid race conditions between
* the moment of activating the interface in
* hardif_activate_interface() where the originator mac is set and

View File

@ -3222,34 +3222,41 @@ devlink_param_value_get_from_info(const struct devlink_param *param,
struct genl_info *info,
union devlink_param_value *value)
{
struct nlattr *param_data;
int len;
if (param->type != DEVLINK_PARAM_TYPE_BOOL &&
!info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA])
param_data = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA];
if (param->type != DEVLINK_PARAM_TYPE_BOOL && !param_data)
return -EINVAL;
switch (param->type) {
case DEVLINK_PARAM_TYPE_U8:
value->vu8 = nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
if (nla_len(param_data) != sizeof(u8))
return -EINVAL;
value->vu8 = nla_get_u8(param_data);
break;
case DEVLINK_PARAM_TYPE_U16:
value->vu16 = nla_get_u16(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
if (nla_len(param_data) != sizeof(u16))
return -EINVAL;
value->vu16 = nla_get_u16(param_data);
break;
case DEVLINK_PARAM_TYPE_U32:
value->vu32 = nla_get_u32(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
if (nla_len(param_data) != sizeof(u32))
return -EINVAL;
value->vu32 = nla_get_u32(param_data);
break;
case DEVLINK_PARAM_TYPE_STRING:
len = strnlen(nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]),
nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]));
if (len == nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) ||
len = strnlen(nla_data(param_data), nla_len(param_data));
if (len == nla_len(param_data) ||
len >= __DEVLINK_PARAM_MAX_STRING_VALUE)
return -EINVAL;
strcpy(value->vstr,
nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]));
strcpy(value->vstr, nla_data(param_data));
break;
case DEVLINK_PARAM_TYPE_BOOL:
value->vbool = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA] ?
true : false;
if (param_data && nla_len(param_data))
return -EINVAL;
value->vbool = nla_get_flag(param_data);
break;
}
return 0;
@ -5797,6 +5804,8 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_PARAM_VALUE_CMODE] = { .type = NLA_U8 },
[DEVLINK_ATTR_REGION_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_REGION_SNAPSHOT_ID] = { .type = NLA_U32 },
[DEVLINK_ATTR_REGION_CHUNK_ADDR] = { .type = NLA_U64 },
[DEVLINK_ATTR_REGION_CHUNK_LEN] = { .type = NLA_U64 },
[DEVLINK_ATTR_HEALTH_REPORTER_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD] = { .type = NLA_U64 },
[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER] = { .type = NLA_U8 },
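
The devlink fix stops trusting nla_get_u8/u16/u32() on attributes whose payload length was never validated: each scalar read is now preceded by an nla_len() check against the destination type, and the flag-typed bool must carry no payload at all. An illustrative userspace analogue of that length-checked extraction (struct nlattr_buf and get_u32_checked are invented for the sketch):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct nlattr_buf { const void *data; int len; };

static int get_u32_checked(const struct nlattr_buf *a, uint32_t *out)
{
    if (!a->data || a->len != (int)sizeof(uint32_t))
        return -1;                    /* -EINVAL in the kernel code */
    memcpy(out, a->data, sizeof(*out));
    return 0;
}

int main(void)
{
    uint32_t v, word = 42;
    struct nlattr_buf ok  = { &word, sizeof(word) };
    struct nlattr_buf bad = { &word, 2 };   /* truncated payload */

    printf("ok=%d bad=%d\n", get_u32_checked(&ok, &v),
           get_u32_checked(&bad, &v));      /* ok=0 bad=-1 */
    return 0;
}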

View File

@ -53,30 +53,60 @@ static void cgrp_css_free(struct cgroup_subsys_state *css)
kfree(css_cls_state(css));
}
/*
* To avoid stalling socket creation for tasks with a large number of threads
* and open sockets, let's release file_lock every 1000 iterated descriptors.
* New sockets will already have been created with the new classid.
*/
struct update_classid_context {
u32 classid;
unsigned int batch;
};
#define UPDATE_CLASSID_BATCH 1000
static int update_classid_sock(const void *v, struct file *file, unsigned n)
{
int err;
struct update_classid_context *ctx = (void *)v;
struct socket *sock = sock_from_file(file, &err);
if (sock) {
spin_lock(&cgroup_sk_update_lock);
sock_cgroup_set_classid(&sock->sk->sk_cgrp_data,
(unsigned long)v);
sock_cgroup_set_classid(&sock->sk->sk_cgrp_data, ctx->classid);
spin_unlock(&cgroup_sk_update_lock);
}
if (--ctx->batch == 0) {
ctx->batch = UPDATE_CLASSID_BATCH;
return n + 1;
}
return 0;
}
static void update_classid_task(struct task_struct *p, u32 classid)
{
struct update_classid_context ctx = {
.classid = classid,
.batch = UPDATE_CLASSID_BATCH
};
unsigned int fd = 0;
do {
task_lock(p);
fd = iterate_fd(p->files, fd, update_classid_sock, &ctx);
task_unlock(p);
cond_resched();
} while (fd);
}
static void cgrp_attach(struct cgroup_taskset *tset)
{
struct cgroup_subsys_state *css;
struct task_struct *p;
cgroup_taskset_for_each(p, css, tset) {
task_lock(p);
iterate_fd(p->files, 0, update_classid_sock,
(void *)(unsigned long)css_cls_state(css)->classid);
task_unlock(p);
update_classid_task(p, css_cls_state(css)->classid);
}
}
@ -98,10 +128,7 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
css_task_iter_start(css, 0, &it);
while ((p = css_task_iter_next(&it))) {
task_lock(p);
iterate_fd(p->files, 0, update_classid_sock,
(void *)(unsigned long)cs->classid);
task_unlock(p);
update_classid_task(p, cs->classid);
cond_resched();
}
css_task_iter_end(&it);
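
update_classid_sock() now cooperates with iterate_fd()'s resume semantics: returning a non-zero value stops the scan, and that value is handed back as the descriptor to resume from, so update_classid_task() can drop task_lock() and cond_resched() every UPDATE_CLASSID_BATCH descriptors. A runnable toy version of that stop-and-resume batching, with plain loops standing in for iterate_fd() and the locking:

#include <stdio.h>

#define BATCH 1000

struct ctx { unsigned int batch; };

/* returns 0 to continue, or n + 1 as the position to resume from */
static unsigned int visit(struct ctx *c, unsigned int n)
{
    /* ...per-descriptor work would go here... */
    if (--c->batch == 0) {
        c->batch = BATCH;
        return n + 1;
    }
    return 0;
}

int main(void)
{
    struct ctx c = { .batch = BATCH };
    unsigned int fd = 0, total = 2500, rounds = 0;

    do {
        unsigned int n, stop = 0;
        for (n = fd; n < total && !stop; n++)   /* the "iterate_fd" pass */
            stop = visit(&c, n);
        fd = stop;      /* 0 means the scan completed */
        rounds++;       /* lock dropped / cond_resched() here */
    } while (fd);

    printf("completed in %u rounds\n", rounds); /* 3 rounds for 2500 fds */
    return 0;
}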

View File

@ -1832,7 +1832,10 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
atomic_set(&newsk->sk_zckey, 0);
sock_reset_flag(newsk, SOCK_DONE);
mem_cgroup_sk_alloc(newsk);
/* sk->sk_memcg will be populated at accept() time */
newsk->sk_memcg = NULL;
cgroup_sk_alloc(&newsk->sk_cgrp_data);
rcu_read_lock();

View File

@ -128,7 +128,9 @@ static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
/* port.c */
int dsa_port_set_state(struct dsa_port *dp, u8 state,
struct switchdev_trans *trans);
int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy);
int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy);
void dsa_port_disable_rt(struct dsa_port *dp);
void dsa_port_disable(struct dsa_port *dp);
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br);
void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br);

View File

@ -63,7 +63,7 @@ static void dsa_port_set_state_now(struct dsa_port *dp, u8 state)
pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
}
int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
{
struct dsa_switch *ds = dp->ds;
int port = dp->index;
@ -78,14 +78,31 @@ int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
if (!dp->bridge_dev)
dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
if (dp->pl)
phylink_start(dp->pl);
return 0;
}
void dsa_port_disable(struct dsa_port *dp)
int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
{
int err;
rtnl_lock();
err = dsa_port_enable_rt(dp, phy);
rtnl_unlock();
return err;
}
void dsa_port_disable_rt(struct dsa_port *dp)
{
struct dsa_switch *ds = dp->ds;
int port = dp->index;
if (dp->pl)
phylink_stop(dp->pl);
if (!dp->bridge_dev)
dsa_port_set_state_now(dp, BR_STATE_DISABLED);
@ -93,6 +110,13 @@ void dsa_port_disable(struct dsa_port *dp)
ds->ops->port_disable(ds, port);
}
void dsa_port_disable(struct dsa_port *dp)
{
rtnl_lock();
dsa_port_disable_rt(dp);
rtnl_unlock();
}
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br)
{
struct dsa_notifier_bridge_info info = {
@ -616,10 +640,6 @@ static int dsa_port_phylink_register(struct dsa_port *dp)
goto err_phy_connect;
}
rtnl_lock();
phylink_start(dp->pl);
rtnl_unlock();
return 0;
err_phy_connect:
@ -630,9 +650,14 @@ err_phy_connect:
int dsa_port_link_register_of(struct dsa_port *dp)
{
struct dsa_switch *ds = dp->ds;
struct device_node *phy_np;
if (!ds->ops->adjust_link)
return dsa_port_phylink_register(dp);
if (!ds->ops->adjust_link) {
phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
if (of_phy_is_fixed_link(dp->dn) || phy_np)
return dsa_port_phylink_register(dp);
return 0;
}
dev_warn(ds->dev,
"Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");
@ -647,11 +672,12 @@ void dsa_port_link_unregister_of(struct dsa_port *dp)
{
struct dsa_switch *ds = dp->ds;
if (!ds->ops->adjust_link) {
if (!ds->ops->adjust_link && dp->pl) {
rtnl_lock();
phylink_disconnect_phy(dp->pl);
rtnl_unlock();
phylink_destroy(dp->pl);
dp->pl = NULL;
return;
}
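
The dsa_port_enable()/dsa_port_enable_rt() split follows the usual locked-caller convention: the _rt variant assumes rtnl_lock is already held (the slave open/close paths), while the plain variant wraps it in lock/unlock for callers that arrive without it. A userspace sketch of the same convention, with a pthread mutex standing in for rtnl_lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

static int port_enable_rt(int port)    /* caller must hold the lock */
{
    printf("enabling port %d under lock\n", port);
    return 0;
}

static int port_enable(int port)       /* self-locking wrapper */
{
    int err;

    pthread_mutex_lock(&rtnl);
    err = port_enable_rt(port);
    pthread_mutex_unlock(&rtnl);
    return err;
}

int main(void)
{
    pthread_mutex_lock(&rtnl);          /* a path already holding the lock */
    port_enable_rt(1);
    pthread_mutex_unlock(&rtnl);

    return port_enable(2);              /* everyone else takes the wrapper */
}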

View File

@ -90,12 +90,10 @@ static int dsa_slave_open(struct net_device *dev)
goto clear_allmulti;
}
err = dsa_port_enable(dp, dev->phydev);
err = dsa_port_enable_rt(dp, dev->phydev);
if (err)
goto clear_promisc;
phylink_start(dp->pl);
return 0;
clear_promisc:
@ -119,9 +117,7 @@ static int dsa_slave_close(struct net_device *dev)
cancel_work_sync(&dp->xmit_work);
skb_queue_purge(&dp->xmit_queue);
phylink_stop(dp->pl);
dsa_port_disable(dp);
dsa_port_disable_rt(dp);
dev_mc_unsync(master, dev);
dev_uc_unsync(master, dev);

View File

@ -21,7 +21,13 @@ const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = {
[IEEE802154_ATTR_HW_ADDR] = { .type = NLA_HW_ADDR, },
[IEEE802154_ATTR_PAN_ID] = { .type = NLA_U16, },
[IEEE802154_ATTR_CHANNEL] = { .type = NLA_U8, },
[IEEE802154_ATTR_BCN_ORD] = { .type = NLA_U8, },
[IEEE802154_ATTR_SF_ORD] = { .type = NLA_U8, },
[IEEE802154_ATTR_PAN_COORD] = { .type = NLA_U8, },
[IEEE802154_ATTR_BAT_EXT] = { .type = NLA_U8, },
[IEEE802154_ATTR_COORD_REALIGN] = { .type = NLA_U8, },
[IEEE802154_ATTR_PAGE] = { .type = NLA_U8, },
[IEEE802154_ATTR_DEV_TYPE] = { .type = NLA_U8, },
[IEEE802154_ATTR_COORD_SHORT_ADDR] = { .type = NLA_U16, },
[IEEE802154_ATTR_COORD_HW_ADDR] = { .type = NLA_HW_ADDR, },
[IEEE802154_ATTR_COORD_PAN_ID] = { .type = NLA_U16, },

View File

@ -56,7 +56,9 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version)
}
EXPORT_SYMBOL_GPL(gre_del_protocol);
/* Fills in tpi and returns header length to be pulled. */
/* Fills in tpi and returns header length to be pulled.
* Note that the caller must use pskb_may_pull() before pulling the GRE header.
*/
int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
bool *csum_err, __be16 proto, int nhs)
{
@ -110,8 +112,14 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
* - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
*/
if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
u8 _val, *val;
val = skb_header_pointer(skb, nhs + hdr_len,
sizeof(_val), &_val);
if (!val)
return -EINVAL;
tpi->proto = proto;
if ((*(u8 *)options & 0xF0) != 0x40)
if ((*val & 0xF0) != 0x40)
hdr_len += 4;
}
tpi->hdr_len = hdr_len;
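
Instead of dereferencing the WCCP option byte directly, the fixed code reads it through skb_header_pointer(), which fails cleanly when the offset lies beyond the data actually present. A minimal userspace analogue of that copy-with-bounds-check access pattern (struct buf and header_pointer are invented stand-ins for the skb API):

#include <stdio.h>
#include <string.h>

struct buf { const unsigned char *data; size_t len; };

static const void *header_pointer(const struct buf *b, size_t off,
                                  size_t n, void *copy)
{
    if (off + n > b->len)
        return NULL;                  /* would read past the buffer */
    memcpy(copy, b->data + off, n);
    return copy;
}

int main(void)
{
    unsigned char pkt[] = { 0x45, 0x00, 0x00, 0x1c };
    struct buf b = { pkt, sizeof(pkt) };
    unsigned char _val;
    const unsigned char *val;

    val = header_pointer(&b, 0, sizeof(_val), &_val);
    if (val && (*val & 0xF0) == 0x40)
        puts("version nibble check passed");
    if (!header_pointer(&b, 10, 1, &_val))
        puts("out-of-bounds read rejected");
    return 0;
}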

View File

@ -482,8 +482,28 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
}
spin_unlock_bh(&queue->fastopenq.lock);
}
out:
release_sock(sk);
if (newsk && mem_cgroup_sockets_enabled) {
int amt;
/* atomically get the memory usage, set and charge the
* newsk->sk_memcg.
*/
lock_sock(newsk);
/* The socket has not been accepted yet, no need to look at
* newsk->sk_wmem_queued.
*/
amt = sk_mem_pages(newsk->sk_forward_alloc +
atomic_read(&newsk->sk_rmem_alloc));
mem_cgroup_sk_alloc(newsk);
if (newsk->sk_memcg && amt)
mem_cgroup_charge_skmem(newsk->sk_memcg, amt);
release_sock(newsk);
}
if (req)
reqsk_put(req);
return newsk;
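
The accept-path hunk charges the memcg for whatever memory the child socket already holds before userspace accepts it: sk_mem_pages() converts sk_forward_alloc plus sk_rmem_alloc from bytes into the page-sized units that mem_cgroup_charge_skmem() expects. A small sketch of that conversion, assuming the kernel's SK_MEM_QUANTUM equals a 4 KiB page (the byte counts are hypothetical):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1 << PAGE_SHIFT)

static int sk_mem_pages(int amt)
{
    return (amt + PAGE_SIZE - 1) >> PAGE_SHIFT;   /* round up to pages */
}

int main(void)
{
    int forward_alloc = 3000;   /* hypothetical leftover reservation */
    int rmem_alloc    = 6000;   /* hypothetical queued receive bytes */

    printf("pages to charge: %d\n",
           sk_mem_pages(forward_alloc + rmem_alloc));   /* prints 3 */
    return 0;
}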

View File

@ -100,13 +100,9 @@ static size_t inet_sk_attr_size(struct sock *sk,
aux = handler->idiag_get_aux_size(sk, net_admin);
return nla_total_size(sizeof(struct tcp_info))
+ nla_total_size(1) /* INET_DIAG_SHUTDOWN */
+ nla_total_size(1) /* INET_DIAG_TOS */
+ nla_total_size(1) /* INET_DIAG_TCLASS */
+ nla_total_size(4) /* INET_DIAG_MARK */
+ nla_total_size(4) /* INET_DIAG_CLASS_ID */
+ nla_total_size(sizeof(struct inet_diag_meminfo))
+ nla_total_size(sizeof(struct inet_diag_msg))
+ inet_diag_msg_attrs_size()
+ nla_total_size(sizeof(struct inet_diag_meminfo))
+ nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
+ nla_total_size(TCP_CA_NAME_MAX)
+ nla_total_size(sizeof(struct tcpvegas_info))
@ -147,6 +143,24 @@ int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, sk->sk_mark))
goto errout;
if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) ||
ext & (1 << (INET_DIAG_TCLASS - 1))) {
u32 classid = 0;
#ifdef CONFIG_SOCK_CGROUP_DATA
classid = sock_cgroup_classid(&sk->sk_cgrp_data);
#endif
/* Fall back to socket priority if the class id isn't set.
* Classful qdiscs use it as a direct reference to the class.
* For cgroup2, classid is always zero.
*/
if (!classid)
classid = sk->sk_priority;
if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
goto errout;
}
r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
r->idiag_inode = sock_i_ino(sk);
@ -284,24 +298,6 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
goto errout;
}
if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) ||
ext & (1 << (INET_DIAG_TCLASS - 1))) {
u32 classid = 0;
#ifdef CONFIG_SOCK_CGROUP_DATA
classid = sock_cgroup_classid(&sk->sk_cgrp_data);
#endif
/* Fall back to socket priority if the class id isn't set.
* Classful qdiscs use it as a direct reference to the class.
* For cgroup2, classid is always zero.
*/
if (!classid)
classid = sk->sk_priority;
if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
goto errout;
}
out:
nlmsg_end(skb, nlh);
return 0;

View File

@ -100,8 +100,9 @@ static int raw_diag_dump_one(struct sk_buff *in_skb,
if (IS_ERR(sk))
return PTR_ERR(sk);
rep = nlmsg_new(sizeof(struct inet_diag_msg) +
sizeof(struct inet_diag_meminfo) + 64,
rep = nlmsg_new(nla_total_size(sizeof(struct inet_diag_msg)) +
inet_diag_msg_attrs_size() +
nla_total_size(sizeof(struct inet_diag_meminfo)) + 64,
GFP_KERNEL);
if (!rep) {
sock_put(sk);

View File

@ -64,8 +64,9 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
goto out;
err = -ENOMEM;
rep = nlmsg_new(sizeof(struct inet_diag_msg) +
sizeof(struct inet_diag_meminfo) + 64,
rep = nlmsg_new(nla_total_size(sizeof(struct inet_diag_msg)) +
inet_diag_msg_attrs_size() +
nla_total_size(sizeof(struct inet_diag_meminfo)) + 64,
GFP_KERNEL);
if (!rep)
goto out;

View File

@ -1226,11 +1226,13 @@ check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
}
static void
cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires, bool del_rt)
cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires,
bool del_rt, bool del_peer)
{
struct fib6_info *f6i;
f6i = addrconf_get_prefix_route(&ifp->addr, ifp->prefix_len,
f6i = addrconf_get_prefix_route(del_peer ? &ifp->peer_addr : &ifp->addr,
ifp->prefix_len,
ifp->idev->dev, 0, RTF_DEFAULT, true);
if (f6i) {
if (del_rt)
@ -1293,7 +1295,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
if (action != CLEANUP_PREFIX_RT_NOP) {
cleanup_prefix_route(ifp, expires,
action == CLEANUP_PREFIX_RT_DEL);
action == CLEANUP_PREFIX_RT_DEL, false);
}
/* clean up prefsrc entries */
@ -3345,6 +3347,10 @@ static void addrconf_dev_config(struct net_device *dev)
(dev->type != ARPHRD_NONE) &&
(dev->type != ARPHRD_RAWIP)) {
/* Alas, we support only Ethernet autoconfiguration. */
idev = __in6_dev_get(dev);
if (!IS_ERR_OR_NULL(idev) && dev->flags & IFF_UP &&
dev->flags & IFF_MULTICAST)
ipv6_mc_up(idev);
return;
}
@ -4586,12 +4592,14 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
}
static int modify_prefix_route(struct inet6_ifaddr *ifp,
unsigned long expires, u32 flags)
unsigned long expires, u32 flags,
bool modify_peer)
{
struct fib6_info *f6i;
u32 prio;
f6i = addrconf_get_prefix_route(&ifp->addr, ifp->prefix_len,
f6i = addrconf_get_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr,
ifp->prefix_len,
ifp->idev->dev, 0, RTF_DEFAULT, true);
if (!f6i)
return -ENOENT;
@ -4602,7 +4610,8 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp,
ip6_del_rt(dev_net(ifp->idev->dev), f6i);
/* add new one */
addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
addrconf_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr,
ifp->prefix_len,
ifp->rt_priority, ifp->idev->dev,
expires, flags, GFP_KERNEL);
} else {
@ -4624,6 +4633,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
unsigned long timeout;
bool was_managetempaddr;
bool had_prefixroute;
bool new_peer = false;
ASSERT_RTNL();
@ -4655,6 +4665,13 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
cfg->preferred_lft = timeout;
}
if (cfg->peer_pfx &&
memcmp(&ifp->peer_addr, cfg->peer_pfx, sizeof(struct in6_addr))) {
if (!ipv6_addr_any(&ifp->peer_addr))
cleanup_prefix_route(ifp, expires, true, true);
new_peer = true;
}
spin_lock_bh(&ifp->lock);
was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR;
had_prefixroute = ifp->flags & IFA_F_PERMANENT &&
@ -4670,6 +4687,9 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
if (cfg->rt_priority && cfg->rt_priority != ifp->rt_priority)
ifp->rt_priority = cfg->rt_priority;
if (new_peer)
ifp->peer_addr = *cfg->peer_pfx;
spin_unlock_bh(&ifp->lock);
if (!(ifp->flags&IFA_F_TENTATIVE))
ipv6_ifa_notify(0, ifp);
@ -4678,7 +4698,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
int rc = -ENOENT;
if (had_prefixroute)
rc = modify_prefix_route(ifp, expires, flags);
rc = modify_prefix_route(ifp, expires, flags, false);
/* prefix route could have been deleted; if so restore it */
if (rc == -ENOENT) {
@ -4686,6 +4706,15 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
ifp->rt_priority, ifp->idev->dev,
expires, flags, GFP_KERNEL);
}
if (had_prefixroute && !ipv6_addr_any(&ifp->peer_addr))
rc = modify_prefix_route(ifp, expires, flags, true);
if (rc == -ENOENT && !ipv6_addr_any(&ifp->peer_addr)) {
addrconf_prefix_route(&ifp->peer_addr, ifp->prefix_len,
ifp->rt_priority, ifp->idev->dev,
expires, flags, GFP_KERNEL);
}
} else if (had_prefixroute) {
enum cleanup_prefix_rt_t action;
unsigned long rt_expires;
@ -4696,7 +4725,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
if (action != CLEANUP_PREFIX_RT_NOP) {
cleanup_prefix_route(ifp, rt_expires,
action == CLEANUP_PREFIX_RT_DEL);
action == CLEANUP_PREFIX_RT_DEL, false);
}
}
@ -5984,9 +6013,9 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
if (ifp->idev->cnf.forwarding)
addrconf_join_anycast(ifp);
if (!ipv6_addr_any(&ifp->peer_addr))
addrconf_prefix_route(&ifp->peer_addr, 128, 0,
ifp->idev->dev, 0, 0,
GFP_ATOMIC);
addrconf_prefix_route(&ifp->peer_addr, 128,
ifp->rt_priority, ifp->idev->dev,
0, 0, GFP_ATOMIC);
break;
case RTM_DELADDR:
if (ifp->idev->cnf.forwarding)

View File

@ -183,9 +183,15 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
retv = -EBUSY;
break;
}
} else if (sk->sk_protocol != IPPROTO_TCP)
} else if (sk->sk_protocol == IPPROTO_TCP) {
if (sk->sk_prot != &tcpv6_prot) {
retv = -EBUSY;
break;
}
break;
} else {
break;
}
if (sk->sk_state != TCP_ESTABLISHED) {
retv = -ENOTCONN;
break;

View File

@ -411,7 +411,7 @@ static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
*pos = cpu + 1;
return per_cpu_ptr(net->ct.stat, cpu);
}
(*pos)++;
return NULL;
}

View File

@ -267,7 +267,7 @@ static void *synproxy_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
*pos = cpu + 1;
return per_cpu_ptr(snet->stats, cpu);
}
(*pos)++;
return NULL;
}
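
Both per-CPU stat iterators get the same one-line fix as the cgroup_procs_next() hunk earlier: a seq_file ->next handler must advance *pos even on the end-of-sequence path, otherwise the core may restart at the stale position and replay the final record. A toy cursor iterator showing the convention (invented names, no seq_file involved):

#include <stdio.h>

static const char *items[] = { "a", "b", "c" };
#define N 3

static const char *next(long *pos)
{
    if (*pos < N)
        return items[(*pos)++];
    (*pos)++;           /* the fix: still move past the end */
    return NULL;
}

int main(void)
{
    long pos = 0;
    const char *it;

    while ((it = next(&pos)))
        printf("%s\n", it);
    printf("final pos=%ld (past the end)\n", pos);   /* pos == 4 */
    return 0;
}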

View File

@ -1309,6 +1309,11 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
lockdep_commit_lock_is_held(net));
if (nft_dump_stats(skb, stats))
goto nla_put_failure;
if ((chain->flags & NFT_CHAIN_HW_OFFLOAD) &&
nla_put_be32(skb, NFTA_CHAIN_FLAGS,
htonl(NFT_CHAIN_HW_OFFLOAD)))
goto nla_put_failure;
}
if (nla_put_be32(skb, NFTA_CHAIN_USE, htonl(chain->use)))
@ -6970,13 +6975,8 @@ static void nf_tables_module_autoload(struct net *net)
list_splice_init(&net->nft.module_list, &module_list);
mutex_unlock(&net->nft.commit_mutex);
list_for_each_entry_safe(req, next, &module_list, list) {
if (req->done) {
list_del(&req->list);
kfree(req);
} else {
request_module("%s", req->module);
req->done = true;
}
request_module("%s", req->module);
req->done = true;
}
mutex_lock(&net->nft.commit_mutex);
list_splice(&module_list, &net->nft.module_list);
@ -7759,6 +7759,7 @@ static void __net_exit nf_tables_exit_net(struct net *net)
__nft_release_tables(net);
mutex_unlock(&net->nft.commit_mutex);
WARN_ON_ONCE(!list_empty(&net->nft.tables));
WARN_ON_ONCE(!list_empty(&net->nft.module_list));
}
static struct pernet_operations nf_tables_net_ops = {

View File

@ -742,6 +742,8 @@ static const struct nla_policy nfnl_cthelper_policy[NFCTH_MAX+1] = {
[NFCTH_NAME] = { .type = NLA_NUL_STRING,
.len = NF_CT_HELPER_NAME_LEN-1 },
[NFCTH_QUEUE_NUM] = { .type = NLA_U32, },
[NFCTH_PRIV_DATA_LEN] = { .type = NLA_U32, },
[NFCTH_STATUS] = { .type = NLA_U32, },
};
static const struct nfnl_callback nfnl_cthelper_cb[NFNL_MSG_CTHELPER_MAX] = {

View File

@ -89,6 +89,7 @@ static const struct nft_chain_type nft_chain_nat_inet = {
.name = "nat",
.type = NFT_CHAIN_T_NAT,
.family = NFPROTO_INET,
.owner = THIS_MODULE,
.hook_mask = (1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_LOCAL_OUT) |

View File

@ -121,6 +121,7 @@ static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
[NFTA_PAYLOAD_LEN] = { .type = NLA_U32 },
[NFTA_PAYLOAD_CSUM_TYPE] = { .type = NLA_U32 },
[NFTA_PAYLOAD_CSUM_OFFSET] = { .type = NLA_U32 },
[NFTA_PAYLOAD_CSUM_FLAGS] = { .type = NLA_U32 },
};
static int nft_payload_init(const struct nft_ctx *ctx,

Some files were not shown because too many files have changed in this diff.