Merge ra.kernel.org:/pub/scm/linux/kernel/git/davem/net

Lots of overlapping changes, mostly trivial in nature.

The mlxsw conflict was resolved using the example
resolution at:

https://github.com/jpirko/linux_mlxsw/blob/combined_queue/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2018-08-05 13:04:31 -07:00
commit c1c8626fce
70 changed files with 540 additions and 218 deletions
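
Context for the mlxsw hunks below: the merged core_acl_flex_actions.c changes
convert mlxsw_afa_block_append_action() from returning NULL on failure to
returning ERR_PTR() codes that callers propagate with IS_ERR()/PTR_ERR().
A minimal sketch of that kernel idiom, using hypothetical demo_* names that
are not part of this merge:

	#include <linux/err.h>
	#include <linux/string.h>
	#include <linux/types.h>

	struct demo_block {
		bool finished;
		char *cur, *end;
	};

	/* On failure, encode the errno in the returned pointer. */
	static char *demo_append_action(struct demo_block *block, size_t size)
	{
		if (block->finished)
			return ERR_PTR(-EINVAL);	/* block already sealed */
		if (block->cur + size > block->end)
			return ERR_PTR(-ENOBUFS);	/* no room for the action */
		block->cur += size;
		return block->cur - size;
	}

	/* Callers test with IS_ERR() and propagate the exact error. */
	static int demo_append_drop(struct demo_block *block)
	{
		char *act = demo_append_action(block, 8);

		if (IS_ERR(act))
			return PTR_ERR(act);
		memset(act, 0, 8);	/* stands in for packing the action */
		return 0;
	}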

@@ -5444,6 +5444,7 @@ F: drivers/iommu/exynos-iommu.c
EZchip NPS platform support
M: Vineet Gupta <vgupta@synopsys.com>
M: Ofer Levi <oferle@mellanox.com>
S: Supported
F: arch/arc/plat-eznps
F: arch/arc/boot/dts/eznps.dts

@@ -2,7 +2,7 @@
VERSION = 4
PATCHLEVEL = 18
SUBLEVEL = 0
EXTRAVERSION = -rc7
EXTRAVERSION = -rc8
NAME = Merciless Moray
# *DOCUMENTATION*

@@ -50,6 +50,9 @@ config ARC
select HAVE_KERNEL_LZMA
select ARCH_HAS_PTE_SPECIAL
config ARCH_HAS_CACHE_LINE_SIZE
def_bool y
config MIGHT_HAVE_PCI
bool

@@ -48,7 +48,9 @@
})
/* Largest line length for either L1 or L2 is 128 bytes */
#define ARCH_DMA_MINALIGN 128
#define SMP_CACHE_BYTES 128
#define cache_line_size() SMP_CACHE_BYTES
#define ARCH_DMA_MINALIGN SMP_CACHE_BYTES
extern void arc_cache_init(void);
extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);

@@ -17,8 +17,11 @@
#ifndef __ASM_ARC_UDELAY_H
#define __ASM_ARC_UDELAY_H
#include <asm-generic/types.h>
#include <asm/param.h> /* HZ */
extern unsigned long loops_per_jiffy;
static inline void __delay(unsigned long loops)
{
__asm__ __volatile__(

@@ -1038,7 +1038,7 @@ void flush_cache_mm(struct mm_struct *mm)
void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
unsigned long pfn)
{
unsigned int paddr = pfn << PAGE_SHIFT;
phys_addr_t paddr = pfn << PAGE_SHIFT;
u_vaddr &= PAGE_MASK;
@@ -1058,8 +1058,9 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page,
unsigned long u_vaddr)
{
/* TBD: do we really need to clear the kernel mapping */
__flush_dcache_page(page_address(page), u_vaddr);
__flush_dcache_page(page_address(page), page_address(page));
__flush_dcache_page((phys_addr_t)page_address(page), u_vaddr);
__flush_dcache_page((phys_addr_t)page_address(page),
(phys_addr_t)page_address(page));
}
@@ -1246,6 +1247,16 @@ void __init arc_cache_init_master(void)
}
}
/*
* Check that SMP_CACHE_BYTES (and hence ARCH_DMA_MINALIGN) is larger
* or equal to any cache line length.
*/
BUILD_BUG_ON_MSG(L1_CACHE_BYTES > SMP_CACHE_BYTES,
"SMP_CACHE_BYTES must be >= any cache line length");
if (is_isa_arcv2() && (l2_line_sz > SMP_CACHE_BYTES))
panic("L2 Cache line [%d] > kernel Config [%d]\n",
l2_line_sz, SMP_CACHE_BYTES);
/* Note that SLC disable not formally supported till HS 3.0 */
if (is_isa_arcv2() && l2_line_sz && !slc_enable)
arc_slc_disable();

@@ -129,14 +129,59 @@ int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
return ret;
}
/*
* Cache operations depending on function and direction argument, inspired by
* https://lkml.org/lkml/2018/5/18/979
* "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
* dma-mapping: provide a generic dma-noncoherent implementation)"
*
* | map == for_device | unmap == for_cpu
* |----------------------------------------------------------------
* TO_DEV | writeback writeback | none none
* FROM_DEV | invalidate invalidate | invalidate* invalidate*
* BIDIR | writeback+inv writeback+inv | invalidate invalidate
*
* [*] needed for CPU speculative prefetches
*
* NOTE: we don't check the validity of direction argument as it is done in
* upper layer functions (in include/linux/dma-mapping.h)
*/
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
{
dma_cache_wback(paddr, size);
switch (dir) {
case DMA_TO_DEVICE:
dma_cache_wback(paddr, size);
break;
case DMA_FROM_DEVICE:
dma_cache_inv(paddr, size);
break;
case DMA_BIDIRECTIONAL:
dma_cache_wback_inv(paddr, size);
break;
default:
break;
}
}
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
{
dma_cache_inv(paddr, size);
switch (dir) {
case DMA_TO_DEVICE:
break;
/* FROM_DEVICE invalidate needed if speculative CPU prefetch only */
case DMA_FROM_DEVICE:
case DMA_BIDIRECTIONAL:
dma_cache_inv(paddr, size);
break;
default:
break;
}
}
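
These two hooks sit underneath the generic noncoherent DMA code; drivers do
not call them directly but reach them through the streaming mapping API.
A hedged sketch of that path, following the direction table above
(demo_send() and its arguments are illustrative, not part of this patch):

	#include <linux/dma-mapping.h>

	static int demo_send(struct device *dev, void *buf, size_t len)
	{
		/* map == for_device: DMA_TO_DEVICE writes back dirty lines */
		dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma))
			return -ENOMEM;

		/* ... program the device with dma, wait for completion ... */

		/* unmap == for_cpu: none for DMA_TO_DEVICE, per the table */
		dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
		return 0;
	}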

@@ -21,6 +21,7 @@
#error "Incorrect ctop.h include"
#endif
#include <linux/types.h>
#include <soc/nps/common.h>
/* core auxiliary registers */
@@ -143,6 +144,15 @@ struct nps_host_reg_gim_p_int_dst {
};
/* AUX registers definition */
struct nps_host_reg_aux_dpc {
union {
struct {
u32 ien:1, men:1, hen:1, reserved:29;
};
u32 value;
};
};
struct nps_host_reg_aux_udmc {
union {
struct {

@@ -15,6 +15,8 @@
*/
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <asm/arcregs.h>
@@ -157,10 +159,10 @@ void mtm_enable_core(unsigned int cpu)
/* Verify and set the value of the mtm hs counter */
static int __init set_mtm_hs_ctr(char *ctr_str)
{
long hs_ctr;
int hs_ctr;
int ret;
ret = kstrtol(ctr_str, 0, &hs_ctr);
ret = kstrtoint(ctr_str, 0, &hs_ctr);
if (ret || hs_ctr > MT_HS_CNT_MAX || hs_ctr < MT_HS_CNT_MIN) {
pr_err("** Invalid @nps_mtm_hs_ctr [%d] needs to be [%d:%d] (incl)\n",

@@ -488,9 +488,13 @@ static int gcm_decrypt(struct aead_request *req)
err = skcipher_walk_done(&walk,
walk.nbytes % AES_BLOCK_SIZE);
}
if (walk.nbytes)
pmull_gcm_encrypt_block(iv, iv, NULL,
if (walk.nbytes) {
kernel_neon_begin();
pmull_gcm_encrypt_block(iv, iv, ctx->aes_key.key_enc,
num_rounds(&ctx->aes_key));
kernel_neon_end();
}
} else {
__aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv,
num_rounds(&ctx->aes_key));

@@ -143,24 +143,33 @@ static inline void mm_context_remove_copro(struct mm_struct *mm)
{
int c;
c = atomic_dec_if_positive(&mm->context.copros);
/* Detect imbalance between add and remove */
WARN_ON(c < 0);
/*
* Need to broadcast a global flush of the full mm before
* decrementing active_cpus count, as the next TLBI may be
* local and the nMMU and/or PSL need to be cleaned up.
* Should be rare enough so that it's acceptable.
* When removing the last copro, we need to broadcast a global
* flush of the full mm, as the next TLBI may be local and the
* nMMU and/or PSL need to be cleaned up.
*
* Both the 'copros' and 'active_cpus' counts are looked at in
* flush_all_mm() to determine the scope (local/global) of the
* TLBIs, so we need to flush first before decrementing
* 'copros'. If this API is used by several callers for the
* same context, it can lead to over-flushing. It's hopefully
* not common enough to be a problem.
*
* Skip on hash, as we don't know how to do the proper flush
* for the time being. Invalidations will remain global if
* used on hash.
* used on hash. Note that we can't drop 'copros' either, as
* it could make some invalidations local with no flush
* in-between.
*/
if (c == 0 && radix_enabled()) {
if (radix_enabled()) {
flush_all_mm(mm);
dec_mm_active_cpus(mm);
c = atomic_dec_if_positive(&mm->context.copros);
/* Detect imbalance between add and remove */
WARN_ON(c < 0);
if (c == 0)
dec_mm_active_cpus(mm);
}
}
#else

@@ -42,6 +42,8 @@
#include <asm/ppc-pci.h>
#include <asm/eeh.h>
#include "../../../drivers/pci/pci.h"
/* hose_spinlock protects accesses to the phb_bitmap. */
static DEFINE_SPINLOCK(hose_spinlock);
LIST_HEAD(hose_list);
@@ -1014,7 +1016,7 @@ void pcibios_setup_bus_devices(struct pci_bus *bus)
/* Cardbus can call us to add new devices to a bus, so ignore
* those who are already fully discovered
*/
if (dev->is_added)
if (pci_dev_is_added(dev))
continue;
pcibios_setup_device(dev);

@@ -46,6 +46,7 @@
#include "powernv.h"
#include "pci.h"
#include "../../../../drivers/pci/pci.h"
#define PNV_IODA1_M64_NUM 16 /* Number of M64 BARs */
#define PNV_IODA1_M64_SEGS 8 /* Segments per M64 BAR */
@@ -3138,7 +3139,7 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
struct pci_dn *pdn;
int mul, total_vfs;
if (!pdev->is_physfn || pdev->is_added)
if (!pdev->is_physfn || pci_dev_is_added(pdev))
return;
pdn = pci_get_pdn(pdev);

@@ -71,6 +71,7 @@
#include <asm/security_features.h>
#include "pseries.h"
#include "../../../../drivers/pci/pci.h"
int CMO_PrPSP = -1;
int CMO_SecPSP = -1;
@@ -664,7 +665,7 @@ static void pseries_pci_fixup_iov_resources(struct pci_dev *pdev)
const int *indexes;
struct device_node *dn = pci_device_to_OF_node(pdev);
if (!pdev->is_physfn || pdev->is_added)
if (!pdev->is_physfn || pci_dev_is_added(pdev))
return;
/*Firmware must support open sriov otherwise dont configure*/
indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL);

@@ -1,3 +1,4 @@
#include <asm/e820/types.h>
#include <asm/processor.h>
#include "pgtable.h"
#include "../string.h"
@@ -34,10 +35,62 @@ unsigned long *trampoline_32bit __section(.data);
extern struct boot_params *boot_params;
int cmdline_find_option_bool(const char *option);
static unsigned long find_trampoline_placement(void)
{
unsigned long bios_start, ebda_start;
unsigned long trampoline_start;
struct boot_e820_entry *entry;
int i;
/*
* Find a suitable spot for the trampoline.
* This code is based on reserve_bios_regions().
*/
ebda_start = *(unsigned short *)0x40e << 4;
bios_start = *(unsigned short *)0x413 << 10;
if (bios_start < BIOS_START_MIN || bios_start > BIOS_START_MAX)
bios_start = BIOS_START_MAX;
if (ebda_start > BIOS_START_MIN && ebda_start < bios_start)
bios_start = ebda_start;
bios_start = round_down(bios_start, PAGE_SIZE);
/* Find the first usable memory region under bios_start. */
for (i = boot_params->e820_entries - 1; i >= 0; i--) {
entry = &boot_params->e820_table[i];
/* Skip all entries above bios_start. */
if (bios_start <= entry->addr)
continue;
/* Skip non-RAM entries. */
if (entry->type != E820_TYPE_RAM)
continue;
/* Adjust bios_start to the end of the entry if needed. */
if (bios_start > entry->addr + entry->size)
bios_start = entry->addr + entry->size;
/* Keep bios_start page-aligned. */
bios_start = round_down(bios_start, PAGE_SIZE);
/* Skip the entry if it's too small. */
if (bios_start - TRAMPOLINE_32BIT_SIZE < entry->addr)
continue;
break;
}
/* Place the trampoline just below the end of low memory */
return bios_start - TRAMPOLINE_32BIT_SIZE;
}
struct paging_config paging_prepare(void *rmode)
{
struct paging_config paging_config = {};
unsigned long bios_start, ebda_start;
/* Initialize boot_params. Required for cmdline_find_option_bool(). */
boot_params = rmode;
@@ -61,23 +114,7 @@ struct paging_config paging_prepare(void *rmode)
paging_config.l5_required = 1;
}
/*
* Find a suitable spot for the trampoline.
* This code is based on reserve_bios_regions().
*/
ebda_start = *(unsigned short *)0x40e << 4;
bios_start = *(unsigned short *)0x413 << 10;
if (bios_start < BIOS_START_MIN || bios_start > BIOS_START_MAX)
bios_start = BIOS_START_MAX;
if (ebda_start > BIOS_START_MIN && ebda_start < bios_start)
bios_start = ebda_start;
/* Place the trampoline just below the end of low memory, aligned to 4k */
paging_config.trampoline_start = bios_start - TRAMPOLINE_32BIT_SIZE;
paging_config.trampoline_start = round_down(paging_config.trampoline_start, PAGE_SIZE);
paging_config.trampoline_start = find_trampoline_placement();
trampoline_32bit = (unsigned long *)paging_config.trampoline_start;

@@ -28,7 +28,7 @@
#define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data) (data & 0xff)
#define UNCORE_EXTRA_PCI_DEV 0xff
#define UNCORE_EXTRA_PCI_DEV_MAX 3
#define UNCORE_EXTRA_PCI_DEV_MAX 4
#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)

@@ -1029,6 +1029,7 @@ void snbep_uncore_cpu_init(void)
enum {
SNBEP_PCI_QPI_PORT0_FILTER,
SNBEP_PCI_QPI_PORT1_FILTER,
BDX_PCI_QPI_PORT2_FILTER,
HSWEP_PCI_PCU_3,
};
@@ -3286,15 +3287,18 @@ static const struct pci_device_id bdx_uncore_pci_ids[] = {
},
{ /* QPI Port 0 filter */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 0),
.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
SNBEP_PCI_QPI_PORT0_FILTER),
},
{ /* QPI Port 1 filter */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 1),
.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
SNBEP_PCI_QPI_PORT1_FILTER),
},
{ /* QPI Port 2 filter */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2),
.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
BDX_PCI_QPI_PORT2_FILTER),
},
{ /* PCU.3 (for Capability registers) */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0),

@@ -7893,6 +7893,8 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
HRTIMER_MODE_REL_PINNED);
vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
vmx->nested.vpid02 = allocate_vpid();
vmx->nested.vmxon = true;
return 0;
@@ -8480,21 +8482,20 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
/* Emulate the VMPTRST instruction */
static int handle_vmptrst(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
gva_t vmcs_gva;
unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION);
u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
struct x86_exception e;
gva_t gva;
if (!nested_vmx_check_permission(vcpu))
return 1;
if (get_vmx_mem_address(vcpu, exit_qualification,
vmx_instruction_info, true, &vmcs_gva))
if (get_vmx_mem_address(vcpu, exit_qual, instr_info, true, &gva))
return 1;
/* *_system ok, nested_vmx_check_permission has verified cpl=0 */
if (kvm_write_guest_virt_system(vcpu, vmcs_gva,
(void *)&to_vmx(vcpu)->nested.current_vmptr,
sizeof(u64), &e)) {
if (kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
sizeof(gpa_t), &e)) {
kvm_inject_page_fault(vcpu, &e);
return 1;
}
@@ -10370,11 +10371,9 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
goto free_vmcs;
}
if (nested) {
if (nested)
nested_vmx_setup_ctls_msrs(&vmx->nested.msrs,
kvm_vcpu_apicv_active(&vmx->vcpu));
vmx->nested.vpid02 = allocate_vpid();
}
vmx->nested.posted_intr_nv = -1;
vmx->nested.current_vmptr = -1ull;
@@ -10391,7 +10390,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
return &vmx->vcpu;
free_vmcs:
free_vpid(vmx->nested.vpid02);
free_loaded_vmcs(vmx->loaded_vmcs);
free_msrs:
kfree(vmx->guest_msrs);

@@ -2155,11 +2155,12 @@ static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
if (part->policy && op_is_write(bio_op(bio))) {
char b[BDEVNAME_SIZE];
printk(KERN_ERR
WARN_ONCE(1,
"generic_make_request: Trying to write "
"to read-only block-device %s (partno %d)\n",
bio_devname(bio, b), part->partno);
return true;
/* Older lvm-tools actually trigger this */
return false;
}
return false;

@@ -271,7 +271,7 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
* test and set the bit before assining ->rqs[].
*/
rq = tags->rqs[bitnr];
if (rq && blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
if (rq && blk_mq_request_started(rq))
iter_data->fn(rq, iter_data->data, reserved);
return true;

@@ -266,6 +266,8 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
return;
}
count -= initial;
if (initial)
asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
: "+S"(input), "+D"(output)
@@ -273,7 +275,7 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
: "+S"(input), "+D"(output)
: "d"(control_word), "b"(key), "c"(count - initial));
: "d"(control_word), "b"(key), "c"(count));
}
static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
@@ -284,6 +286,8 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
if (count < cbc_fetch_blocks)
return cbc_crypt(input, output, key, iv, control_word, count);
count -= initial;
if (initial)
asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
: "+S" (input), "+D" (output), "+a" (iv)
@@ -291,7 +295,7 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
: "+S" (input), "+D" (output), "+a" (iv)
: "d" (control_word), "b" (key), "c" (count-initial));
: "d" (control_word), "b" (key), "c" (count));
return iv;
}

@@ -429,6 +429,18 @@ static void adv7511_hpd_work(struct work_struct *work)
else
status = connector_status_disconnected;
/*
* The bridge resets its registers on unplug. So when we get a plug
* event and we're already supposed to be powered, cycle the bridge to
* restore its state.
*/
if (status == connector_status_connected &&
adv7511->connector.status == connector_status_disconnected &&
adv7511->powered) {
regcache_mark_dirty(adv7511->regmap);
adv7511_power_on(adv7511);
}
if (adv7511->connector.status != status) {
adv7511->connector.status = status;
if (status == connector_status_disconnected)

@@ -1510,8 +1510,9 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
struct drm_plane *plane = NULL;
struct drm_plane_state *old_plane_state = NULL;
struct drm_plane_state *new_plane_state = NULL;
const struct drm_plane_helper_funcs *funcs;
int i, n_planes = 0;
@@ -1527,7 +1528,8 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
if (n_planes != 1)
return -EINVAL;
if (!new_plane_state->crtc)
if (!new_plane_state->crtc ||
old_plane_state->crtc != new_plane_state->crtc)
return -EINVAL;
funcs = plane->helper_private;

@@ -372,7 +372,7 @@ int drm_legacy_addctx(struct drm_device *dev, void *data,
ctx->handle = drm_legacy_ctxbitmap_next(dev);
}
DRM_DEBUG("%d\n", ctx->handle);
if (ctx->handle == -1) {
if (ctx->handle < 0) {
DRM_DEBUG("Not enough free contexts.\n");
/* Should this return -EBUSY instead? */
return -ENOMEM;

@@ -320,6 +320,9 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
vc4_state->x_scaling[0] = VC4_SCALING_TPZ;
if (vc4_state->y_scaling[0] == VC4_SCALING_NONE)
vc4_state->y_scaling[0] = VC4_SCALING_TPZ;
} else {
vc4_state->x_scaling[1] = VC4_SCALING_NONE;
vc4_state->y_scaling[1] = VC4_SCALING_NONE;
}
vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&

@@ -1984,15 +1984,64 @@ static int modify_qp(struct ib_uverbs_file *file,
goto release_qp;
}
if ((cmd->base.attr_mask & IB_QP_AV) &&
!rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
ret = -EINVAL;
goto release_qp;
if ((cmd->base.attr_mask & IB_QP_AV)) {
if (!rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
ret = -EINVAL;
goto release_qp;
}
if (cmd->base.attr_mask & IB_QP_STATE &&
cmd->base.qp_state == IB_QPS_RTR) {
/* We are in INIT->RTR TRANSITION (if we are not,
* this transition will be rejected in subsequent checks).
* In the INIT->RTR transition, we cannot have IB_QP_PORT set,
* but the IB_QP_STATE flag is required.
*
* Since kernel 3.14 (commit dbf727de7440), the uverbs driver,
* when IB_QP_AV is set, has required inclusion of a valid
* port number in the primary AV. (AVs are created and handled
* differently for infiniband and ethernet (RoCE) ports).
*
* Check the port number included in the primary AV against
* the port number in the qp struct, which was set (and saved)
* in the RST->INIT transition.
*/
if (cmd->base.dest.port_num != qp->real_qp->port) {
ret = -EINVAL;
goto release_qp;
}
} else {
/* We are in SQD->SQD. (If we are not, this transition will
* be rejected later in the verbs layer checks).
* Check for both IB_QP_PORT and IB_QP_AV, these can be set
* together in the SQD->SQD transition.
*
* If only IB_QP_AV was set, add in IB_QP_PORT as well (the
* verbs layer driver does not track primary port changes
* resulting from path migration. Thus, in SQD, if the primary
* AV is modified, the primary port should also be modified).
*
* Note that in this transition, the IB_QP_STATE flag
* is not allowed.
*/
if (((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT))
== (IB_QP_AV | IB_QP_PORT)) &&
cmd->base.port_num != cmd->base.dest.port_num) {
ret = -EINVAL;
goto release_qp;
}
if ((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT))
== IB_QP_AV) {
cmd->base.attr_mask |= IB_QP_PORT;
cmd->base.port_num = cmd->base.dest.port_num;
}
}
}
if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
(!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) ||
!rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num))) {
!rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num) ||
cmd->base.alt_port_num != cmd->base.alt_dest.port_num)) {
ret = -EINVAL;
goto release_qp;
}

@@ -728,9 +728,6 @@ EXPORT_SYMBOL_GPL(vsp1_du_setup_lif);
*/
void vsp1_du_atomic_begin(struct device *dev, unsigned int pipe_index)
{
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
mutex_lock(&vsp1->drm->lock);
}
EXPORT_SYMBOL_GPL(vsp1_du_atomic_begin);
@@ -846,6 +843,7 @@ void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index,
drm_pipe->crc = cfg->crc;
mutex_lock(&vsp1->drm->lock);
vsp1_du_pipeline_setup_inputs(vsp1, pipe);
vsp1_du_pipeline_configure(pipe);
mutex_unlock(&vsp1->drm->lock);

@@ -174,6 +174,7 @@ static int lirc_bpf_detach(struct rc_dev *rcdev, struct bpf_prog *prog)
rcu_assign_pointer(raw->progs, new_array);
bpf_prog_array_free(old_array);
bpf_prog_put(prog);
unlock:
mutex_unlock(&ir_raw_handler_lock);
return ret;

@@ -30,13 +30,13 @@ static int ir_raw_event_thread(void *data)
while (kfifo_out(&raw->kfifo, &ev, 1)) {
if (is_timing_event(ev)) {
if (ev.duration == 0)
dev_err(&dev->dev, "nonsensical timing event of duration 0");
dev_warn_once(&dev->dev, "nonsensical timing event of duration 0");
if (is_timing_event(raw->prev_ev) &&
!is_transition(&ev, &raw->prev_ev))
dev_err(&dev->dev, "two consecutive events of type %s",
TO_STR(ev.pulse));
dev_warn_once(&dev->dev, "two consecutive events of type %s",
TO_STR(ev.pulse));
if (raw->prev_ev.reset && ev.pulse == 0)
dev_err(&dev->dev, "timing event after reset should be pulse");
dev_warn_once(&dev->dev, "timing event after reset should be pulse");
}
list_for_each_entry(handler, &ir_raw_handler_list, list)
if (dev->enabled_protocols &

@@ -679,6 +679,14 @@ static void ir_timer_repeat(struct timer_list *t)
spin_unlock_irqrestore(&dev->keylock, flags);
}
static unsigned int repeat_period(int protocol)
{
if (protocol >= ARRAY_SIZE(protocols))
return 100;
return protocols[protocol].repeat_period;
}
/**
* rc_repeat() - signals that a key is still pressed
* @dev: the struct rc_dev descriptor of the device
@@ -691,7 +699,7 @@ void rc_repeat(struct rc_dev *dev)
{
unsigned long flags;
unsigned int timeout = nsecs_to_jiffies(dev->timeout) +
msecs_to_jiffies(protocols[dev->last_protocol].repeat_period);
msecs_to_jiffies(repeat_period(dev->last_protocol));
struct lirc_scancode sc = {
.scancode = dev->last_scancode, .rc_proto = dev->last_protocol,
.keycode = dev->keypressed ? dev->last_keycode : KEY_RESERVED,
@@ -803,7 +811,7 @@ void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u32 scancode,
if (dev->keypressed) {
dev->keyup_jiffies = jiffies + nsecs_to_jiffies(dev->timeout) +
msecs_to_jiffies(protocols[protocol].repeat_period);
msecs_to_jiffies(repeat_period(protocol));
mod_timer(&dev->timer_keyup, dev->keyup_jiffies);
}
spin_unlock_irqrestore(&dev->keylock, flags);

@@ -327,12 +327,16 @@ static void mlxsw_afa_resource_add(struct mlxsw_afa_block *block,
list_add(&resource->list, &block->resource_list);
}
static void mlxsw_afa_resource_del(struct mlxsw_afa_resource *resource)
{
list_del(&resource->list);
}
static void mlxsw_afa_resources_destroy(struct mlxsw_afa_block *block)
{
struct mlxsw_afa_resource *resource, *tmp;
list_for_each_entry_safe(resource, tmp, &block->resource_list, list) {
list_del(&resource->list);
resource->destructor(block, resource);
}
}
@@ -565,6 +569,7 @@ static void
mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block,
struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref)
{
mlxsw_afa_resource_del(&fwd_entry_ref->resource);
mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry);
kfree(fwd_entry_ref);
}
@@ -614,6 +619,7 @@ static void
mlxsw_afa_counter_destroy(struct mlxsw_afa_block *block,
struct mlxsw_afa_counter *counter)
{
mlxsw_afa_resource_del(&counter->resource);
block->afa->ops->counter_index_put(block->afa->ops_priv,
counter->counter_index);
kfree(counter);
@@ -661,8 +667,8 @@ static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block,
char *oneact;
char *actions;
if (WARN_ON(block->finished))
return NULL;
if (block->finished)
return ERR_PTR(-EINVAL);
if (block->cur_act_index + action_size >
block->afa->max_acts_per_set) {
struct mlxsw_afa_set *set;
@@ -672,7 +678,7 @@ static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block,
*/
set = mlxsw_afa_set_create(false);
if (!set)
return NULL;
return ERR_PTR(-ENOBUFS);
set->prev = block->cur_set;
block->cur_act_index = 0;
block->cur_set->next = set;
@@ -760,9 +766,9 @@ int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
MLXSW_AFA_VLAN_CODE,
MLXSW_AFA_VLAN_SIZE);
if (!act) {
if (IS_ERR(act)) {
NL_SET_ERR_MSG_MOD(extack, "Cannot append vlan_modify action");
return -ENOBUFS;
return PTR_ERR(act);
}
mlxsw_afa_vlan_pack(act, MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP,
MLXSW_AFA_VLAN_CMD_SET_OUTER, vid,
@@ -844,8 +850,8 @@ int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block)
MLXSW_AFA_TRAPDISC_CODE,
MLXSW_AFA_TRAPDISC_SIZE);
if (!act)
return -ENOBUFS;
if (IS_ERR(act))
return PTR_ERR(act);
mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP,
MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD, 0);
return 0;
@@ -858,8 +864,8 @@ int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id)
MLXSW_AFA_TRAPDISC_CODE,
MLXSW_AFA_TRAPDISC_SIZE);
if (!act)
return -ENOBUFS;
if (IS_ERR(act))
return PTR_ERR(act);
mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP,
MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD,
trap_id);
@@ -874,8 +880,8 @@ int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
MLXSW_AFA_TRAPDISC_CODE,
MLXSW_AFA_TRAPDISC_SIZE);
if (!act)
return -ENOBUFS;
if (IS_ERR(act))
return PTR_ERR(act);
mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP,
MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD,
trap_id);
@@ -894,6 +900,7 @@ static void
mlxsw_afa_mirror_destroy(struct mlxsw_afa_block *block,
struct mlxsw_afa_mirror *mirror)
{
mlxsw_afa_resource_del(&mirror->resource);
block->afa->ops->mirror_del(block->afa->ops_priv,
mirror->local_in_port,
mirror->span_id,
@@ -946,8 +953,8 @@ mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block,
char *act = mlxsw_afa_block_append_action(block,
MLXSW_AFA_TRAPDISC_CODE,
MLXSW_AFA_TRAPDISC_SIZE);
if (!act)
return -ENOBUFS;
if (IS_ERR(act))
return PTR_ERR(act);
mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP,
MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD, 0);
mlxsw_afa_trapdisc_mirror_pack(act, true, mirror_agent);
@@ -1043,9 +1050,9 @@ int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
act = mlxsw_afa_block_append_action(block, MLXSW_AFA_FORWARD_CODE,
MLXSW_AFA_FORWARD_SIZE);
if (!act) {
err = -ENOBUFS;
if (IS_ERR(act)) {
NL_SET_ERR_MSG_MOD(extack, "Cannot append forward action");
err = PTR_ERR(act);
goto err_append_action;
}
mlxsw_afa_forward_pack(act, MLXSW_AFA_FORWARD_TYPE_PBS,
@@ -1100,8 +1107,8 @@ int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
{
char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_POLCNT_CODE,
MLXSW_AFA_POLCNT_SIZE);
if (!act)
return -ENOBUFS;
if (IS_ERR(act))
return PTR_ERR(act);
mlxsw_afa_polcnt_pack(act, MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES,
counter_index);
return 0;
@@ -1176,9 +1183,9 @@ int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid,
char *act = mlxsw_afa_block_append_action(block,
MLXSW_AFA_VIRFWD_CODE,
MLXSW_AFA_VIRFWD_SIZE);
if (!act) {
if (IS_ERR(act)) {
NL_SET_ERR_MSG_MOD(extack, "Cannot append fid_set action");
return -ENOBUFS;
return PTR_ERR(act);
}
mlxsw_afa_virfwd_pack(act, MLXSW_AFA_VIRFWD_FID_CMD_SET, fid);
return 0;
@@ -1248,8 +1255,8 @@ int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
char *act = mlxsw_afa_block_append_action(block,
MLXSW_AFA_MCROUTER_CODE,
MLXSW_AFA_MCROUTER_SIZE);
if (!act)
return -ENOBUFS;
if (IS_ERR(act))
return PTR_ERR(act);
mlxsw_afa_mcrouter_pack(act, MLXSW_AFA_MCROUTER_RPF_ACTION_TRAP,
expected_irif, min_mtu, rmid_valid, kvdl_index);
return 0;

@@ -1362,7 +1362,7 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
case 0x001:
printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name);
break;
case 0x010:
case 0x002:
printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name);
break;
default:

@@ -330,7 +330,7 @@ void pci_bus_add_device(struct pci_dev *dev)
return;
}
dev->is_added = 1;
pci_dev_assign_added(dev, true);
}
EXPORT_SYMBOL_GPL(pci_bus_add_device);
@@ -347,14 +347,14 @@ void pci_bus_add_devices(const struct pci_bus *bus)
list_for_each_entry(dev, &bus->devices, bus_list) {
/* Skip already-added devices */
if (dev->is_added)
if (pci_dev_is_added(dev))
continue;
pci_bus_add_device(dev);
}
list_for_each_entry(dev, &bus->devices, bus_list) {
/* Skip if device attach failed */
if (!dev->is_added)
if (!pci_dev_is_added(dev))
continue;
child = dev->subordinate;
if (child)

@@ -107,7 +107,7 @@
#define CFG_WINDOW_TYPE 0
#define IO_WINDOW_TYPE 1
#define MEM_WINDOW_TYPE 2
#define IB_WIN_SIZE (256 * 1024 * 1024 * 1024)
#define IB_WIN_SIZE ((u64)256 * 1024 * 1024 * 1024)
#define MAX_PIO_WINDOWS 8
/* Parameters for the waiting for link up routine */

@@ -509,7 +509,7 @@ static void enable_slot(struct acpiphp_slot *slot)
list_for_each_entry(dev, &bus->devices, bus_list) {
/* Assume that newly added devices are powered on already. */
if (!dev->is_added)
if (!pci_dev_is_added(dev))
dev->current_state = PCI_D0;
}

@@ -288,6 +288,7 @@ struct pci_sriov {
/* pci_dev priv_flags */
#define PCI_DEV_DISCONNECTED 0
#define PCI_DEV_ADDED 1
static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused)
{
@@ -300,6 +301,16 @@ static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
return test_bit(PCI_DEV_DISCONNECTED, &dev->priv_flags);
}
static inline void pci_dev_assign_added(struct pci_dev *dev, bool added)
{
assign_bit(PCI_DEV_ADDED, &dev->priv_flags, added);
}
static inline bool pci_dev_is_added(const struct pci_dev *dev)
{
return test_bit(PCI_DEV_ADDED, &dev->priv_flags);
}
#ifdef CONFIG_PCI_ATS
void pci_restore_ats_state(struct pci_dev *dev);
#else

@@ -2433,13 +2433,13 @@ int pci_scan_slot(struct pci_bus *bus, int devfn)
dev = pci_scan_single_device(bus, devfn);
if (!dev)
return 0;
if (!dev->is_added)
if (!pci_dev_is_added(dev))
nr++;
for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
dev = pci_scan_single_device(bus, devfn + fn);
if (dev) {
if (!dev->is_added)
if (!pci_dev_is_added(dev))
nr++;
dev->multifunction = 1;
}

@@ -19,11 +19,12 @@ static void pci_stop_dev(struct pci_dev *dev)
{
pci_pme_active(dev, false);
if (dev->is_added) {
if (pci_dev_is_added(dev)) {
device_release_driver(&dev->dev);
pci_proc_detach_device(dev);
pci_remove_sysfs_dev_files(dev);
dev->is_added = 0;
pci_dev_assign_added(dev, false);
}
if (dev->bus->self)

@@ -1443,7 +1443,7 @@ iomap_bmap(struct address_space *mapping, sector_t bno,
const struct iomap_ops *ops)
{
struct inode *inode = mapping->host;
loff_t pos = bno >> inode->i_blkbits;
loff_t pos = bno << inode->i_blkbits;
unsigned blocksize = i_blocksize(inode);
if (filemap_write_and_wait(mapping))

@@ -115,6 +115,13 @@ struct dinode {
dxd_t _dxd; /* 16: */
union {
__le32 _rdev; /* 4: */
/*
* The fast symlink area
* is expected to overflow
* into _inlineea when
* needed (which will clear
* INLINEEA).
*/
u8 _fastsymlink[128];
} _u;
u8 _inlineea[128];

@@ -87,6 +87,7 @@ struct jfs_inode_info {
struct {
unchar _unused[16]; /* 16: */
dxd_t _dxd; /* 16: */
/* _inline may overflow into _inline_ea when needed */
unchar _inline[128]; /* 128: inline symlink */
/* _inline_ea may overlay the last part of
* file._xtroot if maxentry = XTROOTINITSLOT

@@ -967,8 +967,7 @@ static int __init init_jfs_fs(void)
jfs_inode_cachep =
kmem_cache_create_usercopy("jfs_ip", sizeof(struct jfs_inode_info),
0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_ACCOUNT,
offsetof(struct jfs_inode_info, i_inline),
sizeof_field(struct jfs_inode_info, i_inline),
offsetof(struct jfs_inode_info, i_inline), IDATASIZE,
init_once);
if (jfs_inode_cachep == NULL)
return -ENOMEM;

@@ -6466,34 +6466,34 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
if (data->arg.new_lock && !data->cancelled) {
data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0)
break;
goto out_restart;
}
if (data->arg.new_lock_owner != 0) {
nfs_confirm_seqid(&lsp->ls_seqid, 0);
nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
goto out_done;
} else if (nfs4_update_lock_stateid(lsp, &data->res.stateid))
goto out_done;
} else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
goto out_restart;
break;
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_OLD_STATEID:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
if (data->arg.new_lock_owner != 0) {
if (nfs4_stateid_match(&data->arg.open_stateid,
if (!nfs4_stateid_match(&data->arg.open_stateid,
&lsp->ls_state->open_stateid))
goto out_done;
} else if (nfs4_stateid_match(&data->arg.lock_stateid,
goto out_restart;
} else if (!nfs4_stateid_match(&data->arg.lock_stateid,
&lsp->ls_stateid))
goto out_done;
goto out_restart;
}
if (!data->cancelled)
rpc_restart_call_prepare(task);
out_done:
dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
return;
out_restart:
if (!data->cancelled)
rpc_restart_call_prepare(task);
goto out_done;
}
static void nfs4_lock_release(void *calldata)
@@ -6502,7 +6502,7 @@ static void nfs4_lock_release(void *calldata)
dprintk("%s: begin!\n", __func__);
nfs_free_seqid(data->arg.open_seqid);
if (data->cancelled) {
if (data->cancelled && data->rpc_status == 0) {
struct rpc_task *task;
task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
data->arg.lock_seqid);

@@ -633,8 +633,10 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
/* the various vma->vm_userfaultfd_ctx still points to it */
down_write(&mm->mmap_sem);
for (vma = mm->mmap; vma; vma = vma->vm_next)
if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx)
if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
}
up_write(&mm->mmap_sem);
userfaultfd_ctx_put(release_new_ctx);

@@ -368,7 +368,6 @@ struct pci_dev {
unsigned int transparent:1; /* Subtractive decode bridge */
unsigned int multifunction:1; /* Multi-function device */
unsigned int is_added:1;
unsigned int is_busmaster:1; /* Is busmaster */
unsigned int no_msi:1; /* May not use MSI */
unsigned int no_64bit_msi:1; /* May only use 32-bit MSIs */

@@ -428,6 +428,17 @@ static int shm_split(struct vm_area_struct *vma, unsigned long addr)
return 0;
}
static unsigned long shm_pagesize(struct vm_area_struct *vma)
{
struct file *file = vma->vm_file;
struct shm_file_data *sfd = shm_file_data(file);
if (sfd->vm_ops->pagesize)
return sfd->vm_ops->pagesize(vma);
return PAGE_SIZE;
}
#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
@@ -555,6 +566,7 @@ static const struct vm_operations_struct shm_vm_ops = {
.close = shm_close, /* callback for when the vm-area is released */
.fault = shm_fault,
.split = shm_split,
.pagesize = shm_pagesize,
#if defined(CONFIG_NUMA)
.set_policy = shm_set_policy,
.get_policy = shm_get_policy,

@@ -1068,6 +1068,13 @@ static int irq_setup_forced_threading(struct irqaction *new)
if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
return 0;
/*
* No further action required for interrupts which are requested as
* threaded interrupts already
*/
if (new->handler == irq_default_primary_handler)
return 0;
new->flags |= IRQF_ONESHOT;
/*
@@ -1075,7 +1082,7 @@ static int irq_setup_forced_threading(struct irqaction *new)
* thread handler. We force thread them as well by creating a
* secondary action.
*/
if (new->handler != irq_default_primary_handler && new->thread_fn) {
if (new->handler && new->thread_fn) {
/* Allocate the secondary action */
new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
if (!new->secondary)

@@ -390,7 +390,7 @@ static inline void tick_irq_exit(void)
/* Make sure that timer wheel updates are propagated */
if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
if (!in_interrupt())
if (!in_irq())
tick_nohz_irq_exit();
}
#endif

@@ -642,7 +642,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
static inline bool local_timer_softirq_pending(void)
{
return local_softirq_pending() & TIMER_SOFTIRQ;
return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
}
static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)

@@ -3167,6 +3167,13 @@ static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
return 0;
}
/*
* When a new function is introduced to vm_operations_struct and added
* to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
* This is because under System V memory model, mappings created via
* shmget/shmat with "huge page" specified are backed by hugetlbfs files,
* their original vm_ops are overwritten with shm_vm_ops.
*/
const struct vm_operations_struct hugetlb_vm_ops = {
.fault = hugetlb_vm_op_fault,
.open = hugetlb_vm_op_open,

@@ -4037,6 +4037,14 @@ static struct cftype mem_cgroup_legacy_files[] = {
static DEFINE_IDR(mem_cgroup_idr);
static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
{
if (memcg->id.id > 0) {
idr_remove(&mem_cgroup_idr, memcg->id.id);
memcg->id.id = 0;
}
}
static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
{
VM_BUG_ON(atomic_read(&memcg->id.ref) <= 0);
@@ -4047,8 +4055,7 @@ static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
{
VM_BUG_ON(atomic_read(&memcg->id.ref) < n);
if (atomic_sub_and_test(n, &memcg->id.ref)) {
idr_remove(&mem_cgroup_idr, memcg->id.id);
memcg->id.id = 0;
mem_cgroup_id_remove(memcg);
/* Memcg ID pins CSS */
css_put(&memcg->css);
@@ -4185,8 +4192,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
return memcg;
fail:
if (memcg->id.id > 0)
idr_remove(&mem_cgroup_idr, memcg->id.id);
mem_cgroup_id_remove(memcg);
__mem_cgroup_free(memcg);
return NULL;
}
@@ -4245,6 +4251,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
return &memcg->css;
fail:
mem_cgroup_id_remove(memcg);
mem_cgroup_free(memcg);
return ERR_PTR(-ENOMEM);
}

@@ -1139,13 +1139,18 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
l2tp_session_get(sock_net(sk), tunnel,
stats.session_id);
if (session && session->pwtype == L2TP_PWTYPE_PPP) {
err = pppol2tp_session_ioctl(session, cmd,
arg);
l2tp_session_dec_refcount(session);
} else {
if (!session) {
err = -EBADR;
break;
}
if (session->pwtype != L2TP_PWTYPE_PPP) {
l2tp_session_dec_refcount(session);
err = -EBADR;
break;
}
err = pppol2tp_session_ioctl(session, cmd, arg);
l2tp_session_dec_refcount(session);
break;
}
#ifdef CONFIG_XFRM

@@ -1013,8 +1013,8 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
if (nlk->ngroups == 0)
groups = 0;
else
groups &= (1ULL << nlk->ngroups) - 1;
else if (nlk->ngroups < 8*sizeof(groups))
groups &= (1UL << nlk->ngroups) - 1;
bound = nlk->bound;
if (bound) {

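The ngroups bound check added above matters because a C shift by a count
greater than or equal to the type's width is undefined behavior, so
(1UL << nlk->ngroups) - 1 is only a valid mask while ngroups is smaller
than the bit width of groups. A standalone illustration of the
guarded-mask idiom (low_bits_mask() is illustrative, not kernel code):

	/* Mask of the low n bits, avoiding the undefined n >= width shift. */
	static unsigned long low_bits_mask(unsigned int n)
	{
		if (n >= 8 * sizeof(unsigned long))
			return ~0UL;	/* all bits representable: keep them all */
		return (1UL << n) - 1;
	}
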
@@ -278,7 +278,8 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
/* force immediate tx of current consumer cursor, but
* under send_lock to guarantee arrival in seqno-order
*/
smc_tx_sndbuf_nonempty(conn);
if (smc->sk.sk_state != SMC_INIT)
smc_tx_sndbuf_nonempty(conn);
}
}

@@ -84,10 +84,8 @@ static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
int err = xskq_produce_batch_desc(xs->rx, (u64)xdp->handle, len);
if (err) {
xdp_return_buff(xdp);
if (err)
xs->rx_dropped++;
}
return err;
}

@@ -399,5 +399,6 @@
#define __NR_pkey_free 385
#define __NR_pkey_mprotect 386
#define __NR_rseq 387
#define __NR_io_pgetevents 388
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */

@@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MCSAFE_TEST_H_
#define _MCSAFE_TEST_H_
.macro MCSAFE_TEST_CTL
.endm
.macro MCSAFE_TEST_SRC reg count target
.endm
.macro MCSAFE_TEST_DST reg count target
.endm
#endif /* _MCSAFE_TEST_H_ */

@@ -3,6 +3,7 @@
#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/cpufeatures.h>
#include <asm/mcsafe_test.h>
#include <asm/alternative-asm.h>
#include <asm/export.h>
@@ -183,12 +184,15 @@ ENTRY(memcpy_orig)
ENDPROC(memcpy_orig)
#ifndef CONFIG_UML
MCSAFE_TEST_CTL
/*
* memcpy_mcsafe_unrolled - memory copy with machine check exception handling
* __memcpy_mcsafe - memory copy with machine check exception handling
* Note that we only catch machine checks when reading the source addresses.
* Writes to target are posted and don't generate machine checks.
*/
ENTRY(memcpy_mcsafe_unrolled)
ENTRY(__memcpy_mcsafe)
cmpl $8, %edx
/* Less than 8 bytes? Go to byte copy loop */
jb .L_no_whole_words
@@ -204,58 +208,33 @@ ENTRY(memcpy_mcsafe_unrolled)
subl $8, %ecx
negl %ecx
subl %ecx, %edx
.L_copy_leading_bytes:
.L_read_leading_bytes:
movb (%rsi), %al
MCSAFE_TEST_SRC %rsi 1 .E_leading_bytes
MCSAFE_TEST_DST %rdi 1 .E_leading_bytes
.L_write_leading_bytes:
movb %al, (%rdi)
incq %rsi
incq %rdi
decl %ecx
jnz .L_copy_leading_bytes
jnz .L_read_leading_bytes
.L_8byte_aligned:
/* Figure out how many whole cache lines (64-bytes) to copy */
movl %edx, %ecx
andl $63, %edx
shrl $6, %ecx
jz .L_no_whole_cache_lines
/* Loop copying whole cache lines */
.L_cache_w0: movq (%rsi), %r8
.L_cache_w1: movq 1*8(%rsi), %r9
.L_cache_w2: movq 2*8(%rsi), %r10
.L_cache_w3: movq 3*8(%rsi), %r11
movq %r8, (%rdi)
movq %r9, 1*8(%rdi)
movq %r10, 2*8(%rdi)
movq %r11, 3*8(%rdi)
.L_cache_w4: movq 4*8(%rsi), %r8
.L_cache_w5: movq 5*8(%rsi), %r9
.L_cache_w6: movq 6*8(%rsi), %r10
.L_cache_w7: movq 7*8(%rsi), %r11
movq %r8, 4*8(%rdi)
movq %r9, 5*8(%rdi)
movq %r10, 6*8(%rdi)
movq %r11, 7*8(%rdi)
leaq 64(%rsi), %rsi
leaq 64(%rdi), %rdi
decl %ecx
jnz .L_cache_w0
/* Are there any trailing 8-byte words? */
.L_no_whole_cache_lines:
movl %edx, %ecx
andl $7, %edx
shrl $3, %ecx
jz .L_no_whole_words
/* Copy trailing words */
.L_copy_trailing_words:
.L_read_words:
movq (%rsi), %r8
mov %r8, (%rdi)
leaq 8(%rsi), %rsi
leaq 8(%rdi), %rdi
MCSAFE_TEST_SRC %rsi 8 .E_read_words
MCSAFE_TEST_DST %rdi 8 .E_write_words
.L_write_words:
movq %r8, (%rdi)
addq $8, %rsi
addq $8, %rdi
decl %ecx
jnz .L_copy_trailing_words
jnz .L_read_words
/* Any trailing bytes? */
.L_no_whole_words:
@@ -264,38 +243,55 @@ ENTRY(memcpy_mcsafe_unrolled)
/* Copy trailing bytes */
movl %edx, %ecx
.L_copy_trailing_bytes:
.L_read_trailing_bytes:
movb (%rsi), %al
MCSAFE_TEST_SRC %rsi 1 .E_trailing_bytes
MCSAFE_TEST_DST %rdi 1 .E_trailing_bytes
.L_write_trailing_bytes:
movb %al, (%rdi)
incq %rsi
incq %rdi
decl %ecx
jnz .L_copy_trailing_bytes
jnz .L_read_trailing_bytes
/* Copy successful. Return zero */
.L_done_memcpy_trap:
xorq %rax, %rax
ret
ENDPROC(memcpy_mcsafe_unrolled)
EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled)
ENDPROC(__memcpy_mcsafe)
EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
.section .fixup, "ax"
/* Return -EFAULT for any failure */
.L_memcpy_mcsafe_fail:
mov $-EFAULT, %rax
/*
* Return number of bytes not copied for any failure. Note that
* there is no "tail" handling since the source buffer is 8-byte
* aligned and poison is cacheline aligned.
*/
.E_read_words:
shll $3, %ecx
.E_leading_bytes:
addl %edx, %ecx
.E_trailing_bytes:
mov %ecx, %eax
ret
/*
* For write fault handling, given the destination is unaligned,
* we handle faults on multi-byte writes with a byte-by-byte
* copy up to the write-protected page.
*/
.E_write_words:
shll $3, %ecx
addl %edx, %ecx
movl %ecx, %edx
jmp mcsafe_handle_tail
.previous
_ASM_EXTABLE_FAULT(.L_copy_leading_bytes, .L_memcpy_mcsafe_fail)
_ASM_EXTABLE_FAULT(.L_cache_w0, .L_memcpy_mcsafe_fail)
_ASM_EXTABLE_FAULT(.L_cache_w1, .L_memcpy_mcsafe_fail)
_ASM_EXTABLE_FAULT(.L_cache_w2, .L_memcpy_mcsafe_fail)
_ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
_ASM_EXTABLE_FAULT(.L_cache_w4, .L_memcpy_mcsafe_fail)
_ASM_EXTABLE_FAULT(.L_cache_w5, .L_memcpy_mcsafe_fail)
_ASM_EXTABLE_FAULT(.L_cache_w6, .L_memcpy_mcsafe_fail)
_ASM_EXTABLE_FAULT(.L_cache_w7, .L_memcpy_mcsafe_fail)
_ASM_EXTABLE_FAULT(.L_copy_trailing_words, .L_memcpy_mcsafe_fail)
_ASM_EXTABLE_FAULT(.L_copy_trailing_bytes, .L_memcpy_mcsafe_fail)
_ASM_EXTABLE_FAULT(.L_read_leading_bytes, .E_leading_bytes)
_ASM_EXTABLE_FAULT(.L_read_words, .E_read_words)
_ASM_EXTABLE_FAULT(.L_read_trailing_bytes, .E_trailing_bytes)
_ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes)
_ASM_EXTABLE(.L_write_words, .E_write_words)
_ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes)
#endif

@@ -35,6 +35,7 @@
#include <errno.h>
#include <fcntl.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
@@ -91,7 +92,8 @@ static bool map_is_map_of_progs(__u32 type)
static void *alloc_value(struct bpf_map_info *info)
{
if (map_is_per_cpu(info->type))
return malloc(info->value_size * get_possible_cpus());
return malloc(round_up(info->value_size, 8) *
get_possible_cpus());
else
return malloc(info->value_size);
}
@@ -273,9 +275,10 @@ static void print_entry_json(struct bpf_map_info *info, unsigned char *key,
do_dump_btf(&d, info, key, value);
}
} else {
unsigned int i, n;
unsigned int i, n, step;
n = get_possible_cpus();
step = round_up(info->value_size, 8);
jsonw_name(json_wtr, "key");
print_hex_data_json(key, info->key_size);
@@ -288,7 +291,7 @@ static void print_entry_json(struct bpf_map_info *info, unsigned char *key,
jsonw_int_field(json_wtr, "cpu", i);
jsonw_name(json_wtr, "value");
print_hex_data_json(value + i * info->value_size,
print_hex_data_json(value + i * step,
info->value_size);
jsonw_end_object(json_wtr);
@@ -319,9 +322,10 @@ static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
printf("\n");
} else {
unsigned int i, n;
unsigned int i, n, step;
n = get_possible_cpus();
step = round_up(info->value_size, 8);
printf("key:\n");
fprint_hex(stdout, key, info->key_size, " ");
@@ -329,7 +333,7 @@ static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
for (i = 0; i < n; i++) {
printf("value (CPU %02d):%c",
i, info->value_size > 16 ? '\n' : ' ');
fprint_hex(stdout, value + i * info->value_size,
fprint_hex(stdout, value + i * step,
info->value_size, " ");
printf("\n");
}

@@ -143,6 +143,8 @@ enum perf_event_sample_format {
PERF_SAMPLE_PHYS_ADDR = 1U << 19,
PERF_SAMPLE_MAX = 1U << 20, /* non-ABI */
__PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63,
};
/*

@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <string.h>
#include <linux/stddef.h>
#include <linux/perf_event.h>
#include "../../util/intel-pt.h"

@@ -2,6 +2,7 @@
#include <stdbool.h>
#include <errno.h>
#include <linux/stddef.h>
#include <linux/perf_event.h>
#include "../../perf.h"

@@ -7,6 +7,7 @@ perf-y += futex-wake-parallel.o
perf-y += futex-requeue.o
perf-y += futex-lock-pi.o
perf-$(CONFIG_X86_64) += mem-memcpy-x86-64-lib.o
perf-$(CONFIG_X86_64) += mem-memcpy-x86-64-asm.o
perf-$(CONFIG_X86_64) += mem-memset-x86-64-asm.o

@@ -6,6 +6,7 @@
#define altinstr_replacement text
#define globl p2align 4; .globl
#define _ASM_EXTABLE_FAULT(x, y)
#define _ASM_EXTABLE(x, y)
#include "../../arch/x86/lib/memcpy_64.S"
/*

@@ -0,0 +1,24 @@
/*
* From code in arch/x86/lib/usercopy_64.c, copied to keep tools/ copy
* of the kernel's arch/x86/lib/memcpy_64.s used in 'perf bench mem memcpy'
* happy.
*/
#include <linux/types.h>
unsigned long __memcpy_mcsafe(void *dst, const void *src, size_t cnt);
unsigned long mcsafe_handle_tail(char *to, char *from, unsigned len);
unsigned long mcsafe_handle_tail(char *to, char *from, unsigned len)
{
for (; len; --len, to++, from++) {
/*
* Call the assembly routine back directly since
* memcpy_mcsafe() may silently fallback to memcpy.
*/
unsigned long rem = __memcpy_mcsafe(to, from, 1);
if (rem)
break;
}
return len;
}

@@ -5,6 +5,7 @@
#include <time.h>
#include <stdbool.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/perf_event.h>
extern bool test_attr__enabled;

@@ -2,6 +2,7 @@
#ifndef __PERF_HEADER_H
#define __PERF_HEADER_H
#include <linux/stddef.h>
#include <linux/perf_event.h>
#include <sys/types.h>
#include <stdbool.h>

@@ -10,6 +10,7 @@
#define __PERF_NAMESPACES_H
#include <sys/types.h>
#include <linux/stddef.h>
#include <linux/perf_event.h>
#include <linux/refcount.h>
#include <linux/types.h>

@@ -115,14 +115,14 @@ ip netns exec ns2 ip -6 route add fb00::6 encap bpf in obj test_lwt_seg6local.o
ip netns exec ns2 ip -6 route add fd00::1 dev veth3 via fb00::43 scope link
ip netns exec ns3 ip -6 route add fc42::1 dev veth5 via fb00::65
ip netns exec ns3 ip -6 route add fd00::1 encap seg6local action End.BPF obj test_lwt_seg6local.o sec add_egr_x dev veth4
ip netns exec ns3 ip -6 route add fd00::1 encap seg6local action End.BPF endpoint obj test_lwt_seg6local.o sec add_egr_x dev veth4
ip netns exec ns4 ip -6 route add fd00::2 encap seg6local action End.BPF obj test_lwt_seg6local.o sec pop_egr dev veth6
ip netns exec ns4 ip -6 route add fd00::2 encap seg6local action End.BPF endpoint obj test_lwt_seg6local.o sec pop_egr dev veth6
ip netns exec ns4 ip -6 addr add fc42::1 dev lo
ip netns exec ns4 ip -6 route add fd00::3 dev veth7 via fb00::87
ip netns exec ns5 ip -6 route add fd00::4 table 117 dev veth9 via fb00::109
ip netns exec ns5 ip -6 route add fd00::3 encap seg6local action End.BPF obj test_lwt_seg6local.o sec inspect_t dev veth8
ip netns exec ns5 ip -6 route add fd00::3 encap seg6local action End.BPF endpoint obj test_lwt_seg6local.o sec inspect_t dev veth8
ip netns exec ns6 ip -6 addr add fb00::6/16 dev lo
ip netns exec ns6 ip -6 addr add fd00::4/16 dev lo

@@ -402,7 +402,7 @@ int main(int argc, char *argv[])
exit(1);
}
fd = socket(AF_INET6, SOCK_STREAM, 0);
fd = socket(cfg_family, SOCK_STREAM, 0);
if (fd == -1) {
perror("socket");
exit(1);