This is the 5.4.136 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmEBQAcACgkQONu9yGCS
 aT4FRBAAgFrHSPHhtwcZ2uqAehzajAp7AbKxf1WejxPg/0YH2bE6nbhuLyDWqH5F
 mhyDpXVltW7xaFYZAEg9CPr6czwHAul4Bql4DH57KbO+/Q5BrS0VguepP0TPcVI5
 H8KztBrJCL5TsrOsvB+EXHtqDkEuhX957Qwa6PkBJs12x2Vq3EmazGGKSZSCGKuy
 v5gM8wztC3NzzOhVDZ2MPbh8RTrbGUEaRFi6B/XNlcEWMAxyqDJlJInbzimIFL6T
 eOYZ7z+IdrV0I0Eq0tqUmnhONQZxscs/hX1yv7evZtfG7LbT3v4nJu7c6O4FnLwV
 61B5aK4aytX7rTLVU+FRxP7MTmvNit71AY8SMSOx+bNLGBtrFstMv+f950j8npq1
 683wCAlDD2hw3zOc6rzbXhdowKtIaFirqDEDiYOy/K5r0liaEtQboOmlBO2WDFYy
 q5HsoCIpNWH2Os4LlA3PYVChEzO5yQJksUgRgUhcNMA0y+8hE1/C91HxNy8HPyHf
 tIeRHIpdvHETzSbNIYe9b9iQK0f3S2YLI+sdMtrlEXYFpvlD/w2DsVlzr/IRKP1x
 N1LVskeB7PVzJEImZPTGVrbPu/a/FHtFpx3dgiST72t18rHgCFdxW7pCI05jegLr
 C72SSES2v3QIIRoPAO6NF/E8ltmT6lnor1AcNeGz5I4rvPB01u8=
 =pPb8
 -----END PGP SIGNATURE-----
gpgsig -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEdQaENiSDAlGTDEbB7G51OISzHs0FAmEKo6UACgkQ7G51OISz
 Hs2X4Q/8CD6BDb4N5T1HeiFyZROk2IZK/PU80vpSTUBwn6x/27xKP0nisApawgqP
 5T1Dnucgp42H9cMRnjHikiDa08i0tAoyzwIHJG+1/DG2iHpGfRo9Iu+BZyIWaK2E
 c+VOr1AHQw2iC9QSmHF4sbNFwBdJNLNXnR3od2AUH/G0gRGUEjchN8rgkQJh3pmO
 6RHddTdrXLP31EOc0LEH2pNK8E49e6Ipo/5OY4lc3b0BxF9lhCGLbL8e+E5gTRAF
 eIpwhKAnqiRHBPjzCMyZPzAfdUQwywk4gnPxy2NTq8O7vSY9NVxmHMAu29/duIK0
 UQbW9q1Vv+BIyXUimawh/cxoouRpV3Owue2p21nCIRVS2v0Wo4c98PTvxBgEy+UR
 MBMVeb1I9XwhPS9SLABADfn7mz9BAWLb+YVkbQHFgMZ3kHT3bT1qe8EbT+VXyeBd
 2pviLgXKCsVwJQHxHv2GAJcyLoDhMynFRdaIxa/7CoPadAH3Jj/t5K8frWP5+Cbj
 iLVJW65S9zQRvHqkd6sOU17l/zHOF0AB9WgVS3PhO2nIxC74ZlSJr/ATe9yB69LU
 JlDDxZRHF2QsI/07IFw5t91ex+wgUNGMBnTan+hmZh4xoC8Syl+nGfbKZqn9Qhhk
 bv7Sk2FZTmPlsvFRASEnHINyYg8nt4vaHAz5HBOBjJhyJXKK4cE=
 =YNbT
 -----END PGP SIGNATURE-----

Merge tag 'v5.4.136' into 5.4-2.3.x-imx

This is the 5.4.136 stable release

Signed-off-by: Andrey Zhizhikin <andrey.zhizhikin@leica-geosystems.com>
Andrey Zhizhikin 2021-08-04 14:26:43 +00:00
commit d71473b588
105 changed files with 849 additions and 482 deletions


@ -45,14 +45,24 @@ how the user addresses are used by the kernel:
1. User addresses not accessed by the kernel but used for address space
management (e.g. ``mprotect()``, ``madvise()``). The use of valid
tagged pointers in this context is allowed with the exception of
``brk()``, ``mmap()`` and the ``new_address`` argument to
``mremap()`` as these have the potential to alias with existing
user addresses.
tagged pointers in this context is allowed with these exceptions:
NOTE: This behaviour changed in v5.6 and so some earlier kernels may
incorrectly accept valid tagged pointers for the ``brk()``,
``mmap()`` and ``mremap()`` system calls.
- ``brk()``, ``mmap()`` and the ``new_address`` argument to
``mremap()`` as these have the potential to alias with existing
user addresses.
NOTE: This behaviour changed in v5.6 and so some earlier kernels may
incorrectly accept valid tagged pointers for the ``brk()``,
``mmap()`` and ``mremap()`` system calls.
- The ``range.start``, ``start`` and ``dst`` arguments to the
``UFFDIO_*`` ``ioctl()``s used on a file descriptor obtained from
``userfaultfd()``, as fault addresses subsequently obtained by reading
the file descriptor will be untagged, which may otherwise confuse
tag-unaware programs.
NOTE: This behaviour changed in v5.14 and so some earlier kernels may
incorrectly accept valid tagged pointers for this system call.
2. User addresses accessed by the kernel (e.g. ``write()``). This ABI
relaxation is disabled by default and the application thread needs to
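
As a concrete illustration of the exceptions documented for point 1 above, here is a minimal userspace sketch, assuming an arm64 kernel (v5.6 or later for the brk()/mmap()/mremap() rejection) with tagged addressing enabled via prctl(); the tag value and mapping size are arbitrary:

    /* Hedged sketch: arm64 only, error handling trimmed. */
    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_TAGGED_ADDR_CTRL
    #define PR_SET_TAGGED_ADDR_CTRL 55
    #define PR_TAGGED_ADDR_ENABLE   (1UL << 0)
    #endif

    int main(void)
    {
        prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0);

        void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        /* Put an arbitrary tag in the top byte (TBI). */
        void *tagged = (void *)((uintptr_t)p | (0x2aUL << 56));

        mprotect(tagged, 4096, PROT_READ);  /* address-space management: accepted */
        mmap(tagged, 4096, PROT_READ,       /* aliasing hazard: fails with EINVAL */
             MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
        return 0;
    }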


@ -191,7 +191,7 @@ Documentation written by Tom Zanussi
with the event, in nanoseconds. May be
modified by .usecs to have timestamps
interpreted as microseconds.
cpu int the cpu on which the event occurred.
common_cpu int the cpu on which the event occurred.
====================== ==== =======================================
Extended error information
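
Since this renames the field that histogram triggers key on, scripts that used the old name may need updating; a hypothetical usage sketch follows (the tracefs mount point and event are assumptions, not part of this diff):

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        /* On kernels with this change the key is "common_cpu";
         * older kernels exposed it as "cpu". */
        int fd = open("/sys/kernel/tracing/events/sched/sched_switch/trigger",
                      O_WRONLY);
        if (fd < 0)
            return 1;
        static const char cmd[] = "hist:keys=common_cpu";
        write(fd, cmd, strlen(cmd));
        close(fd);
        return 0;
    }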


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 135
SUBLEVEL = 136
EXTRAVERSION =
NAME = Kleptomaniac Octopus


@ -62,15 +62,11 @@ do { \
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
pmd_t *pmd = NULL;
struct page *pg;
pmd_t *pmd;
pg = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, PMD_ORDER);
if (pg) {
pgtable_pmd_page_ctor(pg);
pmd = (pmd_t *)page_address(pg);
pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ORDER);
if (pmd)
pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
}
return pmd;
}


@ -59,7 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vma->vm_start))
(!vma || addr + len <= vm_start_gap(vma)))
return addr;
}


@ -2306,8 +2306,10 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP;
if (cpu_has_feature(CPU_FTR_HVMODE)) {
vcpu->arch.hfscr &= mfspr(SPRN_HFSCR);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
vcpu->arch.hfscr |= HFSCR_TM;
#endif
}
if (cpu_has_feature(CPU_FTR_TM_COMP))
vcpu->arch.hfscr |= HFSCR_TM;


@ -232,6 +232,9 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
if (vcpu->kvm->arch.l1_ptcr == 0)
return H_NOT_AVAILABLE;
if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
return H_BAD_MODE;
/* copy parameters in */
hv_ptr = kvmppc_get_gpr(vcpu, 4);
err = kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv,
@ -253,6 +256,23 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
if (l2_hv.vcpu_token >= NR_CPUS)
return H_PARAMETER;
/*
* L1 must have set up a suspended state to enter the L2 in a
* transactional state, and only in that case. These have to be
* filtered out here to prevent causing a TM Bad Thing in the
* host HRFID. We could synthesize a TM Bad Thing back to the L1
* here but there doesn't seem like much point.
*/
if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr)) {
if (!MSR_TM_ACTIVE(l2_regs.msr))
return H_BAD_MODE;
} else {
if (l2_regs.msr & MSR_TS_MASK)
return H_BAD_MODE;
if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_TS_MASK))
return H_BAD_MODE;
}
/* translate lpid */
l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true);
if (!l2)


@ -240,6 +240,17 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
* value so we can restore it on the way out.
*/
orig_rets = args.rets;
if (be32_to_cpu(args.nargs) >= ARRAY_SIZE(args.args)) {
/*
* Don't overflow our args array: ensure there is room for
* at least rets[0] (even if the call specifies 0 nret).
*
* Each handler must then check for the correct nargs and nret
* values, but they may always return failure in rets[0].
*/
rc = -EINVAL;
goto fail;
}
args.rets = &args.args[be32_to_cpu(args.nargs)];
mutex_lock(&vcpu->kvm->arch.rtas_token_lock);
@ -267,9 +278,17 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
fail:
/*
* We only get here if the guest has called RTAS with a bogus
* args pointer. That means we can't get to the args, and so we
* can't fail the RTAS call. So fail right out to userspace,
* which should kill the guest.
* args pointer or nargs/nret values that would overflow the
* array. That means we can't get to the args, and so we can't
* fail the RTAS call. So fail right out to userspace, which
* should kill the guest.
*
* SLOF should actually pass the hcall return value from the
* rtas handler call in r3, so enter_rtas could be modified to
* return a failure indication in r3 and we could return such
* errors to the guest rather than failing to host userspace.
* However old guests that don't test for failure could then
* continue silently after errors, so for now we won't do this.
*/
return rc;
}
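
The guard added above works because the RTAS argument buffer is a single fixed array with rets overlaid after the first nargs slots; if nargs consumes the whole array there is no room left even for rets[0]. A standalone sketch of the layout (array size and names illustrative):

    #define NR_ARGS 16

    struct rtas_args_sketch {
        unsigned int nargs, nret;
        unsigned int args[NR_ARGS];   /* rets overlays the tail of args[] */
        unsigned int *rets;
    };

    static int setup_rets(struct rtas_args_sketch *a)
    {
        /* Keep room for at least rets[0] past the nargs inputs. */
        if (a->nargs >= NR_ARGS)
            return -1;
        a->rets = &a->args[a->nargs];
        return 0;
    }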


@ -2035,9 +2035,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
{
struct kvm_enable_cap cap;
r = -EFAULT;
vcpu_load(vcpu);
if (copy_from_user(&cap, argp, sizeof(cap)))
goto out;
vcpu_load(vcpu);
r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
vcpu_put(vcpu);
break;
@ -2061,9 +2061,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
case KVM_DIRTY_TLB: {
struct kvm_dirty_tlb dirty;
r = -EFAULT;
vcpu_load(vcpu);
if (copy_from_user(&dirty, argp, sizeof(dirty)))
goto out;
vcpu_load(vcpu);
r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
vcpu_put(vcpu);
break;


@ -9,16 +9,6 @@
#include <asm/errno.h>
#include <asm/sigp.h>
#ifdef CC_USING_EXPOLINE
.pushsection .dma.text.__s390_indirect_jump_r14,"axG"
__dma__s390_indirect_jump_r14:
larl %r1,0f
ex 0,0(%r1)
j .
0: br %r14
.popsection
#endif
.section .dma.text,"ax"
/*
* Simplified version of expoline thunk. The normal thunks can not be used here,
@ -27,11 +17,10 @@ __dma__s390_indirect_jump_r14:
* affects a few functions that are not performance-relevant.
*/
.macro BR_EX_DMA_r14
#ifdef CC_USING_EXPOLINE
jg __dma__s390_indirect_jump_r14
#else
br %r14
#endif
larl %r1,0f
ex 0,0(%r1)
j .
0: br %r14
.endm
/*


@ -27,6 +27,7 @@ void ftrace_caller(void);
extern char ftrace_graph_caller_end;
extern unsigned long ftrace_plt;
extern void *ftrace_func;
struct dyn_arch_ftrace { };


@ -57,6 +57,7 @@
* > brasl %r0,ftrace_caller # offset 0
*/
void *ftrace_func __read_mostly = ftrace_stub;
unsigned long ftrace_plt;
static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
@ -166,6 +167,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
int ftrace_update_ftrace_func(ftrace_func_t func)
{
ftrace_func = func;
return 0;
}


@ -61,13 +61,13 @@ ENTRY(ftrace_caller)
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
aghik %r2,%r0,-MCOUNT_INSN_SIZE
lgrl %r4,function_trace_op
lgrl %r1,ftrace_trace_function
lgrl %r1,ftrace_func
#else
lgr %r2,%r0
aghi %r2,-MCOUNT_INSN_SIZE
larl %r4,function_trace_op
lg %r4,0(%r4)
larl %r1,ftrace_trace_function
larl %r1,ftrace_func
lg %r1,0(%r1)
#endif
lgr %r3,%r14


@ -114,7 +114,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
{
u32 r1 = reg2hex[b1];
if (!jit->seen_reg[r1] && r1 >= 6 && r1 <= 15)
if (r1 >= 6 && r1 <= 15 && !jit->seen_reg[r1])
jit->seen_reg[r1] = 1;
}
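
The operand swap above is deliberate: && evaluates left to right, so the range test must run before the array read, otherwise an out-of-range index reaches seen_reg[] first. A generic sketch of the pattern (names illustrative, not from the JIT):

    static int seen[16];

    static void mark_seen(unsigned int idx)
    {
        /* Wrong order reads seen[idx] before idx is validated:
         *   if (!seen[idx] && idx >= 6 && idx <= 15) ...
         */
        if (idx >= 6 && idx <= 15 && !seen[idx])  /* range check first */
            seen[idx] = 1;
    }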


@ -4239,8 +4239,6 @@ again:
static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
{
bool need_wait;
dout("%s rbd_dev %p\n", __func__, rbd_dev);
lockdep_assert_held_write(&rbd_dev->lock_rwsem);
@ -4252,11 +4250,11 @@ static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
*/
rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
rbd_assert(!completion_done(&rbd_dev->releasing_wait));
need_wait = !list_empty(&rbd_dev->running_list);
downgrade_write(&rbd_dev->lock_rwsem);
if (need_wait)
wait_for_completion(&rbd_dev->releasing_wait);
up_read(&rbd_dev->lock_rwsem);
if (list_empty(&rbd_dev->running_list))
return true;
up_write(&rbd_dev->lock_rwsem);
wait_for_completion(&rbd_dev->releasing_wait);
down_write(&rbd_dev->lock_rwsem);
if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
@ -4342,15 +4340,11 @@ static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
down_write(&rbd_dev->lock_rwsem);
if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
/*
* we already know that the remote client is
* the owner
*/
up_write(&rbd_dev->lock_rwsem);
return;
dout("%s rbd_dev %p cid %llu-%llu == owner_cid\n",
__func__, rbd_dev, cid.gid, cid.handle);
} else {
rbd_set_owner_cid(rbd_dev, &cid);
}
rbd_set_owner_cid(rbd_dev, &cid);
downgrade_write(&rbd_dev->lock_rwsem);
} else {
down_read(&rbd_dev->lock_rwsem);
@ -4375,14 +4369,12 @@ static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
down_write(&rbd_dev->lock_rwsem);
if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
dout("%s rbd_dev %p cid %llu-%llu != owner_cid %llu-%llu\n",
__func__, rbd_dev, cid.gid, cid.handle,
rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
up_write(&rbd_dev->lock_rwsem);
return;
} else {
rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
}
rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
downgrade_write(&rbd_dev->lock_rwsem);
} else {
down_read(&rbd_dev->lock_rwsem);


@ -975,6 +975,7 @@ static int __init efi_memreserve_map_root(void)
static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
{
struct resource *res, *parent;
int ret;
res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
if (!res)
@ -987,7 +988,17 @@ static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
/* we expect a conflict with a 'System RAM' region */
parent = request_resource_conflict(&iomem_resource, res);
return parent ? request_resource(parent, res) : 0;
ret = parent ? request_resource(parent, res) : 0;
/*
* Given that efi_mem_reserve_iomem() can be called at any
* time, only call memblock_reserve() if the architecture
* keeps the infrastructure around.
*/
if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
memblock_reserve(addr, size);
return ret;
}
int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)


@ -62,9 +62,11 @@ int __init efi_tpm_eventlog_init(void)
tbl_size = sizeof(*log_tbl) + log_tbl->size;
memblock_reserve(efi.tpm_log, tbl_size);
if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR ||
log_tbl->version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) {
pr_warn(FW_BUG "TPM Final Events table missing or invalid\n");
if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR) {
pr_info("TPM Final Events table not present\n");
goto out;
} else if (log_tbl->version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) {
pr_warn(FW_BUG "TPM Final Events table invalid\n");
goto out;
}


@ -826,6 +826,9 @@ long drm_ioctl(struct file *filp,
if (drm_dev_is_unplugged(dev))
return -ENODEV;
if (DRM_IOCTL_TYPE(cmd) != DRM_IOCTL_BASE)
return -ENOTTY;
is_driver_ioctl = nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END;
if (is_driver_ioctl) {
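
The new guard rejects commands whose ioctl magic is not DRM's before any number-based dispatch. Linux ioctl numbers carry an 8-bit type (magic) at bits 8-15 of the command word; a small sketch of that decode (macros simplified from the usual asm-generic layout):

    #include <stdio.h>

    #define IOC_TYPE(cmd)   (((cmd) >> 8) & 0xff)
    #define DRM_IOCTL_BASE  'd'

    int main(void)
    {
        unsigned int cmd = ((unsigned int)'d' << 8) | 0x23;
        printf("%s\n", IOC_TYPE(cmd) == DRM_IOCTL_BASE ? "DRM ioctl" : "-ENOTTY");
        return 0;
    }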


@ -453,7 +453,6 @@ static int rpi_touchscreen_remove(struct i2c_client *i2c)
drm_panel_remove(&ts->base);
mipi_dsi_device_unregister(ts->dsi);
kfree(ts->dsi);
return 0;
}


@ -47,7 +47,7 @@ struct bma180_part_info {
u8 int_reset_reg, int_reset_mask;
u8 sleep_reg, sleep_mask;
u8 bw_reg, bw_mask;
u8 bw_reg, bw_mask, bw_offset;
u8 scale_reg, scale_mask;
u8 power_reg, power_mask, lowpower_val;
u8 int_enable_reg, int_enable_mask;
@ -103,6 +103,7 @@ struct bma180_part_info {
#define BMA250_RANGE_MASK GENMASK(3, 0) /* Range of accel values */
#define BMA250_BW_MASK GENMASK(4, 0) /* Accel bandwidth */
#define BMA250_BW_OFFSET 8
#define BMA250_SUSPEND_MASK BIT(7) /* chip will sleep */
#define BMA250_LOWPOWER_MASK BIT(6)
#define BMA250_DATA_INTEN_MASK BIT(4)
@ -241,7 +242,8 @@ static int bma180_set_bw(struct bma180_data *data, int val)
for (i = 0; i < data->part_info->num_bw; ++i) {
if (data->part_info->bw_table[i] == val) {
ret = bma180_set_bits(data, data->part_info->bw_reg,
data->part_info->bw_mask, i);
data->part_info->bw_mask,
i + data->part_info->bw_offset);
if (ret) {
dev_err(&data->client->dev,
"failed to set bandwidth\n");
@ -633,32 +635,53 @@ static const struct iio_chan_spec bma250_channels[] = {
static const struct bma180_part_info bma180_part_info[] = {
[BMA180] = {
bma180_channels, ARRAY_SIZE(bma180_channels),
bma180_scale_table, ARRAY_SIZE(bma180_scale_table),
bma180_bw_table, ARRAY_SIZE(bma180_bw_table),
BMA180_CTRL_REG0, BMA180_RESET_INT,
BMA180_CTRL_REG0, BMA180_SLEEP,
BMA180_BW_TCS, BMA180_BW,
BMA180_OFFSET_LSB1, BMA180_RANGE,
BMA180_TCO_Z, BMA180_MODE_CONFIG, BMA180_LOW_POWER,
BMA180_CTRL_REG3, BMA180_NEW_DATA_INT,
BMA180_RESET,
bma180_chip_config,
bma180_chip_disable,
.channels = bma180_channels,
.num_channels = ARRAY_SIZE(bma180_channels),
.scale_table = bma180_scale_table,
.num_scales = ARRAY_SIZE(bma180_scale_table),
.bw_table = bma180_bw_table,
.num_bw = ARRAY_SIZE(bma180_bw_table),
.int_reset_reg = BMA180_CTRL_REG0,
.int_reset_mask = BMA180_RESET_INT,
.sleep_reg = BMA180_CTRL_REG0,
.sleep_mask = BMA180_SLEEP,
.bw_reg = BMA180_BW_TCS,
.bw_mask = BMA180_BW,
.scale_reg = BMA180_OFFSET_LSB1,
.scale_mask = BMA180_RANGE,
.power_reg = BMA180_TCO_Z,
.power_mask = BMA180_MODE_CONFIG,
.lowpower_val = BMA180_LOW_POWER,
.int_enable_reg = BMA180_CTRL_REG3,
.int_enable_mask = BMA180_NEW_DATA_INT,
.softreset_reg = BMA180_RESET,
.chip_config = bma180_chip_config,
.chip_disable = bma180_chip_disable,
},
[BMA250] = {
bma250_channels, ARRAY_SIZE(bma250_channels),
bma250_scale_table, ARRAY_SIZE(bma250_scale_table),
bma250_bw_table, ARRAY_SIZE(bma250_bw_table),
BMA250_INT_RESET_REG, BMA250_INT_RESET_MASK,
BMA250_POWER_REG, BMA250_SUSPEND_MASK,
BMA250_BW_REG, BMA250_BW_MASK,
BMA250_RANGE_REG, BMA250_RANGE_MASK,
BMA250_POWER_REG, BMA250_LOWPOWER_MASK, 1,
BMA250_INT_ENABLE_REG, BMA250_DATA_INTEN_MASK,
BMA250_RESET_REG,
bma250_chip_config,
bma250_chip_disable,
.channels = bma250_channels,
.num_channels = ARRAY_SIZE(bma250_channels),
.scale_table = bma250_scale_table,
.num_scales = ARRAY_SIZE(bma250_scale_table),
.bw_table = bma250_bw_table,
.num_bw = ARRAY_SIZE(bma250_bw_table),
.int_reset_reg = BMA250_INT_RESET_REG,
.int_reset_mask = BMA250_INT_RESET_MASK,
.sleep_reg = BMA250_POWER_REG,
.sleep_mask = BMA250_SUSPEND_MASK,
.bw_reg = BMA250_BW_REG,
.bw_mask = BMA250_BW_MASK,
.bw_offset = BMA250_BW_OFFSET,
.scale_reg = BMA250_RANGE_REG,
.scale_mask = BMA250_RANGE_MASK,
.power_reg = BMA250_POWER_REG,
.power_mask = BMA250_LOWPOWER_MASK,
.lowpower_val = 1,
.int_enable_reg = BMA250_INT_ENABLE_REG,
.int_enable_mask = BMA250_DATA_INTEN_MASK,
.softreset_reg = BMA250_RESET_REG,
.chip_config = bma250_chip_config,
.chip_disable = bma250_chip_disable,
},
};
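
Switching the part tables to designated initializers is what makes the new bw_offset member safe: with positional initialization, inserting a field mid-struct silently shifts every later value into the wrong slot. A minimal sketch of the hazard (hypothetical struct):

    struct part_sketch {
        unsigned char bw_reg;
        unsigned char bw_mask;
        unsigned char bw_offset;  /* newly inserted member */
        unsigned char scale_reg;
    };

    /* Positional: 0x0f now lands in bw_offset and scale_reg silently
     * becomes 0 -- the compiler will not complain. */
    static const struct part_sketch bad = { 0x10, 0x1f, 0x0f };

    /* Designated: each value stays bound to its field name. */
    static const struct part_sketch good = {
        .bw_reg    = 0x10,
        .bw_mask   = 0x1f,
        .scale_reg = 0x0f,  /* bw_offset defaults to 0 */
    };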


@ -385,7 +385,7 @@ static int ngene_command_config_free_buf(struct ngene *dev, u8 *config)
com.cmd.hdr.Opcode = CMD_CONFIGURE_FREE_BUFFER;
com.cmd.hdr.Length = 6;
memcpy(&com.cmd.ConfigureBuffers.config, config, 6);
memcpy(&com.cmd.ConfigureFreeBuffers.config, config, 6);
com.in_len = 6;
com.out_len = 0;


@ -407,12 +407,14 @@ enum _BUFFER_CONFIGS {
struct FW_CONFIGURE_FREE_BUFFERS {
struct FW_HEADER hdr;
u8 UVI1_BufferLength;
u8 UVI2_BufferLength;
u8 TVO_BufferLength;
u8 AUD1_BufferLength;
u8 AUD2_BufferLength;
u8 TVA_BufferLength;
struct {
u8 UVI1_BufferLength;
u8 UVI2_BufferLength;
u8 TVO_BufferLength;
u8 AUD1_BufferLength;
u8 AUD2_BufferLength;
u8 TVA_BufferLength;
} __packed config;
} __attribute__ ((__packed__));
struct FW_CONFIGURE_UART {


@ -3192,7 +3192,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.port_set_cmode = mv88e6341_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6390_stats_get_stats,
@ -3907,7 +3907,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.port_set_cmode = mv88e6341_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6390_stats_get_stats,


@ -9239,6 +9239,12 @@ int bnxt_half_open_nic(struct bnxt *bp)
{
int rc = 0;
if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
rc = -ENODEV;
goto half_open_err;
}
rc = bnxt_alloc_mem(bp, false);
if (rc) {
netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
@ -9987,12 +9993,15 @@ static void bnxt_reset_task(struct bnxt *bp, bool silent)
if (netif_running(bp->dev)) {
int rc;
if (!silent)
if (silent) {
bnxt_close_nic(bp, false, false);
bnxt_open_nic(bp, false, false);
} else {
bnxt_ulp_stop(bp);
bnxt_close_nic(bp, false, false);
rc = bnxt_open_nic(bp, false, false);
if (!silent && !rc)
bnxt_ulp_start(bp);
bnxt_close_nic(bp, true, false);
rc = bnxt_open_nic(bp, true, false);
bnxt_ulp_start(bp, rc);
}
}
}
@ -10732,6 +10741,10 @@ static void bnxt_fw_reset_task(struct work_struct *work)
}
bp->fw_reset_timestamp = jiffies;
rtnl_lock();
if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
rtnl_unlock();
goto fw_reset_abort;
}
bnxt_fw_reset_close(bp);
if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
@ -12144,10 +12157,9 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
if (!err && netif_running(netdev))
err = bnxt_open(netdev);
if (!err) {
if (!err)
result = PCI_ERS_RESULT_RECOVERED;
bnxt_ulp_start(bp);
}
bnxt_ulp_start(bp, err);
}
if (result != PCI_ERS_RESULT_RECOVERED) {


@ -186,7 +186,7 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
edev->ulp_tbl[ulp_id].msix_requested = 0;
edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
if (netif_running(dev)) {
if (netif_running(dev) && !(edev->flags & BNXT_EN_FLAG_ULP_STOPPED)) {
bnxt_close_nic(bp, true, false);
bnxt_open_nic(bp, true, false);
}
@ -274,6 +274,7 @@ void bnxt_ulp_stop(struct bnxt *bp)
if (!edev)
return;
edev->flags |= BNXT_EN_FLAG_ULP_STOPPED;
for (i = 0; i < BNXT_MAX_ULP; i++) {
struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
@ -284,7 +285,7 @@ void bnxt_ulp_stop(struct bnxt *bp)
}
}
void bnxt_ulp_start(struct bnxt *bp)
void bnxt_ulp_start(struct bnxt *bp, int err)
{
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
@ -293,6 +294,11 @@ void bnxt_ulp_start(struct bnxt *bp)
if (!edev)
return;
edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED;
if (err)
return;
for (i = 0; i < BNXT_MAX_ULP; i++) {
struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
@ -467,13 +473,14 @@ struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev)
if (!edev)
return ERR_PTR(-ENOMEM);
edev->en_ops = &bnxt_en_ops_tbl;
if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
edev->net = dev;
edev->pdev = bp->pdev;
bp->edev = edev;
}
edev->flags &= ~BNXT_EN_FLAG_ROCE_CAP;
if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
return bp->edev;
}


@ -64,6 +64,7 @@ struct bnxt_en_dev {
#define BNXT_EN_FLAG_ROCE_CAP (BNXT_EN_FLAG_ROCEV1_CAP | \
BNXT_EN_FLAG_ROCEV2_CAP)
#define BNXT_EN_FLAG_MSIX_REQUESTED 0x4
#define BNXT_EN_FLAG_ULP_STOPPED 0x8
const struct bnxt_en_ops *en_ops;
struct bnxt_ulp ulp_tbl[BNXT_MAX_ULP];
};
@ -92,7 +93,7 @@ int bnxt_get_ulp_msix_num(struct bnxt *bp);
int bnxt_get_ulp_msix_base(struct bnxt *bp);
int bnxt_get_ulp_stat_ctxs(struct bnxt *bp);
void bnxt_ulp_stop(struct bnxt *bp);
void bnxt_ulp_start(struct bnxt *bp);
void bnxt_ulp_start(struct bnxt *bp, int err);
void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
void bnxt_ulp_shutdown(struct bnxt *bp);
void bnxt_ulp_irq_stop(struct bnxt *bp);


@ -1187,7 +1187,8 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
switch (mode) {
case GENET_POWER_PASSIVE:
reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS |
EXT_ENERGY_DET_MASK);
if (GENET_IS_V5(priv)) {
reg &= ~(EXT_PWR_DOWN_PHY_EN |
EXT_PWR_DOWN_PHY_RD |
@ -2901,12 +2902,6 @@ static int bcmgenet_open(struct net_device *dev)
bcmgenet_set_hw_addr(priv, dev->dev_addr);
if (priv->internal_phy) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
reg |= EXT_ENERGY_DET_MASK;
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
}
/* Disable RX/TX DMA and flush TX queues */
dma_ctrl = bcmgenet_dma_disable(priv);
@ -3623,7 +3618,6 @@ static int bcmgenet_resume(struct device *d)
struct bcmgenet_priv *priv = netdev_priv(dev);
unsigned long dma_ctrl;
int ret;
u32 reg;
if (!netif_running(dev))
return 0;
@ -3655,12 +3649,6 @@ static int bcmgenet_resume(struct device *d)
bcmgenet_set_hw_addr(priv, dev->dev_addr);
if (priv->internal_phy) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
reg |= EXT_ENERGY_DET_MASK;
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
}
if (priv->wolopts)
bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);


@ -160,12 +160,6 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
reg |= CMD_RX_EN;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
if (priv->hw_params->flags & GENET_HAS_EXT) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
reg &= ~EXT_ENERGY_DET_MASK;
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
}
return 0;
}


@ -420,7 +420,7 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
* bits 32:47 indicate the PVF num.
*/
for (q_no = 0; q_no < ern; q_no++) {
reg_val = oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;
reg_val = (u64)oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;
/* for VF assigned queues. */
if (q_no < oct->sriov_info.pf_srn) {
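
The (u64) cast matters because the shift otherwise happens after integer promotion to 32-bit int; a value that lands in bit 31 then sign-extends when widened to the 64-bit register word. A standalone sketch with an illustrative port number:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t port = 4;  /* illustrative; large enough to reach bit 31 */

        /* Promoted to int: 4 << 29 hits bit 31 (formally UB for signed int),
         * and widening that negative int sign-extends the upper 32 bits. */
        uint64_t bad  = port << 29;            /* 0xffffffff80000000 */
        uint64_t good = (uint64_t)port << 29;  /* 0x0000000080000000 */

        printf("%llx vs %llx\n",
               (unsigned long long)bad, (unsigned long long)good);
        return 0;
    }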


@ -2245,6 +2245,9 @@ static void detach_ulds(struct adapter *adap)
{
unsigned int i;
if (!is_uld(adap))
return;
mutex_lock(&uld_mutex);
list_del(&adap->list_node);
@ -6152,10 +6155,13 @@ static void remove_one(struct pci_dev *pdev)
*/
destroy_workqueue(adapter->workq);
if (is_uld(adapter)) {
detach_ulds(adapter);
t4_uld_clean_up(adapter);
}
detach_ulds(adapter);
for_each_port(adapter, i)
if (adapter->port[i]->reg_state == NETREG_REGISTERED)
unregister_netdev(adapter->port[i]);
t4_uld_clean_up(adapter);
adap_free_hma_mem(adapter);
@ -6163,10 +6169,6 @@ static void remove_one(struct pci_dev *pdev)
cxgb4_free_mps_ref_entries(adapter);
for_each_port(adapter, i)
if (adapter->port[i]->reg_state == NETREG_REGISTERED)
unregister_netdev(adapter->port[i]);
debugfs_remove_recursive(adapter->debugfs_root);
if (!is_t4(adapter->params.chip))


@ -638,6 +638,9 @@ void t4_uld_clean_up(struct adapter *adap)
{
unsigned int i;
if (!is_uld(adap))
return;
mutex_lock(&uld_mutex);
for (i = 0; i < CXGB4_ULD_MAX; i++) {
if (!adap->uld[i].handle)


@ -1170,13 +1170,16 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = register_netdev(dev);
if (err)
goto abort_with_wq;
goto abort_with_gve_init;
dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
gve_clear_probe_in_progress(priv);
queue_work(priv->gve_wq, &priv->service_task);
return 0;
abort_with_gve_init:
gve_teardown_priv_resources(priv);
abort_with_wq:
destroy_workqueue(priv->gve_wq);


@ -131,7 +131,7 @@
/* buf unit size is cache_line_size, which is 64, so the shift is 6 */
#define PPE_BUF_SIZE_SHIFT 6
#define PPE_TX_BUF_HOLD BIT(31)
#define CACHE_LINE_MASK 0x3F
#define SOC_CACHE_LINE_MASK 0x3F
#else
#define PPE_CFG_QOS_VMID_GRP_SHIFT 8
#define PPE_CFG_RX_CTRL_ALIGN_SHIFT 11
@ -531,8 +531,8 @@ hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
#if defined(CONFIG_HI13X1_GMAC)
desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV
| TX_RELEASE_TO_PPE | priv->port << TX_POOL_SHIFT);
desc->data_offset = (__force u32)cpu_to_be32(phys & CACHE_LINE_MASK);
desc->send_addr = (__force u32)cpu_to_be32(phys & ~CACHE_LINE_MASK);
desc->data_offset = (__force u32)cpu_to_be32(phys & SOC_CACHE_LINE_MASK);
desc->send_addr = (__force u32)cpu_to_be32(phys & ~SOC_CACHE_LINE_MASK);
#else
desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
desc->send_addr = (__force u32)cpu_to_be32(phys);


@ -2119,6 +2119,16 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
struct hnae3_handle *nic = &hdev->nic;
int ret;
ret = hclgevf_en_hw_strip_rxvtag(nic, true);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to enable rx vlan offload, ret = %d\n", ret);
return ret;
}
return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
false);
}


@ -7401,6 +7401,7 @@ err_flashmap:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:


@ -2230,6 +2230,7 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_netdev:
pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:


@ -3765,6 +3765,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
pci_disable_pcie_error_reporting(pdev);
pci_release_regions(pdev);
err_pci_reg:
err_dma:


@ -940,6 +940,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
**/
static int igb_request_msix(struct igb_adapter *adapter)
{
unsigned int num_q_vectors = adapter->num_q_vectors;
struct net_device *netdev = adapter->netdev;
int i, err = 0, vector = 0, free_vector = 0;
@ -948,7 +949,13 @@ static int igb_request_msix(struct igb_adapter *adapter)
if (err)
goto err_out;
for (i = 0; i < adapter->num_q_vectors; i++) {
if (num_q_vectors > MAX_Q_VECTORS) {
num_q_vectors = MAX_Q_VECTORS;
dev_warn(&adapter->pdev->dev,
"The number of queue vectors (%d) is higher than max allowed (%d)\n",
adapter->num_q_vectors, MAX_Q_VECTORS);
}
for (i = 0; i < num_q_vectors; i++) {
struct igb_q_vector *q_vector = adapter->q_vector[i];
vector++;
@ -1687,14 +1694,15 @@ static bool is_any_txtime_enabled(struct igb_adapter *adapter)
**/
static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
{
struct igb_ring *ring = adapter->tx_ring[queue];
struct net_device *netdev = adapter->netdev;
struct e1000_hw *hw = &adapter->hw;
struct igb_ring *ring;
u32 tqavcc, tqavctrl;
u16 value;
WARN_ON(hw->mac.type != e1000_i210);
WARN_ON(queue < 0 || queue > 1);
ring = adapter->tx_ring[queue];
/* If any of the Qav features is enabled, configure queues as SR and
* with HIGH PRIO. If none is, then configure them with LOW PRIO and
@ -3469,6 +3477,7 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@ -4657,6 +4666,8 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
DMA_TO_DEVICE);
}
tx_buffer->next_to_watch = NULL;
/* move us one more past the eop_desc for start of next pkt */
tx_buffer++;
i++;


@ -504,7 +504,7 @@ static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data)
if (hw->phy.ops.read_reg)
return hw->phy.ops.read_reg(hw, offset, data);
return 0;
return -EOPNOTSUPP;
}
/* forward declaration */


@ -256,6 +256,8 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
DMA_TO_DEVICE);
}
tx_buffer->next_to_watch = NULL;
/* move us one more past the eop_desc for start of next pkt */
tx_buffer++;
i++;
@ -4310,8 +4312,8 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
pci_disable_device(pdev);


@ -1827,7 +1827,8 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
struct sk_buff *skb)
{
if (ring_uses_build_skb(rx_ring)) {
unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;
unsigned long mask = (unsigned long)ixgbe_rx_pg_size(rx_ring) - 1;
unsigned long offset = (unsigned long)(skb->data) & mask;
dma_sync_single_range_for_cpu(rx_ring->dev,
IXGBE_CB(skb)->dma,
@ -11207,6 +11208,7 @@ err_ioremap:
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
err_alloc_etherdev:
pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
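
The ixgbe hunk above replaces ~PAGE_MASK with a mask derived from the ring's actual rx page size: when build_skb uses an order-1 (8 KiB) compound page on a 4 KiB-page system, ~PAGE_MASK drops the bit saying which half of that page the buffer occupies, so the DMA sync covers the wrong range. A worked sketch with assumed values:

    #include <stdio.h>

    int main(void)
    {
        const unsigned long page_size  = 4096;  /* system PAGE_SIZE */
        const unsigned long rx_pg_size = 8192;  /* order-1 rx buffer page */
        /* 8 KiB-aligned buffer, data 128 bytes into its second 4K half. */
        unsigned long data = 0x7f0000002000UL + 4096 + 128;

        unsigned long off_page = data & (page_size - 1);   /* 128: too small */
        unsigned long off_ring = data & (rx_pg_size - 1);  /* 4224: correct  */

        printf("%lu vs %lu\n", off_page, off_ring);
        return 0;
    }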


@ -6850,7 +6850,8 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
new_bus->priv = tp;
new_bus->parent = &pdev->dev;
new_bus->irq[0] = PHY_IGNORE_INTERRUPT;
snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x", pci_dev_id(pdev));
snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x-%x",
pci_domain_nr(pdev->bus), pci_dev_id(pdev));
new_bus->read = r8169_mdio_read_reg;
new_bus->write = r8169_mdio_write_reg;


@ -694,7 +694,10 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
cmnd->write_zeroes.length =
cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
cmnd->write_zeroes.control = 0;
if (nvme_ns_has_pi(ns))
cmnd->write_zeroes.control = cpu_to_le16(NVME_RW_PRINFO_PRACT);
else
cmnd->write_zeroes.control = 0;
return BLK_STS_OK;
}
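
For a namespace formatted with protection information, Write Zeroes must ask the controller to generate PI for the zeroed blocks, which is what the PRACT bit does. A tiny sketch of the control-word logic (the bit position matches the kernel's NVME_RW_PRINFO_PRACT definition):

    #include <stdint.h>

    #define NVME_RW_PRINFO_PRACT (1 << 13)

    /* Sketch: control field for Write Zeroes, per the hunk above. */
    static uint16_t wz_control(int ns_has_pi)
    {
        return ns_has_pi ? NVME_RW_PRINFO_PRACT : 0;
    }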


@ -2590,7 +2590,9 @@ static void nvme_reset_work(struct work_struct *work)
bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
int result;
if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) {
if (dev->ctrl.state != NVME_CTRL_RESETTING) {
dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n",
dev->ctrl.state);
result = -ENODEV;
goto out;
}
@ -2954,7 +2956,6 @@ static void nvme_remove(struct pci_dev *pdev)
if (!pci_device_is_present(pdev)) {
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
nvme_dev_disable(dev, true);
nvme_dev_remove_admin(dev);
}
flush_work(&dev->ctrl.reset_work);


@ -5348,7 +5348,8 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
{
if ((pdev->device == 0x7312 && pdev->revision != 0x00) ||
(pdev->device == 0x7340 && pdev->revision != 0xc5))
(pdev->device == 0x7340 && pdev->revision != 0xc5) ||
(pdev->device == 0x7341 && pdev->revision != 0x00))
return;
pci_info(pdev, "disabling ATS\n");
@ -5363,6 +5364,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats);
/* AMD Navi14 dGPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7341, quirk_amd_harvest_no_ats);
#endif /* CONFIG_PCI_ATS */
/* Freescale PCIe doesn't support MSI in RC mode */


@ -180,13 +180,10 @@ static int sprd_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
}
}
if (state->period != cstate->period ||
state->duty_cycle != cstate->duty_cycle) {
ret = sprd_pwm_config(spc, pwm, state->duty_cycle,
state->period);
if (ret)
return ret;
}
ret = sprd_pwm_config(spc, pwm, state->duty_cycle,
state->period);
if (ret)
return ret;
sprd_pwm_write(spc, pwm->hwpwm, SPRD_PWM_ENABLE, 1);
} else if (cstate->enabled) {


@ -366,9 +366,8 @@ static struct hi6421_regulator_info
static int hi6421_regulator_enable(struct regulator_dev *rdev)
{
struct hi6421_regulator_pdata *pdata;
struct hi6421_regulator_pdata *pdata = rdev_get_drvdata(rdev);
pdata = dev_get_drvdata(rdev->dev.parent);
/* hi6421 spec requires regulator enablement must be serialized:
* - Because when BUCK, LDO switching from off to on, it will have
* a huge instantaneous current; so you can not turn on two or
@ -385,9 +384,10 @@ static int hi6421_regulator_enable(struct regulator_dev *rdev)
static unsigned int hi6421_regulator_ldo_get_mode(struct regulator_dev *rdev)
{
struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
u32 reg_val;
struct hi6421_regulator_info *info;
unsigned int reg_val;
info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val);
if (reg_val & info->mode_mask)
return REGULATOR_MODE_IDLE;
@ -397,9 +397,10 @@ static unsigned int hi6421_regulator_ldo_get_mode(struct regulator_dev *rdev)
static unsigned int hi6421_regulator_buck_get_mode(struct regulator_dev *rdev)
{
struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
u32 reg_val;
struct hi6421_regulator_info *info;
unsigned int reg_val;
info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
regmap_read(rdev->regmap, rdev->desc->enable_reg, &reg_val);
if (reg_val & info->mode_mask)
return REGULATOR_MODE_STANDBY;
@ -410,9 +411,10 @@ static unsigned int hi6421_regulator_buck_get_mode(struct regulator_dev *rdev)
static int hi6421_regulator_ldo_set_mode(struct regulator_dev *rdev,
unsigned int mode)
{
struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
u32 new_mode;
struct hi6421_regulator_info *info;
unsigned int new_mode;
info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
switch (mode) {
case REGULATOR_MODE_NORMAL:
new_mode = 0;
@ -434,9 +436,10 @@ static int hi6421_regulator_ldo_set_mode(struct regulator_dev *rdev,
static int hi6421_regulator_buck_set_mode(struct regulator_dev *rdev,
unsigned int mode)
{
struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
u32 new_mode;
struct hi6421_regulator_info *info;
unsigned int new_mode;
info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
switch (mode) {
case REGULATOR_MODE_NORMAL:
new_mode = 0;
@ -459,7 +462,9 @@ static unsigned int
hi6421_regulator_ldo_get_optimum_mode(struct regulator_dev *rdev,
int input_uV, int output_uV, int load_uA)
{
struct hi6421_regulator_info *info = rdev_get_drvdata(rdev);
struct hi6421_regulator_info *info;
info = container_of(rdev->desc, struct hi6421_regulator_info, desc);
if (load_uA > info->eco_microamp)
return REGULATOR_MODE_NORMAL;
@ -543,14 +548,13 @@ static int hi6421_regulator_probe(struct platform_device *pdev)
if (!pdata)
return -ENOMEM;
mutex_init(&pdata->lock);
platform_set_drvdata(pdev, pdata);
for (i = 0; i < ARRAY_SIZE(hi6421_regulator_info); i++) {
/* assign per-regulator data */
info = &hi6421_regulator_info[i];
config.dev = pdev->dev.parent;
config.driver_data = info;
config.driver_data = pdata;
config.regmap = pmic->regmap;
rdev = devm_regulator_register(&pdev->dev, &info->desc,
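
The hi6421 rework keeps the shared pdata in driver_data and recovers per-regulator info from the registered desc instead, which works because the desc is embedded inside the info struct; container_of() walks back from the member to its container. A generic sketch (struct names illustrative):

    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rdesc { int id; };

    struct rinfo {
        unsigned int mode_mask;
        struct rdesc desc;  /* embedded, so we can walk back to rinfo */
    };

    static unsigned int mode_mask_of(struct rdesc *d)
    {
        struct rinfo *info = container_of(d, struct rinfo, desc);
        return info->mode_mask;
    }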


@ -432,39 +432,10 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
struct device *dev = container_of(kobj, struct device, kobj);
struct iscsi_iface *iface = iscsi_dev_to_iface(dev);
struct iscsi_transport *t = iface->transport;
int param;
int param_type;
int param = -1;
if (attr == &dev_attr_iface_enabled.attr)
param = ISCSI_NET_PARAM_IFACE_ENABLE;
else if (attr == &dev_attr_iface_vlan_id.attr)
param = ISCSI_NET_PARAM_VLAN_ID;
else if (attr == &dev_attr_iface_vlan_priority.attr)
param = ISCSI_NET_PARAM_VLAN_PRIORITY;
else if (attr == &dev_attr_iface_vlan_enabled.attr)
param = ISCSI_NET_PARAM_VLAN_ENABLED;
else if (attr == &dev_attr_iface_mtu.attr)
param = ISCSI_NET_PARAM_MTU;
else if (attr == &dev_attr_iface_port.attr)
param = ISCSI_NET_PARAM_PORT;
else if (attr == &dev_attr_iface_ipaddress_state.attr)
param = ISCSI_NET_PARAM_IPADDR_STATE;
else if (attr == &dev_attr_iface_delayed_ack_en.attr)
param = ISCSI_NET_PARAM_DELAYED_ACK_EN;
else if (attr == &dev_attr_iface_tcp_nagle_disable.attr)
param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE;
else if (attr == &dev_attr_iface_tcp_wsf_disable.attr)
param = ISCSI_NET_PARAM_TCP_WSF_DISABLE;
else if (attr == &dev_attr_iface_tcp_wsf.attr)
param = ISCSI_NET_PARAM_TCP_WSF;
else if (attr == &dev_attr_iface_tcp_timer_scale.attr)
param = ISCSI_NET_PARAM_TCP_TIMER_SCALE;
else if (attr == &dev_attr_iface_tcp_timestamp_en.attr)
param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN;
else if (attr == &dev_attr_iface_cache_id.attr)
param = ISCSI_NET_PARAM_CACHE_ID;
else if (attr == &dev_attr_iface_redirect_en.attr)
param = ISCSI_NET_PARAM_REDIRECT_EN;
else if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr)
param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO;
else if (attr == &dev_attr_iface_header_digest.attr)
@ -501,6 +472,38 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
param = ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN;
else if (attr == &dev_attr_iface_initiator_name.attr)
param = ISCSI_IFACE_PARAM_INITIATOR_NAME;
if (param != -1)
return t->attr_is_visible(ISCSI_IFACE_PARAM, param);
if (attr == &dev_attr_iface_vlan_id.attr)
param = ISCSI_NET_PARAM_VLAN_ID;
else if (attr == &dev_attr_iface_vlan_priority.attr)
param = ISCSI_NET_PARAM_VLAN_PRIORITY;
else if (attr == &dev_attr_iface_vlan_enabled.attr)
param = ISCSI_NET_PARAM_VLAN_ENABLED;
else if (attr == &dev_attr_iface_mtu.attr)
param = ISCSI_NET_PARAM_MTU;
else if (attr == &dev_attr_iface_port.attr)
param = ISCSI_NET_PARAM_PORT;
else if (attr == &dev_attr_iface_ipaddress_state.attr)
param = ISCSI_NET_PARAM_IPADDR_STATE;
else if (attr == &dev_attr_iface_delayed_ack_en.attr)
param = ISCSI_NET_PARAM_DELAYED_ACK_EN;
else if (attr == &dev_attr_iface_tcp_nagle_disable.attr)
param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE;
else if (attr == &dev_attr_iface_tcp_wsf_disable.attr)
param = ISCSI_NET_PARAM_TCP_WSF_DISABLE;
else if (attr == &dev_attr_iface_tcp_wsf.attr)
param = ISCSI_NET_PARAM_TCP_WSF;
else if (attr == &dev_attr_iface_tcp_timer_scale.attr)
param = ISCSI_NET_PARAM_TCP_TIMER_SCALE;
else if (attr == &dev_attr_iface_tcp_timestamp_en.attr)
param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN;
else if (attr == &dev_attr_iface_cache_id.attr)
param = ISCSI_NET_PARAM_CACHE_ID;
else if (attr == &dev_attr_iface_redirect_en.attr)
param = ISCSI_NET_PARAM_REDIRECT_EN;
else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
if (attr == &dev_attr_ipv4_iface_ipaddress.attr)
param = ISCSI_NET_PARAM_IPV4_ADDR;
@ -591,32 +594,7 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
return 0;
}
switch (param) {
case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
case ISCSI_IFACE_PARAM_HDRDGST_EN:
case ISCSI_IFACE_PARAM_DATADGST_EN:
case ISCSI_IFACE_PARAM_IMM_DATA_EN:
case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
case ISCSI_IFACE_PARAM_ERL:
case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
case ISCSI_IFACE_PARAM_FIRST_BURST:
case ISCSI_IFACE_PARAM_MAX_R2T:
case ISCSI_IFACE_PARAM_MAX_BURST:
case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
case ISCSI_IFACE_PARAM_INITIATOR_NAME:
param_type = ISCSI_IFACE_PARAM;
break;
default:
param_type = ISCSI_NET_PARAM;
}
return t->attr_is_visible(param_type, param);
return t->attr_is_visible(ISCSI_NET_PARAM, param);
}
static struct attribute *iscsi_iface_attrs[] = {


@ -517,6 +517,12 @@ static int cdns_spi_probe(struct platform_device *pdev)
goto clk_dis_apb;
}
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
if (ret < 0)
master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS;
@ -531,11 +537,6 @@ static int cdns_spi_probe(struct platform_device *pdev)
/* SPI controller initializations */
cdns_spi_init_hw(xspi);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
ret = -ENXIO;
@ -566,6 +567,9 @@ static int cdns_spi_probe(struct platform_device *pdev)
master->bits_per_word_mask = SPI_BPW_MASK(8);
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
ret = spi_register_master(master);
if (ret) {
dev_err(&pdev->dev, "spi_register_master failed\n");


@ -65,8 +65,7 @@ struct spi_imx_data;
struct spi_imx_devtype_data {
void (*intctrl)(struct spi_imx_data *, int);
int (*prepare_message)(struct spi_imx_data *, struct spi_message *);
int (*prepare_transfer)(struct spi_imx_data *, struct spi_device *,
struct spi_transfer *);
int (*prepare_transfer)(struct spi_imx_data *, struct spi_device *);
void (*trigger)(struct spi_imx_data *);
int (*rx_available)(struct spi_imx_data *);
void (*reset)(struct spi_imx_data *);
@ -573,11 +572,10 @@ static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
}
static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
struct spi_device *spi,
struct spi_transfer *t)
struct spi_device *spi)
{
u32 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
u32 clk = t->speed_hz, delay;
u32 clk, delay;
/* Clear BL field and set the right value */
ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
@ -591,7 +589,7 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
/* set clock speed */
ctrl &= ~(0xf << MX51_ECSPI_CTRL_POSTDIV_OFFSET |
0xf << MX51_ECSPI_CTRL_PREDIV_OFFSET);
ctrl |= mx51_ecspi_clkdiv(spi_imx, t->speed_hz, &clk);
ctrl |= mx51_ecspi_clkdiv(spi_imx, spi_imx->spi_bus_clk, &clk);
spi_imx->spi_bus_clk = clk;
/*
@ -715,13 +713,12 @@ static int mx31_prepare_message(struct spi_imx_data *spi_imx,
}
static int mx31_prepare_transfer(struct spi_imx_data *spi_imx,
struct spi_device *spi,
struct spi_transfer *t)
struct spi_device *spi)
{
unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
unsigned int clk;
reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, t->speed_hz, &clk) <<
reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) <<
MX31_CSPICTRL_DR_SHIFT;
spi_imx->spi_bus_clk = clk;
@ -820,14 +817,13 @@ static int mx21_prepare_message(struct spi_imx_data *spi_imx,
}
static int mx21_prepare_transfer(struct spi_imx_data *spi_imx,
struct spi_device *spi,
struct spi_transfer *t)
struct spi_device *spi)
{
unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;
unsigned int clk;
reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, t->speed_hz, max, &clk)
reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, spi_imx->spi_bus_clk, max, &clk)
<< MX21_CSPICTRL_DR_SHIFT;
spi_imx->spi_bus_clk = clk;
@ -896,13 +892,12 @@ static int mx1_prepare_message(struct spi_imx_data *spi_imx,
}
static int mx1_prepare_transfer(struct spi_imx_data *spi_imx,
struct spi_device *spi,
struct spi_transfer *t)
struct spi_device *spi)
{
unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;
unsigned int clk;
reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, t->speed_hz, &clk) <<
reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) <<
MX1_CSPICTRL_DR_SHIFT;
spi_imx->spi_bus_clk = clk;
@ -1240,6 +1235,16 @@ static int spi_imx_setupxfer(struct spi_device *spi,
if (!t)
return 0;
if (!t->speed_hz) {
if (!spi->max_speed_hz) {
dev_err(&spi->dev, "no speed_hz provided!\n");
return -EINVAL;
}
dev_dbg(&spi->dev, "using spi->max_speed_hz!\n");
spi_imx->spi_bus_clk = spi->max_speed_hz;
} else
spi_imx->spi_bus_clk = t->speed_hz;
spi_imx->bits_per_word = t->bits_per_word;
/*
@ -1281,7 +1286,7 @@ static int spi_imx_setupxfer(struct spi_device *spi,
spi_imx->slave_burst = t->len;
}
spi_imx->devtype_data->prepare_transfer(spi_imx, spi, t);
spi_imx->devtype_data->prepare_transfer(spi_imx, spi);
return 0;
}


@ -427,13 +427,23 @@ static int mtk_spi_fifo_transfer(struct spi_master *master,
mtk_spi_setup_packet(master);
cnt = xfer->len / 4;
iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);
if (xfer->tx_buf)
iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);
if (xfer->rx_buf)
ioread32_rep(mdata->base + SPI_RX_DATA_REG, xfer->rx_buf, cnt);
remainder = xfer->len % 4;
if (remainder > 0) {
reg_val = 0;
memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder);
writel(reg_val, mdata->base + SPI_TX_DATA_REG);
if (xfer->tx_buf) {
memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder);
writel(reg_val, mdata->base + SPI_TX_DATA_REG);
}
if (xfer->rx_buf) {
reg_val = readl(mdata->base + SPI_RX_DATA_REG);
memcpy(xfer->rx_buf + (cnt * 4), &reg_val, remainder);
}
}
mtk_spi_enable_transfer(master);


@ -1908,35 +1908,48 @@ static int stm32_spi_probe(struct platform_device *pdev)
master->transfer_one = stm32_spi_transfer_one;
master->unprepare_message = stm32_spi_unprepare_msg;
spi->dma_tx = dma_request_slave_channel(spi->dev, "tx");
if (!spi->dma_tx)
dev_warn(&pdev->dev, "failed to request tx dma channel\n");
else
master->dma_tx = spi->dma_tx;
spi->dma_tx = dma_request_chan(spi->dev, "tx");
if (IS_ERR(spi->dma_tx)) {
ret = PTR_ERR(spi->dma_tx);
spi->dma_tx = NULL;
if (ret == -EPROBE_DEFER)
goto err_clk_disable;
dev_warn(&pdev->dev, "failed to request tx dma channel\n");
} else {
master->dma_tx = spi->dma_tx;
}
spi->dma_rx = dma_request_chan(spi->dev, "rx");
if (IS_ERR(spi->dma_rx)) {
ret = PTR_ERR(spi->dma_rx);
spi->dma_rx = NULL;
if (ret == -EPROBE_DEFER)
goto err_dma_release;
spi->dma_rx = dma_request_slave_channel(spi->dev, "rx");
if (!spi->dma_rx)
dev_warn(&pdev->dev, "failed to request rx dma channel\n");
else
} else {
master->dma_rx = spi->dma_rx;
}
if (spi->dma_tx || spi->dma_rx)
master->can_dma = stm32_spi_can_dma;
pm_runtime_set_active(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_enable(&pdev->dev);
ret = spi_register_master(master);
if (ret) {
dev_err(&pdev->dev, "spi master registration failed: %d\n",
ret);
goto err_dma_release;
goto err_pm_disable;
}
if (!master->cs_gpios) {
dev_err(&pdev->dev, "no CS gpios available\n");
ret = -EINVAL;
goto err_dma_release;
goto err_pm_disable;
}
for (i = 0; i < master->num_chipselect; i++) {
@ -1960,13 +1973,15 @@ static int stm32_spi_probe(struct platform_device *pdev)
return 0;
err_pm_disable:
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
err_dma_release:
if (spi->dma_tx)
dma_release_channel(spi->dma_tx);
if (spi->dma_rx)
dma_release_channel(spi->dma_rx);
pm_runtime_disable(&pdev->dev);
err_clk_disable:
clk_disable_unprepare(spi->clk);
err_master_put:
@ -1980,9 +1995,14 @@ static int stm32_spi_remove(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct stm32_spi *spi = spi_master_get_devdata(master);
pm_runtime_get_sync(&pdev->dev);
spi_unregister_master(master);
spi->cfg->disable(spi);
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
if (master->dma_tx)
dma_release_channel(master->dma_tx);
if (master->dma_rx)
@ -1990,7 +2010,6 @@ static int stm32_spi_remove(struct platform_device *pdev)
clk_disable_unprepare(spi->clk);
pm_runtime_disable(&pdev->dev);
pinctrl_pm_select_sleep_state(&pdev->dev);
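
The move from dma_request_slave_channel() to dma_request_chan() is what makes deferral visible: the former returns NULL on any failure, while the latter returns an ERR_PTR, so the probe can tell "no channel wired up" (fall back to PIO) from "DMA driver not ready yet" (-EPROBE_DEFER, retry later). A condensed kernel-style sketch of the pattern, with an illustrative helper name:

    #include <linux/device.h>
    #include <linux/dmaengine.h>
    #include <linux/err.h>

    /* Sketch: request an optional DMA channel yet still honour deferral. */
    static struct dma_chan *optional_dma(struct device *dev,
                                         const char *name, int *ret)
    {
        struct dma_chan *chan = dma_request_chan(dev, name);

        if (IS_ERR(chan)) {
            *ret = PTR_ERR(chan);
            if (*ret == -EPROBE_DEFER)
                return NULL;  /* caller aborts probe and retries later */
            dev_warn(dev, "no %s DMA channel, using PIO\n", name);
            *ret = 0;
            chan = NULL;
        }
        return chan;
    }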


@ -25,7 +25,7 @@
#include "target_core_alua.h"
static sense_reason_t
sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool);
sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char, u32, bool);
static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd);
static sense_reason_t
@ -279,14 +279,14 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
}
static sense_reason_t
sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *ops)
{
struct se_device *dev = cmd->se_dev;
sector_t end_lba = dev->transport->get_blocks(dev) + 1;
unsigned int sectors = sbc_get_write_same_sectors(cmd);
sense_reason_t ret;
if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
if ((flags & 0x04) || (flags & 0x02)) {
pr_err("WRITE_SAME PBDATA and LBDATA"
" bits not supported for Block Discard"
" Emulation\n");
@ -308,7 +308,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
}
/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
if (flags[0] & 0x10) {
if (flags & 0x10) {
pr_warn("WRITE SAME with ANCHOR not supported\n");
return TCM_INVALID_CDB_FIELD;
}
@ -316,7 +316,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
* Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
* translated into block discard requests within backend code.
*/
if (flags[0] & 0x08) {
if (flags & 0x08) {
if (!ops->execute_unmap)
return TCM_UNSUPPORTED_SCSI_OPCODE;
@ -331,7 +331,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
if (!ops->execute_write_same)
return TCM_UNSUPPORTED_SCSI_OPCODE;
ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true);
ret = sbc_check_prot(dev, cmd, flags >> 5, sectors, true);
if (ret)
return ret;
@ -686,10 +686,9 @@ sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_
}
static sense_reason_t
sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char protect,
u32 sectors, bool is_write)
{
u8 protect = cdb[1] >> 5;
int sp_ops = cmd->se_sess->sup_prot_ops;
int pi_prot_type = dev->dev_attrib.pi_prot_type;
bool fabric_prot = false;
@ -737,7 +736,7 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
/* Fallthrough */
default:
pr_err("Unable to determine pi_prot_type for CDB: 0x%02x "
"PROTECT: 0x%02x\n", cdb[0], protect);
"PROTECT: 0x%02x\n", cmd->t_task_cdb[0], protect);
return TCM_INVALID_CDB_FIELD;
}
@ -812,7 +811,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
if (sbc_check_dpofua(dev, cmd, cdb))
return TCM_INVALID_CDB_FIELD;
ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
if (ret)
return ret;
@ -826,7 +825,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
if (sbc_check_dpofua(dev, cmd, cdb))
return TCM_INVALID_CDB_FIELD;
ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
if (ret)
return ret;
@ -840,7 +839,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
if (sbc_check_dpofua(dev, cmd, cdb))
return TCM_INVALID_CDB_FIELD;
ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
if (ret)
return ret;
@ -861,7 +860,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
if (sbc_check_dpofua(dev, cmd, cdb))
return TCM_INVALID_CDB_FIELD;
ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
if (ret)
return ret;
@ -875,7 +874,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
if (sbc_check_dpofua(dev, cmd, cdb))
return TCM_INVALID_CDB_FIELD;
ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
if (ret)
return ret;
@ -890,7 +889,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
if (sbc_check_dpofua(dev, cmd, cdb))
return TCM_INVALID_CDB_FIELD;
ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
if (ret)
return ret;
@ -949,7 +948,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
size = sbc_get_size(cmd, 1);
cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
ret = sbc_setup_write_same(cmd, &cdb[10], ops);
ret = sbc_setup_write_same(cmd, cdb[10], ops);
if (ret)
return ret;
break;
@ -1048,7 +1047,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
size = sbc_get_size(cmd, 1);
cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
ret = sbc_setup_write_same(cmd, &cdb[1], ops);
ret = sbc_setup_write_same(cmd, cdb[1], ops);
if (ret)
return ret;
break;
@ -1066,7 +1065,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
* Follow sbcr26 with WRITE_SAME (10) and check for the existence
* of byte 1 bit 3 UNMAP instead of original reserved field
*/
ret = sbc_setup_write_same(cmd, &cdb[1], ops);
ret = sbc_setup_write_same(cmd, cdb[1], ops);
if (ret)
return ret;
break;


@ -46,6 +46,7 @@
#define USB_TP_TRANSMISSION_DELAY 40 /* ns */
#define USB_TP_TRANSMISSION_DELAY_MAX 65535 /* ns */
#define USB_PING_RESPONSE_TIME 400 /* ns */
/* Protect struct usb_device->state and ->children members
* Note: Both are also protected by ->dev.sem, except that ->state can
@ -180,8 +181,9 @@ int usb_device_supports_lpm(struct usb_device *udev)
}
/*
* Set the Maximum Exit Latency (MEL) for the host to initiate a transition from
* either U1 or U2.
* Set the Maximum Exit Latency (MEL) for the host to wake up the path from
* U1/U2, send a PING to the device and receive a PING_RESPONSE.
* See USB 3.1 section C.1.5.2
*/
static void usb_set_lpm_mel(struct usb_device *udev,
struct usb3_lpm_parameters *udev_lpm_params,
@ -191,35 +193,37 @@ static void usb_set_lpm_mel(struct usb_device *udev,
unsigned int hub_exit_latency)
{
unsigned int total_mel;
unsigned int device_mel;
unsigned int hub_mel;
/*
* Calculate the time it takes to transition all links from the roothub
* to the parent hub into U0. The parent hub must then decode the
* packet (hub header decode latency) to figure out which port it was
* bound for.
*
* The Hub Header decode latency is expressed in 0.1us intervals (0x1
* means 0.1us). Multiply that by 100 to get nanoseconds.
* tMEL1. time to transition path from host to device into U0.
* MEL for parent already contains the delay up to parent, so only add
* the exit latency for the last link (pick the slower exit latency),
* and the hub header decode latency. See USB 3.1 section C 2.2.1
* Store MEL in nanoseconds
*/
total_mel = hub_lpm_params->mel +
(hub->descriptor->u.ss.bHubHdrDecLat * 100);
max(udev_exit_latency, hub_exit_latency) * 1000 +
hub->descriptor->u.ss.bHubHdrDecLat * 100;
/*
* How long will it take to transition the downstream hub's port into
* U0? The greater of either the hub exit latency or the device exit
* latency.
*
* The BOS U1/U2 exit latencies are expressed in 1us intervals.
* Multiply that by 1000 to get nanoseconds.
* tMEL2: time to submit the PING packet. Sum of tTPTransmissionDelay for
* each link plus wHubDelay for each hub; only the last link is added here.
* tMEL4, the time for the PING_RESPONSE to traverse upstream, is similar,
* so multiply by 2 to cover it as well.
*/
device_mel = udev_exit_latency * 1000;
hub_mel = hub_exit_latency * 1000;
if (device_mel > hub_mel)
total_mel += device_mel;
else
total_mel += hub_mel;
total_mel += (__le16_to_cpu(hub->descriptor->u.ss.wHubDelay) +
USB_TP_TRANSMISSION_DELAY) * 2;
/*
* tMEL3, tPingResponse. Time taken by device to generate PING_RESPONSE
* after receiving PING. Also add 2100ns as stated in USB 3.1 C 1.5.2.4
* to cover the delay if the PING_RESPONSE is queued behind a Max Packet
* Size DP.
* Note these delays should be added only once for the entire path, so
* add them to the MEL of the device connected to the roothub.
*/
if (!hub->hdev->parent)
total_mel += USB_PING_RESPONSE_TIME + 2100;
udev_lpm_params->mel = total_mel;
}
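
Restating the reworked calculation as a standalone formula helps check the units (exit latencies in us, everything else in ns). A hedged userspace model; the helper name and sample latencies are ours, not the kernel's:

#include <stdio.h>

#define USB_TP_TRANSMISSION_DELAY 40   /* ns */
#define USB_PING_RESPONSE_TIME    400  /* ns */

/* total = tMEL1 + (tMEL2 + tMEL4) [+ tMEL3 at the roothub], all in ns */
static unsigned int mel_ns(unsigned int parent_mel_ns,
			   unsigned int udev_el_us, unsigned int hub_el_us,
			   unsigned int hdr_dec_lat, unsigned int hub_delay_ns,
			   int parent_is_roothub)
{
	unsigned int el_us = udev_el_us > hub_el_us ? udev_el_us : hub_el_us;
	unsigned int mel = parent_mel_ns + el_us * 1000 + hdr_dec_lat * 100;

	mel += (hub_delay_ns + USB_TP_TRANSMISSION_DELAY) * 2;
	if (parent_is_roothub)
		mel += USB_PING_RESPONSE_TIME + 2100;
	return mel;
}

int main(void)
{
	/* invented: parent MEL 0, exit latencies 2us/1us, HdrDecLat 4
	 * (= 0.4us), wHubDelay 100ns, device attached to the roothub */
	printf("%u ns\n", mel_ns(0, 2, 1, 4, 100, 1)); /* 2000+400+280+2500 = 5180 */
	return 0;
}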
@ -4022,6 +4026,47 @@ static int usb_set_lpm_timeout(struct usb_device *udev,
return 0;
}
/*
* Don't allow device initiated U1/U2 if the system exit latency + one bus
* interval is greater than the minimum service interval of any active
* periodic endpoint. See USB 3.2 section 9.4.9
*/
static bool usb_device_may_initiate_lpm(struct usb_device *udev,
enum usb3_link_state state)
{
unsigned int sel; /* us */
int i, j;
if (state == USB3_LPM_U1)
sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
else if (state == USB3_LPM_U2)
sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
else
return false;
for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
struct usb_interface *intf;
struct usb_endpoint_descriptor *desc;
unsigned int interval;
intf = udev->actconfig->interface[i];
if (!intf)
continue;
for (j = 0; j < intf->cur_altsetting->desc.bNumEndpoints; j++) {
desc = &intf->cur_altsetting->endpoint[j].desc;
if (usb_endpoint_xfer_int(desc) ||
usb_endpoint_xfer_isoc(desc)) {
interval = (1 << (desc->bInterval - 1)) * 125;
if (sel + 125 > interval)
return false;
}
}
}
return true;
}
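
Worked example with invented numbers: an interrupt endpoint with bInterval = 4 has a service interval of (1 << 3) * 125 = 1000 us. A rounded-up U1 SEL of 900 us fails the test (900 + 125 > 1000), so device-initiated U1 stays off; a SEL of 875 us or less would pass.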
/*
* Enable the hub-initiated U1/U2 idle timeouts, and enable device-initiated
* U1/U2 entry.
@ -4094,20 +4139,23 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
* U1/U2_ENABLE
*/
if (udev->actconfig &&
usb_set_device_initiated_lpm(udev, state, true) == 0) {
if (state == USB3_LPM_U1)
udev->usb3_lpm_u1_enabled = 1;
else if (state == USB3_LPM_U2)
udev->usb3_lpm_u2_enabled = 1;
} else {
/* Don't request U1/U2 entry if the device
* cannot transition to U1/U2.
*/
usb_set_lpm_timeout(udev, state, 0);
hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
usb_device_may_initiate_lpm(udev, state)) {
if (usb_set_device_initiated_lpm(udev, state, true)) {
/*
* Request to enable device initiated U1/U2 failed,
* better to turn off lpm in this case.
*/
usb_set_lpm_timeout(udev, state, 0);
hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
return;
}
}
}
if (state == USB3_LPM_U1)
udev->usb3_lpm_u1_enabled = 1;
else if (state == USB3_LPM_U2)
udev->usb3_lpm_u2_enabled = 1;
}
/*
* Disable the hub-initiated U1/U2 idle timeouts, and disable device-initiated
* U1/U2 entry.


@ -502,10 +502,6 @@ static const struct usb_device_id usb_quirk_list[] = {
/* DJI CineSSD */
{ USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
/* Fibocom L850-GL LTE Modem */
{ USB_DEVICE(0x2cb7, 0x0007), .driver_info =
USB_QUIRK_IGNORE_REMOTE_WAKEUP },
/* INTEL VALUE SSD */
{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },


@ -2748,12 +2748,14 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
return;
}
/* Zlp for all endpoints, for ep0 only in DATA IN stage */
/* Zlp for all endpoints in non DDMA, for ep0 only in DATA IN stage */
if (hs_ep->send_zlp) {
dwc2_hsotg_program_zlp(hsotg, hs_ep);
hs_ep->send_zlp = 0;
/* transfer will be completed on next complete interrupt */
return;
if (!using_desc_dma(hsotg)) {
dwc2_hsotg_program_zlp(hsotg, hs_ep);
/* transfer will be completed on next complete interrupt */
return;
}
}
if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_DATA_IN) {


@ -153,8 +153,6 @@ struct max3421_hcd {
*/
struct urb *curr_urb;
enum scheduling_pass sched_pass;
struct usb_device *loaded_dev; /* dev that's loaded into the chip */
int loaded_epnum; /* epnum whose toggles are loaded */
int urb_done; /* > 0 -> no errors, < 0: errno */
size_t curr_len;
u8 hien;
@ -492,39 +490,17 @@ max3421_set_speed(struct usb_hcd *hcd, struct usb_device *dev)
* Caller must NOT hold HCD spinlock.
*/
static void
max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum,
int force_toggles)
max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum)
{
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
int old_epnum, same_ep, rcvtog, sndtog;
struct usb_device *old_dev;
int rcvtog, sndtog;
u8 hctl;
old_dev = max3421_hcd->loaded_dev;
old_epnum = max3421_hcd->loaded_epnum;
same_ep = (dev == old_dev && epnum == old_epnum);
if (same_ep && !force_toggles)
return;
if (old_dev && !same_ep) {
/* save the old end-points toggles: */
u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1;
sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
/* no locking: HCD (i.e., we) own toggles, don't we? */
usb_settoggle(old_dev, old_epnum, 0, rcvtog);
usb_settoggle(old_dev, old_epnum, 1, sndtog);
}
/* setup new endpoint's toggle bits: */
rcvtog = usb_gettoggle(dev, epnum, 0);
sndtog = usb_gettoggle(dev, epnum, 1);
hctl = (BIT(rcvtog + MAX3421_HCTL_RCVTOG0_BIT) |
BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT));
max3421_hcd->loaded_epnum = epnum;
spi_wr8(hcd, MAX3421_REG_HCTL, hctl);
/*
@ -532,7 +508,6 @@ max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum,
* address-assignment so it's best to just always load the
* address whenever the end-point changed/was forced.
*/
max3421_hcd->loaded_dev = dev;
spi_wr8(hcd, MAX3421_REG_PERADDR, dev->devnum);
}
@ -667,7 +642,7 @@ max3421_select_and_start_urb(struct usb_hcd *hcd)
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
struct urb *urb, *curr_urb = NULL;
struct max3421_ep *max3421_ep;
int epnum, force_toggles = 0;
int epnum;
struct usb_host_endpoint *ep;
struct list_head *pos;
unsigned long flags;
@ -777,7 +752,6 @@ done:
usb_settoggle(urb->dev, epnum, 0, 1);
usb_settoggle(urb->dev, epnum, 1, 1);
max3421_ep->pkt_state = PKT_STATE_SETUP;
force_toggles = 1;
} else
max3421_ep->pkt_state = PKT_STATE_TRANSFER;
}
@ -785,7 +759,7 @@ done:
spin_unlock_irqrestore(&max3421_hcd->lock, flags);
max3421_ep->last_active = max3421_hcd->frame_number;
max3421_set_address(hcd, urb->dev, epnum, force_toggles);
max3421_set_address(hcd, urb->dev, epnum);
max3421_set_speed(hcd, urb->dev);
max3421_next_transfer(hcd, 0);
return 1;
@ -1380,6 +1354,16 @@ max3421_urb_done(struct usb_hcd *hcd)
status = 0;
urb = max3421_hcd->curr_urb;
if (urb) {
/* save the old end-points toggles: */
u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
int rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1;
int sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
int epnum = usb_endpoint_num(&urb->ep->desc);
/* no locking: HCD (i.e., we) own toggles, don't we? */
usb_settoggle(urb->dev, epnum, 0, rcvtog);
usb_settoggle(urb->dev, epnum, 1, sndtog);
max3421_hcd->curr_urb = NULL;
spin_lock_irqsave(&max3421_hcd->lock, flags);
usb_hcd_unlink_urb_from_ep(hcd, urb);


@ -1555,11 +1555,12 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
* Inform the usbcore about resume-in-progress by returning
* a non-zero value even if there are no status changes.
*/
spin_lock_irqsave(&xhci->lock, flags);
status = bus_state->resuming_ports;
mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC;
spin_lock_irqsave(&xhci->lock, flags);
/* For each port, did anything change? If so, set that bit in buf. */
for (i = 0; i < max_ports; i++) {
temp = readl(ports[i]->addr);


@ -440,6 +440,26 @@ void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
static struct xhci_virt_ep *xhci_get_virt_ep(struct xhci_hcd *xhci,
unsigned int slot_id,
unsigned int ep_index)
{
if (slot_id == 0 || slot_id >= MAX_HC_SLOTS) {
xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
return NULL;
}
if (ep_index >= EP_CTX_PER_DEV) {
xhci_warn(xhci, "Invalid endpoint index %u\n", ep_index);
return NULL;
}
if (!xhci->devs[slot_id]) {
xhci_warn(xhci, "No xhci virt device for slot_id %u\n", slot_id);
return NULL;
}
return &xhci->devs[slot_id]->eps[ep_index];
}
/* Get the right ring for the given slot_id, ep_index and stream_id.
* If the endpoint supports streams, boundary check the URB's stream ID.
* If the endpoint doesn't support streams, return the singular endpoint ring.
@ -450,7 +470,10 @@ struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
{
struct xhci_virt_ep *ep;
ep = &xhci->devs[slot_id]->eps[ep_index];
ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
if (!ep)
return NULL;
/* Common case: no streams */
if (!(ep->ep_state & EP_HAS_STREAMS))
return ep->ring;
@ -743,11 +766,14 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
memset(&deq_state, 0, sizeof(deq_state));
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
if (!ep)
return;
vdev = xhci->devs[slot_id];
ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
trace_xhci_handle_cmd_stop_ep(ep_ctx);
ep = &xhci->devs[slot_id]->eps[ep_index];
last_unlinked_td = list_last_entry(&ep->cancelled_td_list,
struct xhci_td, cancelled_td_list);
@ -1068,9 +1094,11 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
dev = xhci->devs[slot_id];
ep = &dev->eps[ep_index];
ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
if (!ep)
return;
dev = xhci->devs[slot_id];
ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
if (!ep_ring) {
xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
@ -1143,9 +1171,9 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
}
cleanup:
dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
dev->eps[ep_index].queued_deq_seg = NULL;
dev->eps[ep_index].queued_deq_ptr = NULL;
ep->ep_state &= ~SET_DEQ_PENDING;
ep->queued_deq_seg = NULL;
ep->queued_deq_ptr = NULL;
/* Restart any rings with pending URBs */
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
@ -1154,10 +1182,15 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
union xhci_trb *trb, u32 cmd_comp_code)
{
struct xhci_virt_device *vdev;
struct xhci_virt_ep *ep;
struct xhci_ep_ctx *ep_ctx;
unsigned int ep_index;
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
if (!ep)
return;
vdev = xhci->devs[slot_id];
ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
trace_xhci_handle_cmd_reset_ep(ep_ctx);
@ -1187,7 +1220,7 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
xhci_ring_cmd_db(xhci);
} else {
/* Clear our internal halted state */
xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
ep->ep_state &= ~EP_HALTED;
}
/* if this was a soft reset, then restart */
@ -2353,14 +2386,13 @@ static int handle_tx_event(struct xhci_hcd *xhci,
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
ep_trb_dma = le64_to_cpu(event->buffer);
xdev = xhci->devs[slot_id];
if (!xdev) {
xhci_err(xhci, "ERROR Transfer event pointed to bad slot %u\n",
slot_id);
ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
if (!ep) {
xhci_err(xhci, "ERROR Invalid Transfer event\n");
goto err_out;
}
ep = &xdev->eps[ep_index];
xdev = xhci->devs[slot_id];
ep_ring = xhci_dma_to_transfer_ring(ep, ep_trb_dma);
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);


@ -993,6 +993,7 @@ struct xhci_interval_bw_table {
unsigned int ss_bw_out;
};
#define EP_CTX_PER_DEV 31
struct xhci_virt_device {
struct usb_device *udev;
@ -1007,7 +1008,7 @@ struct xhci_virt_device {
struct xhci_container_ctx *out_ctx;
/* Used for addressing devices and configuration changes */
struct xhci_container_ctx *in_ctx;
struct xhci_virt_ep eps[31];
struct xhci_virt_ep eps[EP_CTX_PER_DEV];
u8 fake_port;
u8 real_port;
struct xhci_interval_bw_table *bw_table;


@ -101,6 +101,8 @@ static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
#define usbhsf_dma_map(p) __usbhsf_dma_map_ctrl(p, 1)
#define usbhsf_dma_unmap(p) __usbhsf_dma_map_ctrl(p, 0)
static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map);
static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable);
static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable);
struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
@ -123,6 +125,11 @@ struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
if (chan) {
dmaengine_terminate_all(chan);
usbhsf_dma_unmap(pkt);
} else {
if (usbhs_pipe_is_dir_in(pipe))
usbhsf_rx_irq_ctrl(pipe, 0);
else
usbhsf_tx_irq_ctrl(pipe, 0);
}
usbhs_pipe_clear_without_sequence(pipe, 0, 0);


@ -156,6 +156,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */
{ USB_DEVICE(0x10C4, 0x89FB) }, /* Qivicon ZigBee USB Radio Stick */
{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
{ USB_DEVICE(0x10C4, 0x8A5B) }, /* CEL EM3588 ZigBee USB Stick */
{ USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
{ USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
@ -203,8 +204,8 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
{ USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */
{ USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */
{ USB_DEVICE(0x1901, 0x0197) }, /* GE CS1000 Display serial interface */
{ USB_DEVICE(0x1901, 0x0198) }, /* GE CS1000 M.2 Key E serial interface */
{ USB_DEVICE(0x1901, 0x0197) }, /* GE CS1000 M.2 Key E serial interface */
{ USB_DEVICE(0x1901, 0x0198) }, /* GE CS1000 Display serial interface */
{ USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */
{ USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */


@ -238,6 +238,7 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_UC15 0x9090
/* These u-blox products use Qualcomm's vendor ID */
#define UBLOX_PRODUCT_R410M 0x90b2
#define UBLOX_PRODUCT_R6XX 0x90fa
/* These Yuga products use Qualcomm's vendor ID */
#define YUGA_PRODUCT_CLM920_NC5 0x9625
@ -1101,6 +1102,8 @@ static const struct usb_device_id option_ids[] = {
/* u-blox products using Qualcomm vendor ID */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M),
.driver_info = RSVD(1) | RSVD(3) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R6XX),
.driver_info = RSVD(3) },
/* Quectel products using Quectel vendor ID */
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0xff, 0xff),
.driver_info = NUMEP2 },


@ -45,6 +45,13 @@ UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME),
/* Reported-by: Julian Sikorski <belegdol@gmail.com> */
UNUSUAL_DEV(0x059f, 0x1061, 0x0000, 0x9999,
"LaCie",
"Rugged USB3-FW",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_UAS),
/*
* Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
* commands in UAS mode. Observed with the 1.28 firmware; are there others?


@ -29,16 +29,11 @@ static void SRXAFSCB_TellMeAboutYourself(struct work_struct *);
static int afs_deliver_yfs_cb_callback(struct afs_call *);
#define CM_NAME(name) \
char afs_SRXCB##name##_name[] __tracepoint_string = \
"CB." #name
/*
* CB.CallBack operation type
*/
static CM_NAME(CallBack);
static const struct afs_call_type afs_SRXCBCallBack = {
.name = afs_SRXCBCallBack_name,
.name = "CB.CallBack",
.deliver = afs_deliver_cb_callback,
.destructor = afs_cm_destructor,
.work = SRXAFSCB_CallBack,
@ -47,9 +42,8 @@ static const struct afs_call_type afs_SRXCBCallBack = {
/*
* CB.InitCallBackState operation type
*/
static CM_NAME(InitCallBackState);
static const struct afs_call_type afs_SRXCBInitCallBackState = {
.name = afs_SRXCBInitCallBackState_name,
.name = "CB.InitCallBackState",
.deliver = afs_deliver_cb_init_call_back_state,
.destructor = afs_cm_destructor,
.work = SRXAFSCB_InitCallBackState,
@ -58,9 +52,8 @@ static const struct afs_call_type afs_SRXCBInitCallBackState = {
/*
* CB.InitCallBackState3 operation type
*/
static CM_NAME(InitCallBackState3);
static const struct afs_call_type afs_SRXCBInitCallBackState3 = {
.name = afs_SRXCBInitCallBackState3_name,
.name = "CB.InitCallBackState3",
.deliver = afs_deliver_cb_init_call_back_state3,
.destructor = afs_cm_destructor,
.work = SRXAFSCB_InitCallBackState,
@ -69,9 +62,8 @@ static const struct afs_call_type afs_SRXCBInitCallBackState3 = {
/*
* CB.Probe operation type
*/
static CM_NAME(Probe);
static const struct afs_call_type afs_SRXCBProbe = {
.name = afs_SRXCBProbe_name,
.name = "CB.Probe",
.deliver = afs_deliver_cb_probe,
.destructor = afs_cm_destructor,
.work = SRXAFSCB_Probe,
@ -80,9 +72,8 @@ static const struct afs_call_type afs_SRXCBProbe = {
/*
* CB.ProbeUuid operation type
*/
static CM_NAME(ProbeUuid);
static const struct afs_call_type afs_SRXCBProbeUuid = {
.name = afs_SRXCBProbeUuid_name,
.name = "CB.ProbeUuid",
.deliver = afs_deliver_cb_probe_uuid,
.destructor = afs_cm_destructor,
.work = SRXAFSCB_ProbeUuid,
@ -91,9 +82,8 @@ static const struct afs_call_type afs_SRXCBProbeUuid = {
/*
* CB.TellMeAboutYourself operation type
*/
static CM_NAME(TellMeAboutYourself);
static const struct afs_call_type afs_SRXCBTellMeAboutYourself = {
.name = afs_SRXCBTellMeAboutYourself_name,
.name = "CB.TellMeAboutYourself",
.deliver = afs_deliver_cb_tell_me_about_yourself,
.destructor = afs_cm_destructor,
.work = SRXAFSCB_TellMeAboutYourself,
@ -102,9 +92,8 @@ static const struct afs_call_type afs_SRXCBTellMeAboutYourself = {
/*
* YFS CB.CallBack operation type
*/
static CM_NAME(YFS_CallBack);
static const struct afs_call_type afs_SRXYFSCB_CallBack = {
.name = afs_SRXCBYFS_CallBack_name,
.name = "YFSCB.CallBack",
.deliver = afs_deliver_yfs_cb_callback,
.destructor = afs_cm_destructor,
.work = SRXAFSCB_CallBack,


@ -5768,6 +5768,9 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
mutex_lock(&fs_info->fs_devices->device_list_mutex);
devices = &fs_info->fs_devices->devices;
list_for_each_entry(device, devices, dev_list) {
if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
continue;
ret = btrfs_trim_free_extents(device, &group_trimmed);
if (ret) {
dev_failed++;


@ -543,7 +543,7 @@ again:
* inode has not been flagged as nocompress. This flag can
* change at any time if we discover bad compression ratios.
*/
if (inode_need_compress(inode, start, end)) {
if (nr_pages > 1 && inode_need_compress(inode, start, end)) {
WARN_ON(pages);
pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
if (!pages) {


@ -76,7 +76,7 @@ enum hugetlb_param {
static const struct fs_parameter_spec hugetlb_param_specs[] = {
fsparam_u32 ("gid", Opt_gid),
fsparam_string("min_size", Opt_min_size),
fsparam_u32 ("mode", Opt_mode),
fsparam_u32oct("mode", Opt_mode),
fsparam_string("nr_inodes", Opt_nr_inodes),
fsparam_string("pagesize", Opt_pagesize),
fsparam_string("size", Opt_size),


@ -836,7 +836,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
flags = FOLL_FORCE | (write ? FOLL_WRITE : 0);
while (count > 0) {
int this_len = min_t(int, count, PAGE_SIZE);
size_t this_len = min_t(size_t, count, PAGE_SIZE);
if (write && copy_from_user(page, buf, this_len)) {
copied = -EFAULT;
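
The type change closes a truncation hole: with a size_t count above INT_MAX, min_t(int, ...) compares negative garbage. A runnable illustration; the count value is invented and the truncation shown is the usual two's-complement behaviour:

#include <limits.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	size_t count = (size_t)INT_MAX + 2;   /* hypothetical huge request */

	/* what min_t(int, count, PAGE_SIZE) effectively did */
	int bad = (int)count < (int)PAGE_SIZE ? (int)count : (int)PAGE_SIZE;
	/* what min_t(size_t, count, PAGE_SIZE) does */
	size_t good = count < PAGE_SIZE ? count : PAGE_SIZE;

	printf("bad=%d good=%zu\n", bad, good); /* bad=-2147483647 good=4096 */
	return 0;
}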


@ -1272,23 +1272,21 @@ static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
}
static __always_inline int validate_range(struct mm_struct *mm,
__u64 *start, __u64 len)
__u64 start, __u64 len)
{
__u64 task_size = mm->task_size;
*start = untagged_addr(*start);
if (*start & ~PAGE_MASK)
if (start & ~PAGE_MASK)
return -EINVAL;
if (len & ~PAGE_MASK)
return -EINVAL;
if (!len)
return -EINVAL;
if (*start < mmap_min_addr)
if (start < mmap_min_addr)
return -EINVAL;
if (*start >= task_size)
if (start >= task_size)
return -EINVAL;
if (len > task_size - *start)
if (len > task_size - start)
return -EINVAL;
return 0;
}
@ -1338,7 +1336,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
goto out;
}
ret = validate_range(mm, &uffdio_register.range.start,
ret = validate_range(mm, uffdio_register.range.start,
uffdio_register.range.len);
if (ret)
goto out;
@ -1527,7 +1525,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
goto out;
ret = validate_range(mm, &uffdio_unregister.start,
ret = validate_range(mm, uffdio_unregister.start,
uffdio_unregister.len);
if (ret)
goto out;
@ -1678,7 +1676,7 @@ static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake)))
goto out;
ret = validate_range(ctx->mm, &uffdio_wake.start, uffdio_wake.len);
ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len);
if (ret)
goto out;
@ -1718,7 +1716,7 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
sizeof(uffdio_copy)-sizeof(__s64)))
goto out;
ret = validate_range(ctx->mm, &uffdio_copy.dst, uffdio_copy.len);
ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len);
if (ret)
goto out;
/*
@ -1774,7 +1772,7 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
sizeof(uffdio_zeropage)-sizeof(__s64)))
goto out;
ret = validate_range(ctx->mm, &uffdio_zeropage.range.start,
ret = validate_range(ctx->mm, uffdio_zeropage.range.start,
uffdio_zeropage.range.len);
if (ret)
goto out;


@ -68,6 +68,7 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
unsigned long arg);
#define DRM_IOCTL_NR(n) _IOC_NR(n)
#define DRM_IOCTL_TYPE(n) _IOC_TYPE(n)
#define DRM_MAJOR 226
/**


@ -111,6 +111,34 @@ enum afs_vl_operation {
afs_VL_GetCapabilities = 65537, /* AFS Get VL server capabilities */
};
enum afs_cm_operation {
afs_CB_CallBack = 204, /* AFS break callback promises */
afs_CB_InitCallBackState = 205, /* AFS initialise callback state */
afs_CB_Probe = 206, /* AFS probe client */
afs_CB_GetLock = 207, /* AFS get contents of CM lock table */
afs_CB_GetCE = 208, /* AFS get cache file description */
afs_CB_GetXStatsVersion = 209, /* AFS get version of extended statistics */
afs_CB_GetXStats = 210, /* AFS get contents of extended statistics data */
afs_CB_InitCallBackState3 = 213, /* AFS initialise callback state, version 3 */
afs_CB_ProbeUuid = 214, /* AFS check the client hasn't rebooted */
};
enum yfs_cm_operation {
yfs_CB_Probe = 206, /* YFS probe client */
yfs_CB_GetLock = 207, /* YFS get contents of CM lock table */
yfs_CB_XStatsVersion = 209, /* YFS get version of extended statistics */
yfs_CB_GetXStats = 210, /* YFS get contents of extended statistics data */
yfs_CB_InitCallBackState3 = 213, /* YFS initialise callback state, version 3 */
yfs_CB_ProbeUuid = 214, /* YFS check the client hasn't rebooted */
yfs_CB_GetServerPrefs = 215,
yfs_CB_GetCellServDV = 216,
yfs_CB_GetLocalCell = 217,
yfs_CB_GetCacheConfig = 218,
yfs_CB_GetCellByNum = 65537,
yfs_CB_TellMeAboutYourself = 65538, /* get client capabilities */
yfs_CB_CallBack = 64204,
};
enum afs_edit_dir_op {
afs_edit_dir_create,
afs_edit_dir_create_error,
@ -312,6 +340,32 @@ enum afs_cb_break_reason {
EM(afs_YFSVL_GetEndpoints, "YFSVL.GetEndpoints") \
E_(afs_VL_GetCapabilities, "VL.GetCapabilities")
#define afs_cm_operations \
EM(afs_CB_CallBack, "CB.CallBack") \
EM(afs_CB_InitCallBackState, "CB.InitCallBackState") \
EM(afs_CB_Probe, "CB.Probe") \
EM(afs_CB_GetLock, "CB.GetLock") \
EM(afs_CB_GetCE, "CB.GetCE") \
EM(afs_CB_GetXStatsVersion, "CB.GetXStatsVersion") \
EM(afs_CB_GetXStats, "CB.GetXStats") \
EM(afs_CB_InitCallBackState3, "CB.InitCallBackState3") \
E_(afs_CB_ProbeUuid, "CB.ProbeUuid")
#define yfs_cm_operations \
EM(yfs_CB_Probe, "YFSCB.Probe") \
EM(yfs_CB_GetLock, "YFSCB.GetLock") \
EM(yfs_CB_XStatsVersion, "YFSCB.XStatsVersion") \
EM(yfs_CB_GetXStats, "YFSCB.GetXStats") \
EM(yfs_CB_InitCallBackState3, "YFSCB.InitCallBackState3") \
EM(yfs_CB_ProbeUuid, "YFSCB.ProbeUuid") \
EM(yfs_CB_GetServerPrefs, "YFSCB.GetServerPrefs") \
EM(yfs_CB_GetCellServDV, "YFSCB.GetCellServDV") \
EM(yfs_CB_GetLocalCell, "YFSCB.GetLocalCell") \
EM(yfs_CB_GetCacheConfig, "YFSCB.GetCacheConfig") \
EM(yfs_CB_GetCellByNum, "YFSCB.GetCellByNum") \
EM(yfs_CB_TellMeAboutYourself, "YFSCB.TellMeAboutYourself") \
E_(yfs_CB_CallBack, "YFSCB.CallBack")
#define afs_edit_dir_ops \
EM(afs_edit_dir_create, "create") \
EM(afs_edit_dir_create_error, "c_fail") \
@ -442,6 +496,8 @@ afs_call_traces;
afs_server_traces;
afs_fs_operations;
afs_vl_operations;
afs_cm_operations;
yfs_cm_operations;
afs_edit_dir_ops;
afs_edit_dir_reasons;
afs_eproto_causes;
@ -522,20 +578,21 @@ TRACE_EVENT(afs_cb_call,
TP_STRUCT__entry(
__field(unsigned int, call )
__field(const char *, name )
__field(u32, op )
__field(u16, service_id )
),
TP_fast_assign(
__entry->call = call->debug_id;
__entry->name = call->type->name;
__entry->op = call->operation_ID;
__entry->service_id = call->service_id;
),
TP_printk("c=%08x %s o=%u",
TP_printk("c=%08x %s",
__entry->call,
__entry->name,
__entry->op)
__entry->service_id == 2501 ?
__print_symbolic(__entry->op, yfs_cm_operations) :
__print_symbolic(__entry->op, afs_cm_operations))
);
TRACE_EVENT(afs_call,


@ -3221,10 +3221,30 @@ static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
if (unlikely(!head))
return true;
return reader->read == rb_page_commit(reader) &&
(commit == reader ||
(commit == head &&
head->read == rb_page_commit(commit)));
/* Reader should exhaust content in reader page */
if (reader->read != rb_page_commit(reader))
return false;
/*
* If writers are committing on the reader page, then all committed
* content has already been read and the ring buffer is empty.
*/
if (commit == reader)
return true;
/*
* If writers are committing on a page other than reader page
* and head page, there should always be content to read.
*/
if (commit != head)
return false;
/*
* Writers are committing on the head page; we only need to check
* whether any data has been committed there, since the reader will
* swap the reader page with the head page when it needs to read.
*/
return rb_page_commit(commit) == 0;
}
/**


@ -4975,6 +4975,10 @@ static const char readme_msg[] =
"\t [:name=histname1]\n"
"\t [:<handler>.<action>]\n"
"\t [if <filter>]\n\n"
"\t Note, special fields can be used as well:\n"
"\t common_timestamp - to record current timestamp\n"
"\t common_cpu - to record the CPU the event happened on\n"
"\n"
"\t When a matching event is hit, an entry is added to a hash\n"
"\t table using the key(s) and value(s) named, and the value of a\n"
"\t sum called 'hitcount' is incremented. Keys and values\n"


@ -2001,7 +2001,7 @@ static const char *hist_field_name(struct hist_field *field,
field->flags & HIST_FIELD_FL_ALIAS)
field_name = hist_field_name(field->operands[0], ++level);
else if (field->flags & HIST_FIELD_FL_CPU)
field_name = "cpu";
field_name = "common_cpu";
else if (field->flags & HIST_FIELD_FL_EXPR ||
field->flags & HIST_FIELD_FL_VAR_REF) {
if (field->system) {
@ -2873,14 +2873,24 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
hist_data->enable_timestamps = true;
if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
hist_data->attrs->ts_in_usecs = true;
} else if (strcmp(field_name, "cpu") == 0)
} else if (strcmp(field_name, "common_cpu") == 0)
*flags |= HIST_FIELD_FL_CPU;
else {
field = trace_find_event_field(file->event_call, field_name);
if (!field || !field->size) {
hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, errpos(field_name));
field = ERR_PTR(-EINVAL);
goto out;
/*
* For backward compatibility, if field_name
* was "cpu", then we treat this the same as
* common_cpu.
*/
if (strcmp(field_name, "cpu") == 0) {
*flags |= HIST_FIELD_FL_CPU;
} else {
hist_err(tr, HIST_ERR_FIELD_NOT_FOUND,
errpos(field_name));
field = ERR_PTR(-EINVAL);
goto out;
}
}
}
out:
@ -5641,7 +5651,7 @@ static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
seq_printf(m, "%s=", hist_field->var.name);
if (hist_field->flags & HIST_FIELD_FL_CPU)
seq_puts(m, "cpu");
seq_puts(m, "common_cpu");
else if (field_name) {
if (hist_field->flags & HIST_FIELD_FL_VAR_REF ||
hist_field->flags & HIST_FIELD_FL_ALIAS)


@ -539,7 +539,8 @@ static int caif_seqpkt_sendmsg(struct socket *sock, struct msghdr *msg,
goto err;
ret = -EINVAL;
if (unlikely(msg->msg_iter.iov->iov_base == NULL))
if (unlikely(msg->msg_iter.nr_segs == 0) ||
unlikely(msg->msg_iter.iov->iov_base == NULL))
goto err;
noblock = msg->msg_flags & MSG_DONTWAIT;


@ -815,7 +815,7 @@ static int dn_auto_bind(struct socket *sock)
static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
{
struct dn_scp *scp = DN_SK(sk);
DEFINE_WAIT(wait);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
int err;
if (scp->state != DN_CR)
@ -825,11 +825,11 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
scp->segsize_loc = dst_metric_advmss(__sk_dst_get(sk));
dn_send_conn_conf(sk, allocation);
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
add_wait_queue(sk_sleep(sk), &wait);
for(;;) {
release_sock(sk);
if (scp->state == DN_CC)
*timeo = schedule_timeout(*timeo);
*timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo);
lock_sock(sk);
err = 0;
if (scp->state == DN_RUN)
@ -843,9 +843,8 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
err = -EAGAIN;
if (!*timeo)
break;
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
}
finish_wait(sk_sleep(sk), &wait);
remove_wait_queue(sk_sleep(sk), &wait);
if (err == 0) {
sk->sk_socket->state = SS_CONNECTED;
} else if (scp->state != DN_CC) {
@ -857,7 +856,7 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
static int dn_wait_run(struct sock *sk, long *timeo)
{
struct dn_scp *scp = DN_SK(sk);
DEFINE_WAIT(wait);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
int err = 0;
if (scp->state == DN_RUN)
@ -866,11 +865,11 @@ static int dn_wait_run(struct sock *sk, long *timeo)
if (!*timeo)
return -EALREADY;
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
add_wait_queue(sk_sleep(sk), &wait);
for(;;) {
release_sock(sk);
if (scp->state == DN_CI || scp->state == DN_CC)
*timeo = schedule_timeout(*timeo);
*timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo);
lock_sock(sk);
err = 0;
if (scp->state == DN_RUN)
@ -884,9 +883,8 @@ static int dn_wait_run(struct sock *sk, long *timeo)
err = -ETIMEDOUT;
if (!*timeo)
break;
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
}
finish_wait(sk_sleep(sk), &wait);
remove_wait_queue(sk_sleep(sk), &wait);
out:
if (err == 0) {
sk->sk_socket->state = SS_CONNECTED;
@ -1031,16 +1029,16 @@ static void dn_user_copy(struct sk_buff *skb, struct optdata_dn *opt)
static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
{
DEFINE_WAIT(wait);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct sk_buff *skb = NULL;
int err = 0;
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
add_wait_queue(sk_sleep(sk), &wait);
for(;;) {
release_sock(sk);
skb = skb_dequeue(&sk->sk_receive_queue);
if (skb == NULL) {
*timeo = schedule_timeout(*timeo);
*timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo);
skb = skb_dequeue(&sk->sk_receive_queue);
}
lock_sock(sk);
@ -1055,9 +1053,8 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
err = -EAGAIN;
if (!*timeo)
break;
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
}
finish_wait(sk_sleep(sk), &wait);
remove_wait_queue(sk_sleep(sk), &wait);
return skb == NULL ? ERR_PTR(err) : skb;
}
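
All three decnet waiters above move from open-coded prepare_to_wait()/schedule_timeout() loops to the wait_woken() idiom, which keeps a per-entry WQ_FLAG_WOKEN flag so a wakeup arriving between the condition check and the sleep is not lost. A generic kernel-side sketch of the pattern, with queue and condition names invented:

#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/wait.h>

/* generic form of the loops above: sleep until cond() holds or timeo expires */
static long wait_for_cond(wait_queue_head_t *wq, bool (*cond)(void *),
			  void *arg, long timeo)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(wq, &wait);
	while (!cond(arg) && timeo && !signal_pending(current))
		/* wait_woken() consumes WQ_FLAG_WOKEN, so a wakeup that
		 * fired after cond() was checked still ends this sleep */
		timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
	remove_wait_queue(wq, &wait);
	return timeo;
}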


@ -636,7 +636,7 @@ static int __init tcp_bpf_v4_build_proto(void)
tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV4], &tcp_prot);
return 0;
}
core_initcall(tcp_bpf_v4_build_proto);
late_initcall(tcp_bpf_v4_build_proto);
static void tcp_bpf_update_sk_prot(struct sock *sk, struct sk_psock *psock)
{


@ -504,8 +504,15 @@ void tcp_fastopen_active_disable(struct sock *sk)
{
struct net *net = sock_net(sk);
/* Paired with READ_ONCE() in tcp_fastopen_active_should_disable() */
WRITE_ONCE(net->ipv4.tfo_active_disable_stamp, jiffies);
/* Paired with smp_rmb() in tcp_fastopen_active_should_disable().
* We want net->ipv4.tfo_active_disable_stamp to be updated first.
*/
smp_mb__before_atomic();
atomic_inc(&net->ipv4.tfo_active_disable_times);
net->ipv4.tfo_active_disable_stamp = jiffies;
NET_INC_STATS(net, LINUX_MIB_TCPFASTOPENBLACKHOLE);
}
@ -523,10 +530,16 @@ bool tcp_fastopen_active_should_disable(struct sock *sk)
if (!tfo_da_times)
return false;
/* Paired with smp_mb__before_atomic() in tcp_fastopen_active_disable() */
smp_rmb();
/* Limit timeout to max: 2^6 * initial timeout */
multiplier = 1 << min(tfo_da_times - 1, 6);
timeout = multiplier * tfo_bh_timeout * HZ;
if (time_before(jiffies, sock_net(sk)->ipv4.tfo_active_disable_stamp + timeout))
/* Paired with the WRITE_ONCE() in tcp_fastopen_active_disable(). */
timeout = READ_ONCE(sock_net(sk)->ipv4.tfo_active_disable_stamp) +
multiplier * tfo_bh_timeout * HZ;
if (time_before(jiffies, timeout))
return true;
/* Mark check bit so we can check for successful active TFO


@ -477,7 +477,9 @@ int ip6_forward(struct sk_buff *skb)
if (skb_warn_if_lro(skb))
goto drop;
if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
if (!net->ipv6.devconf_all->disable_policy &&
!idev->cnf.disable_policy &&
!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
goto drop;
}


@ -3655,7 +3655,7 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
err = PTR_ERR(rt->fib6_metrics);
/* Do not leave garbage there. */
rt->fib6_metrics = (struct dst_metrics *)&dst_default_metrics;
goto out;
goto out_free;
}
if (cfg->fc_flags & RTF_ADDRCONF)


@ -121,11 +121,9 @@ static void nr_heartbeat_expiry(struct timer_list *t)
is accepted() it isn't 'dead' so doesn't get removed. */
if (sock_flag(sk, SOCK_DESTROY) ||
(sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
sock_hold(sk);
bh_unlock_sock(sk);
nr_destroy_socket(sk);
sock_put(sk);
return;
goto out;
}
break;
@ -146,6 +144,8 @@ static void nr_heartbeat_expiry(struct timer_list *t)
nr_start_heartbeat(sk);
bh_unlock_sock(sk);
out:
sock_put(sk);
}
static void nr_t2timer_expiry(struct timer_list *t)
@ -159,6 +159,7 @@ static void nr_t2timer_expiry(struct timer_list *t)
nr_enquiry_response(sk);
}
bh_unlock_sock(sk);
sock_put(sk);
}
static void nr_t4timer_expiry(struct timer_list *t)
@ -169,6 +170,7 @@ static void nr_t4timer_expiry(struct timer_list *t)
bh_lock_sock(sk);
nr_sk(sk)->condition &= ~NR_COND_PEER_RX_BUSY;
bh_unlock_sock(sk);
sock_put(sk);
}
static void nr_idletimer_expiry(struct timer_list *t)
@ -197,6 +199,7 @@ static void nr_idletimer_expiry(struct timer_list *t)
sock_set_flag(sk, SOCK_DEAD);
}
bh_unlock_sock(sk);
sock_put(sk);
}
static void nr_t1timer_expiry(struct timer_list *t)
@ -209,8 +212,7 @@ static void nr_t1timer_expiry(struct timer_list *t)
case NR_STATE_1:
if (nr->n2count == nr->n2) {
nr_disconnect(sk, ETIMEDOUT);
bh_unlock_sock(sk);
return;
goto out;
} else {
nr->n2count++;
nr_write_internal(sk, NR_CONNREQ);
@ -220,8 +222,7 @@ static void nr_t1timer_expiry(struct timer_list *t)
case NR_STATE_2:
if (nr->n2count == nr->n2) {
nr_disconnect(sk, ETIMEDOUT);
bh_unlock_sock(sk);
return;
goto out;
} else {
nr->n2count++;
nr_write_internal(sk, NR_DISCREQ);
@ -231,8 +232,7 @@ static void nr_t1timer_expiry(struct timer_list *t)
case NR_STATE_3:
if (nr->n2count == nr->n2) {
nr_disconnect(sk, ETIMEDOUT);
bh_unlock_sock(sk);
return;
goto out;
} else {
nr->n2count++;
nr_requeue_frames(sk);
@ -241,5 +241,7 @@ static void nr_t1timer_expiry(struct timer_list *t)
}
nr_start_t1timer(sk);
out:
bh_unlock_sock(sk);
sock_put(sk);
}
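
The common thread in these netrom hunks: arming a socket timer (presumably via sk_reset_timer() in the nr_start_*timer() helpers) holds a reference on the sock, so every expiry handler, including the early-exit paths now funnelled through goto out, must drop exactly one reference with sock_put() before returning rather than returning with the reference still held.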


@ -6,6 +6,7 @@
*/
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
@ -33,6 +34,13 @@ static int tcf_skbmod_act(struct sk_buff *skb, const struct tc_action *a,
tcf_lastuse_update(&d->tcf_tm);
bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);
action = READ_ONCE(d->tcf_action);
if (unlikely(action == TC_ACT_SHOT))
goto drop;
if (!skb->dev || skb->dev->type != ARPHRD_ETHER)
return action;
/* XXX: if you are going to edit more fields beyond ethernet header
* (example when you add IP header replacement or vlan swap)
* then MAX_EDIT_LEN needs to change appropriately
@ -41,10 +49,6 @@ static int tcf_skbmod_act(struct sk_buff *skb, const struct tc_action *a,
if (unlikely(err)) /* best policy is to drop on the floor */
goto drop;
action = READ_ONCE(d->tcf_action);
if (unlikely(action == TC_ACT_SHOT))
goto drop;
p = rcu_dereference_bh(d->skbmod_p);
flags = p->flags;
if (flags & SKBMOD_F_DMAC)


@ -2866,7 +2866,7 @@ replay:
break;
case RTM_GETCHAIN:
err = tc_chain_notify(chain, skb, n->nlmsg_seq,
n->nlmsg_seq, n->nlmsg_type, true);
n->nlmsg_flags, n->nlmsg_type, true);
if (err < 0)
NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
break;


@ -278,6 +278,8 @@ static int tcindex_filter_result_init(struct tcindex_filter_result *r,
TCA_TCINDEX_POLICE);
}
static void tcindex_free_perfect_hash(struct tcindex_data *cp);
static void tcindex_partial_destroy_work(struct work_struct *work)
{
struct tcindex_data *p = container_of(to_rcu_work(work),
@ -285,7 +287,8 @@ static void tcindex_partial_destroy_work(struct work_struct *work)
rwork);
rtnl_lock();
kfree(p->perfect);
if (p->perfect)
tcindex_free_perfect_hash(p);
kfree(p);
rtnl_unlock();
}


@ -866,6 +866,8 @@ int sctp_auth_set_key(struct sctp_endpoint *ep,
if (replace) {
list_del_init(&shkey->key_list);
sctp_auth_shkey_release(shkey);
if (asoc && asoc->active_key_id == auth_key->sca_keynumber)
sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL);
}
list_add(&cur_key->key_list, sh_keys);


@ -814,6 +814,7 @@ static int snd_sb_csp_start(struct snd_sb_csp * p, int sample_width, int channel
mixR = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV + 1);
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL & 0x7);
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR & 0x7);
spin_unlock_irqrestore(&p->chip->mixer_lock, flags);
spin_lock(&p->chip->reg_lock);
set_mode_register(p->chip, 0xc0); /* c0 = STOP */
@ -853,6 +854,7 @@ static int snd_sb_csp_start(struct snd_sb_csp * p, int sample_width, int channel
spin_unlock(&p->chip->reg_lock);
/* restore PCM volume */
spin_lock_irqsave(&p->chip->mixer_lock, flags);
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL);
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR);
spin_unlock_irqrestore(&p->chip->mixer_lock, flags);
@ -878,6 +880,7 @@ static int snd_sb_csp_stop(struct snd_sb_csp * p)
mixR = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV + 1);
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL & 0x7);
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR & 0x7);
spin_unlock_irqrestore(&p->chip->mixer_lock, flags);
spin_lock(&p->chip->reg_lock);
if (p->running & SNDRV_SB_CSP_ST_QSOUND) {
@ -892,6 +895,7 @@ static int snd_sb_csp_stop(struct snd_sb_csp * p)
spin_unlock(&p->chip->reg_lock);
/* restore PCM volume */
spin_lock_irqsave(&p->chip->mixer_lock, flags);
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL);
snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR);
spin_unlock_irqrestore(&p->chip->mixer_lock, flags);


@ -1820,6 +1820,7 @@ static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t cvt_nid)
static const struct snd_pci_quirk force_connect_list[] = {
SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1),
SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1),
SND_PCI_QUIRK(0x1462, 0xec94, "MS-7C94", 1),
{}
};


@ -1691,6 +1691,8 @@ static const struct regmap_config rt5631_regmap_config = {
.reg_defaults = rt5631_reg,
.num_reg_defaults = ARRAY_SIZE(rt5631_reg),
.cache_type = REGCACHE_RBTREE,
.use_single_read = true,
.use_single_write = true,
};
static int rt5631_i2c_probe(struct i2c_client *i2c,


@ -3242,7 +3242,15 @@ static void snd_usb_mixer_dump_cval(struct snd_info_buffer *buffer,
{
struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list);
static const char * const val_types[] = {
"BOOLEAN", "INV_BOOLEAN", "S8", "U8", "S16", "U16", "S32", "U32",
[USB_MIXER_BOOLEAN] = "BOOLEAN",
[USB_MIXER_INV_BOOLEAN] = "INV_BOOLEAN",
[USB_MIXER_S8] = "S8",
[USB_MIXER_U8] = "U8",
[USB_MIXER_S16] = "S16",
[USB_MIXER_U16] = "U16",
[USB_MIXER_S32] = "S32",
[USB_MIXER_U32] = "U32",
[USB_MIXER_BESPOKEN] = "BESPOKEN",
};
snd_iprintf(buffer, " Info: id=%i, control=%i, cmask=0x%x, "
"channels=%i, type=\"%s\"\n", cval->head.id,


@ -1840,6 +1840,9 @@ static const struct registration_quirk registration_quirks[] = {
REG_QUIRK_ENTRY(0x0951, 0x16d8, 2), /* Kingston HyperX AMP */
REG_QUIRK_ENTRY(0x0951, 0x16ed, 2), /* Kingston HyperX Cloud Alpha S */
REG_QUIRK_ENTRY(0x0951, 0x16ea, 2), /* Kingston HyperX Cloud Flight S */
REG_QUIRK_ENTRY(0x0ecb, 0x1f46, 2), /* JBL Quantum 600 */
REG_QUIRK_ENTRY(0x0ecb, 0x2039, 2), /* JBL Quantum 400 */
REG_QUIRK_ENTRY(0x0ecb, 0x203e, 2), /* JBL Quantum 800 */
{ 0 } /* terminator */
};


@ -171,6 +171,11 @@ int mount_bpffs_for_pin(const char *name)
int err = 0;
file = malloc(strlen(name) + 1);
if (!file) {
p_err("mem alloc failed");
return -1;
}
strcpy(file, name);
dir = dirname(file);


@ -836,8 +836,10 @@ int cmd_inject(int argc, const char **argv)
data.path = inject.input_name;
inject.session = perf_session__new(&data, inject.output.is_pipe, &inject.tool);
if (IS_ERR(inject.session))
return PTR_ERR(inject.session);
if (IS_ERR(inject.session)) {
ret = PTR_ERR(inject.session);
goto out_close_output;
}
if (zstd_init(&(inject.session->zstd_data), 0) < 0)
pr_warning("Decompression initialization failed.\n");
@ -874,5 +876,7 @@ int cmd_inject(int argc, const char **argv)
out_delete:
zstd_fini(&(inject.session->zstd_data));
perf_session__delete(inject.session);
out_close_output:
perf_data__close(&inject.output);
return ret;
}


@ -2474,6 +2474,12 @@ static void perf_script__exit_per_event_dump_stats(struct perf_script *script)
}
}
static void perf_script__exit(struct perf_script *script)
{
perf_thread_map__put(script->threads);
perf_cpu_map__put(script->cpus);
}
static int __cmd_script(struct perf_script *script)
{
int ret;
@ -3893,6 +3899,7 @@ out_delete:
perf_evlist__free_stats(session->evlist);
perf_session__delete(session);
perf_script__exit(&script);
if (script_started)
cleanup_scripting();


@ -119,6 +119,6 @@ int test__event_update(struct test *test __maybe_unused, int subtest __maybe_unu
TEST_ASSERT_VAL("failed to synthesize attr update cpus",
!perf_event__synthesize_event_update_cpus(&tmp.tool, evsel, process_event_cpus));
perf_cpu_map__put(evsel->core.own_cpus);
evlist__delete(evlist);
return 0;
}


@ -52,6 +52,7 @@ static int session_write_header(char *path)
TEST_ASSERT_VAL("failed to write header",
!perf_session__write_header(session, session->evlist, data.file.fd, true));
evlist__delete(session->evlist);
perf_session__delete(session);
return 0;


@ -20,7 +20,7 @@
static void close_dir(struct perf_data_file *files, int nr)
{
while (--nr >= 1) {
while (--nr >= 0) {
close(files[nr].fd);
zfree(&files[nr].path);
}
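
Concretely, with nr = 3 the old --nr >= 1 loop closed files[2] and files[1] but stopped short of files[0], leaking one descriptor and its path on every call; --nr >= 0 walks all entries down to index 0.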


@ -1086,8 +1086,10 @@ struct map *dso__new_map(const char *name)
struct map *map = NULL;
struct dso *dso = dso__new(name);
if (dso)
if (dso) {
map = map__new2(0, dso);
dso__put(dso);
}
return map;
}


@ -175,6 +175,7 @@ void perf_env__exit(struct perf_env *env)
zfree(&env->cpuid);
zfree(&env->cmdline);
zfree(&env->cmdline_argv);
zfree(&env->sibling_dies);
zfree(&env->sibling_cores);
zfree(&env->sibling_threads);
zfree(&env->pmu_mappings);


@ -69,7 +69,7 @@ int lzma_decompress_to_file(const char *input, int output_fd)
if (ferror(infile)) {
pr_err("lzma: read error: %s\n", strerror(errno));
goto err_fclose;
goto err_lzma_end;
}
if (feof(infile))
@ -83,7 +83,7 @@ int lzma_decompress_to_file(const char *input, int output_fd)
if (writen(output_fd, buf_out, write_size) != write_size) {
pr_err("lzma: write error: %s\n", strerror(errno));
goto err_fclose;
goto err_lzma_end;
}
strm.next_out = buf_out;
@ -95,11 +95,13 @@ int lzma_decompress_to_file(const char *input, int output_fd)
break;
pr_err("lzma: failed %s\n", lzma_strerror(ret));
goto err_fclose;
goto err_lzma_end;
}
}
err = 0;
err_lzma_end:
lzma_end(&strm);
err_fclose:
fclose(infile);
return err;
