This is the 5.4.41 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl683gYACgkQONu9yGCS
 aT4UrQ/+OWH+sgNXQW2TtBAtDt+b6WCPCwsAe74YdsbqzVf/mxVGVpHKvFJCEXxA
 KDRrBqdICgrjZ+L8Y2MPzNhyD2/nLHwB8M99ARx4B6mvOu4pT0+/xATViGmotqDN
 tzpQ3HvnFLlR/z74/xDanXgXrTAv591hpSQlpUmf6NCiBZNlhndId4qnh/z8Eumn
 wVLseK1r2CY3s3mMZTw6BXmHmj6zGA70Ckuvhp9JmxiKs9fg+pmDlUaRPHex91Xh
 LtSJd7CdpVr5YrMIC9DcQ2TN46KsZZkoo+l/W8jVNVG3ggqWUrHn7wGamwTHafd1
 TkoU7eQt9ps15p7Sj4Z19de30Y1m/g+Qq7L4NrgGcX8bhnCHdgfdbAj40GINOaB2
 WLHRVu3PgEUCbLCSixE5BRLmBTECjWapIiW50fp/jogGmeRiBbJFFnWbVtiEwyme
 KU7ZJRw/sfKNzIN0QioJ/EadK7ZkvIfr/ajinpXdxIA+4gteyKRrNb0323FRG3Ev
 JoStdR2g+dv+yEJYLmsCl3N0eEETzHK8fRJbp0lkSKjEaxW/yDRpIdhREXmWGd2V
 Hprcoiyknae0MEIFFnTvA4Oj7wOYezxP0tQg14nOdtXZX5afry5qP/lryE0kYxiV
 JcI4BrwfWI8hOwdaFd413qp+JG7eKV3RhanhaPimroQJn0WKB9Q=
 =Ipyc
 -----END PGP SIGNATURE-----

Merge tag 'v5.4.41' into 5.4-1.0.0-imx

This is the 5.4.41 stable release

No conflicts recorded

Signed-off-by: Andrey Zhizhikin <andrey.zhizhikin@leica-geosystems.com>
commit 8849212a93
Author: Andrey Zhizhikin <andrey.zhizhikin@leica-geosystems.com>
Date:   2020-05-14 19:37:28 +00:00

90 changed files with 647 additions and 345 deletions
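For reference, a merge commit like this one is normally produced by fetching the signed stable tag and merging it into the vendor branch, which lets git carry the tag's PGP signature (shown above) into the merge message. A minimal sketch; the stable remote URL is an assumption and not recorded in this commit:

    git fetch --tags https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
    git verify-tag v5.4.41      # check the stable maintainer's signature on the tag
    git checkout 5.4-1.0.0-imx
    git merge v5.4.41           # records "Merge tag 'v5.4.41' into 5.4-1.0.0-imx"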


@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 40
+SUBLEVEL = 41
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus


@@ -30,7 +30,7 @@ static int nhpoly1305_neon_update(struct shash_desc *desc,
 return crypto_nhpoly1305_update(desc, src, srclen);
 do {
-unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
+unsigned int n = min_t(unsigned int, srclen, SZ_4K);
 kernel_neon_begin();
 crypto_nhpoly1305_update_helper(desc, src, n, _nh_neon);


@@ -30,7 +30,7 @@ static int nhpoly1305_neon_update(struct shash_desc *desc,
 return crypto_nhpoly1305_update(desc, src, srclen);
 do {
-unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
+unsigned int n = min_t(unsigned int, srclen, SZ_4K);
 kernel_neon_begin();
 crypto_nhpoly1305_update_helper(desc, src, n, _nh_neon);


@@ -202,6 +202,13 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 }
 memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
+if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
+int i;
+for (i = 0; i < 16; i++)
+*vcpu_reg32(vcpu, i) = (u32)*vcpu_reg32(vcpu, i);
+}
 out:
 return err;
 }


@@ -230,6 +230,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 ptep = (pte_t *)pudp;
 } else if (sz == (CONT_PTE_SIZE)) {
 pmdp = pmd_alloc(mm, pudp, addr);
+if (!pmdp)
+return NULL;
 WARN_ON(addr & (sz - 1));
 /*


@@ -116,7 +116,8 @@ void __init setup_bootmem(void)
 memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
 set_max_mapnr(PFN_DOWN(mem_size));
-max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
+max_pfn = PFN_DOWN(memblock_end_of_DRAM());
+max_low_pfn = max_pfn;
 #ifdef CONFIG_BLK_DEV_INITRD
 setup_initrd();


@@ -626,10 +626,12 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
 * available for the guest are AQIC and TAPQ with the t bit set
 * since we do not set IC.3 (FIII) we currently will only intercept
 * the AQIC function code.
+* Note: running nested under z/VM can result in intercepts for other
+* function codes, e.g. PQAP(QCI). We do not support this and bail out.
 */
 reg0 = vcpu->run->s.regs.gprs[0];
 fc = (reg0 >> 24) & 0xff;
-if (WARN_ON_ONCE(fc != 0x03))
+if (fc != 0x03)
 return -EOPNOTSUPP;
 /* PQAP instruction is allowed for guest kernel only */


@@ -29,7 +29,7 @@ static int nhpoly1305_avx2_update(struct shash_desc *desc,
 return crypto_nhpoly1305_update(desc, src, srclen);
 do {
-unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
+unsigned int n = min_t(unsigned int, srclen, SZ_4K);
 kernel_fpu_begin();
 crypto_nhpoly1305_update_helper(desc, src, n, _nh_avx2);


@@ -29,7 +29,7 @@ static int nhpoly1305_sse2_update(struct shash_desc *desc,
 return crypto_nhpoly1305_update(desc, src, srclen);
 do {
-unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
+unsigned int n = min_t(unsigned int, srclen, SZ_4K);
 kernel_fpu_begin();
 crypto_nhpoly1305_update_helper(desc, src, n, _nh_sse2);


@@ -98,13 +98,6 @@ For 32-bit we have the following conventions - kernel is built with
 #define SIZEOF_PTREGS 21*8
 .macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0
-/*
-* Push registers and sanitize registers of values that a
-* speculation attack might otherwise want to exploit. The
-* lower registers are likely clobbered well before they
-* could be put to use in a speculative execution gadget.
-* Interleave XOR with PUSH for better uop scheduling:
-*/
 .if \save_ret
 pushq %rsi /* pt_regs->si */
 movq 8(%rsp), %rsi /* temporarily store the return address in %rsi */
@@ -114,34 +107,43 @@
 pushq %rsi /* pt_regs->si */
 .endif
 pushq \rdx /* pt_regs->dx */
-xorl %edx, %edx /* nospec dx */
 pushq %rcx /* pt_regs->cx */
-xorl %ecx, %ecx /* nospec cx */
 pushq \rax /* pt_regs->ax */
 pushq %r8 /* pt_regs->r8 */
-xorl %r8d, %r8d /* nospec r8 */
 pushq %r9 /* pt_regs->r9 */
-xorl %r9d, %r9d /* nospec r9 */
 pushq %r10 /* pt_regs->r10 */
-xorl %r10d, %r10d /* nospec r10 */
 pushq %r11 /* pt_regs->r11 */
-xorl %r11d, %r11d /* nospec r11*/
 pushq %rbx /* pt_regs->rbx */
-xorl %ebx, %ebx /* nospec rbx*/
 pushq %rbp /* pt_regs->rbp */
-xorl %ebp, %ebp /* nospec rbp*/
 pushq %r12 /* pt_regs->r12 */
-xorl %r12d, %r12d /* nospec r12*/
 pushq %r13 /* pt_regs->r13 */
-xorl %r13d, %r13d /* nospec r13*/
 pushq %r14 /* pt_regs->r14 */
-xorl %r14d, %r14d /* nospec r14*/
 pushq %r15 /* pt_regs->r15 */
-xorl %r15d, %r15d /* nospec r15*/
 UNWIND_HINT_REGS
 .if \save_ret
 pushq %rsi /* return address on top of stack */
 .endif
+/*
+* Sanitize registers of values that a speculation attack might
+* otherwise want to exploit. The lower registers are likely clobbered
+* well before they could be put to use in a speculative execution
+* gadget.
+*/
+xorl %edx, %edx /* nospec dx */
+xorl %ecx, %ecx /* nospec cx */
+xorl %r8d, %r8d /* nospec r8 */
+xorl %r9d, %r9d /* nospec r9 */
+xorl %r10d, %r10d /* nospec r10 */
+xorl %r11d, %r11d /* nospec r11 */
+xorl %ebx, %ebx /* nospec rbx */
+xorl %ebp, %ebp /* nospec rbp */
+xorl %r12d, %r12d /* nospec r12 */
+xorl %r13d, %r13d /* nospec r13 */
+xorl %r14d, %r14d /* nospec r14 */
+xorl %r15d, %r15d /* nospec r15 */
 .endm
 .macro POP_REGS pop_rdi=1 skip_r11rcx=0


@@ -249,7 +249,6 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
 */
 syscall_return_via_sysret:
 /* rcx and r11 are already restored (see code above) */
-UNWIND_HINT_EMPTY
 POP_REGS pop_rdi=0 skip_r11rcx=1
 /*
@@ -258,6 +257,7 @@ syscall_return_via_sysret:
 */
 movq %rsp, %rdi
 movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
+UNWIND_HINT_EMPTY
 pushq RSP-RDI(%rdi) /* RSP */
 pushq (%rdi) /* RDI */
@@ -512,7 +512,7 @@ END(spurious_entries_start)
 * +----------------------------------------------------+
 */
 ENTRY(interrupt_entry)
-UNWIND_HINT_FUNC
+UNWIND_HINT_IRET_REGS offset=16
 ASM_CLAC
 cld
@@ -544,9 +544,9 @@ ENTRY(interrupt_entry)
 pushq 5*8(%rdi) /* regs->eflags */
 pushq 4*8(%rdi) /* regs->cs */
 pushq 3*8(%rdi) /* regs->ip */
+UNWIND_HINT_IRET_REGS
 pushq 2*8(%rdi) /* regs->orig_ax */
 pushq 8(%rdi) /* return address */
-UNWIND_HINT_FUNC
 movq (%rdi), %rdi
 jmp 2f
@@ -637,6 +637,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
 */
 movq %rsp, %rdi
 movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
+UNWIND_HINT_EMPTY
 /* Copy the IRET frame to the trampoline stack. */
 pushq 6*8(%rdi) /* SS */
@@ -1739,7 +1740,7 @@ ENTRY(rewind_stack_do_exit)
 movq PER_CPU_VAR(cpu_current_top_of_stack), %rax
 leaq -PTREGS_SIZE(%rax), %rsp
-UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE
+UNWIND_HINT_REGS
 call do_exit
 END(rewind_stack_do_exit)


@@ -1608,8 +1608,8 @@ void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
 static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
 {
 /* We can only post Fixed and LowPrio IRQs */
-return (irq->delivery_mode == dest_Fixed ||
-irq->delivery_mode == dest_LowestPrio);
+return (irq->delivery_mode == APIC_DM_FIXED ||
+irq->delivery_mode == APIC_DM_LOWEST);
 }
 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)


@@ -19,7 +19,7 @@ struct unwind_state {
 #if defined(CONFIG_UNWINDER_ORC)
 bool signal, full_regs;
 unsigned long sp, bp, ip;
-struct pt_regs *regs;
+struct pt_regs *regs, *prev_regs;
 #elif defined(CONFIG_UNWINDER_FRAME_POINTER)
 bool got_irq;
 unsigned long *bp, *orig_sp, ip;


@@ -142,9 +142,6 @@ static struct orc_entry *orc_find(unsigned long ip)
 {
 static struct orc_entry *orc;
-if (!orc_init)
-return NULL;
 if (ip == 0)
 return &null_orc_entry;
@@ -378,9 +375,38 @@ static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr
 return true;
 }
+/*
+* If state->regs is non-NULL, and points to a full pt_regs, just get the reg
+* value from state->regs.
+*
+* Otherwise, if state->regs just points to IRET regs, and the previous frame
+* had full regs, it's safe to get the value from the previous regs. This can
+* happen when early/late IRQ entry code gets interrupted by an NMI.
+*/
+static bool get_reg(struct unwind_state *state, unsigned int reg_off,
+unsigned long *val)
+{
+unsigned int reg = reg_off/8;
+if (!state->regs)
+return false;
+if (state->full_regs) {
+*val = ((unsigned long *)state->regs)[reg];
+return true;
+}
+if (state->prev_regs) {
+*val = ((unsigned long *)state->prev_regs)[reg];
+return true;
+}
+return false;
+}
 bool unwind_next_frame(struct unwind_state *state)
 {
-unsigned long ip_p, sp, orig_ip = state->ip, prev_sp = state->sp;
+unsigned long ip_p, sp, tmp, orig_ip = state->ip, prev_sp = state->sp;
 enum stack_type prev_type = state->stack_info.type;
 struct orc_entry *orc;
 bool indirect = false;
@@ -442,39 +468,35 @@ bool unwind_next_frame(struct unwind_state *state)
 break;
 case ORC_REG_R10:
-if (!state->regs || !state->full_regs) {
+if (!get_reg(state, offsetof(struct pt_regs, r10), &sp)) {
 orc_warn("missing regs for base reg R10 at ip %pB\n",
 (void *)state->ip);
 goto err;
 }
-sp = state->regs->r10;
 break;
 case ORC_REG_R13:
-if (!state->regs || !state->full_regs) {
+if (!get_reg(state, offsetof(struct pt_regs, r13), &sp)) {
 orc_warn("missing regs for base reg R13 at ip %pB\n",
 (void *)state->ip);
 goto err;
 }
-sp = state->regs->r13;
 break;
 case ORC_REG_DI:
-if (!state->regs || !state->full_regs) {
+if (!get_reg(state, offsetof(struct pt_regs, di), &sp)) {
 orc_warn("missing regs for base reg DI at ip %pB\n",
 (void *)state->ip);
 goto err;
 }
-sp = state->regs->di;
 break;
 case ORC_REG_DX:
-if (!state->regs || !state->full_regs) {
+if (!get_reg(state, offsetof(struct pt_regs, dx), &sp)) {
 orc_warn("missing regs for base reg DX at ip %pB\n",
 (void *)state->ip);
 goto err;
 }
-sp = state->regs->dx;
 break;
 default:
@@ -501,6 +523,7 @@ bool unwind_next_frame(struct unwind_state *state)
 state->sp = sp;
 state->regs = NULL;
+state->prev_regs = NULL;
 state->signal = false;
 break;
@@ -512,6 +535,7 @@
 }
 state->regs = (struct pt_regs *)sp;
+state->prev_regs = NULL;
 state->full_regs = true;
 state->signal = true;
 break;
@@ -523,6 +547,8 @@
 goto err;
 }
+if (state->full_regs)
+state->prev_regs = state->regs;
 state->regs = (void *)sp - IRET_FRAME_OFFSET;
 state->full_regs = false;
 state->signal = true;
@@ -531,14 +557,14 @@
 default:
 orc_warn("unknown .orc_unwind entry type %d for ip %pB\n",
 orc->type, (void *)orig_ip);
-break;
+goto err;
 }
 /* Find BP: */
 switch (orc->bp_reg) {
 case ORC_REG_UNDEFINED:
-if (state->regs && state->full_regs)
-state->bp = state->regs->bp;
+if (get_reg(state, offsetof(struct pt_regs, bp), &tmp))
+state->bp = tmp;
 break;
 case ORC_REG_PREV_SP:
@@ -582,6 +608,9 @@ EXPORT_SYMBOL_GPL(unwind_next_frame);
 void __unwind_start(struct unwind_state *state, struct task_struct *task,
 struct pt_regs *regs, unsigned long *first_frame)
 {
+if (!orc_init)
+goto done;
 memset(state, 0, sizeof(*state));
 state->task = task;
@@ -648,7 +677,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
 /* Otherwise, skip ahead to the user-specified starting frame: */
 while (!unwind_done(state) &&
 (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
-state->sp <= (unsigned long)first_frame))
+state->sp < (unsigned long)first_frame))
 unwind_next_frame(state);
 return;


@@ -1861,7 +1861,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
 return NULL;
 /* Pin the user virtual address. */
-npinned = get_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
+npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
 if (npinned != npages) {
 pr_err("SEV: Failure locking %lu pages.\n", npages);
 goto err;


@@ -86,6 +86,9 @@ ENTRY(vmx_vmexit)
 /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
 FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
+/* Clear RFLAGS.CF and RFLAGS.ZF to preserve VM-Exit, i.e. !VM-Fail. */
+or $1, %_ASM_AX
 pop %_ASM_AX
 .Lvmexit_skip_rsb:
 #endif


@@ -469,7 +469,7 @@ struct ioc_gq {
 */
 atomic64_t vtime;
 atomic64_t done_vtime;
-atomic64_t abs_vdebt;
+u64 abs_vdebt;
 u64 last_vtime;
 /*
@@ -1145,7 +1145,7 @@ static void iocg_kick_waitq(struct ioc_gq *iocg, struct ioc_now *now)
 struct iocg_wake_ctx ctx = { .iocg = iocg };
 u64 margin_ns = (u64)(ioc->period_us *
 WAITQ_TIMER_MARGIN_PCT / 100) * NSEC_PER_USEC;
-u64 abs_vdebt, vdebt, vshortage, expires, oexpires;
+u64 vdebt, vshortage, expires, oexpires;
 s64 vbudget;
 u32 hw_inuse;
@@ -1155,18 +1155,15 @@
 vbudget = now->vnow - atomic64_read(&iocg->vtime);
 /* pay off debt */
-abs_vdebt = atomic64_read(&iocg->abs_vdebt);
-vdebt = abs_cost_to_cost(abs_vdebt, hw_inuse);
+vdebt = abs_cost_to_cost(iocg->abs_vdebt, hw_inuse);
 if (vdebt && vbudget > 0) {
 u64 delta = min_t(u64, vbudget, vdebt);
 u64 abs_delta = min(cost_to_abs_cost(delta, hw_inuse),
-abs_vdebt);
+iocg->abs_vdebt);
 atomic64_add(delta, &iocg->vtime);
 atomic64_add(delta, &iocg->done_vtime);
-atomic64_sub(abs_delta, &iocg->abs_vdebt);
-if (WARN_ON_ONCE(atomic64_read(&iocg->abs_vdebt) < 0))
-atomic64_set(&iocg->abs_vdebt, 0);
+iocg->abs_vdebt -= abs_delta;
 }
 /*
@@ -1222,12 +1219,18 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
 u64 expires, oexpires;
 u32 hw_inuse;
+lockdep_assert_held(&iocg->waitq.lock);
 /* debt-adjust vtime */
 current_hweight(iocg, NULL, &hw_inuse);
-vtime += abs_cost_to_cost(atomic64_read(&iocg->abs_vdebt), hw_inuse);
+vtime += abs_cost_to_cost(iocg->abs_vdebt, hw_inuse);
-/* clear or maintain depending on the overage */
-if (time_before_eq64(vtime, now->vnow)) {
+/*
+* Clear or maintain depending on the overage. Non-zero vdebt is what
+* guarantees that @iocg is online and future iocg_kick_delay() will
+* clear use_delay. Don't leave it on when there's no vdebt.
+*/
+if (!iocg->abs_vdebt || time_before_eq64(vtime, now->vnow)) {
 blkcg_clear_delay(blkg);
 return false;
 }
@@ -1261,9 +1264,12 @@ static enum hrtimer_restart iocg_delay_timer_fn(struct hrtimer *timer)
 {
 struct ioc_gq *iocg = container_of(timer, struct ioc_gq, delay_timer);
 struct ioc_now now;
+unsigned long flags;
+spin_lock_irqsave(&iocg->waitq.lock, flags);
 ioc_now(iocg->ioc, &now);
 iocg_kick_delay(iocg, &now, 0);
+spin_unlock_irqrestore(&iocg->waitq.lock, flags);
 return HRTIMER_NORESTART;
 }
@@ -1371,14 +1377,13 @@ static void ioc_timer_fn(struct timer_list *timer)
 * should have woken up in the last period and expire idle iocgs.
 */
 list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
-if (!waitqueue_active(&iocg->waitq) &&
-!atomic64_read(&iocg->abs_vdebt) && !iocg_is_idle(iocg))
+if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
+!iocg_is_idle(iocg))
 continue;
 spin_lock(&iocg->waitq.lock);
-if (waitqueue_active(&iocg->waitq) ||
-atomic64_read(&iocg->abs_vdebt)) {
+if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt) {
 /* might be oversleeping vtime / hweight changes, kick */
 iocg_kick_waitq(iocg, &now);
 iocg_kick_delay(iocg, &now, 0);
@@ -1721,28 +1726,49 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
 * tests are racy but the races aren't systemic - we only miss once
 * in a while which is fine.
 */
-if (!waitqueue_active(&iocg->waitq) &&
-!atomic64_read(&iocg->abs_vdebt) &&
+if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
 time_before_eq64(vtime + cost, now.vnow)) {
 iocg_commit_bio(iocg, bio, cost);
 return;
 }
 /*
-* We're over budget. If @bio has to be issued regardless,
-* remember the abs_cost instead of advancing vtime.
-* iocg_kick_waitq() will pay off the debt before waking more IOs.
+* We activated above but w/o any synchronization. Deactivation is
+* synchronized with waitq.lock and we won't get deactivated as long
+* as we're waiting or has debt, so we're good if we're activated
+* here. In the unlikely case that we aren't, just issue the IO.
+*/
+spin_lock_irq(&iocg->waitq.lock);
+if (unlikely(list_empty(&iocg->active_list))) {
+spin_unlock_irq(&iocg->waitq.lock);
+iocg_commit_bio(iocg, bio, cost);
+return;
+}
+/*
+* We're over budget. If @bio has to be issued regardless, remember
+* the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay
+* off the debt before waking more IOs.
+*
 * This way, the debt is continuously paid off each period with the
-* actual budget available to the cgroup. If we just wound vtime,
-* we would incorrectly use the current hw_inuse for the entire
-* amount which, for example, can lead to the cgroup staying
-* blocked for a long time even with substantially raised hw_inuse.
+* actual budget available to the cgroup. If we just wound vtime, we
+* would incorrectly use the current hw_inuse for the entire amount
+* which, for example, can lead to the cgroup staying blocked for a
+* long time even with substantially raised hw_inuse.
+*
+* An iocg with vdebt should stay online so that the timer can keep
+* deducting its vdebt and [de]activate use_delay mechanism
+* accordingly. We don't want to race against the timer trying to
+* clear them and leave @iocg inactive w/ dangling use_delay heavily
+* penalizing the cgroup and its descendants.
 */
 if (bio_issue_as_root_blkg(bio) || fatal_signal_pending(current)) {
-atomic64_add(abs_cost, &iocg->abs_vdebt);
+iocg->abs_vdebt += abs_cost;
 if (iocg_kick_delay(iocg, &now, cost))
 blkcg_schedule_throttle(rqos->q,
 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
+spin_unlock_irq(&iocg->waitq.lock);
 return;
 }
@@ -1759,20 +1785,6 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
 * All waiters are on iocg->waitq and the wait states are
 * synchronized using waitq.lock.
 */
-spin_lock_irq(&iocg->waitq.lock);
-/*
-* We activated above but w/o any synchronization. Deactivation is
-* synchronized with waitq.lock and we won't get deactivated as
-* long as we're waiting, so we're good if we're activated here.
-* In the unlikely case that we are deactivated, just issue the IO.
-*/
-if (unlikely(list_empty(&iocg->active_list))) {
-spin_unlock_irq(&iocg->waitq.lock);
-iocg_commit_bio(iocg, bio, cost);
-return;
-}
 init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
 wait.wait.private = current;
 wait.bio = bio;
@@ -1804,6 +1816,7 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
 struct ioc_now now;
 u32 hw_inuse;
 u64 abs_cost, cost;
+unsigned long flags;
 /* bypass if disabled or for root cgroup */
 if (!ioc->enabled || !iocg->level)
@@ -1823,15 +1836,28 @@
 iocg->cursor = bio_end;
 /*
-* Charge if there's enough vtime budget and the existing request
-* has cost assigned. Otherwise, account it as debt. See debt
-* handling in ioc_rqos_throttle() for details.
+* Charge if there's enough vtime budget and the existing request has
+* cost assigned.
 */
 if (rq->bio && rq->bio->bi_iocost_cost &&
-time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow))
+time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
 iocg_commit_bio(iocg, bio, cost);
-else
-atomic64_add(abs_cost, &iocg->abs_vdebt);
+return;
+}
+/*
+* Otherwise, account it as debt if @iocg is online, which it should
+* be for the vast majority of cases. See debt handling in
+* ioc_rqos_throttle() for details.
+*/
+spin_lock_irqsave(&iocg->waitq.lock, flags);
+if (likely(!list_empty(&iocg->active_list))) {
+iocg->abs_vdebt += abs_cost;
+iocg_kick_delay(iocg, &now, cost);
+} else {
+iocg_commit_bio(iocg, bio, cost);
+}
+spin_unlock_irqrestore(&iocg->waitq.lock, flags);
 }
 static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
@@ -2001,7 +2027,6 @@ static void ioc_pd_init(struct blkg_policy_data *pd)
 iocg->ioc = ioc;
 atomic64_set(&iocg->vtime, now.vnow);
 atomic64_set(&iocg->done_vtime, now.vnow);
-atomic64_set(&iocg->abs_vdebt, 0);
 atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
 INIT_LIST_HEAD(&iocg->active_list);
 iocg->hweight_active = HWEIGHT_WHOLE;


@@ -3070,15 +3070,12 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 }
 }
-amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
-amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
-amdgpu_amdkfd_suspend(adev);
 amdgpu_ras_suspend(adev);
 r = amdgpu_device_ip_suspend_phase1(adev);
+amdgpu_amdkfd_suspend(adev);
 /* evict vram memory */
 amdgpu_bo_evict_vram(adev);


@@ -824,6 +824,7 @@ static const struct of_device_id ingenic_drm_of_match[] = {
 { .compatible = "ingenic,jz4725b-lcd", .data = &jz4725b_soc_info },
 { /* sentinel */ },
 };
+MODULE_DEVICE_TABLE(of, ingenic_drm_of_match);
 static struct platform_driver ingenic_drm_driver = {
 .driver = {


@@ -682,16 +682,21 @@ static int usbhid_open(struct hid_device *hid)
 struct usbhid_device *usbhid = hid->driver_data;
 int res;
+mutex_lock(&usbhid->mutex);
 set_bit(HID_OPENED, &usbhid->iofl);
-if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
-return 0;
+if (hid->quirks & HID_QUIRK_ALWAYS_POLL) {
+res = 0;
+goto Done;
+}
 res = usb_autopm_get_interface(usbhid->intf);
 /* the device must be awake to reliably request remote wakeup */
 if (res < 0) {
 clear_bit(HID_OPENED, &usbhid->iofl);
-return -EIO;
+res = -EIO;
+goto Done;
 }
 usbhid->intf->needs_remote_wakeup = 1;
@@ -725,6 +730,9 @@ static int usbhid_open(struct hid_device *hid)
 msleep(50);
 clear_bit(HID_RESUME_RUNNING, &usbhid->iofl);
+Done:
+mutex_unlock(&usbhid->mutex);
 return res;
 }
@@ -732,6 +740,8 @@ static void usbhid_close(struct hid_device *hid)
 {
 struct usbhid_device *usbhid = hid->driver_data;
+mutex_lock(&usbhid->mutex);
 /*
 * Make sure we don't restart data acquisition due to
 * a resumption we no longer care about by avoiding racing
@@ -743,12 +753,13 @@ static void usbhid_close(struct hid_device *hid)
 clear_bit(HID_IN_POLLING, &usbhid->iofl);
 spin_unlock_irq(&usbhid->lock);
-if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
-return;
+if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL)) {
+hid_cancel_delayed_stuff(usbhid);
+usb_kill_urb(usbhid->urbin);
+usbhid->intf->needs_remote_wakeup = 0;
+}
-hid_cancel_delayed_stuff(usbhid);
-usb_kill_urb(usbhid->urbin);
-usbhid->intf->needs_remote_wakeup = 0;
+mutex_unlock(&usbhid->mutex);
 }
 /*
@@ -1057,6 +1068,8 @@ static int usbhid_start(struct hid_device *hid)
 unsigned int n, insize = 0;
 int ret;
+mutex_lock(&usbhid->mutex);
 clear_bit(HID_DISCONNECTED, &usbhid->iofl);
 usbhid->bufsize = HID_MIN_BUFFER_SIZE;
@@ -1177,6 +1190,8 @@ static int usbhid_start(struct hid_device *hid)
 usbhid_set_leds(hid);
 device_set_wakeup_enable(&dev->dev, 1);
 }
+mutex_unlock(&usbhid->mutex);
 return 0;
 fail:
@@ -1187,6 +1202,7 @@ fail:
 usbhid->urbout = NULL;
 usbhid->urbctrl = NULL;
 hid_free_buffers(dev, hid);
+mutex_unlock(&usbhid->mutex);
 return ret;
 }
@@ -1202,6 +1218,8 @@ static void usbhid_stop(struct hid_device *hid)
 usbhid->intf->needs_remote_wakeup = 0;
 }
+mutex_lock(&usbhid->mutex);
 clear_bit(HID_STARTED, &usbhid->iofl);
 spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */
 set_bit(HID_DISCONNECTED, &usbhid->iofl);
@@ -1222,6 +1240,8 @@ static void usbhid_stop(struct hid_device *hid)
 usbhid->urbout = NULL;
 hid_free_buffers(hid_to_usb_dev(hid), hid);
+mutex_unlock(&usbhid->mutex);
 }
 static int usbhid_power(struct hid_device *hid, int lvl)
@@ -1382,6 +1402,7 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
 INIT_WORK(&usbhid->reset_work, hid_reset);
 timer_setup(&usbhid->io_retry, hid_retry_timeout, 0);
 spin_lock_init(&usbhid->lock);
+mutex_init(&usbhid->mutex);
 ret = hid_add_device(hid);
 if (ret) {


@@ -80,6 +80,7 @@ struct usbhid_device {
 dma_addr_t outbuf_dma; /* Output buffer dma */
 unsigned long last_out; /* record of last output for timeouts */
+struct mutex mutex; /* start/stop/open/close */
 spinlock_t lock; /* fifo spinlock */
 unsigned long iofl; /* I/O flags (CTRL_RUNNING, OUT_RUNNING) */
 struct timer_list io_retry; /* Retry timer */


@@ -319,9 +319,11 @@ static void wacom_feature_mapping(struct hid_device *hdev,
 data[0] = field->report->id;
 ret = wacom_get_report(hdev, HID_FEATURE_REPORT,
 data, n, WAC_CMD_RETRIES);
-if (ret == n) {
+if (ret == n && features->type == HID_GENERIC) {
 ret = hid_report_raw_event(hdev,
 HID_FEATURE_REPORT, data, n, 0);
+} else if (ret == 2 && features->type != HID_GENERIC) {
+features->touch_max = data[1];
 } else {
 features->touch_max = 16;
 hid_warn(hdev, "wacom_feature_mapping: "


@@ -1427,11 +1427,13 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
 {
 struct input_dev *pad_input = wacom->pad_input;
 unsigned char *data = wacom->data;
+int nbuttons = wacom->features.numbered_buttons;
-int buttons = data[282] | ((data[281] & 0x40) << 2);
+int expresskeys = data[282];
+int center = (data[281] & 0x40) >> 6;
 int ring = data[285] & 0x7F;
 bool ringstatus = data[285] & 0x80;
-bool prox = buttons || ringstatus;
+bool prox = expresskeys || center || ringstatus;
 /* Fix touchring data: userspace expects 0 at left and increasing clockwise */
 ring = 71 - ring;
@@ -1439,7 +1441,8 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
 if (ring > 71)
 ring -= 72;
-wacom_report_numbered_buttons(pad_input, 9, buttons);
+wacom_report_numbered_buttons(pad_input, nbuttons,
+expresskeys | (center << (nbuttons - 1)));
 input_report_abs(pad_input, ABS_WHEEL, ringstatus ? ring : 0);
@@ -2637,9 +2640,25 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev,
 case HID_DG_TIPSWITCH:
 hid_data->last_slot_field = equivalent_usage;
 break;
+case HID_DG_CONTACTCOUNT:
+hid_data->cc_report = report->id;
+hid_data->cc_index = i;
+hid_data->cc_value_index = j;
+break;
 }
 }
 }
+if (hid_data->cc_report != 0 &&
+hid_data->cc_index >= 0) {
+struct hid_field *field = report->field[hid_data->cc_index];
+int value = field->value[hid_data->cc_value_index];
+if (value)
+hid_data->num_expected = value;
+}
+else {
+hid_data->num_expected = wacom_wac->features.touch_max;
+}
 }
 static void wacom_wac_finger_report(struct hid_device *hdev,
@@ -2649,7 +2668,6 @@ static void wacom_wac_finger_report(struct hid_device *hdev,
 struct wacom_wac *wacom_wac = &wacom->wacom_wac;
 struct input_dev *input = wacom_wac->touch_input;
 unsigned touch_max = wacom_wac->features.touch_max;
-struct hid_data *hid_data = &wacom_wac->hid_data;
 /* If more packets of data are expected, give us a chance to
 * process them rather than immediately syncing a partial
@@ -2663,7 +2681,6 @@ static void wacom_wac_finger_report(struct hid_device *hdev,
 input_sync(input);
 wacom_wac->hid_data.num_received = 0;
-hid_data->num_expected = 0;
 /* keep touch state for pen event */
 wacom_wac->shared->touch_down = wacom_wac_finger_count_touches(wacom_wac);
@@ -2738,73 +2755,12 @@ static void wacom_report_events(struct hid_device *hdev,
 }
 }
-static void wacom_set_num_expected(struct hid_device *hdev,
-struct hid_report *report,
-int collection_index,
-struct hid_field *field,
-int field_index)
-{
-struct wacom *wacom = hid_get_drvdata(hdev);
-struct wacom_wac *wacom_wac = &wacom->wacom_wac;
-struct hid_data *hid_data = &wacom_wac->hid_data;
-unsigned int original_collection_level =
-hdev->collection[collection_index].level;
-bool end_collection = false;
-int i;
-if (hid_data->num_expected)
-return;
-// find the contact count value for this segment
-for (i = field_index; i < report->maxfield && !end_collection; i++) {
-struct hid_field *field = report->field[i];
-unsigned int field_level =
-hdev->collection[field->usage[0].collection_index].level;
-unsigned int j;
-if (field_level != original_collection_level)
-continue;
-for (j = 0; j < field->maxusage; j++) {
-struct hid_usage *usage = &field->usage[j];
-if (usage->collection_index != collection_index) {
-end_collection = true;
-break;
-}
-if (wacom_equivalent_usage(usage->hid) == HID_DG_CONTACTCOUNT) {
-hid_data->cc_report = report->id;
-hid_data->cc_index = i;
-hid_data->cc_value_index = j;
-if (hid_data->cc_report != 0 &&
-hid_data->cc_index >= 0) {
-struct hid_field *field =
-report->field[hid_data->cc_index];
-int value =
-field->value[hid_data->cc_value_index];
-if (value)
-hid_data->num_expected = value;
-}
-}
-}
-}
-if (hid_data->cc_report == 0 || hid_data->cc_index < 0)
-hid_data->num_expected = wacom_wac->features.touch_max;
-}
 static int wacom_wac_collection(struct hid_device *hdev, struct hid_report *report,
 int collection_index, struct hid_field *field,
 int field_index)
 {
 struct wacom *wacom = hid_get_drvdata(hdev);
-if (WACOM_FINGER_FIELD(field))
-wacom_set_num_expected(hdev, report, collection_index, field,
-field_index);
 wacom_report_events(hdev, report, collection_index, field_index);
 /*


@@ -454,7 +454,7 @@ static int viommu_add_resv_mem(struct viommu_endpoint *vdev,
 if (!region)
 return -ENOMEM;
-list_add(&vdev->resv_regions, &region->list);
+list_add(&region->list, &vdev->resv_regions);
 return 0;
 }


@@ -6649,7 +6649,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
 int rc;
 if (!mem_size)
-return 0;
+return -EINVAL;
 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
@@ -9755,6 +9755,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
 netdev_features_t features)
 {
 struct bnxt *bp = netdev_priv(dev);
+netdev_features_t vlan_features;
 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
 features &= ~NETIF_F_NTUPLE;
@@ -9771,12 +9772,14 @@
 /* Both CTAG and STAG VLAN accelaration on the RX side have to be
 * turned on or off together.
 */
-if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
-(NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
+vlan_features = features & (NETIF_F_HW_VLAN_CTAG_RX |
+NETIF_F_HW_VLAN_STAG_RX);
+if (vlan_features != (NETIF_F_HW_VLAN_CTAG_RX |
+NETIF_F_HW_VLAN_STAG_RX)) {
 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
 NETIF_F_HW_VLAN_STAG_RX);
-else
+else if (vlan_features)
 features |= NETIF_F_HW_VLAN_CTAG_RX |
 NETIF_F_HW_VLAN_STAG_RX;
 }
@@ -12066,12 +12069,15 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
 }
 }
-if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
-dev_close(netdev);
+if (result != PCI_ERS_RESULT_RECOVERED) {
+if (netif_running(netdev))
+dev_close(netdev);
+pci_disable_device(pdev);
+}
 rtnl_unlock();
-return PCI_ERS_RESULT_RECOVERED;
+return result;
 }
 /**


@@ -1058,7 +1058,6 @@ struct bnxt_vf_info {
 #define BNXT_VF_LINK_FORCED 0x4
 #define BNXT_VF_LINK_UP 0x8
 #define BNXT_VF_TRUST 0x10
-u32 func_flags; /* func cfg flags */
 u32 min_tx_rate;
 u32 max_tx_rate;
 void *hwrm_cmd_req_addr;


@@ -39,7 +39,7 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
 #define NVM_OFF_DIS_GRE_VER_CHECK 171
 #define NVM_OFF_ENABLE_SRIOV 401
-#define BNXT_MSIX_VEC_MAX 1280
+#define BNXT_MSIX_VEC_MAX 512
 #define BNXT_MSIX_VEC_MIN_MAX 128
 enum bnxt_nvm_dir_type {


@@ -85,11 +85,10 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
 if (old_setting == setting)
 return 0;
-func_flags = vf->func_flags;
 if (setting)
-func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
+func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
 else
-func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
+func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
 /*TODO: if the driver supports VLAN filter on guest VLAN,
 * the spoof check should also include vlan anti-spoofing
 */
@@ -98,7 +97,6 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
 req.flags = cpu_to_le32(func_flags);
 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 if (!rc) {
-vf->func_flags = func_flags;
 if (setting)
 vf->flags |= BNXT_VF_SPOOFCHK;
 else
@@ -230,7 +228,6 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
 memcpy(vf->mac_addr, mac, ETH_ALEN);
 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
 req.fid = cpu_to_le16(vf->fw_fid);
-req.flags = cpu_to_le32(vf->func_flags);
 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
 memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -268,7 +265,6 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
 req.fid = cpu_to_le16(vf->fw_fid);
-req.flags = cpu_to_le32(vf->func_flags);
 req.dflt_vlan = cpu_to_le16(vlan_tag);
 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -307,7 +303,6 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
 return 0;
 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
 req.fid = cpu_to_le16(vf->fw_fid);
-req.flags = cpu_to_le32(vf->func_flags);
 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
 req.max_bw = cpu_to_le32(max_tx_rate);
 req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
@@ -479,7 +474,6 @@ static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
 vf = &bp->pf.vf[vf_id];
 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
 req.fid = cpu_to_le16(vf->fw_fid);
-req.flags = cpu_to_le32(vf->func_flags);
 if (is_valid_ether_addr(vf->mac_addr)) {
 req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);


@@ -334,8 +334,10 @@ static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 int status;
 status = pm_runtime_get_sync(&bp->pdev->dev);
-if (status < 0)
+if (status < 0) {
+pm_runtime_put_noidle(&bp->pdev->dev);
 goto mdio_pm_exit;
+}
 status = macb_mdio_wait_for_idle(bp);
 if (status < 0)
@@ -367,8 +369,10 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
 int status;
 status = pm_runtime_get_sync(&bp->pdev->dev);
-if (status < 0)
+if (status < 0) {
+pm_runtime_put_noidle(&bp->pdev->dev);
 goto mdio_pm_exit;
+}
 status = macb_mdio_wait_for_idle(bp);
 if (status < 0)
@@ -3691,8 +3695,10 @@ static int at91ether_open(struct net_device *dev)
 int ret;
 ret = pm_runtime_get_sync(&lp->pdev->dev);
-if (ret < 0)
+if (ret < 0) {
+pm_runtime_put_noidle(&lp->pdev->dev);
 return ret;
+}
 /* Clear internal statistics */
 ctl = macb_readl(lp, NCR);
@@ -4048,15 +4054,9 @@ static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
 static int fu540_c000_init(struct platform_device *pdev)
 {
-struct resource *res;
-res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-if (!res)
-return -ENODEV;
-mgmt->reg = ioremap(res->start, resource_size(res));
-if (!mgmt->reg)
-return -ENOMEM;
+mgmt->reg = devm_platform_ioremap_resource(pdev, 1);
+if (IS_ERR(mgmt->reg))
+return PTR_ERR(mgmt->reg);
 return macb_init(pdev);
 }


@@ -1422,6 +1422,9 @@ int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port,
 struct mvpp2_ethtool_fs *efs;
 int ret;
+if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
+return -EINVAL;
 efs = port->rfs_rules[info->fs.location];
 if (!efs)
 return -EINVAL;


@@ -4319,6 +4319,8 @@ static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
 if (!mvpp22_rss_is_supported())
 return -EOPNOTSUPP;
+if (rss_context >= MVPP22_N_RSS_TABLES)
+return -EINVAL;
 if (hfunc)
 *hfunc = ETH_RSS_HASH_CRC32;

View File

@@ -2550,6 +2550,7 @@ static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
 if (!err || err == -ENOSPC) {
 priv->def_counter[port] = idx;
+err = 0;
 } else if (err == -ENOENT) {
 err = 0;
 continue;
@@ -2600,7 +2601,8 @@ int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage)
 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 if (!err)
 *idx = get_param_l(&out_param);
+if (WARN_ON(err == -ENOSPC))
+err = -EINVAL;
 return err;
 }
 return __mlx4_counter_alloc(dev, idx);


@@ -888,7 +888,6 @@ static void cmd_work_handler(struct work_struct *work)
 }
 cmd->ent_arr[ent->idx] = ent;
-set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
 lay = get_inst(cmd, ent->idx);
 ent->lay = lay;
 memset(lay, 0, sizeof(*lay));
@@ -910,6 +909,7 @@ static void cmd_work_handler(struct work_struct *work)
 if (ent->callback)
 schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
+set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
 /* Skip sending command to fw if internal error */
 if (pci_channel_offline(dev->pdev) ||
@@ -922,6 +922,10 @@ static void cmd_work_handler(struct work_struct *work)
 MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
 mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
+/* no doorbell, no need to keep the entry */
+free_ent(cmd, ent->idx);
+if (ent->callback)
+free_cmd(ent);
 return;
 }


@@ -689,6 +689,12 @@ static void dr_cq_event(struct mlx5_core_cq *mcq,
 pr_info("CQ event %u on CQ #%u\n", event, mcq->cqn);
 }
+static void dr_cq_complete(struct mlx5_core_cq *mcq,
+struct mlx5_eqe *eqe)
+{
+pr_err("CQ completion CQ: #%u\n", mcq->cqn);
+}
 static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
 struct mlx5_uars_page *uar,
 size_t ncqe)
@@ -750,6 +756,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
 mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas);
 cq->mcq.event = dr_cq_event;
+cq->mcq.comp = dr_cq_complete;
 err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out));
 kvfree(in);
@@ -761,7 +768,12 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
 cq->mcq.set_ci_db = cq->wq_ctrl.db.db;
 cq->mcq.arm_db = cq->wq_ctrl.db.db + 1;
 *cq->mcq.set_ci_db = 0;
-*cq->mcq.arm_db = 0;
+/* set no-zero value, in order to avoid the HW to run db-recovery on
+* CQ that used in polling mode.
+*/
+*cq->mcq.arm_db = cpu_to_be32(2 << 28);
 cq->mcq.vector = 0;
 cq->mcq.irqn = irqn;
 cq->mcq.uar = uar;


@@ -986,8 +986,9 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
 unsigned int priority,
 struct mlxsw_afk_element_usage *elusage)
 {
+struct mlxsw_sp_acl_tcam_vchunk *vchunk, *vchunk2;
 struct mlxsw_sp_acl_tcam_vregion *vregion;
-struct mlxsw_sp_acl_tcam_vchunk *vchunk;
+struct list_head *pos;
 int err;
 if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
@@ -1025,7 +1026,14 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
 }
 mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
-list_add_tail(&vchunk->list, &vregion->vchunk_list);
+/* Position the vchunk inside the list according to priority */
+list_for_each(pos, &vregion->vchunk_list) {
+vchunk2 = list_entry(pos, typeof(*vchunk2), list);
+if (vchunk2->priority > priority)
+break;
+}
+list_add_tail(&vchunk->list, pos);
 mutex_unlock(&vregion->lock);
 return vchunk;


@@ -283,6 +283,7 @@ nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn,
 if (!nfp_nsp_has_hwinfo_lookup(nsp)) {
 nfp_warn(pf->cpp, "NSP doesn't support PF MAC generation\n");
 eth_hw_addr_random(nn->dp.netdev);
+nfp_nsp_close(nsp);
 return;
 }


@@ -644,7 +644,7 @@ static int tc_mii_probe(struct net_device *dev)
 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask);
 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
 }
-linkmode_and(phydev->supported, phydev->supported, mask);
+linkmode_andnot(phydev->supported, phydev->supported, mask);
 linkmode_copy(phydev->advertising, phydev->supported);
 lp->link = 0;


@@ -1309,7 +1309,8 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
 struct crypto_aead *tfm;
 int ret;
-tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
+/* Pick a sync gcm(aes) cipher to ensure order is preserved. */
+tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
 if (IS_ERR(tfm))
 return tfm;


@@ -1119,7 +1119,7 @@ static struct dp83640_clock *dp83640_clock_get_bus(struct mii_bus *bus)
 goto out;
 }
 dp83640_clock_init(clock, bus);
-list_add_tail(&phyter_clocks, &clock->list);
+list_add_tail(&clock->list, &phyter_clocks);
 out:
 mutex_unlock(&phyter_clocks_lock);


@@ -1359,6 +1359,7 @@ static const struct usb_device_id products[] = {
 {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
 {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
 {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
+{QMI_FIXED_INTF(0x413c, 0x81cc, 8)}, /* Dell Wireless 5816e */
 {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
 {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */
 {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/

View File

@ -1071,8 +1071,17 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data, status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
NVME_IDENTIFY_DATA_SIZE); NVME_IDENTIFY_DATA_SIZE);
if (status) if (status) {
dev_warn(ctrl->device,
"Identify Descriptors failed (%d)\n", status);
/*
* Don't treat an error as fatal, as we potentially already
* have a NGUID or EUI-64.
*/
if (status > 0 && !(status & NVME_SC_DNR))
status = 0;
goto free_data; goto free_data;
}
for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) { for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
struct nvme_ns_id_desc *cur = data + pos; struct nvme_ns_id_desc *cur = data + pos;
@ -1730,26 +1739,15 @@ static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid, static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
struct nvme_id_ns *id, struct nvme_ns_ids *ids) struct nvme_id_ns *id, struct nvme_ns_ids *ids)
{ {
int ret = 0;
memset(ids, 0, sizeof(*ids)); memset(ids, 0, sizeof(*ids));
if (ctrl->vs >= NVME_VS(1, 1, 0)) if (ctrl->vs >= NVME_VS(1, 1, 0))
memcpy(ids->eui64, id->eui64, sizeof(id->eui64)); memcpy(ids->eui64, id->eui64, sizeof(id->eui64));
if (ctrl->vs >= NVME_VS(1, 2, 0)) if (ctrl->vs >= NVME_VS(1, 2, 0))
memcpy(ids->nguid, id->nguid, sizeof(id->nguid)); memcpy(ids->nguid, id->nguid, sizeof(id->nguid));
if (ctrl->vs >= NVME_VS(1, 3, 0)) { if (ctrl->vs >= NVME_VS(1, 3, 0))
/* Don't treat error as fatal we potentially return nvme_identify_ns_descs(ctrl, nsid, ids);
* already have a NGUID or EUI-64 return 0;
*/
ret = nvme_identify_ns_descs(ctrl, nsid, ids);
if (ret)
dev_warn(ctrl->device,
"Identify Descriptors failed (%d)\n", ret);
if (ret > 0)
ret = 0;
}
return ret;
} }
static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids) static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)

View File

@ -926,6 +926,10 @@ do_map_region(const struct gasket_dev *gasket_dev, struct vm_area_struct *vma,
gasket_get_bar_index(gasket_dev, gasket_get_bar_index(gasket_dev,
(vma->vm_pgoff << PAGE_SHIFT) + (vma->vm_pgoff << PAGE_SHIFT) +
driver_desc->legacy_mmap_address_offset); driver_desc->legacy_mmap_address_offset);
if (bar_index < 0)
return DO_MAP_REGION_INVALID;
phys_base = gasket_dev->bar_data[bar_index].phys_base + phys_offset; phys_base = gasket_dev->bar_data[bar_index].phys_base + phys_offset;
while (mapped_bytes < map_length) { while (mapped_bytes < map_length) {
/* /*

View File

@ -1445,6 +1445,7 @@ static int cdns_uart_probe(struct platform_device *pdev)
cdns_uart_uart_driver.nr = CDNS_UART_NR_PORTS; cdns_uart_uart_driver.nr = CDNS_UART_NR_PORTS;
#ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE
cdns_uart_uart_driver.cons = &cdns_uart_console; cdns_uart_uart_driver.cons = &cdns_uart_console;
cdns_uart_console.index = id;
#endif #endif
rc = uart_register_driver(&cdns_uart_uart_driver); rc = uart_register_driver(&cdns_uart_uart_driver);

View File

@ -365,9 +365,14 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows)
return uniscr; return uniscr;
} }
static void vc_uniscr_free(struct uni_screen *uniscr)
{
vfree(uniscr);
}
static void vc_uniscr_set(struct vc_data *vc, struct uni_screen *new_uniscr) static void vc_uniscr_set(struct vc_data *vc, struct uni_screen *new_uniscr)
{ {
vfree(vc->vc_uni_screen); vc_uniscr_free(vc->vc_uni_screen);
vc->vc_uni_screen = new_uniscr; vc->vc_uni_screen = new_uniscr;
} }
@ -1230,7 +1235,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
err = resize_screen(vc, new_cols, new_rows, user); err = resize_screen(vc, new_cols, new_rows, user);
if (err) { if (err) {
kfree(newscreen); kfree(newscreen);
kfree(new_uniscr); vc_uniscr_free(new_uniscr);
return err; return err;
} }

View File

@ -114,7 +114,7 @@ static int ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event)
hw_write_id_reg(ci, HS_PHY_GENCONFIG_2, hw_write_id_reg(ci, HS_PHY_GENCONFIG_2,
HS_PHY_ULPI_TX_PKT_EN_CLR_FIX, 0); HS_PHY_ULPI_TX_PKT_EN_CLR_FIX, 0);
if (!IS_ERR(ci->platdata->vbus_extcon.edev)) { if (!IS_ERR(ci->platdata->vbus_extcon.edev) || ci->role_switch) {
hw_write_id_reg(ci, HS_PHY_GENCONFIG_2, hw_write_id_reg(ci, HS_PHY_GENCONFIG_2,
HS_PHY_SESS_VLD_CTRL_EN, HS_PHY_SESS_VLD_CTRL_EN,
HS_PHY_SESS_VLD_CTRL_EN); HS_PHY_SESS_VLD_CTRL_EN);

View File

@ -1138,8 +1138,8 @@ static void garmin_read_process(struct garmin_data *garmin_data_p,
send it directly to the tty port */ send it directly to the tty port */
if (garmin_data_p->flags & FLAGS_QUEUING) { if (garmin_data_p->flags & FLAGS_QUEUING) {
pkt_add(garmin_data_p, data, data_length); pkt_add(garmin_data_p, data, data_length);
} else if (bulk_data || } else if (bulk_data || (data_length >= sizeof(u32) &&
getLayerId(data) == GARMIN_LAYERID_APPL) { getLayerId(data) == GARMIN_LAYERID_APPL)) {
spin_lock_irqsave(&garmin_data_p->lock, flags); spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags |= APP_RESP_SEEN; garmin_data_p->flags |= APP_RESP_SEEN;
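The garmin_gps hunk above only calls getLayerId() once at least four bytes of payload have arrived, so the driver never reads a u32 layer id out of a shorter packet. A minimal userspace sketch of the same length-before-read discipline; the packet contents and function name are illustrative only, and the id is interpreted in host byte order here rather than via the kernel's __le32 helpers.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Read a 4-byte layer id from the start of a packet, but only if the
 * packet is actually long enough, mirroring the added length check. */
static int get_layer_id(const uint8_t *data, size_t len, uint32_t *id)
{
    if (len < sizeof(*id))
        return -1;                   /* short packet: do not touch it */
    memcpy(id, data, sizeof(*id));   /* memcpy also avoids unaligned access */
    return 0;
}

int main(void)
{
    const uint8_t short_pkt[2] = { 0x14, 0x00 };
    const uint8_t full_pkt[8]  = { 0x14, 0x00, 0x00, 0x00, 1, 2, 3, 4 };
    uint32_t id;

    printf("short packet: %d\n", get_layer_id(short_pkt, sizeof(short_pkt), &id));
    if (get_layer_id(full_pkt, sizeof(full_pkt), &id) == 0)
        printf("layer id: 0x%x\n", (unsigned)id);
    return 0;
}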

View File

@ -173,6 +173,7 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
{DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */ {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */
{DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */ {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */
{DEVICE_SWI(0x413c, 0x81cc)}, /* Dell Wireless 5816e */
{DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */ {DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */
{DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */ {DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */
{DEVICE_SWI(0x413c, 0x81d1)}, /* Dell Wireless 5818 */ {DEVICE_SWI(0x413c, 0x81d1)}, /* Dell Wireless 5818 */

View File

@ -28,6 +28,13 @@
* and don't forget to CC: the USB development list <linux-usb@vger.kernel.org> * and don't forget to CC: the USB development list <linux-usb@vger.kernel.org>
*/ */
/* Reported-by: Julian Groß <julian.g@posteo.de> */
UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999,
"LaCie",
"2Big Quadra USB3",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_REPORT_OPCODES),
/* /*
* Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
* commands in UAS mode. Observed with the 1.28 firmware; are there others? * commands in UAS mode. Observed with the 1.28 firmware; are there others?

View File

@ -3072,8 +3072,7 @@ static void handle_session(struct ceph_mds_session *session,
void *end = p + msg->front.iov_len; void *end = p + msg->front.iov_len;
struct ceph_mds_session_head *h; struct ceph_mds_session_head *h;
u32 op; u32 op;
u64 seq; u64 seq, features = 0;
unsigned long features = 0;
int wake = 0; int wake = 0;
bool blacklisted = false; bool blacklisted = false;
@ -3092,9 +3091,8 @@ static void handle_session(struct ceph_mds_session *session,
goto bad; goto bad;
/* version >= 3, feature bits */ /* version >= 3, feature bits */
ceph_decode_32_safe(&p, end, len, bad); ceph_decode_32_safe(&p, end, len, bad);
ceph_decode_need(&p, end, len, bad); ceph_decode_64_safe(&p, end, features, bad);
memcpy(&features, p, min_t(size_t, len, sizeof(features))); p += len - sizeof(features);
p += len;
} }
mutex_lock(&mdsc->mutex); mutex_lock(&mdsc->mutex);

View File

@ -159,8 +159,8 @@ static struct inode *lookup_quotarealm_inode(struct ceph_mds_client *mdsc,
} }
if (IS_ERR(in)) { if (IS_ERR(in)) {
pr_warn("Can't lookup inode %llx (err: %ld)\n", dout("Can't lookup inode %llx (err: %ld)\n",
realm->ino, PTR_ERR(in)); realm->ino, PTR_ERR(in));
qri->timeout = jiffies + msecs_to_jiffies(60 * 1000); /* XXX */ qri->timeout = jiffies + msecs_to_jiffies(60 * 1000); /* XXX */
} else { } else {
qri->timeout = 0; qri->timeout = 0;

View File

@ -788,6 +788,14 @@ void do_coredump(const kernel_siginfo_t *siginfo)
if (displaced) if (displaced)
put_files_struct(displaced); put_files_struct(displaced);
if (!dump_interrupted()) { if (!dump_interrupted()) {
/*
* umh disabled with CONFIG_STATIC_USERMODEHELPER_PATH="" would
* have this set to NULL.
*/
if (!cprm.file) {
pr_info("Core dump to |%s disabled\n", cn.corename);
goto close_fail;
}
file_start_write(cprm.file); file_start_write(cprm.file);
core_dumped = binfmt->core_dump(&cprm); core_dumped = binfmt->core_dump(&cprm);
file_end_write(cprm.file); file_end_write(cprm.file);

View File

@ -1176,6 +1176,10 @@ static inline bool chain_epi_lockless(struct epitem *epi)
{ {
struct eventpoll *ep = epi->ep; struct eventpoll *ep = epi->ep;
/* Fast preliminary check */
if (epi->next != EP_UNACTIVE_PTR)
return false;
/* Check that the same epi has not been just chained from another CPU */ /* Check that the same epi has not been just chained from another CPU */
if (cmpxchg(&epi->next, EP_UNACTIVE_PTR, NULL) != EP_UNACTIVE_PTR) if (cmpxchg(&epi->next, EP_UNACTIVE_PTR, NULL) != EP_UNACTIVE_PTR)
return false; return false;
@ -1242,16 +1246,12 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
* chained in ep->ovflist and requeued later on. * chained in ep->ovflist and requeued later on.
*/ */
if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) { if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) {
if (epi->next == EP_UNACTIVE_PTR && if (chain_epi_lockless(epi))
chain_epi_lockless(epi)) ep_pm_stay_awake_rcu(epi);
} else if (!ep_is_linked(epi)) {
/* In the usual case, add event to ready list. */
if (list_add_tail_lockless(&epi->rdllink, &ep->rdllist))
ep_pm_stay_awake_rcu(epi); ep_pm_stay_awake_rcu(epi);
goto out_unlock;
}
/* If this file is already in the ready list we exit soon */
if (!ep_is_linked(epi) &&
list_add_tail_lockless(&epi->rdllink, &ep->rdllist)) {
ep_pm_stay_awake_rcu(epi);
} }
/* /*
@ -1827,7 +1827,6 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
{ {
int res = 0, eavail, timed_out = 0; int res = 0, eavail, timed_out = 0;
u64 slack = 0; u64 slack = 0;
bool waiter = false;
wait_queue_entry_t wait; wait_queue_entry_t wait;
ktime_t expires, *to = NULL; ktime_t expires, *to = NULL;
@ -1872,21 +1871,23 @@ fetch_events:
*/ */
ep_reset_busy_poll_napi_id(ep); ep_reset_busy_poll_napi_id(ep);
/* do {
* We don't have any available event to return to the caller. We need /*
* to sleep here, and we will be woken by ep_poll_callback() when events * Internally init_wait() uses autoremove_wake_function(),
* become available. * thus wait entry is removed from the wait queue on each
*/ * wakeup. Why it is important? In case of several waiters
if (!waiter) { * each new wakeup will hit the next waiter, giving it the
waiter = true; * chance to harvest new event. Otherwise wakeup can be
init_waitqueue_entry(&wait, current); * lost. This is also good performance-wise, because on
* normal wakeup path no need to call __remove_wait_queue()
* explicitly, thus ep->lock is not taken, which halts the
* event delivery.
*/
init_wait(&wait);
write_lock_irq(&ep->lock); write_lock_irq(&ep->lock);
__add_wait_queue_exclusive(&ep->wq, &wait); __add_wait_queue_exclusive(&ep->wq, &wait);
write_unlock_irq(&ep->lock); write_unlock_irq(&ep->lock);
}
for (;;) {
/* /*
* We don't want to sleep if the ep_poll_callback() sends us * We don't want to sleep if the ep_poll_callback() sends us
* a wakeup in between. That's why we set the task state * a wakeup in between. That's why we set the task state
@ -1916,10 +1917,20 @@ fetch_events:
timed_out = 1; timed_out = 1;
break; break;
} }
}
/* We were woken up, thus go and try to harvest some events */
eavail = 1;
} while (0);
__set_current_state(TASK_RUNNING); __set_current_state(TASK_RUNNING);
if (!list_empty_careful(&wait.entry)) {
write_lock_irq(&ep->lock);
__remove_wait_queue(&ep->wq, &wait);
write_unlock_irq(&ep->lock);
}
send_events: send_events:
/* /*
* Try to transfer events to user space. In case we get 0 events and * Try to transfer events to user space. In case we get 0 events and
@ -1930,12 +1941,6 @@ send_events:
!(res = ep_send_events(ep, events, maxevents)) && !timed_out) !(res = ep_send_events(ep, events, maxevents)) && !timed_out)
goto fetch_events; goto fetch_events;
if (waiter) {
write_lock_irq(&ep->lock);
__remove_wait_queue(&ep->wq, &wait);
write_unlock_irq(&ep->lock);
}
return res; return res;
} }
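The eventpoll changes above add a cheap plain-load check in chain_epi_lockless() before paying for the cmpxchg, and rework ep_poll() around an auto-removing wait entry. A small C11 sketch of the check-then-compare-and-swap claim pattern; the sentinel value and struct are stand-ins for EP_UNACTIVE_PTR and struct epitem, not the kernel types.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define UNCLAIMED ((void *)-1)   /* plays the role of EP_UNACTIVE_PTR */

struct item {
    _Atomic(void *) next;        /* UNCLAIMED until some CPU chains it */
};

/* Try to claim the item for a lockless overflow list. The relaxed load is
 * the fast preliminary check; only if the item still looks unclaimed do we
 * pay for the compare-and-swap. */
static bool try_claim(struct item *it)
{
    void *expected = UNCLAIMED;

    if (atomic_load_explicit(&it->next, memory_order_relaxed) != UNCLAIMED)
        return false;                    /* already chained from another CPU */

    return atomic_compare_exchange_strong(&it->next, &expected, NULL);
}

int main(void)
{
    struct item it;

    atomic_init(&it.next, UNCLAIMED);
    printf("first claim:  %d\n", try_claim(&it));   /* 1: we chained it */
    printf("second claim: %d\n", try_claim(&it));   /* 0: fast path bails out */
    return 0;
}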

View File

@ -26,7 +26,7 @@ static bool should_merge(struct fsnotify_event *old_fsn,
old = FANOTIFY_E(old_fsn); old = FANOTIFY_E(old_fsn);
new = FANOTIFY_E(new_fsn); new = FANOTIFY_E(new_fsn);
if (old_fsn->inode != new_fsn->inode || old->pid != new->pid || if (old_fsn->objectid != new_fsn->objectid || old->pid != new->pid ||
old->fh_type != new->fh_type || old->fh_len != new->fh_len) old->fh_type != new->fh_type || old->fh_len != new->fh_len)
return false; return false;
@ -314,7 +314,12 @@ struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group,
if (!event) if (!event)
goto out; goto out;
init: __maybe_unused init: __maybe_unused
fsnotify_init_event(&event->fse, inode); /*
* Use the victim inode instead of the watching inode as the id for
* event queue, so event reported on parent is merged with event
* reported on child when both directory and child watches exist.
*/
fsnotify_init_event(&event->fse, (unsigned long)id);
event->mask = mask; event->mask = mask;
if (FAN_GROUP_FLAG(group, FAN_REPORT_TID)) if (FAN_GROUP_FLAG(group, FAN_REPORT_TID))
event->pid = get_pid(task_pid(current)); event->pid = get_pid(task_pid(current));

View File

@ -39,7 +39,7 @@ static bool event_compare(struct fsnotify_event *old_fsn,
if (old->mask & FS_IN_IGNORED) if (old->mask & FS_IN_IGNORED)
return false; return false;
if ((old->mask == new->mask) && if ((old->mask == new->mask) &&
(old_fsn->inode == new_fsn->inode) && (old_fsn->objectid == new_fsn->objectid) &&
(old->name_len == new->name_len) && (old->name_len == new->name_len) &&
(!old->name_len || !strcmp(old->name, new->name))) (!old->name_len || !strcmp(old->name, new->name)))
return true; return true;
@ -118,7 +118,7 @@ int inotify_handle_event(struct fsnotify_group *group,
mask &= ~IN_ISDIR; mask &= ~IN_ISDIR;
fsn_event = &event->fse; fsn_event = &event->fse;
fsnotify_init_event(fsn_event, inode); fsnotify_init_event(fsn_event, (unsigned long)inode);
event->mask = mask; event->mask = mask;
event->wd = i_mark->wd; event->wd = i_mark->wd;
event->sync_cookie = cookie; event->sync_cookie = cookie;

View File

@ -635,7 +635,7 @@ static struct fsnotify_group *inotify_new_group(unsigned int max_events)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
group->overflow_event = &oevent->fse; group->overflow_event = &oevent->fse;
fsnotify_init_event(group->overflow_event, NULL); fsnotify_init_event(group->overflow_event, 0);
oevent->mask = FS_Q_OVERFLOW; oevent->mask = FS_Q_OVERFLOW;
oevent->wd = -1; oevent->wd = -1;
oevent->sync_cookie = 0; oevent->sync_cookie = 0;

View File

@ -220,6 +220,7 @@ struct backing_dev_info {
wait_queue_head_t wb_waitq; wait_queue_head_t wb_waitq;
struct device *dev; struct device *dev;
char dev_name[64];
struct device *owner; struct device *owner;
struct timer_list laptop_mode_wb_timer; struct timer_list laptop_mode_wb_timer;

View File

@ -505,13 +505,6 @@ static inline int bdi_rw_congested(struct backing_dev_info *bdi)
(1 << WB_async_congested)); (1 << WB_async_congested));
} }
extern const char *bdi_unknown_name; const char *bdi_dev_name(struct backing_dev_info *bdi);
static inline const char *bdi_dev_name(struct backing_dev_info *bdi)
{
if (!bdi || !bdi->dev)
return bdi_unknown_name;
return dev_name(bdi->dev);
}
#endif /* _LINUX_BACKING_DEV_H */ #endif /* _LINUX_BACKING_DEV_H */

View File

@ -133,8 +133,7 @@ struct fsnotify_ops {
*/ */
struct fsnotify_event { struct fsnotify_event {
struct list_head list; struct list_head list;
/* inode may ONLY be dereferenced during handle_event(). */ unsigned long objectid; /* identifier for queue merges */
struct inode *inode; /* either the inode the event happened to or its parent */
}; };
/* /*
@ -500,10 +499,10 @@ extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info);
extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info); extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info);
static inline void fsnotify_init_event(struct fsnotify_event *event, static inline void fsnotify_init_event(struct fsnotify_event *event,
struct inode *inode) unsigned long objectid)
{ {
INIT_LIST_HEAD(&event->list); INIT_LIST_HEAD(&event->list);
event->inode = inode; event->objectid = objectid;
} }
#else #else

View File

@ -3,6 +3,8 @@
#define _LINUX_VIRTIO_NET_H #define _LINUX_VIRTIO_NET_H
#include <linux/if_vlan.h> #include <linux/if_vlan.h>
#include <uapi/linux/tcp.h>
#include <uapi/linux/udp.h>
#include <uapi/linux/virtio_net.h> #include <uapi/linux/virtio_net.h>
static inline int virtio_net_hdr_set_proto(struct sk_buff *skb, static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
@ -28,17 +30,25 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
bool little_endian) bool little_endian)
{ {
unsigned int gso_type = 0; unsigned int gso_type = 0;
unsigned int thlen = 0;
unsigned int ip_proto;
if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
case VIRTIO_NET_HDR_GSO_TCPV4: case VIRTIO_NET_HDR_GSO_TCPV4:
gso_type = SKB_GSO_TCPV4; gso_type = SKB_GSO_TCPV4;
ip_proto = IPPROTO_TCP;
thlen = sizeof(struct tcphdr);
break; break;
case VIRTIO_NET_HDR_GSO_TCPV6: case VIRTIO_NET_HDR_GSO_TCPV6:
gso_type = SKB_GSO_TCPV6; gso_type = SKB_GSO_TCPV6;
ip_proto = IPPROTO_TCP;
thlen = sizeof(struct tcphdr);
break; break;
case VIRTIO_NET_HDR_GSO_UDP: case VIRTIO_NET_HDR_GSO_UDP:
gso_type = SKB_GSO_UDP; gso_type = SKB_GSO_UDP;
ip_proto = IPPROTO_UDP;
thlen = sizeof(struct udphdr);
break; break;
default: default:
return -EINVAL; return -EINVAL;
@ -57,16 +67,22 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
if (!skb_partial_csum_set(skb, start, off)) if (!skb_partial_csum_set(skb, start, off))
return -EINVAL; return -EINVAL;
if (skb_transport_offset(skb) + thlen > skb_headlen(skb))
return -EINVAL;
} else { } else {
/* gso packets without NEEDS_CSUM do not set transport_offset. /* gso packets without NEEDS_CSUM do not set transport_offset.
* probe and drop if does not match one of the above types. * probe and drop if does not match one of the above types.
*/ */
if (gso_type && skb->network_header) { if (gso_type && skb->network_header) {
struct flow_keys_basic keys;
if (!skb->protocol) if (!skb->protocol)
virtio_net_hdr_set_proto(skb, hdr); virtio_net_hdr_set_proto(skb, hdr);
retry: retry:
skb_probe_transport_header(skb); if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
if (!skb_transport_header_was_set(skb)) { NULL, 0, 0, 0,
0)) {
/* UFO does not specify ipv4 or 6: try both */ /* UFO does not specify ipv4 or 6: try both */
if (gso_type & SKB_GSO_UDP && if (gso_type & SKB_GSO_UDP &&
skb->protocol == htons(ETH_P_IP)) { skb->protocol == htons(ETH_P_IP)) {
@ -75,6 +91,12 @@ retry:
} }
return -EINVAL; return -EINVAL;
} }
if (keys.control.thoff + thlen > skb_headlen(skb) ||
keys.basic.ip_proto != ip_proto)
return -EINVAL;
skb_set_transport_header(skb, keys.control.thoff);
} }
} }

View File

@ -99,6 +99,20 @@ static inline int IP_ECN_set_ce(struct iphdr *iph)
return 1; return 1;
} }
static inline int IP_ECN_set_ect1(struct iphdr *iph)
{
u32 check = (__force u32)iph->check;
if ((iph->tos & INET_ECN_MASK) != INET_ECN_ECT_0)
return 0;
check += (__force u16)htons(0x100);
iph->check = (__force __sum16)(check + (check>=0xFFFF));
iph->tos ^= INET_ECN_MASK;
return 1;
}
static inline void IP_ECN_clear(struct iphdr *iph) static inline void IP_ECN_clear(struct iphdr *iph)
{ {
iph->tos &= ~INET_ECN_MASK; iph->tos &= ~INET_ECN_MASK;
@ -134,6 +148,22 @@ static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph)
return 1; return 1;
} }
static inline int IP6_ECN_set_ect1(struct sk_buff *skb, struct ipv6hdr *iph)
{
__be32 from, to;
if ((ipv6_get_dsfield(iph) & INET_ECN_MASK) != INET_ECN_ECT_0)
return 0;
from = *(__be32 *)iph;
to = from ^ htonl(INET_ECN_MASK << 20);
*(__be32 *)iph = to;
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from),
(__force __wsum)to);
return 1;
}
static inline void ipv6_copy_dscp(unsigned int dscp, struct ipv6hdr *inner) static inline void ipv6_copy_dscp(unsigned int dscp, struct ipv6hdr *inner)
{ {
dscp &= ~INET_ECN_MASK; dscp &= ~INET_ECN_MASK;
@ -159,6 +189,25 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
return 0; return 0;
} }
static inline int INET_ECN_set_ect1(struct sk_buff *skb)
{
switch (skb->protocol) {
case cpu_to_be16(ETH_P_IP):
if (skb_network_header(skb) + sizeof(struct iphdr) <=
skb_tail_pointer(skb))
return IP_ECN_set_ect1(ip_hdr(skb));
break;
case cpu_to_be16(ETH_P_IPV6):
if (skb_network_header(skb) + sizeof(struct ipv6hdr) <=
skb_tail_pointer(skb))
return IP6_ECN_set_ect1(skb, ipv6_hdr(skb));
break;
}
return 0;
}
/* /*
* RFC 6040 4.2 * RFC 6040 4.2
* To decapsulate the inner header at the tunnel egress, a compliant * To decapsulate the inner header at the tunnel egress, a compliant
@ -208,8 +257,12 @@ static inline int INET_ECN_decapsulate(struct sk_buff *skb,
int rc; int rc;
rc = __INET_ECN_decapsulate(outer, inner, &set_ce); rc = __INET_ECN_decapsulate(outer, inner, &set_ce);
if (!rc && set_ce) if (!rc) {
INET_ECN_set_ce(skb); if (set_ce)
INET_ECN_set_ce(skb);
else if ((outer & INET_ECN_MASK) == INET_ECN_ECT_1)
INET_ECN_set_ect1(skb);
}
return rc; return rc;
} }
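The inet_ecn.h additions propagate ECT(1) from the outer tunnel header by flipping the two ECN bits of the inner header and patching the IPv4 checksum. A standalone sketch of the same re-marking on a minimal 20-byte header; it recomputes the checksum from scratch (RFC 1071) rather than using the incremental update in the helpers above, and the header contents are made up for the demonstration.

#include <stdint.h>
#include <stdio.h>

#define INET_ECN_MASK  3
#define INET_ECN_ECT_1 1
#define INET_ECN_ECT_0 2

/* One's-complement checksum over the header, big-endian word at a time. */
static uint16_t ip_checksum(const uint8_t *hdr, size_t len)
{
    uint32_t sum = 0;

    for (size_t i = 0; i + 1 < len; i += 2)
        sum += ((uint32_t)hdr[i] << 8) | hdr[i + 1];
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

int main(void)
{
    /* Version 4, IHL 5, TOS carrying ECT(0), total length 20; rest zero. */
    uint8_t hdr[20] = { 0x45, INET_ECN_ECT_0, 0x00, 0x14 };
    uint16_t csum = ip_checksum(hdr, sizeof(hdr));

    hdr[10] = csum >> 8;
    hdr[11] = csum & 0xff;

    /* Re-mark ECT(0) as ECT(1), as INET_ECN_decapsulate() now does when the
     * outer header carried ECT(1), then refresh the checksum. */
    if ((hdr[1] & INET_ECN_MASK) == INET_ECN_ECT_0) {
        hdr[1] ^= INET_ECN_MASK;
        hdr[10] = hdr[11] = 0;
        csum = ip_checksum(hdr, sizeof(hdr));
        hdr[10] = csum >> 8;
        hdr[11] = csum & 0xff;
    }

    printf("ECN bits: %u, checksum valid: %s\n", hdr[1] & INET_ECN_MASK,
           ip_checksum(hdr, sizeof(hdr)) == 0 ? "yes" : "no");
    return 0;
}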

View File

@ -177,6 +177,7 @@ struct fib6_info {
struct rt6_info { struct rt6_info {
struct dst_entry dst; struct dst_entry dst;
struct fib6_info __rcu *from; struct fib6_info __rcu *from;
int sernum;
struct rt6key rt6i_dst; struct rt6key rt6i_dst;
struct rt6key rt6i_src; struct rt6key rt6i_src;
@ -260,6 +261,9 @@ static inline u32 rt6_get_cookie(const struct rt6_info *rt)
struct fib6_info *from; struct fib6_info *from;
u32 cookie = 0; u32 cookie = 0;
if (rt->sernum)
return rt->sernum;
rcu_read_lock(); rcu_read_lock();
from = rcu_dereference(rt->from); from = rcu_dereference(rt->from);

View File

@ -428,6 +428,13 @@ static inline int rt_genid_ipv4(struct net *net)
return atomic_read(&net->ipv4.rt_genid); return atomic_read(&net->ipv4.rt_genid);
} }
#if IS_ENABLED(CONFIG_IPV6)
static inline int rt_genid_ipv6(const struct net *net)
{
return atomic_read(&net->ipv6.fib6_sernum);
}
#endif
static inline void rt_genid_bump_ipv4(struct net *net) static inline void rt_genid_bump_ipv4(struct net *net)
{ {
atomic_inc(&net->ipv4.rt_genid); atomic_inc(&net->ipv4.rt_genid);
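The rt6_info/sernum changes cache a generation number in each per-CPU route and compare it against the namespace's fib6_sernum, so a stale cached dst is detected and dropped instead of being reused. A small userspace sketch of the generation-counter invalidation idea; the names are invented for the example and do not correspond to kernel structures.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Global generation number, bumped whenever the routing state changes
 * (the role played by net->ipv6.fib6_sernum above). */
static atomic_int genid = 1;

struct cached_route {
    int dest;
    int sernum;                     /* generation the entry was created under */
};

static struct cached_route route_create(int dest)
{
    struct cached_route rt = { .dest = dest, .sernum = atomic_load(&genid) };
    return rt;
}

static bool route_is_valid(const struct cached_route *rt)
{
    return rt->sernum == atomic_load(&genid);
}

int main(void)
{
    struct cached_route rt = route_create(42);

    printf("valid before change: %d\n", route_is_valid(&rt));  /* 1 */
    atomic_fetch_add(&genid, 1);      /* configuration changed: invalidate */
    printf("valid after change:  %d\n", route_is_valid(&rt));  /* 0 */
    return 0;
}

Any lookup that finds route_is_valid() false simply discards the cached entry and builds a fresh one, which is what rt6_get_pcpu_route() and ip6_dst_check() now do.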

View File

@ -82,6 +82,7 @@ struct mqueue_inode_info {
struct sigevent notify; struct sigevent notify;
struct pid *notify_owner; struct pid *notify_owner;
u32 notify_self_exec_id;
struct user_namespace *notify_user_ns; struct user_namespace *notify_user_ns;
struct user_struct *user; /* user who created, for accounting */ struct user_struct *user; /* user who created, for accounting */
struct sock *notify_sock; struct sock *notify_sock;
@ -709,28 +710,44 @@ static void __do_notify(struct mqueue_inode_info *info)
* synchronously. */ * synchronously. */
if (info->notify_owner && if (info->notify_owner &&
info->attr.mq_curmsgs == 1) { info->attr.mq_curmsgs == 1) {
struct kernel_siginfo sig_i;
switch (info->notify.sigev_notify) { switch (info->notify.sigev_notify) {
case SIGEV_NONE: case SIGEV_NONE:
break; break;
case SIGEV_SIGNAL: case SIGEV_SIGNAL: {
/* sends signal */ struct kernel_siginfo sig_i;
struct task_struct *task;
/* do_mq_notify() accepts sigev_signo == 0, why?? */
if (!info->notify.sigev_signo)
break;
clear_siginfo(&sig_i); clear_siginfo(&sig_i);
sig_i.si_signo = info->notify.sigev_signo; sig_i.si_signo = info->notify.sigev_signo;
sig_i.si_errno = 0; sig_i.si_errno = 0;
sig_i.si_code = SI_MESGQ; sig_i.si_code = SI_MESGQ;
sig_i.si_value = info->notify.sigev_value; sig_i.si_value = info->notify.sigev_value;
/* map current pid/uid into info->owner's namespaces */
rcu_read_lock(); rcu_read_lock();
/* map current pid/uid into info->owner's namespaces */
sig_i.si_pid = task_tgid_nr_ns(current, sig_i.si_pid = task_tgid_nr_ns(current,
ns_of_pid(info->notify_owner)); ns_of_pid(info->notify_owner));
sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid()); sig_i.si_uid = from_kuid_munged(info->notify_user_ns,
current_uid());
/*
* We can't use kill_pid_info(), this signal should
* bypass check_kill_permission(). It is from kernel
* but si_fromuser() can't know this.
* We do check the self_exec_id, to avoid sending
* signals to programs that don't expect them.
*/
task = pid_task(info->notify_owner, PIDTYPE_TGID);
if (task && task->self_exec_id ==
info->notify_self_exec_id) {
do_send_sig_info(info->notify.sigev_signo,
&sig_i, task, PIDTYPE_TGID);
}
rcu_read_unlock(); rcu_read_unlock();
kill_pid_info(info->notify.sigev_signo,
&sig_i, info->notify_owner);
break; break;
}
case SIGEV_THREAD: case SIGEV_THREAD:
set_cookie(info->notify_cookie, NOTIFY_WOKENUP); set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
netlink_sendskb(info->notify_sock, info->notify_cookie); netlink_sendskb(info->notify_sock, info->notify_cookie);
@ -1315,6 +1332,7 @@ retry:
info->notify.sigev_signo = notification->sigev_signo; info->notify.sigev_signo = notification->sigev_signo;
info->notify.sigev_value = notification->sigev_value; info->notify.sigev_value = notification->sigev_value;
info->notify.sigev_notify = SIGEV_SIGNAL; info->notify.sigev_notify = SIGEV_SIGNAL;
info->notify_self_exec_id = current->self_exec_id;
break; break;
} }
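The mqueue change above records the registering task's self_exec_id so that the SIGEV_SIGNAL notification is only delivered to a task that has not exec'd since it asked for it. A minimal userspace sketch of the registration whose delivery path is being hardened; the queue name and signal number are arbitrary, error handling is abbreviated, and older glibc needs -lrt at link time.

#include <fcntl.h>
#include <mqueue.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_note;

static void on_note(int sig, siginfo_t *si, void *ctx)
{
    (void)sig; (void)si; (void)ctx;
    got_note = 1;                       /* async-signal-safe: just set a flag */
}

int main(void)
{
    struct sigaction sa = { .sa_sigaction = on_note, .sa_flags = SA_SIGINFO };
    struct sigevent sev = { 0 };
    mqd_t q;

    sigemptyset(&sa.sa_mask);
    sigaction(SIGUSR1, &sa, NULL);

    q = mq_open("/notify-demo", O_CREAT | O_RDWR, 0600, NULL);
    if (q == (mqd_t)-1) { perror("mq_open"); return 1; }

    sev.sigev_notify = SIGEV_SIGNAL;    /* the delivery path the fix restricts */
    sev.sigev_signo  = SIGUSR1;
    if (mq_notify(q, &sev) == -1) { perror("mq_notify"); return 1; }

    mq_send(q, "ping", 4, 0);           /* message on an empty queue fires it */
    sleep(1);

    printf("notification received: %s\n", got_note ? "yes" : "no");
    mq_close(q);
    mq_unlink("/notify-demo");
    return 0;
}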

View File

@ -8318,6 +8318,19 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
*/ */
allocate_snapshot = false; allocate_snapshot = false;
#endif #endif
/*
* Because of some magic with the way alloc_percpu() works on
* x86_64, we need to synchronize the pgd of all the tables,
* otherwise the trace events that happen in x86_64 page fault
* handlers can't cope with accessing the chance that a
* alloc_percpu()'d memory might be touched in the page fault trace
* event. Oh, and we need to audit all other alloc_percpu() and vmalloc()
* calls in tracing, because something might get triggered within a
* page fault trace event!
*/
vmalloc_sync_mappings();
return 0; return 0;
} }

View File

@ -454,7 +454,7 @@ static bool __within_notrace_func(unsigned long addr)
static bool within_notrace_func(struct trace_kprobe *tk) static bool within_notrace_func(struct trace_kprobe *tk)
{ {
unsigned long addr = addr = trace_kprobe_address(tk); unsigned long addr = trace_kprobe_address(tk);
char symname[KSYM_NAME_LEN], *p; char symname[KSYM_NAME_LEN], *p;
if (!__within_notrace_func(addr)) if (!__within_notrace_func(addr))

View File

@ -544,6 +544,11 @@ EXPORT_SYMBOL_GPL(fork_usermode_blob);
* Runs a user-space application. The application is started * Runs a user-space application. The application is started
* asynchronously if wait is not set, and runs as a child of system workqueues. * asynchronously if wait is not set, and runs as a child of system workqueues.
* (ie. it runs with full root capabilities and optimized affinity). * (ie. it runs with full root capabilities and optimized affinity).
*
* Note: successful return value does not guarantee the helper was called at
* all. You can't rely on sub_info->{init,cleanup} being called even for
* UMH_WAIT_* wait modes as STATIC_USERMODEHELPER_PATH="" turns all helpers
* into a successful no-op.
*/ */
int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait) int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
{ {

View File

@ -21,7 +21,7 @@ struct backing_dev_info noop_backing_dev_info = {
EXPORT_SYMBOL_GPL(noop_backing_dev_info); EXPORT_SYMBOL_GPL(noop_backing_dev_info);
static struct class *bdi_class; static struct class *bdi_class;
const char *bdi_unknown_name = "(unknown)"; static const char *bdi_unknown_name = "(unknown)";
/* /*
* bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU
@ -938,7 +938,8 @@ int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
if (bdi->dev) /* The driver needs to use separate queues per device */ if (bdi->dev) /* The driver needs to use separate queues per device */
return 0; return 0;
dev = device_create_vargs(bdi_class, NULL, MKDEV(0, 0), bdi, fmt, args); vsnprintf(bdi->dev_name, sizeof(bdi->dev_name), fmt, args);
dev = device_create(bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name);
if (IS_ERR(dev)) if (IS_ERR(dev))
return PTR_ERR(dev); return PTR_ERR(dev);
@ -1043,6 +1044,14 @@ void bdi_put(struct backing_dev_info *bdi)
} }
EXPORT_SYMBOL(bdi_put); EXPORT_SYMBOL(bdi_put);
const char *bdi_dev_name(struct backing_dev_info *bdi)
{
if (!bdi || !bdi->dev)
return bdi_unknown_name;
return bdi->dev_name;
}
EXPORT_SYMBOL_GPL(bdi_dev_name);
static wait_queue_head_t congestion_wqh[2] = { static wait_queue_head_t congestion_wqh[2] = {
__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]), __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1]) __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])

View File

@ -5101,19 +5101,22 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
unsigned int size; unsigned int size;
int node; int node;
int __maybe_unused i; int __maybe_unused i;
long error = -ENOMEM;
size = sizeof(struct mem_cgroup); size = sizeof(struct mem_cgroup);
size += nr_node_ids * sizeof(struct mem_cgroup_per_node *); size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
memcg = kzalloc(size, GFP_KERNEL); memcg = kzalloc(size, GFP_KERNEL);
if (!memcg) if (!memcg)
return NULL; return ERR_PTR(error);
memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
1, MEM_CGROUP_ID_MAX, 1, MEM_CGROUP_ID_MAX,
GFP_KERNEL); GFP_KERNEL);
if (memcg->id.id < 0) if (memcg->id.id < 0) {
error = memcg->id.id;
goto fail; goto fail;
}
memcg->vmstats_local = alloc_percpu(struct memcg_vmstats_percpu); memcg->vmstats_local = alloc_percpu(struct memcg_vmstats_percpu);
if (!memcg->vmstats_local) if (!memcg->vmstats_local)
@ -5158,7 +5161,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
fail: fail:
mem_cgroup_id_remove(memcg); mem_cgroup_id_remove(memcg);
__mem_cgroup_free(memcg); __mem_cgroup_free(memcg);
return NULL; return ERR_PTR(error);
} }
static struct cgroup_subsys_state * __ref static struct cgroup_subsys_state * __ref
@ -5169,8 +5172,8 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
long error = -ENOMEM; long error = -ENOMEM;
memcg = mem_cgroup_alloc(); memcg = mem_cgroup_alloc();
if (!memcg) if (IS_ERR(memcg))
return ERR_PTR(error); return ERR_CAST(memcg);
memcg->high = PAGE_COUNTER_MAX; memcg->high = PAGE_COUNTER_MAX;
memcg->soft_limit = PAGE_COUNTER_MAX; memcg->soft_limit = PAGE_COUNTER_MAX;
@ -5220,7 +5223,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
fail: fail:
mem_cgroup_id_remove(memcg); mem_cgroup_id_remove(memcg);
mem_cgroup_free(memcg); mem_cgroup_free(memcg);
return ERR_PTR(-ENOMEM); return ERR_PTR(error);
} }
static int mem_cgroup_css_online(struct cgroup_subsys_state *css) static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
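The memcontrol.c hunks switch mem_cgroup_alloc() from returning NULL on failure to returning an ERR_PTR() that carries the precise errno, so css_alloc can propagate -ENOSPC from the idr as easily as -ENOMEM. A self-contained sketch of the encoded-error-pointer pattern using simplified userspace stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers; the widget type is invented for the example.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

/* Simplified userspace versions of the kernel helpers: small negative
 * errno values are folded into the top of the pointer range. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct widget { int id; };

/* Returns a widget or an ERR_PTR() carrying the exact failure reason,
 * instead of a bare NULL that loses the errno. */
static struct widget *widget_alloc(int want_fail)
{
    struct widget *w;

    if (want_fail)
        return ERR_PTR(-EINVAL);
    w = malloc(sizeof(*w));
    if (!w)
        return ERR_PTR(-ENOMEM);
    w->id = 1;
    return w;
}

int main(void)
{
    struct widget *w = widget_alloc(1);

    if (IS_ERR(w)) {
        printf("alloc failed: %ld\n", PTR_ERR(w));   /* -22 */
        w = widget_alloc(0);
    }
    if (!IS_ERR(w)) {
        printf("got widget %d\n", w->id);
        free(w);
    }
    return 0;
}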

View File

@ -1555,6 +1555,7 @@ void set_zone_contiguous(struct zone *zone)
if (!__pageblock_pfn_to_page(block_start_pfn, if (!__pageblock_pfn_to_page(block_start_pfn,
block_end_pfn, zone)) block_end_pfn, zone))
return; return;
cond_resched();
} }
/* We confirm that there is no hole */ /* We confirm that there is no hole */
@ -2350,6 +2351,14 @@ static inline void boost_watermark(struct zone *zone)
if (!watermark_boost_factor) if (!watermark_boost_factor)
return; return;
/*
* Don't bother in zones that are unlikely to produce results.
* On small machines, including kdump capture kernels running
* in a small area, boosting the watermark can cause an out of
* memory situation immediately.
*/
if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
return;
max_boost = mult_frac(zone->_watermark[WMARK_HIGH], max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
watermark_boost_factor, 10000); watermark_boost_factor, 10000);
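boost_watermark() scales the high watermark by watermark_boost_factor (expressed in 1/10000ths) with mult_frac(), which splits the multiplication so the intermediate product cannot overflow. A userspace rendition of that helper, assuming the same GNU C extensions (typeof and statement expressions) the kernel relies on; the watermark and factor values are hypothetical.

#include <stdio.h>

/* x * num / den without letting x * num overflow first. */
#define mult_frac(x, num, den) ({              \
    typeof(x) _q = (x) / (den);                \
    typeof(x) _r = (x) % (den);                \
    _q * (num) + _r * (num) / (den);           \
})

int main(void)
{
    unsigned long high_wmark = 4000000UL;   /* hypothetical watermark, in pages */
    unsigned long factor = 15000UL;         /* watermark_boost_factor: 150% */

    printf("max_boost = %lu pages\n",
           mult_frac(high_wmark, factor, 10000UL));   /* 6000000 */
    return 0;
}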

View File

@ -897,7 +897,7 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,
orig_node = batadv_v_ogm_orig_get(bat_priv, ogm_packet->orig); orig_node = batadv_v_ogm_orig_get(bat_priv, ogm_packet->orig);
if (!orig_node) if (!orig_node)
return; goto out;
neigh_node = batadv_neigh_node_get_or_create(orig_node, if_incoming, neigh_node = batadv_neigh_node_get_or_create(orig_node, if_incoming,
ethhdr->h_source); ethhdr->h_source);

View File

@ -1009,15 +1009,8 @@ static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv,
*/ */
static u8 batadv_nc_random_weight_tq(u8 tq) static u8 batadv_nc_random_weight_tq(u8 tq)
{ {
u8 rand_val, rand_tq;
get_random_bytes(&rand_val, sizeof(rand_val));
/* randomize the estimated packet loss (max TQ - estimated TQ) */ /* randomize the estimated packet loss (max TQ - estimated TQ) */
rand_tq = rand_val * (BATADV_TQ_MAX_VALUE - tq); u8 rand_tq = prandom_u32_max(BATADV_TQ_MAX_VALUE + 1 - tq);
/* normalize the randomized packet loss */
rand_tq /= BATADV_TQ_MAX_VALUE;
/* convert to (randomized) estimated tq again */ /* convert to (randomized) estimated tq again */
return BATADV_TQ_MAX_VALUE - rand_tq; return BATADV_TQ_MAX_VALUE - rand_tq;
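The network-coding hunk replaces the open-coded randomization, whose u8 multiply could overflow, with prandom_u32_max(), which maps a full 32-bit random value onto [0, range) by a multiply-and-shift. A sketch of that reduction and of the resulting TQ randomization; xorshift32 stands in for prandom_u32() and the seed and tq value are arbitrary.

#include <stdint.h>
#include <stdio.h>

#define BATADV_TQ_MAX_VALUE 255

/* Tiny xorshift32 PRNG used here in place of prandom_u32(). */
static uint32_t xorshift32(uint32_t *state)
{
    uint32_t x = *state;

    x ^= x << 13;
    x ^= x >> 17;
    x ^= x << 5;
    return *state = x;
}

/* Multiply-shift reduction onto [0, range): no modulo bias, no narrow
 * integer overflow. */
static uint32_t bounded_rand(uint32_t *state, uint32_t range)
{
    return (uint32_t)(((uint64_t)xorshift32(state) * range) >> 32);
}

int main(void)
{
    uint32_t state = 0x12345678;
    uint8_t tq = 200;

    for (int i = 0; i < 5; i++) {
        uint8_t rand_tq = (uint8_t)bounded_rand(&state,
                                BATADV_TQ_MAX_VALUE + 1 - tq);
        /* Randomized packet loss converted back to an estimated TQ. */
        printf("randomized tq: %u\n", BATADV_TQ_MAX_VALUE - rand_tq);
    }
    return 0;
}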

View File

@ -1150,7 +1150,7 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
ret = batadv_parse_throughput(net_dev, buff, "throughput_override", ret = batadv_parse_throughput(net_dev, buff, "throughput_override",
&tp_override); &tp_override);
if (!ret) if (!ret)
return count; goto out;
old_tp_override = atomic_read(&hard_iface->bat_v.throughput_override); old_tp_override = atomic_read(&hard_iface->bat_v.throughput_override);
if (old_tp_override == tp_override) if (old_tp_override == tp_override)
@ -1190,6 +1190,7 @@ static ssize_t batadv_show_throughput_override(struct kobject *kobj,
tp_override = atomic_read(&hard_iface->bat_v.throughput_override); tp_override = atomic_read(&hard_iface->bat_v.throughput_override);
batadv_hardif_put(hard_iface);
return sprintf(buff, "%u.%u MBit\n", tp_override / 10, return sprintf(buff, "%u.%u MBit\n", tp_override / 10,
tp_override % 10); tp_override % 10);
} }

View File

@ -3907,6 +3907,11 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
end_offset = nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR]); end_offset = nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR]);
end_offset += nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_LEN]); end_offset += nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_LEN]);
dump = false; dump = false;
if (start_offset == end_offset) {
err = 0;
goto nla_put_failure;
}
} }
err = devlink_nl_region_read_snapshot_fill(skb, devlink, err = devlink_nl_region_read_snapshot_fill(skb, devlink,

View File

@ -1954,6 +1954,9 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
NEIGH_UPDATE_F_OVERRIDE_ISROUTER); NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
} }
if (protocol)
neigh->protocol = protocol;
if (ndm->ndm_flags & NTF_EXT_LEARNED) if (ndm->ndm_flags & NTF_EXT_LEARNED)
flags |= NEIGH_UPDATE_F_EXT_LEARNED; flags |= NEIGH_UPDATE_F_EXT_LEARNED;
@ -1967,9 +1970,6 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags, err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
NETLINK_CB(skb).portid, extack); NETLINK_CB(skb).portid, extack);
if (protocol)
neigh->protocol = protocol;
neigh_release(neigh); neigh_release(neigh);
out: out:

View File

@ -259,7 +259,8 @@ static void dsa_master_ndo_teardown(struct net_device *dev)
{ {
struct dsa_port *cpu_dp = dev->dsa_ptr; struct dsa_port *cpu_dp = dev->dsa_ptr;
dev->netdev_ops = cpu_dp->orig_ndo_ops; if (cpu_dp->orig_ndo_ops)
dev->netdev_ops = cpu_dp->orig_ndo_ops;
cpu_dp->orig_ndo_ops = NULL; cpu_dp->orig_ndo_ops = NULL;
} }

View File

@ -1388,9 +1388,18 @@ static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res)
} }
ip6_rt_copy_init(pcpu_rt, res); ip6_rt_copy_init(pcpu_rt, res);
pcpu_rt->rt6i_flags |= RTF_PCPU; pcpu_rt->rt6i_flags |= RTF_PCPU;
if (f6i->nh)
pcpu_rt->sernum = rt_genid_ipv6(dev_net(dev));
return pcpu_rt; return pcpu_rt;
} }
static bool rt6_is_valid(const struct rt6_info *rt6)
{
return rt6->sernum == rt_genid_ipv6(dev_net(rt6->dst.dev));
}
/* It should be called with rcu_read_lock() acquired */ /* It should be called with rcu_read_lock() acquired */
static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res) static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
{ {
@ -1398,6 +1407,19 @@ static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu); pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu);
if (pcpu_rt && pcpu_rt->sernum && !rt6_is_valid(pcpu_rt)) {
struct rt6_info *prev, **p;
p = this_cpu_ptr(res->nh->rt6i_pcpu);
prev = xchg(p, NULL);
if (prev) {
dst_dev_put(&prev->dst);
dst_release(&prev->dst);
}
pcpu_rt = NULL;
}
return pcpu_rt; return pcpu_rt;
} }
@ -2599,6 +2621,9 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
rt = container_of(dst, struct rt6_info, dst); rt = container_of(dst, struct rt6_info, dst);
if (rt->sernum)
return rt6_is_valid(rt) ? dst : NULL;
rcu_read_lock(); rcu_read_lock();
/* All IPV6 dsts are created with ->obsolete set to the value /* All IPV6 dsts are created with ->obsolete set to the value

View File

@ -68,15 +68,13 @@ static bool udp_manip_pkt(struct sk_buff *skb,
enum nf_nat_manip_type maniptype) enum nf_nat_manip_type maniptype)
{ {
struct udphdr *hdr; struct udphdr *hdr;
bool do_csum;
if (skb_ensure_writable(skb, hdroff + sizeof(*hdr))) if (skb_ensure_writable(skb, hdroff + sizeof(*hdr)))
return false; return false;
hdr = (struct udphdr *)(skb->data + hdroff); hdr = (struct udphdr *)(skb->data + hdroff);
do_csum = hdr->check || skb->ip_summed == CHECKSUM_PARTIAL; __udp_manip_pkt(skb, iphdroff, hdr, tuple, maniptype, !!hdr->check);
__udp_manip_pkt(skb, iphdroff, hdr, tuple, maniptype, do_csum);
return true; return true;
} }

View File

@ -165,12 +165,12 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
static const struct tcphdr *nf_osf_hdr_ctx_init(struct nf_osf_hdr_ctx *ctx, static const struct tcphdr *nf_osf_hdr_ctx_init(struct nf_osf_hdr_ctx *ctx,
const struct sk_buff *skb, const struct sk_buff *skb,
const struct iphdr *ip, const struct iphdr *ip,
unsigned char *opts) unsigned char *opts,
struct tcphdr *_tcph)
{ {
const struct tcphdr *tcp; const struct tcphdr *tcp;
struct tcphdr _tcph;
tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), &_tcph); tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), _tcph);
if (!tcp) if (!tcp)
return NULL; return NULL;
@ -205,10 +205,11 @@ nf_osf_match(const struct sk_buff *skb, u_int8_t family,
int fmatch = FMATCH_WRONG; int fmatch = FMATCH_WRONG;
struct nf_osf_hdr_ctx ctx; struct nf_osf_hdr_ctx ctx;
const struct tcphdr *tcp; const struct tcphdr *tcp;
struct tcphdr _tcph;
memset(&ctx, 0, sizeof(ctx)); memset(&ctx, 0, sizeof(ctx));
tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts); tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts, &_tcph);
if (!tcp) if (!tcp)
return false; return false;
@ -265,10 +266,11 @@ bool nf_osf_find(const struct sk_buff *skb,
const struct nf_osf_finger *kf; const struct nf_osf_finger *kf;
struct nf_osf_hdr_ctx ctx; struct nf_osf_hdr_ctx ctx;
const struct tcphdr *tcp; const struct tcphdr *tcp;
struct tcphdr _tcph;
memset(&ctx, 0, sizeof(ctx)); memset(&ctx, 0, sizeof(ctx));
tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts); tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts, &_tcph);
if (!tcp) if (!tcp)
return false; return false;
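The nf_osf change makes the callers own the struct tcphdr backing storage and pass it into nf_osf_hdr_ctx_init(); previously the header copy lived on the helper's stack, so the returned pointer dangled as soon as the helper returned. A small sketch of the corrected caller-provides-storage pattern; the struct and port values are invented for the example and only the first four header bytes are parsed.

#include <stdio.h>
#include <string.h>

struct tcp_ports { unsigned char sport[2], dport[2]; };

/* The caller supplies the backing buffer, so the returned pointer stays
 * valid after this helper returns. Returning the address of a local copy
 * instead would hand the caller a pointer into a dead stack frame. */
static const struct tcp_ports *parse_ports(const unsigned char *pkt,
                                           size_t pktlen,
                                           struct tcp_ports *storage)
{
    if (pktlen < sizeof(*storage))
        return NULL;
    memcpy(storage, pkt, sizeof(*storage));
    return storage;
}

int main(void)
{
    const unsigned char pkt[] = { 0x1f, 0x90, 0x00, 0x50 };  /* 8080 -> 80 */
    struct tcp_ports buf;
    const struct tcp_ports *tp = parse_ports(pkt, sizeof(pkt), &buf);

    if (tp)
        printf("sport %u dport %u\n",
               tp->sport[0] * 256u + tp->sport[1],
               tp->dport[0] * 256u + tp->dport[1]);
    return 0;
}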

View File

@ -323,7 +323,8 @@ static void choke_reset(struct Qdisc *sch)
sch->q.qlen = 0; sch->q.qlen = 0;
sch->qstats.backlog = 0; sch->qstats.backlog = 0;
memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *)); if (q->tab)
memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
q->head = q->tail = 0; q->head = q->tail = 0;
red_restart(&q->vars); red_restart(&q->vars);
} }

View File

@ -417,7 +417,7 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM])); q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]) if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
q->drop_batch_size = min(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])); q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));
if (tb[TCA_FQ_CODEL_MEMORY_LIMIT]) if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT])); q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));
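The fq_codel fix swaps min() for max() when applying TCA_FQ_CODEL_DROP_BATCH_SIZE: min(1U, x) clamps from above and silently forces every requested batch size down to 1, whereas max(1U, x) enforces the intended lower bound of 1. A tiny illustration of the difference; the requested value is arbitrary.

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))
#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
    unsigned int requested = 64;   /* hypothetical drop batch size from netlink */

    printf("min(1, %u) = %u (old, wrong: always 1)\n", requested, min(1U, requested));
    printf("max(1, %u) = %u (fixed: lower bound of 1)\n", requested, max(1U, requested));
    return 0;
}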

View File

@ -637,6 +637,15 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
if (ctl->divisor && if (ctl->divisor &&
(!is_power_of_2(ctl->divisor) || ctl->divisor > 65536)) (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
return -EINVAL; return -EINVAL;
/* slot->allot is a short, make sure quantum is not too big. */
if (ctl->quantum) {
unsigned int scaled = SFQ_ALLOT_SIZE(ctl->quantum);
if (scaled <= 0 || scaled > SHRT_MAX)
return -EINVAL;
}
if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max, if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
ctl_v1->Wlog)) ctl_v1->Wlog))
return -EINVAL; return -EINVAL;

View File

@ -169,6 +169,9 @@ static int skbprio_change(struct Qdisc *sch, struct nlattr *opt,
{ {
struct tc_skbprio_qopt *ctl = nla_data(opt); struct tc_skbprio_qopt *ctl = nla_data(opt);
if (opt->nla_len != nla_attr_size(sizeof(*ctl)))
return -EINVAL;
sch->limit = ctl->limit; sch->limit = ctl->limit;
return 0; return 0;
} }

View File

@ -1865,7 +1865,7 @@ static enum sctp_disposition sctp_sf_do_dupcook_a(
*/ */
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
return sctp_sf_do_9_2_start_shutdown(net, ep, asoc, return sctp_sf_do_9_2_start_shutdown(net, ep, asoc,
SCTP_ST_CHUNK(0), NULL, SCTP_ST_CHUNK(0), repl,
commands); commands);
} else { } else {
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
@ -5470,7 +5470,7 @@ enum sctp_disposition sctp_sf_do_9_2_start_shutdown(
* in the Cumulative TSN Ack field the last sequential TSN it * in the Cumulative TSN Ack field the last sequential TSN it
* has received from the peer. * has received from the peer.
*/ */
reply = sctp_make_shutdown(asoc, NULL); reply = sctp_make_shutdown(asoc, arg);
if (!reply) if (!reply)
goto nomem; goto nomem;
@ -6068,7 +6068,7 @@ enum sctp_disposition sctp_sf_autoclose_timer_expire(
disposition = SCTP_DISPOSITION_CONSUME; disposition = SCTP_DISPOSITION_CONSUME;
if (sctp_outq_is_empty(&asoc->outqueue)) { if (sctp_outq_is_empty(&asoc->outqueue)) {
disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type, disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type,
arg, commands); NULL, commands);
} }
return disposition; return disposition;

View File

@ -402,10 +402,11 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
read_lock_bh(&sk->sk_callback_lock); read_lock_bh(&sk->sk_callback_lock);
ret = tipc_conn_rcv_sub(srv, con, &s); ret = tipc_conn_rcv_sub(srv, con, &s);
read_unlock_bh(&sk->sk_callback_lock); read_unlock_bh(&sk->sk_callback_lock);
if (!ret)
return 0;
} }
if (ret < 0)
tipc_conn_close(con);
tipc_conn_close(con);
return ret; return ret;
} }

View File

@ -797,6 +797,8 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
*copied -= sk_msg_free(sk, msg); *copied -= sk_msg_free(sk, msg);
tls_free_open_rec(sk); tls_free_open_rec(sk);
} }
if (psock)
sk_psock_put(sk, psock);
return err; return err;
} }
more_data: more_data:
@ -2076,8 +2078,9 @@ static void tls_data_ready(struct sock *sk)
strp_data_ready(&ctx->strp); strp_data_ready(&ctx->strp);
psock = sk_psock_get(sk); psock = sk_psock_get(sk);
if (psock && !list_empty(&psock->ingress_msg)) { if (psock) {
ctx->saved_data_ready(sk); if (!list_empty(&psock->ingress_msg))
ctx->saved_data_ready(sk);
sk_psock_put(sk, psock); sk_psock_put(sk, psock);
} }
} }

View File

@ -126,7 +126,7 @@ faultlinenum=$(( $(wc -l $T.oo | cut -d" " -f1) - \
faultline=`cat $T.dis | head -1 | cut -d":" -f2-` faultline=`cat $T.dis | head -1 | cut -d":" -f2-`
faultline=`echo "$faultline" | sed -e 's/\[/\\\[/g; s/\]/\\\]/g'` faultline=`echo "$faultline" | sed -e 's/\[/\\\[/g; s/\]/\\\]/g'`
cat $T.oo | sed -e "${faultlinenum}s/^\(.*:\)\(.*\)/\1\*\2\t\t<-- trapping instruction/" cat $T.oo | sed -e "${faultlinenum}s/^\([^:]*:\)\(.*\)/\1\*\2\t\t<-- trapping instruction/"
echo echo
cat $T.aa cat $T.aa
cleanup cleanup

View File

@ -159,7 +159,12 @@ class IocgStat:
else: else:
self.inflight_pct = 0 self.inflight_pct = 0
self.debt_ms = iocg.abs_vdebt.counter.value_() / VTIME_PER_USEC / 1000 # vdebt used to be an atomic64_t and is now u64, support both
try:
self.debt_ms = iocg.abs_vdebt.counter.value_() / VTIME_PER_USEC / 1000
except:
self.debt_ms = iocg.abs_vdebt.value_() / VTIME_PER_USEC / 1000
self.use_delay = blkg.use_delay.counter.value_() self.use_delay = blkg.use_delay.counter.value_()
self.delay_ms = blkg.delay_nsec.counter.value_() / 1_000_000 self.delay_ms = blkg.delay_nsec.counter.value_() / 1_000_000

View File

@ -1402,7 +1402,7 @@ static int update_insn_state_regs(struct instruction *insn, struct insn_state *s
struct cfi_reg *cfa = &state->cfa; struct cfi_reg *cfa = &state->cfa;
struct stack_op *op = &insn->stack_op; struct stack_op *op = &insn->stack_op;
if (cfa->base != CFI_SP) if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
return 0; return 0;
/* push */ /* push */

View File

@ -125,12 +125,16 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
*/ */
void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr) void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
{ {
u32 pc = *vcpu_pc(vcpu);
bool is_thumb; bool is_thumb;
is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT); is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);
if (is_thumb && !is_wide_instr) if (is_thumb && !is_wide_instr)
*vcpu_pc(vcpu) += 2; pc += 2;
else else
*vcpu_pc(vcpu) += 4; pc += 4;
*vcpu_pc(vcpu) = pc;
kvm_adjust_itstate(vcpu); kvm_adjust_itstate(vcpu);
} }

View File

@ -389,7 +389,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid) static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{ {
if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 || if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
intid > VGIC_NR_PRIVATE_IRQS) intid >= VGIC_NR_PRIVATE_IRQS)
kvm_arm_halt_guest(vcpu->kvm); kvm_arm_halt_guest(vcpu->kvm);
} }
@ -397,7 +397,7 @@ static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid) static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{ {
if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 || if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
intid > VGIC_NR_PRIVATE_IRQS) intid >= VGIC_NR_PRIVATE_IRQS)
kvm_arm_resume_guest(vcpu->kvm); kvm_arm_resume_guest(vcpu->kvm);
} }