This is the 5.4.121 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmCo0UgACgkQONu9yGCS
 aT518BAAvnehTRdJsIaNTkHakC0RewR0DURUVovlLKyyoDro9AShGPztwEOrlUoc
 HweLawothBcuEmNmOzYxVVz4Io7lsKZHStm1ZSMnCu4AwdnjWNgyGSM/hwWQ5o/0
 BBEQVrp9WoRo+9o2uRQap52EBfDzNyYTiOeeNUD7XAh9NYp7UxzenACBjSzBnJpH
 q7U5kuqgGsc0bxWd2plgqxaYBTScbe4OkVYpUOQ/odBjiJ5+USof6+a4MhZ7uCVj
 wPxBK4ZzNv6cDiPxvAxALIhauAAx0XSyHQs/l7J3qux2yQ8o59fymvXTSn4cg/LE
 66hZDEb7DBcEQkVoO+W1Vg4Ww3v+hY3Fz7bhr2xU82v/rD48D0tEd+YLEgM8unfJ
 WeCgApiP8k4ikSNFthgkPXQ74WOll6DC1LW/NiVvC2SE+kVj9oA0VCFYJjDCHb5L
 fbfJQ2CkTR+JWGezXDIO9BauvcA6A9nCJPmUPpYD1aSkZmaw0vLvyTSly5BH5yoM
 BYWCO+hGMQeMh/p+VZgaXIfsI7YE7+tO3zpVRE3WVNPF+IesI6A1sqMcuLIlr8+j
 Lqwk/YB1tzbd1EWVQ+lAIxEehMjvcxikhp3fr8jzkvE6cbkH/EETr1JvORTbAggw
 csdm2q3OT8PY2fC88d0Uo/SEL3UtFwo4h0d9UOYGxDLR9wR6hQ4=
 =q0ia
 -----END PGP SIGNATURE-----
gpgsig -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEdQaENiSDAlGTDEbB7G51OISzHs0FAmCpXqcACgkQ7G51OISz
 Hs3NvxAAiXLlwo22VlP888LApM4RgRVe3nXg90LmpvlVQjQfwDAzIkLA1vTz4yA/
 5t6eH2Ew0Tdrl0nLi3/QWomDBEoz/9qU64184RH1cBPmcX8Ok+Lm8Nrxh7+bt/Qz
 kHanYti60tsYGoIWdTpWqq9GomBBlg1WSTWYHZwphrmTjAcLRgDB16EdKwfjDCOj
 QKXkmoMLm3OF8wKLY3/xyGJIvvAJZL7EqHD3uhIfNbaDU+PFiPqIne2YFtFljLG3
 Qj7XyhPG04vqaZf80uiATVWTbCF39GoPrYIn99Gtdf7x2G/lUSKoOk6hwSPzWDAe
 T2BvjBrhWWfDlwegW/FOf0URXCMlv+voz/aJ6wwUP64bs75BliW2oUbVbG29K0HI
 GWsmTgyf/18FR8RB27ojOEVydH62gATi+T5TLpKIm6k71ymo7Zb++1YO7O9buZHu
 /w78F8TWLUiArnRlHgyNkrkF2j4bmsV6KA3+7sMffC4CpHZGK88P8MP5oKydChld
 ewZDZpt0X6Lncauj7CCPNROgyKcZNip/Tp8L9XZss+gmsQH2U8W0wETzUsqfYAEP
 qAqycggdzOjZp27SzUX4MOmhiympkPXMlcLqrz3Ohz6jeNenwU7uDcPxEsXBZhIL
 M2zF8NtnASJmh5UmcHXShub/Wt735PUyGC/RlgbhHySCny/0+ng=
 =lW3L
 -----END PGP SIGNATURE-----

Merge tag 'v5.4.121' into 5.4-2.3.x-imx

This is the 5.4.121 stable release

Signed-off-by: Andrey Zhizhikin <andrey.zhizhikin@leica-geosystems.com>
Andrey Zhizhikin 2021-05-22 19:42:28 +00:00
commit 98e62acbfd
70 changed files with 421 additions and 184 deletions

View File

@ -1,4 +1,4 @@
#!/usr/bin/perl
#!/usr/bin/env perl
use strict;
use Text::Tabs;
use Getopt::Long;

View File

@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems

View File

@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python
# add symbolic names to read_msr / write_msr in trace
# decode_msr msr-index.h < trace
import sys

View File

@ -1,4 +1,4 @@
#!/usr/bin/perl
#!/usr/bin/env perl
# This is a POC (proof of concept or piece of crap, take your pick) for reading the
# text representation of trace output related to page allocation. It makes an attempt
# to extract some high-level information on what is going on. The accuracy of the parser

View File

@ -1,4 +1,4 @@
#!/usr/bin/perl
#!/usr/bin/env perl
# This is a POC for reading the text representation of trace output related to
# page reclaim. It makes an attempt to extract some high-level information on
# what is going on. The accuracy of the parser may vary

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 120
SUBLEVEL = 121
EXTRAVERSION =
NAME = Kleptomaniac Octopus

View File

@ -335,6 +335,7 @@ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_init_debug(void) {}
static inline void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}

View File

@ -27,6 +27,7 @@
#include <asm/vdso_datapage.h>
#include <asm/hardware/cache-l2x0.h>
#include <linux/kbuild.h>
#include <linux/arm-smccc.h>
#include "signal.h"
/*
@ -160,6 +161,8 @@ int main(void)
DEFINE(SLEEP_SAVE_SP_PHYS, offsetof(struct sleep_save_sp, save_ptr_stash_phys));
DEFINE(SLEEP_SAVE_SP_VIRT, offsetof(struct sleep_save_sp, save_ptr_stash));
#endif
DEFINE(ARM_SMCCC_QUIRK_ID_OFFS, offsetof(struct arm_smccc_quirk, id));
DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS, offsetof(struct arm_smccc_quirk, state));
BLANK();
DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);
DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);

View File

@ -3,7 +3,9 @@
* Copyright (c) 2015, Linaro Limited
*/
#include <linux/linkage.h>
#include <linux/arm-smccc.h>
#include <asm/asm-offsets.h>
#include <asm/opcodes-sec.h>
#include <asm/opcodes-virt.h>
#include <asm/unwind.h>
@ -27,7 +29,14 @@ UNWIND( .fnstart)
UNWIND( .save {r4-r7})
ldm r12, {r4-r7}
\instr
pop {r4-r7}
ldr r4, [sp, #36]
cmp r4, #0
beq 1f // No quirk structure
ldr r5, [r4, #ARM_SMCCC_QUIRK_ID_OFFS]
cmp r5, #ARM_SMCCC_QUIRK_QCOM_A6
bne 1f // No quirk present
str r6, [r4, #ARM_SMCCC_QUIRK_STATE_OFFS]
1: pop {r4-r7}
ldr r12, [sp, #(4 * 4)]
stm r12, {r0-r3}
bx lr

View File

@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
@ -26,6 +27,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
if (!idmap_pgd)
return -EINVAL;
/*
* Function graph tracer state gets inconsistent when the kernel
* calls functions that never return (aka suspend finishers), hence
* disable graph tracing during their execution.
*/
pause_graph_tracing();
/*
* Provide a temporary page table with an identity mapping for
* the MMU-enable code, required for resuming. On successful
@ -33,6 +41,9 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
* back to the correct page tables.
*/
ret = __cpu_suspend(arg, fn, __mpidr);
unpause_graph_tracing();
if (ret == 0) {
cpu_switch_mm(mm->pgd, mm);
local_flush_bp_all();
@ -46,7 +57,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
u32 __mpidr = cpu_logical_map(smp_processor_id());
return __cpu_suspend(arg, fn, __mpidr);
int ret;
pause_graph_tracing();
ret = __cpu_suspend(arg, fn, __mpidr);
unpause_graph_tracing();
return ret;
}
#define idmap_pgd NULL
#endif

View File

@ -552,6 +552,7 @@ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
void kvm_arm_init_debug(void);
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);

View File

@ -68,6 +68,64 @@ void kvm_arm_init_debug(void)
__this_cpu_write(mdcr_el2, kvm_call_hyp_ret(__kvm_get_mdcr_el2));
}
/**
* kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value
*
* @vcpu: the vcpu pointer
*
* This ensures we will trap access to:
* - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
* - Debug ROM Address (MDCR_EL2_TDRA)
* - OS related registers (MDCR_EL2_TDOSA)
* - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
* - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
*/
static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
{
/*
* This also clears MDCR_EL2_E2PB_MASK to disable guest access
* to the profiling buffer.
*/
vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
MDCR_EL2_TPMS |
MDCR_EL2_TTRF |
MDCR_EL2_TPMCR |
MDCR_EL2_TDRA |
MDCR_EL2_TDOSA);
/* Is the VM being debugged by userspace? */
if (vcpu->guest_debug)
/* Route all software debug exceptions to EL2 */
vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;
/*
* Trap debug register access when one of the following is true:
* - Userspace is using the hardware to debug the guest
* (KVM_GUESTDBG_USE_HW is set).
* - The guest is not using debug (KVM_ARM64_DEBUG_DIRTY is clear).
*/
if ((vcpu->guest_debug & KVM_GUESTDBG_USE_HW) ||
!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
}
/**
* kvm_arm_vcpu_init_debug - setup vcpu debug traps
*
* @vcpu: the vcpu pointer
*
* Set vcpu initial mdcr_el2 value.
*/
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu)
{
preempt_disable();
kvm_arm_setup_mdcr_el2(vcpu);
preempt_enable();
}
/**
* kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
*/
@ -83,13 +141,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
* @vcpu: the vcpu pointer
*
* This is called before each entry into the hypervisor to setup any
* debug related registers. Currently this just ensures we will trap
* access to:
* - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
* - Debug ROM Address (MDCR_EL2_TDRA)
* - OS related registers (MDCR_EL2_TDOSA)
* - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
* - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
* debug related registers.
*
* Additionally, KVM only traps guest accesses to the debug registers if
* the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
@ -101,28 +153,14 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
{
bool trap_debug = !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY);
unsigned long mdscr, orig_mdcr_el2 = vcpu->arch.mdcr_el2;
trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);
/*
* This also clears MDCR_EL2_E2PB_MASK to disable guest access
* to the profiling buffer.
*/
vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
MDCR_EL2_TPMS |
MDCR_EL2_TTRF |
MDCR_EL2_TPMCR |
MDCR_EL2_TDRA |
MDCR_EL2_TDOSA);
kvm_arm_setup_mdcr_el2(vcpu);
/* Is Guest debugging in effect? */
if (vcpu->guest_debug) {
/* Route all software debug exceptions to EL2 */
vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;
/* Save guest debug state */
save_guest_debug_regs(vcpu);
@ -176,7 +214,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
trap_debug = true;
trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
&vcpu->arch.debug_ptr->dbg_bcr[0],
@ -191,10 +228,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
BUG_ON(!vcpu->guest_debug &&
vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);
/* Trap debug register access */
if (trap_debug)
vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
/* If KDE or MDE are set, perform a full save/restore cycle. */
if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
@ -203,7 +236,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
if (has_vhe() && orig_mdcr_el2 != vcpu->arch.mdcr_el2)
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1));
}

View File

@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python
# SPDX-License-Identifier: GPL-2.0
#
# Usage: unwcheck.py FILE

View File

@ -10,9 +10,19 @@
#endif
#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
/*
* Clang prior to 13 had "mcount" instead of "_mcount":
* https://reviews.llvm.org/D98881
*/
#if defined(CONFIG_CC_IS_GCC) || CONFIG_CLANG_VERSION >= 130000
#define MCOUNT_NAME _mcount
#else
#define MCOUNT_NAME mcount
#endif
#define ARCH_SUPPORTS_FTRACE_OPS 1
#ifndef __ASSEMBLY__
void _mcount(void);
void MCOUNT_NAME(void);
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
return addr;
@ -33,7 +43,7 @@ struct dyn_arch_ftrace {
* both auipc and jalr at the same time.
*/
#define MCOUNT_ADDR ((unsigned long)_mcount)
#define MCOUNT_ADDR ((unsigned long)MCOUNT_NAME)
#define JALR_SIGN_MASK (0x00000800)
#define JALR_OFFSET_MASK (0x00000fff)
#define AUIPC_OFFSET_MASK (0xfffff000)

View File

@ -47,8 +47,8 @@
ENTRY(ftrace_stub)
#ifdef CONFIG_DYNAMIC_FTRACE
.global _mcount
.set _mcount, ftrace_stub
.global MCOUNT_NAME
.set MCOUNT_NAME, ftrace_stub
#endif
ret
ENDPROC(ftrace_stub)
@ -78,7 +78,7 @@ ENDPROC(return_to_handler)
#endif
#ifndef CONFIG_DYNAMIC_FTRACE
ENTRY(_mcount)
ENTRY(MCOUNT_NAME)
la t4, ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
la t0, ftrace_graph_return
@ -124,6 +124,6 @@ do_trace:
jalr t5
RESTORE_ABI_STATE
ret
ENDPROC(_mcount)
ENDPROC(MCOUNT_NAME)
#endif
EXPORT_SYMBOL(_mcount)
EXPORT_SYMBOL(MCOUNT_NAME)

View File

@ -17,6 +17,7 @@ config GCOV
bool "Enable gcov support"
depends on DEBUG_INFO
depends on !KCOV
depends on !MODULES
help
This option allows developers to retrieve coverage data from a UML
session.

View File

@ -21,7 +21,6 @@ obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \
obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
obj-$(CONFIG_GPROF) += gprof_syms.o
obj-$(CONFIG_GCOV) += gmon_syms.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o

View File

@ -6,6 +6,12 @@ OUTPUT_ARCH(ELF_ARCH)
ENTRY(_start)
jiffies = jiffies_64;
VERSION {
{
local: *;
};
}
SECTIONS
{
PROVIDE (__executable_start = START);

View File

@ -1,16 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <linux/module.h>
extern void __bb_init_func(void *) __attribute__((weak));
EXPORT_SYMBOL(__bb_init_func);
extern void __gcov_init(void *) __attribute__((weak));
EXPORT_SYMBOL(__gcov_init);
extern void __gcov_merge_add(void *, unsigned int) __attribute__((weak));
EXPORT_SYMBOL(__gcov_merge_add);
extern void __gcov_exit(void) __attribute__((weak));
EXPORT_SYMBOL(__gcov_exit);

View File

@ -7,6 +7,12 @@ OUTPUT_ARCH(ELF_ARCH)
ENTRY(_start)
jiffies = jiffies_64;
VERSION {
{
local: *;
};
}
SECTIONS
{
/* This must contain the right address - not quite the default ELF one.*/

View File

@ -253,7 +253,7 @@ static void __wrmsr_safe_regs_on_cpu(void *info)
rv->err = wrmsr_safe_regs(rv->regs);
}
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
int err;
struct msr_regs_info rv;
@ -266,7 +266,7 @@ int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
}
EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
int err;
struct msr_regs_info rv;

View File

@ -910,21 +910,20 @@ int dw_edma_remove(struct dw_edma_chip *chip)
/* Power management */
pm_runtime_disable(dev);
list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
vc.chan.device_node) {
list_del(&chan->vc.chan.device_node);
tasklet_kill(&chan->vc.task);
}
list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels,
vc.chan.device_node) {
list_del(&chan->vc.chan.device_node);
tasklet_kill(&chan->vc.task);
}
/* Deregister eDMA device */
dma_async_device_unregister(&dw->wr_edma);
list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
vc.chan.device_node) {
tasklet_kill(&chan->vc.task);
list_del(&chan->vc.chan.device_node);
}
dma_async_device_unregister(&dw->rd_edma);
list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels,
vc.chan.device_node) {
tasklet_kill(&chan->vc.task);
list_del(&chan->vc.chan.device_node);
}
/* Turn debugfs off */
dw_edma_v0_core_debugfs_off();

View File

@ -1394,6 +1394,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] = {
.no_edge_events_on_boot = true,
},
},
{
/*
* The Dell Venue 10 Pro 5055, with Bay Trail SoC + TI PMIC uses an
* external embedded-controller connected via I2C + an ACPI GPIO
* event handler on INT33FFC:02 pin 12, causing spurious wakeups.
*/
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Venue 10 Pro 5055"),
},
.driver_data = &(struct acpi_gpiolib_dmi_quirk) {
.ignore_wake = "INT33FC:02@12",
},
},
{
/*
* HP X2 10 models with Cherry Trail SoC + TI PMIC use an

View File

@ -7267,6 +7267,53 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
}
#endif
static int validate_overlay(struct drm_atomic_state *state)
{
int i;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
struct drm_plane_state *primary_state, *overlay_state = NULL;
/* Check if primary plane is contained inside overlay */
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
if (drm_atomic_plane_disabling(plane->state, new_plane_state))
return 0;
overlay_state = new_plane_state;
continue;
}
}
/* check if we're making changes to the overlay plane */
if (!overlay_state)
return 0;
/* check if overlay plane is enabled */
if (!overlay_state->crtc)
return 0;
/* find the primary plane for the CRTC that the overlay is enabled on */
primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
if (IS_ERR(primary_state))
return PTR_ERR(primary_state);
/* check if primary plane is enabled */
if (!primary_state->crtc)
return 0;
/* Perform the bounds check to ensure the overlay plane covers the primary */
if (primary_state->crtc_x < overlay_state->crtc_x ||
primary_state->crtc_y < overlay_state->crtc_y ||
primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
return -EINVAL;
}
return 0;
}
/**
* amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
* @dev: The DRM device
@ -7440,6 +7487,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
}
ret = validate_overlay(state);
if (ret)
goto fail;
/* Add new/modified planes */
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
ret = dm_update_plane_state(dc, state, plane,

View File

@ -36,6 +36,7 @@
#include <linux/of.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>
#include <linux/uuid.h>
#include <asm/unaligned.h>
/* Device, Driver information */
@ -1127,6 +1128,40 @@ static void elants_i2c_power_off(void *_data)
}
}
#ifdef CONFIG_ACPI
static const struct acpi_device_id i2c_hid_ids[] = {
{"ACPI0C50", 0 },
{"PNP0C50", 0 },
{ },
};
static const guid_t i2c_hid_guid =
GUID_INIT(0x3CDFF6F7, 0x4267, 0x4555,
0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE);
static bool elants_acpi_is_hid_device(struct device *dev)
{
acpi_handle handle = ACPI_HANDLE(dev);
union acpi_object *obj;
if (acpi_match_device_ids(ACPI_COMPANION(dev), i2c_hid_ids))
return false;
obj = acpi_evaluate_dsm_typed(handle, &i2c_hid_guid, 1, 1, NULL, ACPI_TYPE_INTEGER);
if (obj) {
ACPI_FREE(obj);
return true;
}
return false;
}
#else
static bool elants_acpi_is_hid_device(struct device *dev)
{
return false;
}
#endif
static int elants_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@ -1135,9 +1170,14 @@ static int elants_i2c_probe(struct i2c_client *client,
unsigned long irqflags;
int error;
/* Don't bind to i2c-hid compatible devices, these are handled by the i2c-hid drv. */
if (elants_acpi_is_hid_device(&client->dev)) {
dev_warn(&client->dev, "This device appears to be an I2C-HID device, not binding\n");
return -ENODEV;
}
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_err(&client->dev,
"%s: i2c check functionality error\n", DEVICE_NAME);
dev_err(&client->dev, "I2C check functionality error\n");
return -ENXIO;
}

View File

@ -20,6 +20,7 @@
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/irq.h>
#include <linux/regulator/consumer.h>
@ -335,10 +336,8 @@ static int silead_ts_get_id(struct i2c_client *client)
error = i2c_smbus_read_i2c_block_data(client, SILEAD_REG_ID,
sizeof(chip_id), (u8 *)&chip_id);
if (error < 0) {
dev_err(&client->dev, "Chip ID read error %d\n", error);
if (error < 0)
return error;
}
data->chip_id = le32_to_cpu(chip_id);
dev_info(&client->dev, "Silead chip ID: 0x%8X", data->chip_id);
@ -351,12 +350,49 @@ static int silead_ts_setup(struct i2c_client *client)
int error;
u32 status;
/*
* Some buggy BIOS-es bring up the chip in a stuck state where it
* blocks the I2C bus. The following steps are necessary to
* unstuck the chip / bus:
* 1. Turn off the Silead chip.
* 2. Try to do an I2C transfer with the chip, this will fail in
* response to which the I2C-bus-driver will call:
* i2c_recover_bus() which will unstuck the I2C-bus. Note the
* unstuck-ing of the I2C bus only works if we first drop the
* chip off the bus by turning it off.
* 3. Turn the chip back on.
*
* On the x86/ACPI systems where this problem is seen, steps 1 and
* 3 require making ACPI calls and dealing with ACPI Power
* Resources. The workaround below runtime-suspends the chip to
* turn it off, leaving it up to the ACPI subsystem to deal with
* this.
*/
if (device_property_read_bool(&client->dev,
"silead,stuck-controller-bug")) {
pm_runtime_set_active(&client->dev);
pm_runtime_enable(&client->dev);
pm_runtime_allow(&client->dev);
pm_runtime_suspend(&client->dev);
dev_warn(&client->dev, FW_BUG "Stuck I2C bus: please ignore the next 'controller timed out' error\n");
silead_ts_get_id(client);
/* The forbid will also resume the device */
pm_runtime_forbid(&client->dev);
pm_runtime_disable(&client->dev);
}
silead_ts_set_power(client, SILEAD_POWER_OFF);
silead_ts_set_power(client, SILEAD_POWER_ON);
error = silead_ts_get_id(client);
if (error)
if (error) {
dev_err(&client->dev, "Chip ID read error %d\n", error);
return error;
}
error = silead_ts_init(client);
if (error)

View File

@ -846,7 +846,7 @@ EXPORT_SYMBOL(capi20_put_message);
* Return value: CAPI result code
*/
u16 capi20_get_manufacturer(u32 contr, u8 *buf)
u16 capi20_get_manufacturer(u32 contr, u8 buf[CAPI_MANUFACTURER_LEN])
{
struct capi_ctr *ctr;
u16 ret;
@ -916,7 +916,7 @@ EXPORT_SYMBOL(capi20_get_version);
* Return value: CAPI result code
*/
u16 capi20_get_serial(u32 contr, u8 *serial)
u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN])
{
struct capi_ctr *ctr;
u16 ret;

View File

@ -95,19 +95,19 @@
#include <asm/sections.h>
#define v1printk(a...) do { \
if (verbose) \
printk(KERN_INFO a); \
} while (0)
#define v2printk(a...) do { \
if (verbose > 1) \
printk(KERN_INFO a); \
touch_nmi_watchdog(); \
} while (0)
#define eprintk(a...) do { \
printk(KERN_ERR a); \
WARN_ON(1); \
} while (0)
#define v1printk(a...) do { \
if (verbose) \
printk(KERN_INFO a); \
} while (0)
#define v2printk(a...) do { \
if (verbose > 1) \
printk(KERN_INFO a); \
touch_nmi_watchdog(); \
} while (0)
#define eprintk(a...) do { \
printk(KERN_ERR a); \
WARN_ON(1); \
} while (0)
#define MAX_CONFIG_LEN 40
static struct kgdb_io kgdbts_io_ops;

View File

@ -2690,7 +2690,7 @@ do { \
seq_printf(seq, "%-12s", s); \
for (i = 0; i < n; ++i) \
seq_printf(seq, " %16" fmt_spec, v); \
seq_putc(seq, '\n'); \
seq_putc(seq, '\n'); \
} while (0)
#define S(s, v) S3("s", s, v)
#define T3(fmt_spec, s, v) S3(fmt_spec, s, tx[i].v)

View File

@ -213,7 +213,7 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
u32 channel, int fifosz, u8 qmode)
{
unsigned int rqs = fifosz / 256 - 1;
u32 mtl_rx_op, mtl_rx_int;
u32 mtl_rx_op;
mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
@ -274,11 +274,6 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
}
writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
/* Enable MTL RX overflow */
mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
ioaddr + MTL_CHAN_INT_CTRL(channel));
}
static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,

View File

@ -3978,7 +3978,6 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
/* To handle GMAC own interrupts */
if ((priv->plat->has_gmac) || xmac) {
int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
int mtl_status;
if (unlikely(status)) {
/* For LPI we need to save the tx status */
@ -3989,17 +3988,8 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
}
for (queue = 0; queue < queues_count; queue++) {
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
queue);
if (mtl_status != -EINVAL)
status |= mtl_status;
if (status & CORE_IRQ_MTL_RX_OVERFLOW)
stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
rx_q->rx_tail_addr,
queue);
status = stmmac_host_mtl_irq_status(priv, priv->hw,
queue);
}
/* PCS link status */

View File

@ -406,9 +406,13 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
offset += hdr_padded_len;
p += hdr_padded_len;
copy = len;
if (copy > skb_tailroom(skb))
copy = skb_tailroom(skb);
/* Copy all frame if it fits skb->head, otherwise
* we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
*/
if (len <= skb_tailroom(skb))
copy = len;
else
copy = ETH_HLEN + metasize;
skb_put_data(skb, p, copy);
if (metasize) {

View File

@ -116,7 +116,7 @@ static int thunder_ecam_p2_config_read(struct pci_bus *bus, unsigned int devfn,
* the config space access window. Since we are working with
* the high-order 32 bits, shift everything down by 32 bits.
*/
node_bits = (cfg->res.start >> 32) & (1 << 12);
node_bits = upper_32_bits(cfg->res.start) & (1 << 12);
v |= node_bits;
set_val(v, where, size, val);

View File

@ -11,6 +11,7 @@
#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include "../pci.h"
#if defined(CONFIG_PCI_HOST_THUNDER_PEM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
@ -314,9 +315,9 @@ static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
* structure here for the BAR.
*/
bar4_start = res_pem->start + 0xf00000;
pem_pci->ea_entry[0] = (u32)bar4_start | 2;
pem_pci->ea_entry[1] = (u32)(res_pem->end - bar4_start) & ~3u;
pem_pci->ea_entry[2] = (u32)(bar4_start >> 32);
pem_pci->ea_entry[0] = lower_32_bits(bar4_start) | 2;
pem_pci->ea_entry[1] = lower_32_bits(res_pem->end - bar4_start) & ~3u;
pem_pci->ea_entry[2] = upper_32_bits(bar4_start);
cfg->priv = pem_pci;
return 0;
@ -324,9 +325,9 @@ static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
#define PEM_RES_BASE 0x87e0c0000000UL
#define PEM_NODE_MASK GENMASK(45, 44)
#define PEM_INDX_MASK GENMASK(26, 24)
#define PEM_RES_BASE 0x87e0c0000000ULL
#define PEM_NODE_MASK GENMASK_ULL(45, 44)
#define PEM_INDX_MASK GENMASK_ULL(26, 24)
#define PEM_MIN_DOM_IN_NODE 4
#define PEM_MAX_DOM_IN_NODE 10
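
A hedged note on the two Thunder hunks above: replacing the open-coded casts and ">> 32" shifts with lower_32_bits()/upper_32_bits() presumably keeps the code well defined when a resource address happens to be only 32 bits wide (shifting a 32-bit value right by 32 is undefined and draws compiler warnings). The address below is illustrative, not taken from the patch.

/* Splitting a 64-bit PEM-style address with the generic helpers. */
u64 bar4_start = 0x87e0c0f00000ULL;
u32 hi = upper_32_bits(bar4_start);	/* 0x000087e0 */
u32 lo = lower_32_bits(bar4_start);	/* 0xc0f00000 */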

View File

@ -540,6 +540,7 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
slot->flags &= ~SLOT_ENABLED;
continue;
}
pci_dev_put(dev);
}
}

View File

@ -588,6 +588,12 @@ static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe)
#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
struct resource *res);
#else
static inline int acpi_get_rc_resources(struct device *dev, const char *hid,
u16 segment, struct resource *res)
{
return -ENODEV;
}
#endif
u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);

View File

@ -1846,7 +1846,8 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
break;
default:
unreachable();
/* unreachable */
break;
}
}

View File

@ -11337,13 +11337,20 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
lpfc_ctx_cmd ctx_cmd)
{
struct lpfc_io_buf *lpfc_cmd;
IOCB_t *icmd = NULL;
int rc = 1;
if (iocbq->vport != vport)
return rc;
if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
!(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
!(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ) ||
iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
return rc;
icmd = &iocbq->iocb;
if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
icmd->ulpCommand == CMD_CLOSE_XRI_CN)
return rc;
lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);

View File

@ -1205,7 +1205,7 @@ static void tcmu_set_next_deadline(struct list_head *queue,
del_timer(timer);
}
static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
static bool tcmu_handle_completions(struct tcmu_dev *udev)
{
struct tcmu_mailbox *mb;
struct tcmu_cmd *cmd;
@ -1245,7 +1245,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
pr_err("cmd_id %u not found, ring is broken\n",
entry->hdr.cmd_id);
set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
break;
return false;
}
tcmu_handle_completion(cmd, entry);

View File

@ -1287,11 +1287,10 @@ sl811h_hub_control(
goto error;
put_unaligned_le32(sl811->port1, buf);
#ifndef VERBOSE
if (*(u16*)(buf+2)) /* only if wPortChange is interesting */
#endif
dev_dbg(hcd->self.controller, "GetPortStatus %08x\n",
sl811->port1);
if (__is_defined(VERBOSE) ||
*(u16*)(buf+2)) /* only if wPortChange is interesting */
dev_dbg(hcd->self.controller, "GetPortStatus %08x\n",
sl811->port1);
break;
case SetPortFeature:
if (wIndex != 1 || wLength != 0)

View File

@ -1987,6 +1987,7 @@ ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct inode *bd_inode = bdev_file_inode(file);
loff_t size = i_size_read(bd_inode);
struct blk_plug plug;
size_t shorted = 0;
ssize_t ret;
if (bdev_read_only(I_BDEV(bd_inode)))
@ -2005,12 +2006,17 @@ ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
return -EOPNOTSUPP;
iov_iter_truncate(from, size - iocb->ki_pos);
size -= iocb->ki_pos;
if (iov_iter_count(from) > size) {
shorted = iov_iter_count(from) - size;
iov_iter_truncate(from, size);
}
blk_start_plug(&plug);
ret = __generic_file_write_iter(iocb, from);
if (ret > 0)
ret = generic_write_sync(iocb, ret);
iov_iter_reexpand(from, iov_iter_count(from) + shorted);
blk_finish_plug(&plug);
return ret;
}
@ -2022,13 +2028,21 @@ ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
struct inode *bd_inode = bdev_file_inode(file);
loff_t size = i_size_read(bd_inode);
loff_t pos = iocb->ki_pos;
size_t shorted = 0;
ssize_t ret;
if (pos >= size)
return 0;
size -= pos;
iov_iter_truncate(to, size);
return generic_file_read_iter(iocb, to);
if (iov_iter_count(to) > size) {
shorted = iov_iter_count(to) - size;
iov_iter_truncate(to, size);
}
ret = generic_file_read_iter(iocb, to);
iov_iter_reexpand(to, iov_iter_count(to) + shorted);
return ret;
}
EXPORT_SYMBOL_GPL(blkdev_read_iter);
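
A hedged aside on the two hunks above: instead of permanently shrinking the iterator to the device size, the patch records how much it clipped, performs the I/O, and then calls iov_iter_reexpand() so the caller still sees the bytes that were never submitted. A minimal sketch of that pattern follows; the helper name do_clamped() is hypothetical and not part of the patch.

/* Sketch: clamp an iov_iter to 'limit' bytes for one operation, then
 * restore its original length so short reads/writes stay visible to
 * the caller (assumes the usual kernel iov_iter API).
 */
static ssize_t do_clamped(struct kiocb *iocb, struct iov_iter *iter, loff_t limit,
			  ssize_t (*op)(struct kiocb *, struct iov_iter *))
{
	size_t shorted = 0;
	ssize_t ret;

	if (iov_iter_count(iter) > limit) {
		shorted = iov_iter_count(iter) - limit;
		iov_iter_truncate(iter, limit);
	}
	ret = op(iocb, iter);
	iov_iter_reexpand(iter, iov_iter_count(iter) + shorted);
	return ret;
}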

View File

@ -1780,6 +1780,7 @@ static int try_nonblocking_invalidate(struct inode *inode)
u32 invalidating_gen = ci->i_rdcache_gen;
spin_unlock(&ci->i_ceph_lock);
ceph_fscache_invalidate(inode);
invalidate_mapping_pages(&inode->i_data, 0, -1);
spin_lock(&ci->i_ceph_lock);

View File

@ -1875,6 +1875,7 @@ static void ceph_do_invalidate_pages(struct inode *inode)
orig_gen = ci->i_rdcache_gen;
spin_unlock(&ci->i_ceph_lock);
ceph_fscache_invalidate(inode);
if (invalidate_inode_pages2(inode->i_mapping) < 0) {
pr_err("invalidate_pages %p fails\n", inode);
}

View File

@ -65,14 +65,18 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
skb_reset_mac_header(skb);
if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
u16 start = __virtio16_to_cpu(little_endian, hdr->csum_start);
u16 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
u32 start = __virtio16_to_cpu(little_endian, hdr->csum_start);
u32 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
u32 needed = start + max_t(u32, thlen, off + sizeof(__sum16));
if (!pskb_may_pull(skb, needed))
return -EINVAL;
if (!skb_partial_csum_set(skb, start, off))
return -EINVAL;
p_off = skb_transport_offset(skb) + thlen;
if (p_off > skb_headlen(skb))
if (!pskb_may_pull(skb, p_off))
return -EINVAL;
} else {
/* gso packets without NEEDS_CSUM do not set transport_offset.
@ -102,14 +106,14 @@ retry:
}
p_off = keys.control.thoff + thlen;
if (p_off > skb_headlen(skb) ||
if (!pskb_may_pull(skb, p_off) ||
keys.basic.ip_proto != ip_proto)
return -EINVAL;
skb_set_transport_header(skb, keys.control.thoff);
} else if (gso_type) {
p_off = thlen;
if (p_off > skb_headlen(skb))
if (!pskb_may_pull(skb, p_off))
return -EINVAL;
}
}
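
A hedged note on the type change above: csum_start and csum_offset come from an untrusted virtio header, and the new bound passed to pskb_may_pull() is derived from their sum. Widening the locals from u16 to u32 presumably keeps that sum from wrapping; the values below are hypothetical, chosen only to illustrate the 16-bit wraparound.

/* Illustration only (simplified to start + off + 2, ignoring thlen):
 * in u16 arithmetic 0xfff0 + 0x000e + 2 wraps to 0, so a bogus header
 * could slip past a length check; in u32 the sum is 0x10000 and
 * pskb_may_pull() fails as intended.
 */
u32 start = 0xfff0, off = 0x000e;
u32 needed = start + off + sizeof(__sum16);	/* 0x10000, no wrap */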

View File

@ -69,7 +69,7 @@ static void *stack_slabs[STACK_ALLOC_MAX_SLABS];
static int depot_index;
static int next_slab_inited;
static size_t depot_offset;
static DEFINE_SPINLOCK(depot_lock);
static DEFINE_RAW_SPINLOCK(depot_lock);
static bool init_stack_slab(void **prealloc)
{
@ -269,7 +269,7 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
prealloc = page_address(page);
}
spin_lock_irqsave(&depot_lock, flags);
raw_spin_lock_irqsave(&depot_lock, flags);
found = find_stack(*bucket, entries, nr_entries, hash);
if (!found) {
@ -293,7 +293,7 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
WARN_ON(!init_stack_slab(&prealloc));
}
spin_unlock_irqrestore(&depot_lock, flags);
raw_spin_unlock_irqrestore(&depot_lock, flags);
exit:
if (prealloc) {
/* Nobody used this memory, ok to free it. */

View File

@ -99,8 +99,9 @@ static size_t br_get_link_af_size_filtered(const struct net_device *dev,
rcu_read_lock();
if (netif_is_bridge_port(dev)) {
p = br_port_get_rcu(dev);
vg = nbp_vlan_group_rcu(p);
p = br_port_get_check_rcu(dev);
if (p)
vg = nbp_vlan_group_rcu(p);
} else if (dev->priv_flags & IFF_EBRIDGE) {
br = netdev_priv(dev);
vg = br_vlan_group_rcu(br);

View File

@ -387,7 +387,6 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
if (!(nt->parms.o_flags & TUNNEL_SEQ))
dev->features |= NETIF_F_LLTX;
dev_hold(dev);
ip6gre_tunnel_link(ign, nt);
return nt;
@ -1483,6 +1482,7 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
}
ip6gre_tnl_init_features(dev);
dev_hold(dev);
return 0;
cleanup_dst_cache_init:
@ -1525,8 +1525,6 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
strcpy(tunnel->parms.name, dev->name);
tunnel->hlen = sizeof(struct ipv6hdr) + 4;
dev_hold(dev);
}
static struct inet6_protocol ip6gre_protocol __read_mostly = {
@ -1876,6 +1874,7 @@ static int ip6erspan_tap_init(struct net_device *dev)
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
ip6erspan_tnl_link_config(tunnel, 1);
dev_hold(dev);
return 0;
cleanup_dst_cache_init:
@ -1975,8 +1974,6 @@ static int ip6gre_newlink_common(struct net *src_net, struct net_device *dev,
if (tb[IFLA_MTU])
ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
dev_hold(dev);
out:
return err;
}

View File

@ -267,7 +267,6 @@ static int ip6_tnl_create2(struct net_device *dev)
strcpy(t->parms.name, dev->name);
dev_hold(dev);
ip6_tnl_link(ip6n, t);
return 0;
@ -1861,6 +1860,7 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;
dev_hold(dev);
return 0;
destroy_dst:
@ -1904,7 +1904,6 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
t->parms.proto = IPPROTO_IPV6;
dev_hold(dev);
rcu_assign_pointer(ip6n->tnls_wc[0], t);
return 0;

View File

@ -952,7 +952,6 @@ static int __net_init vti6_fb_tnl_dev_init(struct net_device *dev)
struct vti6_net *ip6n = net_generic(net, vti6_net_id);
t->parms.proto = IPPROTO_IPV6;
dev_hold(dev);
rcu_assign_pointer(ip6n->tnls_wc[0], t);
return 0;

View File

@ -211,8 +211,6 @@ static int ipip6_tunnel_create(struct net_device *dev)
ipip6_tunnel_clone_6rd(dev, sitn);
dev_hold(dev);
ipip6_tunnel_link(sitn, t);
return 0;
@ -1408,7 +1406,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
dev->tstats = NULL;
return err;
}
dev_hold(dev);
return 0;
}
@ -1424,7 +1422,6 @@ static void __net_init ipip6_fb_tunnel_init(struct net_device *dev)
iph->ihl = 5;
iph->ttl = 64;
dev_hold(dev);
rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
}

View File

@ -363,12 +363,15 @@ static inline void xskq_produce_flush_desc(struct xsk_queue *q)
static inline bool xskq_full_desc(struct xsk_queue *q)
{
return xskq_nb_avail(q, q->nentries) == q->nentries;
/* No barriers needed since data is not accessed */
return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer) ==
q->nentries;
}
static inline bool xskq_empty_desc(struct xsk_queue *q)
{
return xskq_nb_free(q, q->prod_tail, q->nentries) == q->nentries;
/* No barriers needed since data is not accessed */
return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
}
void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask);
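
A hedged note on the rewritten checks above: the AF_XDP producer and consumer fields behave as free-running u32 counters, so the unsigned difference producer - consumer gives the fill level even after the counters wrap. The values below are hypothetical.

/* Unsigned ring arithmetic: producer has numerically wrapped past
 * consumer, yet the difference is still the true number of entries.
 */
u32 nentries = 4;
u32 producer = 0x00000002, consumer = 0xfffffffe;
u32 used   = producer - consumer;		/* 4 */
bool full  = (used == nentries);		/* mirrors xskq_full_desc() */
bool empty = (producer == consumer);		/* mirrors xskq_empty_desc() */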

View File

@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python3
#
# Copyright 2004 Matt Mackall <mpm@selenic.com>
#

View File

@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# SPDX-License-Identifier: GPL-2.0
# Manipulate options in a .config file from the command line

View File

@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
#
# diffconfig - a tool to compare .config files.

View File

@ -1,4 +1,4 @@
#!/usr/bin/perl
#!/usr/bin/env perl
# SPDX-License-Identifier: GPL-2.0
use strict;

View File

@ -395,7 +395,7 @@ if ($arch eq "x86_64") {
$mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$";
} elsif ($arch eq "riscv") {
$function_regex = "^([0-9a-fA-F]+)\\s+<([^.0-9][0-9a-zA-Z_\\.]+)>:";
$mcount_regex = "^\\s*([0-9a-fA-F]+):\\sR_RISCV_CALL\\s_mcount\$";
$mcount_regex = "^\\s*([0-9a-fA-F]+):\\sR_RISCV_CALL(_PLT)?\\s_?mcount\$";
$type = ".quad";
$alignment = 2;
} elsif ($arch eq "nds32") {

View File

@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python
# SPDX-License-Identifier: GPL-2.0-only
#
# show_deltas: Read list of printk messages instrumented with

View File

@ -1,4 +1,4 @@
#!/usr/bin/perl
#!/usr/bin/env perl
# SPDX-License-Identifier: GPL-2.0-or-later
use strict;

View File

@ -1,4 +1,4 @@
#!/usr/bin/perl
#!/usr/bin/env perl
# SPDX-License-Identifier: GPL-2.0
#
# Author: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>

View File

@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python
# SPDX-License-Identifier: GPL-2.0-only
"""

View File

@ -1202,11 +1202,17 @@ static const char *get_line_out_pfx(struct hda_codec *codec, int ch,
*index = ch;
return "Headphone";
case AUTO_PIN_LINE_OUT:
/* This deals with the case where we have two DACs and
* one LO, one HP and one Speaker */
if (!ch && cfg->speaker_outs && cfg->hp_outs) {
bool hp_lo_shared = !path_has_mixer(codec, spec->hp_paths[0], ctl_type);
bool spk_lo_shared = !path_has_mixer(codec, spec->speaker_paths[0], ctl_type);
/* This deals with the case where one HP or one Speaker or
* one HP + one Speaker need to share the DAC with LO
*/
if (!ch) {
bool hp_lo_shared = false, spk_lo_shared = false;
if (cfg->speaker_outs)
spk_lo_shared = !path_has_mixer(codec,
spec->speaker_paths[0], ctl_type);
if (cfg->hp_outs)
hp_lo_shared = !path_has_mixer(codec, spec->hp_paths[0], ctl_type);
if (hp_lo_shared && spk_lo_shared)
return spec->vmaster_mute.hook ? "PCM" : "Master";
if (hp_lo_shared)

View File

@ -1,4 +1,4 @@
#! /usr/bin/python
#! /usr/bin/env python
# SPDX-License-Identifier: GPL-2.0
# -*- python -*-
# -*- coding: utf-8 -*-

View File

@ -1,4 +1,4 @@
#! /usr/bin/python
#! /usr/bin/env python
# SPDX-License-Identifier: GPL-2.0-only
# -*- python -*-
# -*- coding: utf-8 -*-

View File

@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python
# SPDX-License-Identifier: GPL-2.0-only
# -*- coding: utf-8 -*-
#

View File

@ -1,4 +1,4 @@
#!/usr/bin/perl
#!/usr/bin/env perl
# SPDX-License-Identifier: GPL-2.0
open (IN,"ktest.pl");

View File

@ -1,4 +1,4 @@
#!/usr/bin/python3
#!/usr/bin/env python3
# Copyright (C) 2017 Netronome Systems, Inc.
# Copyright (c) 2019 Mellanox Technologies. All rights reserved

View File

@ -1,4 +1,4 @@
#!/usr/bin/perl
#!/usr/bin/env perl
# SPDX-License-Identifier: GPL-2.0
# Prefix all lines with "# ", unbuffered. Command being piped in may need
# to have unbuffering forced with "stdbuf -i0 -o0 -e0 $cmd".

View File

@ -1,4 +1,4 @@
#!/usr/bin/python3
#!/usr/bin/env python3
"""
tdc_batch.py - a script to generate TC batch file

View File

@ -1,4 +1,4 @@
#!/usr/bin/python3
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
"""
tdc_multibatch.py - a thin wrapper over tdc_batch.py to generate multiple batch

View File

@ -579,6 +579,8 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
vcpu->arch.has_run_once = true;
kvm_arm_vcpu_init_debug(vcpu);
if (likely(irqchip_in_kernel(kvm))) {
/*
* Map the VGIC hardware resources before running a vcpu the