This is the 5.4.99 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmAs43QACgkQONu9yGCS
 aT4omw/+JPBAZB5ClIOSDuf3/yJkbigVRFNVmQJy4/cluG32cxlcpudoau7AXq3N
 0Sn/rfSdldl5eI98OTA+Y0yPIsVnQJdei228A5gmULkkc+rEFugorSJKRmmA7tH0
 VdZ1C4NlhhmjoIT/W8mMNzv14dtyGQvRbT+zzfxqwqL6tF9+alcdBYTP/Z691K6x
 8Csfe05MZ8VkvBizStaTXC+dtMhU917Ikd5i5v4ZzaesZJcUTLS7J82FhtKeoz7q
 tDoA/Bl+pN1KjyIIE61/zJ8DKzBtOeuo1PWJFpO+EBVhKVosr3oWJfTAiM7Fsnu5
 dbKHYPsbe3mB79JdQibr7TpU7vSjDr5a/HTuYtp7WM1R5IssiFeVOdpXTGim/s/E
 Flao5LYSUcj0X/Io6TyUnxQWw8sJz3PGKYiLUn8/9DBpzNFzynQ+vuapXCoGxJzh
 W108q32PIx2ZTJsD5RUUqZbytG/zKzI1+SxXo2uOhs9/k5qT+35Yp9epsE2Cp8v1
 Oiw3P/ZUDNk6zPj0dsHcTsqTofRK07l71HnM8iIbCWSPw834IoGBuB8c3H7HaHn4
 v5M4tMTDAaKi/e09K92fR6SZDgZz8D0N+sLLneA4NEASXIJanCUwcgVCUbja+BO1
 H1hiYTTZQa7kOkSxBa/wGsWkdfvOpOvCSFr+c6LPmB9sHMe4K8o=
 =3BI0
 -----END PGP SIGNATURE-----
gpgsig -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEdQaENiSDAlGTDEbB7G51OISzHs0FAmAtjOoACgkQ7G51OISz
 Hs3p0w/+J3WWl9sa+L25ZY92AZDQWMFEIlsiyKrVVgC3dRhtEr2iWylMFsFRCS3K
 IYTcHxe3MrhnaRycXZ25I0jse3doAFkTbsEl3WcjabI4/xARUW77vedz5CvxBBH9
 5Spjn5qs5zfmxHTss4qbta1zEq7PjtoJEpFVDhlhK6VXKXz07QZrRnSAztde030R
 vg9OaJFyQauPguySXISBg/XXLJEJ3j5NO3PnAW5eJKJyguQF/ujFNsv5s80297BT
 vc7kNdyy7Zz+C8dem65WMsB9egcuqt6jwjadePGz/5mhUxaj+IESJnkZ3ezCSgMI
 NDdIROT228RScO5BVEHXZHKsAdyIKibdZ7Ta7P607buvBRSd7+eV6/QkbndNYxva
 40NBGTlevffBBBl3KXbZTSJX4IrUCr45kfwNXrHGo588aiSrOoYPbr3Tq0TsWyvc
 hkDOb5XARlvyun4vvL5cKmfOV7I6MrnGs07chQ8Xd732SIhRwgzsZg7cM3kzmceB
 cEV1d+2uTmxzzldW34fU/xWPSDb/dFKjOKrsttMNM3lbL58oPS17eu4zjSQ/vO72
 x/h8KY6Uc1ceja/fWZrk4t1fusMNJZ/NPxPVjOFJpMgz90aGMMQDm4bVM6yW1x4r
 zTr1iR6q9N/+S6Y2Vc2LEuEhB2fC9q9kuFbp8Evk8DPSotQT514=
 =FE60
 -----END PGP SIGNATURE-----

Merge tag 'v5.4.99' into 5.4-2.3.x-imx

This is the 5.4.99 stable release

Signed-off-by: Andrey Zhizhikin <andrey.zhizhikin@leica-geosystems.com>
commit ce0c0d68c7 by Andrey Zhizhikin, 2021-02-17 21:38:47 +00:00
73 changed files with 665 additions and 320 deletions


@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 98
+SUBLEVEL = 99
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus


@@ -329,9 +329,6 @@
 clocks = <&xtal_32k>, <&xtal>;
 clock-names = "xtal_32k", "xtal";
-assigned-clocks = <&clk LPC32XX_CLK_HCLK_PLL>;
-assigned-clock-rates = <208000000>;
 };
 };


@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ARM_KEXEC_INTERNAL_H
+#define _ARM_KEXEC_INTERNAL_H
+struct kexec_relocate_data {
+unsigned long kexec_start_address;
+unsigned long kexec_indirection_page;
+unsigned long kexec_mach_type;
+unsigned long kexec_r2;
+};
+#endif


@@ -15,6 +15,7 @@
 #include <linux/kvm_host.h>
 #endif
 #include <asm/cacheflush.h>
+#include <asm/kexec-internal.h>
 #include <asm/glue-df.h>
 #include <asm/glue-pf.h>
 #include <asm/mach/arch.h>
@@ -190,5 +191,9 @@ int main(void)
 DEFINE(MPU_RGN_PRBAR, offsetof(struct mpu_rgn, prbar));
 DEFINE(MPU_RGN_PRLAR, offsetof(struct mpu_rgn, prlar));
 #endif
+DEFINE(KEXEC_START_ADDR, offsetof(struct kexec_relocate_data, kexec_start_address));
+DEFINE(KEXEC_INDIR_PAGE, offsetof(struct kexec_relocate_data, kexec_indirection_page));
+DEFINE(KEXEC_MACH_TYPE, offsetof(struct kexec_relocate_data, kexec_mach_type));
+DEFINE(KEXEC_R2, offsetof(struct kexec_relocate_data, kexec_r2));
 return 0;
 }


@@ -15,6 +15,7 @@
 #include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
+#include <asm/kexec-internal.h>
 #include <asm/fncpy.h>
 #include <asm/mach-types.h>
 #include <asm/smp_plat.h>
@@ -24,11 +25,6 @@
 extern void relocate_new_kernel(void);
 extern const unsigned int relocate_new_kernel_size;
-extern unsigned long kexec_start_address;
-extern unsigned long kexec_indirection_page;
-extern unsigned long kexec_mach_type;
-extern unsigned long kexec_boot_atags;
 static atomic_t waiting_for_crash_ipi;
 /*
@@ -161,6 +157,7 @@ void (*kexec_reinit)(void);
 void machine_kexec(struct kimage *image)
 {
 unsigned long page_list, reboot_entry_phys;
+struct kexec_relocate_data *data;
 void (*reboot_entry)(void);
 void *reboot_code_buffer;
@@ -176,18 +173,17 @@ void machine_kexec(struct kimage *image)
 reboot_code_buffer = page_address(image->control_code_page);
-/* Prepare parameters for reboot_code_buffer*/
-set_kernel_text_rw();
-kexec_start_address = image->start;
-kexec_indirection_page = page_list;
-kexec_mach_type = machine_arch_type;
-kexec_boot_atags = image->arch.kernel_r2;
 /* copy our kernel relocation code to the control code page */
 reboot_entry = fncpy(reboot_code_buffer,
 &relocate_new_kernel,
 relocate_new_kernel_size);
+data = reboot_code_buffer + relocate_new_kernel_size;
+data->kexec_start_address = image->start;
+data->kexec_indirection_page = page_list;
+data->kexec_mach_type = machine_arch_type;
+data->kexec_r2 = image->arch.kernel_r2;
 /* get the identity mapping physical address for the reboot code */
 reboot_entry_phys = virt_to_idmap(reboot_entry);


@@ -5,14 +5,16 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/asm-offsets.h>
 #include <asm/kexec.h>
 .align 3 /* not needed for this code, but keeps fncpy() happy */
 ENTRY(relocate_new_kernel)
-ldr r0,kexec_indirection_page
-ldr r1,kexec_start_address
+adr r7, relocate_new_kernel_end
+ldr r0, [r7, #KEXEC_INDIR_PAGE]
+ldr r1, [r7, #KEXEC_START_ADDR]
 /*
 * If there is no indirection page (we are doing crashdumps)
@@ -57,34 +59,16 @@ ENTRY(relocate_new_kernel)
 2:
 /* Jump to relocated kernel */
-mov lr,r1
-mov r0,#0
-ldr r1,kexec_mach_type
-ldr r2,kexec_boot_atags
+mov lr, r1
+mov r0, #0
+ldr r1, [r7, #KEXEC_MACH_TYPE]
+ldr r2, [r7, #KEXEC_R2]
 ARM( ret lr )
 THUMB( bx lr )
-.align
-.globl kexec_start_address
-kexec_start_address:
-.long 0x0
-.globl kexec_indirection_page
-kexec_indirection_page:
-.long 0x0
-.globl kexec_mach_type
-kexec_mach_type:
-.long 0x0
-/* phy addr of the atags for the new kernel */
-.globl kexec_boot_atags
-kexec_boot_atags:
-.long 0x0
 ENDPROC(relocate_new_kernel)
+.align 3
 relocate_new_kernel_end:
 .globl relocate_new_kernel_size


@@ -694,18 +694,20 @@ struct page *get_signal_page(void)
 addr = page_address(page);
+/* Poison the entire page */
+memset32(addr, __opcode_to_mem_arm(0xe7fddef1),
+PAGE_SIZE / sizeof(u32));
 /* Give the signal return code some randomness */
 offset = 0x200 + (get_random_int() & 0x7fc);
 signal_return_offset = offset;
-/*
- * Copy signal return handlers into the vector page, and
- * set sigreturn to be a pointer to these.
- */
+/* Copy signal return handlers into the page */
 memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
-ptr = (unsigned long)addr + offset;
-flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+/* Flush out all instructions in this page */
+ptr = (unsigned long)addr;
+flush_icache_range(ptr, ptr + PAGE_SIZE);
 return page;
 }


@@ -151,10 +151,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 (cx->mpu_logic_state == PWRDM_POWER_OFF);
 /* Enter broadcast mode for periodic timers */
-tick_broadcast_enable();
+RCU_NONIDLE(tick_broadcast_enable());
 /* Enter broadcast mode for one-shot timers */
-tick_broadcast_enter();
+RCU_NONIDLE(tick_broadcast_enter());
 /*
 * Call idle CPU PM enter notifier chain so that
@@ -166,7 +166,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 if (dev->cpu == 0) {
 pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
-omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
+RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state));
 /*
 * Call idle CPU cluster PM enter notifier chain
@@ -178,7 +178,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 index = 0;
 cx = state_ptr + index;
 pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
-omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
+RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state));
 mpuss_can_lose_context = 0;
 }
 }
@@ -194,9 +194,9 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 mpuss_can_lose_context)
 gic_dist_disable();
-clkdm_deny_idle(cpu_clkdm[1]);
-omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON);
-clkdm_allow_idle(cpu_clkdm[1]);
+RCU_NONIDLE(clkdm_deny_idle(cpu_clkdm[1]));
+RCU_NONIDLE(omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON));
+RCU_NONIDLE(clkdm_allow_idle(cpu_clkdm[1]));
 if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
 mpuss_can_lose_context) {
@@ -222,7 +222,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 cpu_pm_exit();
 cpu_pm_out:
-tick_broadcast_exit();
+RCU_NONIDLE(tick_broadcast_exit());
 fail:
 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);


@@ -370,8 +370,6 @@ static int __init xen_guest_init(void)
 return -ENOMEM;
 }
 gnttab_init();
-if (!xen_initial_domain())
-xenbus_probe();
 /*
 * Making sure board specific code will not set up ops for


@@ -337,7 +337,9 @@
 &gcc {
 protected-clocks = <GCC_QSPI_CORE_CLK>,
 <GCC_QSPI_CORE_CLK_SRC>,
-<GCC_QSPI_CNOC_PERIPH_AHB_CLK>;
+<GCC_QSPI_CNOC_PERIPH_AHB_CLK>,
+<GCC_LPASS_Q6_AXI_CLK>,
+<GCC_LPASS_SWAY_CLK>;
 };
 &pm8998_gpio {


@@ -232,7 +232,9 @@
 &gcc {
 protected-clocks = <GCC_QSPI_CORE_CLK>,
 <GCC_QSPI_CORE_CLK_SRC>,
-<GCC_QSPI_CNOC_PERIPH_AHB_CLK>;
+<GCC_QSPI_CNOC_PERIPH_AHB_CLK>,
+<GCC_LPASS_Q6_AXI_CLK>,
+<GCC_LPASS_SWAY_CLK>;
 };
 &i2c1 {


@@ -232,6 +232,7 @@
 reg = <0x0 0xf8000000 0x0 0x2000000>,
 <0x0 0xfd000000 0x0 0x1000000>;
 reg-names = "axi-base", "apb-base";
+device_type = "pci";
 #address-cells = <3>;
 #size-cells = <2>;
 #interrupt-cells = <1>;
@@ -250,7 +251,6 @@
 <0 0 0 2 &pcie0_intc 1>,
 <0 0 0 3 &pcie0_intc 2>,
 <0 0 0 4 &pcie0_intc 3>;
-linux,pci-domain = <0>;
 max-link-speed = <1>;
 msi-map = <0x0 &its 0x0 0x1000>;
 phys = <&pcie_phy 0>, <&pcie_phy 1>,


@@ -63,6 +63,9 @@ int main(void)
 OFFSET(TI_FLAGS, thread_info, flags);
 OFFSET(TI_CPU, thread_info, cpu);
 OFFSET(TI_PRE, thread_info, preempt_count);
+#ifdef CONFIG_PREEMPTION
+DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
+#endif
 return 0;
 }


@@ -102,6 +102,7 @@ SECTIONS
 #ifdef CONFIG_PPC64
 *(.tramp.ftrace.text);
 #endif
+NOINSTR_TEXT
 SCHED_TEXT
 CPUIDLE_TEXT
 LOCK_TEXT


@@ -119,7 +119,10 @@ extern unsigned long min_low_pfn;
 #endif /* __ASSEMBLY__ */
-#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr)))
+#define virt_addr_valid(vaddr) ({ \
+unsigned long _addr = (unsigned long)vaddr; \
+(unsigned long)(_addr) >= PAGE_OFFSET && pfn_valid(virt_to_pfn(_addr)); \
+})
 #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)


@@ -61,6 +61,9 @@ endif
 KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow
 KBUILD_CFLAGS += $(call cc-option,-mno-avx,)
+# Intel CET isn't enabled in the kernel
+KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
 ifeq ($(CONFIG_X86_32),y)
 BITS := 32
 UTS_MACHINE := i386
@@ -131,9 +134,6 @@ else
 KBUILD_CFLAGS += -mno-red-zone
 KBUILD_CFLAGS += -mcmodel=kernel
-# Intel CET isn't enabled in the kernel
-KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
 endif
 ifdef CONFIG_X86_X32


@@ -6320,13 +6320,13 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
 * limit 'something'.
 */
 /* no more than 50% of tags for async I/O */
-bfqd->word_depths[0][0] = max(bt->sb.depth >> 1, 1U);
+bfqd->word_depths[0][0] = max((1U << bt->sb.shift) >> 1, 1U);
 /*
 * no more than 75% of tags for sync writes (25% extra tags
 * w.r.t. async I/O, to prevent async I/O from starving sync
 * writes)
 */
-bfqd->word_depths[0][1] = max((bt->sb.depth * 3) >> 2, 1U);
+bfqd->word_depths[0][1] = max(((1U << bt->sb.shift) * 3) >> 2, 1U);
 /*
 * In-word depths in case some bfq_queue is being weight-
@@ -6336,9 +6336,9 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
 * shortage.
 */
 /* no more than ~18% of tags for async I/O */
-bfqd->word_depths[1][0] = max((bt->sb.depth * 3) >> 4, 1U);
+bfqd->word_depths[1][0] = max(((1U << bt->sb.shift) * 3) >> 4, 1U);
 /* no more than ~37% of tags for sync writes (~20% extra tags) */
-bfqd->word_depths[1][1] = max((bt->sb.depth * 6) >> 4, 1U);
+bfqd->word_depths[1][1] = max(((1U << bt->sb.shift) * 6) >> 4, 1U);
 for (i = 0; i < 2; i++)
 for (j = 0; j < 2; j++)


@@ -108,7 +108,7 @@ static unsigned long ccu_mp_round_rate(struct ccu_mux_internal *mux,
 max_m = cmp->m.max ?: 1 << cmp->m.width;
 max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);
-if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
+if (!clk_hw_can_set_rate_parent(&cmp->common.hw)) {
 ccu_mp_find_best(*parent_rate, rate, max_m, max_p, &m, &p);
 rate = *parent_rate / p / m;
 } else {


@@ -25,6 +25,9 @@
 /* Maximum value for gpio line identifiers */
 #define EP93XX_GPIO_LINE_MAX 63
+/* Number of GPIO chips in EP93XX */
+#define EP93XX_GPIO_CHIP_NUM 8
 /* Maximum value for irq capable line identifiers */
 #define EP93XX_GPIO_LINE_MAX_IRQ 23
@@ -34,74 +37,75 @@
 */
 #define EP93XX_GPIO_F_IRQ_BASE 80
+struct ep93xx_gpio_irq_chip {
+struct irq_chip ic;
+u8 irq_offset;
+u8 int_unmasked;
+u8 int_enabled;
+u8 int_type1;
+u8 int_type2;
+u8 int_debounce;
+};
+struct ep93xx_gpio_chip {
+struct gpio_chip gc;
+struct ep93xx_gpio_irq_chip *eic;
+};
 struct ep93xx_gpio {
 void __iomem *base;
-struct gpio_chip gc[8];
+struct ep93xx_gpio_chip gc[EP93XX_GPIO_CHIP_NUM];
 };
+#define to_ep93xx_gpio_chip(x) container_of(x, struct ep93xx_gpio_chip, gc)
+static struct ep93xx_gpio_irq_chip *to_ep93xx_gpio_irq_chip(struct gpio_chip *gc)
+{
+struct ep93xx_gpio_chip *egc = to_ep93xx_gpio_chip(gc);
+return egc->eic;
+}
 /*************************************************************************
 * Interrupt handling for EP93xx on-chip GPIOs
 *************************************************************************/
-static unsigned char gpio_int_unmasked[3];
-static unsigned char gpio_int_enabled[3];
-static unsigned char gpio_int_type1[3];
-static unsigned char gpio_int_type2[3];
-static unsigned char gpio_int_debounce[3];
-/* Port ordering is: A B F */
-static const u8 int_type1_register_offset[3] = { 0x90, 0xac, 0x4c };
-static const u8 int_type2_register_offset[3] = { 0x94, 0xb0, 0x50 };
-static const u8 eoi_register_offset[3] = { 0x98, 0xb4, 0x54 };
-static const u8 int_en_register_offset[3] = { 0x9c, 0xb8, 0x58 };
-static const u8 int_debounce_register_offset[3] = { 0xa8, 0xc4, 0x64 };
-static void ep93xx_gpio_update_int_params(struct ep93xx_gpio *epg, unsigned port)
+#define EP93XX_INT_TYPE1_OFFSET 0x00
+#define EP93XX_INT_TYPE2_OFFSET 0x04
+#define EP93XX_INT_EOI_OFFSET 0x08
+#define EP93XX_INT_EN_OFFSET 0x0c
+#define EP93XX_INT_STATUS_OFFSET 0x10
+#define EP93XX_INT_RAW_STATUS_OFFSET 0x14
+#define EP93XX_INT_DEBOUNCE_OFFSET 0x18
+static void ep93xx_gpio_update_int_params(struct ep93xx_gpio *epg,
+struct ep93xx_gpio_irq_chip *eic)
 {
-BUG_ON(port > 2);
-writeb_relaxed(0, epg->base + int_en_register_offset[port]);
-writeb_relaxed(gpio_int_type2[port],
-epg->base + int_type2_register_offset[port]);
-writeb_relaxed(gpio_int_type1[port],
-epg->base + int_type1_register_offset[port]);
-writeb(gpio_int_unmasked[port] & gpio_int_enabled[port],
-epg->base + int_en_register_offset[port]);
-}
-static int ep93xx_gpio_port(struct gpio_chip *gc)
-{
-struct ep93xx_gpio *epg = gpiochip_get_data(gc);
-int port = 0;
-while (port < ARRAY_SIZE(epg->gc) && gc != &epg->gc[port])
-port++;
-/* This should not happen but is there as a last safeguard */
-if (port == ARRAY_SIZE(epg->gc)) {
-pr_crit("can't find the GPIO port\n");
-return 0;
-}
-return port;
+writeb_relaxed(0, epg->base + eic->irq_offset + EP93XX_INT_EN_OFFSET);
+writeb_relaxed(eic->int_type2,
+epg->base + eic->irq_offset + EP93XX_INT_TYPE2_OFFSET);
+writeb_relaxed(eic->int_type1,
+epg->base + eic->irq_offset + EP93XX_INT_TYPE1_OFFSET);
+writeb_relaxed(eic->int_unmasked & eic->int_enabled,
+epg->base + eic->irq_offset + EP93XX_INT_EN_OFFSET);
 }
 static void ep93xx_gpio_int_debounce(struct gpio_chip *gc,
 unsigned int offset, bool enable)
 {
 struct ep93xx_gpio *epg = gpiochip_get_data(gc);
-int port = ep93xx_gpio_port(gc);
+struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
 int port_mask = BIT(offset);
 if (enable)
-gpio_int_debounce[port] |= port_mask;
+eic->int_debounce |= port_mask;
 else
-gpio_int_debounce[port] &= ~port_mask;
-writeb(gpio_int_debounce[port],
-epg->base + int_debounce_register_offset[port]);
+eic->int_debounce &= ~port_mask;
+writeb(eic->int_debounce,
+epg->base + eic->irq_offset + EP93XX_INT_DEBOUNCE_OFFSET);
 }
 static void ep93xx_gpio_ab_irq_handler(struct irq_desc *desc)
@@ -122,12 +126,12 @@ static void ep93xx_gpio_ab_irq_handler(struct irq_desc *desc)
 */
 stat = readb(epg->base + EP93XX_GPIO_A_INT_STATUS);
 for_each_set_bit(offset, &stat, 8)
-generic_handle_irq(irq_find_mapping(epg->gc[0].irq.domain,
+generic_handle_irq(irq_find_mapping(epg->gc[0].gc.irq.domain,
 offset));
 stat = readb(epg->base + EP93XX_GPIO_B_INT_STATUS);
 for_each_set_bit(offset, &stat, 8)
-generic_handle_irq(irq_find_mapping(epg->gc[1].irq.domain,
+generic_handle_irq(irq_find_mapping(epg->gc[1].gc.irq.domain,
 offset));
 chained_irq_exit(irqchip, desc);
@@ -153,52 +157,52 @@ static void ep93xx_gpio_f_irq_handler(struct irq_desc *desc)
 static void ep93xx_gpio_irq_ack(struct irq_data *d)
 {
 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
 struct ep93xx_gpio *epg = gpiochip_get_data(gc);
-int port = ep93xx_gpio_port(gc);
 int port_mask = BIT(d->irq & 7);
 if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
-gpio_int_type2[port] ^= port_mask; /* switch edge direction */
-ep93xx_gpio_update_int_params(epg, port);
+eic->int_type2 ^= port_mask; /* switch edge direction */
+ep93xx_gpio_update_int_params(epg, eic);
 }
-writeb(port_mask, epg->base + eoi_register_offset[port]);
+writeb(port_mask, epg->base + eic->irq_offset + EP93XX_INT_EOI_OFFSET);
 }
 static void ep93xx_gpio_irq_mask_ack(struct irq_data *d)
 {
 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
 struct ep93xx_gpio *epg = gpiochip_get_data(gc);
-int port = ep93xx_gpio_port(gc);
 int port_mask = BIT(d->irq & 7);
 if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH)
-gpio_int_type2[port] ^= port_mask; /* switch edge direction */
-gpio_int_unmasked[port] &= ~port_mask;
-ep93xx_gpio_update_int_params(epg, port);
-writeb(port_mask, epg->base + eoi_register_offset[port]);
+eic->int_type2 ^= port_mask; /* switch edge direction */
+eic->int_unmasked &= ~port_mask;
+ep93xx_gpio_update_int_params(epg, eic);
+writeb(port_mask, epg->base + eic->irq_offset + EP93XX_INT_EOI_OFFSET);
 }
 static void ep93xx_gpio_irq_mask(struct irq_data *d)
 {
 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
 struct ep93xx_gpio *epg = gpiochip_get_data(gc);
-int port = ep93xx_gpio_port(gc);
-gpio_int_unmasked[port] &= ~BIT(d->irq & 7);
-ep93xx_gpio_update_int_params(epg, port);
+eic->int_unmasked &= ~BIT(d->irq & 7);
+ep93xx_gpio_update_int_params(epg, eic);
 }
 static void ep93xx_gpio_irq_unmask(struct irq_data *d)
 {
 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
 struct ep93xx_gpio *epg = gpiochip_get_data(gc);
-int port = ep93xx_gpio_port(gc);
-gpio_int_unmasked[port] |= BIT(d->irq & 7);
-ep93xx_gpio_update_int_params(epg, port);
+eic->int_unmasked |= BIT(d->irq & 7);
+ep93xx_gpio_update_int_params(epg, eic);
 }
 /*
@@ -209,8 +213,8 @@ static void ep93xx_gpio_irq_unmask(struct irq_data *d)
 static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
 {
 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
 struct ep93xx_gpio *epg = gpiochip_get_data(gc);
-int port = ep93xx_gpio_port(gc);
 int offset = d->irq & 7;
 int port_mask = BIT(offset);
 irq_flow_handler_t handler;
@@ -219,32 +223,32 @@ static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
 switch (type) {
 case IRQ_TYPE_EDGE_RISING:
-gpio_int_type1[port] |= port_mask;
-gpio_int_type2[port] |= port_mask;
+eic->int_type1 |= port_mask;
+eic->int_type2 |= port_mask;
 handler = handle_edge_irq;
 break;
 case IRQ_TYPE_EDGE_FALLING:
-gpio_int_type1[port] |= port_mask;
-gpio_int_type2[port] &= ~port_mask;
+eic->int_type1 |= port_mask;
+eic->int_type2 &= ~port_mask;
 handler = handle_edge_irq;
 break;
 case IRQ_TYPE_LEVEL_HIGH:
-gpio_int_type1[port] &= ~port_mask;
-gpio_int_type2[port] |= port_mask;
+eic->int_type1 &= ~port_mask;
+eic->int_type2 |= port_mask;
 handler = handle_level_irq;
 break;
 case IRQ_TYPE_LEVEL_LOW:
-gpio_int_type1[port] &= ~port_mask;
-gpio_int_type2[port] &= ~port_mask;
+eic->int_type1 &= ~port_mask;
+eic->int_type2 &= ~port_mask;
 handler = handle_level_irq;
 break;
 case IRQ_TYPE_EDGE_BOTH:
-gpio_int_type1[port] |= port_mask;
+eic->int_type1 |= port_mask;
 /* set initial polarity based on current input level */
 if (gc->get(gc, offset))
-gpio_int_type2[port] &= ~port_mask; /* falling */
+eic->int_type2 &= ~port_mask; /* falling */
 else
-gpio_int_type2[port] |= port_mask; /* rising */
+eic->int_type2 |= port_mask; /* rising */
 handler = handle_edge_irq;
 break;
 default:
@@ -253,22 +257,13 @@ static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
 irq_set_handler_locked(d, handler);
-gpio_int_enabled[port] |= port_mask;
-ep93xx_gpio_update_int_params(epg, port);
+eic->int_enabled |= port_mask;
+ep93xx_gpio_update_int_params(epg, eic);
 return 0;
 }
-static struct irq_chip ep93xx_gpio_irq_chip = {
-.name = "GPIO",
-.irq_ack = ep93xx_gpio_irq_ack,
-.irq_mask_ack = ep93xx_gpio_irq_mask_ack,
-.irq_mask = ep93xx_gpio_irq_mask,
-.irq_unmask = ep93xx_gpio_irq_unmask,
-.irq_set_type = ep93xx_gpio_irq_type,
-};
 /*************************************************************************
 * gpiolib interface for EP93xx on-chip GPIOs
 *************************************************************************/
@@ -276,17 +271,19 @@ struct ep93xx_gpio_bank {
 const char *label;
 int data;
 int dir;
+int irq;
 int base;
 bool has_irq;
 bool has_hierarchical_irq;
 unsigned int irq_base;
 };
-#define EP93XX_GPIO_BANK(_label, _data, _dir, _base, _has_irq, _has_hier, _irq_base) \
+#define EP93XX_GPIO_BANK(_label, _data, _dir, _irq, _base, _has_irq, _has_hier, _irq_base) \
 { \
 .label = _label, \
 .data = _data, \
 .dir = _dir, \
+.irq = _irq, \
 .base = _base, \
 .has_irq = _has_irq, \
 .has_hierarchical_irq = _has_hier, \
@@ -295,16 +292,16 @@ struct ep93xx_gpio_bank {
 static struct ep93xx_gpio_bank ep93xx_gpio_banks[] = {
 /* Bank A has 8 IRQs */
-EP93XX_GPIO_BANK("A", 0x00, 0x10, 0, true, false, 64),
+EP93XX_GPIO_BANK("A", 0x00, 0x10, 0x90, 0, true, false, 64),
 /* Bank B has 8 IRQs */
-EP93XX_GPIO_BANK("B", 0x04, 0x14, 8, true, false, 72),
-EP93XX_GPIO_BANK("C", 0x08, 0x18, 40, false, false, 0),
-EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 24, false, false, 0),
-EP93XX_GPIO_BANK("E", 0x20, 0x24, 32, false, false, 0),
+EP93XX_GPIO_BANK("B", 0x04, 0x14, 0xac, 8, true, false, 72),
+EP93XX_GPIO_BANK("C", 0x08, 0x18, 0x00, 40, false, false, 0),
+EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 0x00, 24, false, false, 0),
+EP93XX_GPIO_BANK("E", 0x20, 0x24, 0x00, 32, false, false, 0),
 /* Bank F has 8 IRQs */
-EP93XX_GPIO_BANK("F", 0x30, 0x34, 16, false, true, 0),
-EP93XX_GPIO_BANK("G", 0x38, 0x3c, 48, false, false, 0),
-EP93XX_GPIO_BANK("H", 0x40, 0x44, 56, false, false, 0),
+EP93XX_GPIO_BANK("F", 0x30, 0x34, 0x4c, 16, false, true, 0),
+EP93XX_GPIO_BANK("G", 0x38, 0x3c, 0x00, 48, false, false, 0),
+EP93XX_GPIO_BANK("H", 0x40, 0x44, 0x00, 56, false, false, 0),
 };
 static int ep93xx_gpio_set_config(struct gpio_chip *gc, unsigned offset,
@@ -326,13 +323,23 @@ static int ep93xx_gpio_f_to_irq(struct gpio_chip *gc, unsigned offset)
 return EP93XX_GPIO_F_IRQ_BASE + offset;
 }
-static int ep93xx_gpio_add_bank(struct gpio_chip *gc,
+static void ep93xx_init_irq_chip(struct device *dev, struct irq_chip *ic)
+{
+ic->irq_ack = ep93xx_gpio_irq_ack;
+ic->irq_mask_ack = ep93xx_gpio_irq_mask_ack;
+ic->irq_mask = ep93xx_gpio_irq_mask;
+ic->irq_unmask = ep93xx_gpio_irq_unmask;
+ic->irq_set_type = ep93xx_gpio_irq_type;
+}
+static int ep93xx_gpio_add_bank(struct ep93xx_gpio_chip *egc,
 struct platform_device *pdev,
 struct ep93xx_gpio *epg,
 struct ep93xx_gpio_bank *bank)
 {
 void __iomem *data = epg->base + bank->data;
 void __iomem *dir = epg->base + bank->dir;
+struct gpio_chip *gc = &egc->gc;
 struct device *dev = &pdev->dev;
 struct gpio_irq_chip *girq;
 int err;
@@ -346,8 +353,21 @@ static int ep93xx_gpio_add_bank(struct gpio_chip *gc,
 girq = &gc->irq;
 if (bank->has_irq || bank->has_hierarchical_irq) {
+struct irq_chip *ic;
 gc->set_config = ep93xx_gpio_set_config;
-girq->chip = &ep93xx_gpio_irq_chip;
+egc->eic = devm_kcalloc(dev, 1,
+sizeof(*egc->eic),
+GFP_KERNEL);
+if (!egc->eic)
+return -ENOMEM;
+egc->eic->irq_offset = bank->irq;
+ic = &egc->eic->ic;
+ic->name = devm_kasprintf(dev, GFP_KERNEL, "gpio-irq-%s", bank->label);
+if (!ic->name)
+return -ENOMEM;
+ep93xx_init_irq_chip(dev, ic);
+girq->chip = ic;
 }
 if (bank->has_irq) {
@@ -389,7 +409,7 @@ static int ep93xx_gpio_add_bank(struct gpio_chip *gc,
 gpio_irq = EP93XX_GPIO_F_IRQ_BASE + i;
 irq_set_chip_data(gpio_irq, &epg->gc[5]);
 irq_set_chip_and_handler(gpio_irq,
-&ep93xx_gpio_irq_chip,
+girq->chip,
 handle_level_irq);
 irq_clear_status_flags(gpio_irq, IRQ_NOREQUEST);
 }
@@ -415,7 +435,7 @@ static int ep93xx_gpio_probe(struct platform_device *pdev)
 return PTR_ERR(epg->base);
 for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++) {
-struct gpio_chip *gc = &epg->gc[i];
+struct ep93xx_gpio_chip *gc = &epg->gc[i];
 struct ep93xx_gpio_bank *bank = &ep93xx_gpio_banks[i];
 if (ep93xx_gpio_add_bank(gc, pdev, epg, bank))


@@ -1093,8 +1093,8 @@ static void emulated_link_detect(struct dc_link *link)
 link->type = dc_connection_none;
 prev_sink = link->local_sink;
-if (prev_sink != NULL)
-dc_sink_retain(prev_sink);
+if (prev_sink)
+dc_sink_release(prev_sink);
 switch (link->connector_signal) {
 case SIGNAL_TYPE_HDMI_TYPE_A: {
@@ -1417,8 +1417,10 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
 * TODO: check if we still need the S3 mode update workaround.
 * If yes, put it here.
 */
-if (aconnector->dc_sink)
+if (aconnector->dc_sink) {
 amdgpu_dm_update_freesync_caps(connector, NULL);
+dc_sink_release(aconnector->dc_sink);
+}
 aconnector->dc_sink = sink;
 dc_sink_retain(aconnector->dc_sink);
@@ -6463,14 +6465,14 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
 ret = PTR_ERR_OR_ZERO(conn_state);
 if (ret)
-goto err;
+goto out;
 /* Attach crtc to drm_atomic_state*/
 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
 ret = PTR_ERR_OR_ZERO(crtc_state);
 if (ret)
-goto err;
+goto out;
 /* force a restore */
 crtc_state->mode_changed = true;
@@ -6480,17 +6482,15 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
 ret = PTR_ERR_OR_ZERO(plane_state);
 if (ret)
-goto err;
+goto out;
 /* Call commit internally with the state we just constructed */
 ret = drm_atomic_commit(state);
-if (!ret)
-return 0;
-err:
-DRM_ERROR("Restoring old state failed with %i\n", ret);
+out:
 drm_atomic_state_put(state);
+if (ret)
+DRM_ERROR("Restoring old state failed with %i\n", ret);
 return ret;
 }


@@ -826,6 +826,8 @@ enum dcn20_clk_src_array_id {
 DCN20_CLK_SRC_PLL0,
 DCN20_CLK_SRC_PLL1,
 DCN20_CLK_SRC_PLL2,
+DCN20_CLK_SRC_PLL3,
+DCN20_CLK_SRC_PLL4,
 DCN20_CLK_SRC_TOTAL_DCN21
 };
@@ -1498,6 +1500,14 @@ static bool construct(
 dcn21_clock_source_create(ctx, ctx->dc_bios,
 CLOCK_SOURCE_COMBO_PHY_PLL2,
 &clk_src_regs[2], false);
+pool->base.clock_sources[DCN20_CLK_SRC_PLL3] =
+dcn21_clock_source_create(ctx, ctx->dc_bios,
+CLOCK_SOURCE_COMBO_PHY_PLL3,
+&clk_src_regs[3], false);
+pool->base.clock_sources[DCN20_CLK_SRC_PLL4] =
+dcn21_clock_source_create(ctx, ctx->dc_bios,
+CLOCK_SOURCE_COMBO_PHY_PLL4,
+&clk_src_regs[4], false);
 pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN21;


@@ -665,6 +665,30 @@ static void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
 SUN4I_TCON1_BASIC5_V_SYNC(vsync) |
 SUN4I_TCON1_BASIC5_H_SYNC(hsync));
+/* Setup the polarity of multiple signals */
+if (tcon->quirks->polarity_in_ch0) {
+val = 0;
+if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+val |= SUN4I_TCON0_IO_POL_HSYNC_POSITIVE;
+if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
+regmap_write(tcon->regs, SUN4I_TCON0_IO_POL_REG, val);
+} else {
+/* according to vendor driver, this bit must be always set */
+val = SUN4I_TCON1_IO_POL_UNKNOWN;
+if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+val |= SUN4I_TCON1_IO_POL_HSYNC_POSITIVE;
+if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+val |= SUN4I_TCON1_IO_POL_VSYNC_POSITIVE;
+regmap_write(tcon->regs, SUN4I_TCON1_IO_POL_REG, val);
+}
 /* Map output pins to channel 1 */
 regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
 SUN4I_TCON_GCTL_IOMAP_MASK,
@@ -1482,6 +1506,7 @@ static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = {
 static const struct sun4i_tcon_quirks sun8i_r40_tv_quirks = {
 .has_channel_1 = true,
+.polarity_in_ch0 = true,
 .set_mux = sun8i_r40_tcon_tv_set_mux,
 };


@@ -153,6 +153,11 @@
 #define SUN4I_TCON1_BASIC5_V_SYNC(height) (((height) - 1) & 0x3ff)
 #define SUN4I_TCON1_IO_POL_REG 0xf0
+/* there is no documentation about this bit */
+#define SUN4I_TCON1_IO_POL_UNKNOWN BIT(26)
+#define SUN4I_TCON1_IO_POL_HSYNC_POSITIVE BIT(25)
+#define SUN4I_TCON1_IO_POL_VSYNC_POSITIVE BIT(24)
 #define SUN4I_TCON1_IO_TRI_REG 0xf4
 #define SUN4I_TCON_ECC_FIFO_REG 0xf8
@@ -224,6 +229,7 @@ struct sun4i_tcon_quirks {
 bool needs_de_be_mux; /* sun6i needs mux to select backend */
 bool needs_edp_reset; /* a80 edp reset needed for tcon0 access */
 bool supports_lvds; /* Does the TCON support an LVDS output? */
+bool polarity_in_ch0; /* some tcon1 channels have polarity bits in tcon0 pol register */
 u8 dclk_min_div; /* minimum divider for TCON0 DCLK */
 /* callback to handle tcon muxing options */


@@ -49,11 +49,9 @@ sun8i_dw_hdmi_mode_valid_h6(struct drm_connector *connector,
 {
 /*
 * Controller support maximum of 594 MHz, which correlates to
-* 4K@60Hz 4:4:4 or RGB. However, for frequencies greater than
-* 340 MHz scrambling has to be enabled. Because scrambling is
-* not yet implemented, just limit to 340 MHz for now.
+* 4K@60Hz 4:4:4 or RGB.
 */
-if (mode->clock > 340000)
+if (mode->clock > 594000)
 return MODE_CLOCK_HIGH;
 return MODE_OK;


@@ -104,29 +104,21 @@ static const struct dw_hdmi_mpll_config sun50i_h6_mpll_cfg[] = {
 static const struct dw_hdmi_curr_ctrl sun50i_h6_cur_ctr[] = {
 /* pixelclk bpp8 bpp10 bpp12 */
+{ 25175000, { 0x0000, 0x0000, 0x0000 }, },
 { 27000000, { 0x0012, 0x0000, 0x0000 }, },
-{ 59400000, { 0x0008, 0x0008, 0x0008 }, },
-{ 72000000, { 0x0008, 0x0008, 0x001b }, },
-{ 74250000, { 0x0013, 0x0013, 0x0013 }, },
-{ 90000000, { 0x0008, 0x001a, 0x001b }, },
-{ 118800000, { 0x001b, 0x001a, 0x001b }, },
-{ 144000000, { 0x001b, 0x001a, 0x0034 }, },
-{ 180000000, { 0x001b, 0x0033, 0x0034 }, },
-{ 216000000, { 0x0036, 0x0033, 0x0034 }, },
-{ 237600000, { 0x0036, 0x0033, 0x001b }, },
-{ 288000000, { 0x0036, 0x001b, 0x001b }, },
-{ 297000000, { 0x0019, 0x001b, 0x0019 }, },
-{ 330000000, { 0x0036, 0x001b, 0x001b }, },
-{ 594000000, { 0x003f, 0x001b, 0x001b }, },
+{ 74250000, { 0x0013, 0x001a, 0x001b }, },
+{ 148500000, { 0x0019, 0x0033, 0x0034 }, },
+{ 297000000, { 0x0019, 0x001b, 0x001b }, },
+{ 594000000, { 0x0010, 0x001b, 0x001b }, },
 { ~0UL, { 0x0000, 0x0000, 0x0000 }, }
 };
 static const struct dw_hdmi_phy_config sun50i_h6_phy_config[] = {
 /*pixelclk symbol term vlev*/
-{ 74250000, 0x8009, 0x0004, 0x0232},
-{ 148500000, 0x8029, 0x0004, 0x0273},
-{ 594000000, 0x8039, 0x0004, 0x014a},
+{ 27000000, 0x8009, 0x0007, 0x02b0 },
+{ 74250000, 0x8009, 0x0006, 0x022d },
+{ 148500000, 0x8029, 0x0006, 0x0270 },
+{ 297000000, 0x8039, 0x0005, 0x01ab },
+{ 594000000, 0x8029, 0x0000, 0x008a },
 { ~0UL, 0x0000, 0x0000, 0x0000}
 };


@@ -205,7 +205,7 @@ static void vc4_plane_reset(struct drm_plane *plane)
 __drm_atomic_helper_plane_reset(plane, &vc4_state->base);
 }
-static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
+static void vc4_dlist_counter_increment(struct vc4_plane_state *vc4_state)
 {
 if (vc4_state->dlist_count == vc4_state->dlist_size) {
 u32 new_size = max(4u, vc4_state->dlist_count * 2);
@@ -220,7 +220,15 @@ static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
 vc4_state->dlist_size = new_size;
 }
-vc4_state->dlist[vc4_state->dlist_count++] = val;
+vc4_state->dlist_count++;
+}
+static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
+{
+unsigned int idx = vc4_state->dlist_count;
+vc4_dlist_counter_increment(vc4_state);
+vc4_state->dlist[idx] = val;
 }
 /* Returns the scl0/scl1 field based on whether the dimensions need to
@@ -871,8 +879,10 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
 * be set when calling vc4_plane_allocate_lbm().
 */
 if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
-vc4_state->y_scaling[1] != VC4_SCALING_NONE)
-vc4_state->lbm_offset = vc4_state->dlist_count++;
+vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
+vc4_state->lbm_offset = vc4_state->dlist_count;
+vc4_dlist_counter_increment(vc4_state);
+}
 if (num_planes > 1) {
 /* Emit Cb/Cr as channel 0 and Y as channel


@@ -53,6 +53,8 @@
 #define STM32F7_I2C_CR1_RXDMAEN BIT(15)
 #define STM32F7_I2C_CR1_TXDMAEN BIT(14)
 #define STM32F7_I2C_CR1_ANFOFF BIT(12)
+#define STM32F7_I2C_CR1_DNF_MASK GENMASK(11, 8)
+#define STM32F7_I2C_CR1_DNF(n) (((n) & 0xf) << 8)
 #define STM32F7_I2C_CR1_ERRIE BIT(7)
 #define STM32F7_I2C_CR1_TCIE BIT(6)
 #define STM32F7_I2C_CR1_STOPIE BIT(5)
@@ -151,7 +153,7 @@
 #define STM32F7_I2C_MAX_SLAVE 0x2
 #define STM32F7_I2C_DNF_DEFAULT 0
-#define STM32F7_I2C_DNF_MAX 16
+#define STM32F7_I2C_DNF_MAX 15
 #define STM32F7_I2C_ANALOG_FILTER_ENABLE 1
 #define STM32F7_I2C_ANALOG_FILTER_DELAY_MIN 50 /* ns */
@@ -657,6 +659,13 @@ static void stm32f7_i2c_hw_config(struct stm32f7_i2c_dev *i2c_dev)
 else
 stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
 STM32F7_I2C_CR1_ANFOFF);
+/* Program the Digital Filter */
+stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1,
+STM32F7_I2C_CR1_DNF_MASK);
+stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
+STM32F7_I2C_CR1_DNF(i2c_dev->setup.dnf));
 stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
 STM32F7_I2C_CR1_PE);
 }


@@ -16,7 +16,7 @@ KCOV_INSTRUMENT_rodata.o := n
 OBJCOPYFLAGS :=
 OBJCOPYFLAGS_rodata_objcopy.o := \
---rename-section .text=.rodata,alloc,readonly,load
+--rename-section .noinstr.text=.rodata,alloc,readonly,load
 targets += rodata.o rodata_objcopy.o
 $(obj)/rodata_objcopy.o: $(obj)/rodata.o FORCE
 $(call if_changed,objcopy)


@@ -5,7 +5,7 @@
 */
 #include "lkdtm.h"
-void notrace lkdtm_rodata_do_nothing(void)
+void noinstr lkdtm_rodata_do_nothing(void)
 {
 /* Does nothing. We just want an architecture agnostic "return". */
 }


@@ -190,6 +190,8 @@ enum enetc_bdr_type {TX, RX};
 #define ENETC_CBS_BW_MASK GENMASK(6, 0)
 #define ENETC_PTCCBSR1(n) (0x1114 + (n) * 8) /* n = 0 to 7*/
 #define ENETC_RSSHASH_KEY_SIZE 40
+#define ENETC_PRSSCAPR 0x1404
+#define ENETC_PRSSCAPR_GET_NUM_RSS(val) (BIT((val) & 0xf) * 32)
 #define ENETC_PRSSK(n) (0x1410 + (n) * 4) /* n = [0..9] */
 #define ENETC_PSIVLANFMR 0x1700
 #define ENETC_PSIVLANFMR_VS BIT(0)


@@ -947,6 +947,51 @@ static int enetc_configure_serdes(struct enetc_ndev_priv *priv)
 return 0;
 }
+/* Initialize the entire shared memory for the flow steering entries
+ * of this port (PF + VFs)
+ */
+static int enetc_init_port_rfs_memory(struct enetc_si *si)
+{
+struct enetc_cmd_rfse rfse = {0};
+struct enetc_hw *hw = &si->hw;
+int num_rfs, i, err = 0;
+u32 val;
+val = enetc_port_rd(hw, ENETC_PRFSCAPR);
+num_rfs = ENETC_PRFSCAPR_GET_NUM_RFS(val);
+for (i = 0; i < num_rfs; i++) {
+err = enetc_set_fs_entry(si, &rfse, i);
+if (err)
+break;
+}
+return err;
+}
+static int enetc_init_port_rss_memory(struct enetc_si *si)
+{
+struct enetc_hw *hw = &si->hw;
+int num_rss, err;
+int *rss_table;
+u32 val;
+val = enetc_port_rd(hw, ENETC_PRSSCAPR);
+num_rss = ENETC_PRSSCAPR_GET_NUM_RSS(val);
+if (!num_rss)
+return 0;
+rss_table = kcalloc(num_rss, sizeof(*rss_table), GFP_KERNEL);
+if (!rss_table)
+return -ENOMEM;
+err = enetc_set_rss_table(si, rss_table, num_rss);
+kfree(rss_table);
+return err;
+}
 static int enetc_pf_probe(struct pci_dev *pdev,
 const struct pci_device_id *ent)
 {
@@ -1007,6 +1052,18 @@ static int enetc_pf_probe(struct pci_dev *pdev,
 goto err_alloc_si_res;
 }
+err = enetc_init_port_rfs_memory(si);
+if (err) {
+dev_err(&pdev->dev, "Failed to initialize RFS memory\n");
+goto err_init_port_rfs;
+}
+err = enetc_init_port_rss_memory(si);
+if (err) {
+dev_err(&pdev->dev, "Failed to initialize RSS memory\n");
+goto err_init_port_rss;
+}
 err = enetc_alloc_msix(priv);
 if (err) {
 dev_err(&pdev->dev, "MSIX alloc failed\n");
@@ -1032,6 +1089,8 @@ static int enetc_pf_probe(struct pci_dev *pdev,
 err_reg_netdev:
 enetc_free_msix(priv);
+err_init_port_rss:
+err_init_port_rfs:
 err_alloc_msix:
 enetc_free_si_resources(priv);
 err_alloc_si_res:


@@ -8563,12 +8563,19 @@ int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
 {
+struct hnae3_handle *handle = &vport->nic;
 struct hclge_dev *hdev = vport->back;
 int reset_try_times = 0;
 int reset_status;
 u16 queue_gid;
 int ret;
+if (queue_id >= handle->kinfo.num_tqps) {
+dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
+queue_id);
+return;
+}
 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);


@@ -4595,7 +4595,22 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
 complete(&adapter->init_done);
 adapter->init_done_rc = -EIO;
 }
-ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
+rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
+if (rc && rc != -EBUSY) {
+/* We were unable to schedule the failover
+ * reset either because the adapter was still
+ * probing (eg: during kexec) or we could not
+ * allocate memory. Clear the failover_pending
+ * flag since no one else will. We ignore
+ * EBUSY because it means either FAILOVER reset
+ * is already scheduled or the adapter is
+ * being removed.
+ */
+netdev_err(netdev,
+"Error %ld scheduling failover reset\n",
+rc);
+adapter->failover_pending = false;
+}
 break;
 case IBMVNIC_CRQ_INIT_COMPLETE:
 dev_info(dev, "Partner initialization complete\n");


@@ -330,7 +330,12 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
 priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
 } else if (!qopt->enable) {
-return stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_DCB);
+ret = stmmac_dma_qmode(priv, priv->ioaddr, queue,
+MTL_QUEUE_DCB);
+if (ret)
+return ret;
+priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
 }
 /* Port Transmit Rate and Speed Divider */


@@ -452,15 +452,17 @@ static void
 mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
 int len, bool more)
 {
-struct page *page = virt_to_head_page(data);
-int offset = data - page_address(page);
 struct sk_buff *skb = q->rx_head;
 struct skb_shared_info *shinfo = skb_shinfo(skb);
 if (shinfo->nr_frags < ARRAY_SIZE(shinfo->frags)) {
-offset += q->buf_offset;
+struct page *page = virt_to_head_page(data);
+int offset = data - page_address(page) + q->buf_offset;
 skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len,
 q->buf_size);
+} else {
+skb_free_frag(data);
 }
 if (more)


@@ -38,10 +38,15 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
 RING_IDX prod, cons;
 struct sk_buff *skb;
 int needed;
+unsigned long flags;
+spin_lock_irqsave(&queue->rx_queue.lock, flags);
 skb = skb_peek(&queue->rx_queue);
-if (!skb)
+if (!skb) {
+spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
 return false;
+}
 needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
 if (skb_is_gso(skb))
@@ -49,6 +54,8 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
 if (skb->sw_hash)
 needed++;
+spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
 do {
 prod = queue->rx.sring->req_prod;
 cons = queue->rx.req_cons;


@@ -3147,6 +3147,8 @@ static const struct pci_device_id nvme_id_table[] = {
 { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */
 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
 NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+{ PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
+.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
 { PCI_DEVICE(0x1d1d, 0x1f1f), /* LighNVM qemu device */
 .driver_data = NVME_QUIRK_LIGHTNVM, },
 { PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */


@@ -32,6 +32,10 @@ MODULE_LICENSE("GPL");
 MODULE_ALIAS("wmi:95F24279-4D7B-4334-9387-ACCDC67EF61C");
 MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
+static int enable_tablet_mode_sw = -1;
+module_param(enable_tablet_mode_sw, int, 0444);
+MODULE_PARM_DESC(enable_tablet_mode_sw, "Enable SW_TABLET_MODE reporting (-1=auto, 0=no, 1=yes)");
 #define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C"
 #define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4"
@@ -654,10 +658,12 @@ static int __init hp_wmi_input_setup(void)
 }
 /* Tablet mode */
-val = hp_wmi_hw_state(HPWMI_TABLET_MASK);
-if (!(val < 0)) {
-__set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
-input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
+if (enable_tablet_mode_sw > 0) {
+val = hp_wmi_hw_state(HPWMI_TABLET_MASK);
+if (val >= 0) {
+__set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
+input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
+}
 }
 err = sparse_keymap_setup(hp_wmi_input_dev, hp_wmi_keymap, NULL);


@@ -7,6 +7,8 @@
  * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
  */
 
+#include <linux/delay.h>
+#include <linux/time64.h>
 #include <linux/ulpi/regs.h>
 
 #include "core.h"
@@ -17,12 +19,22 @@
 		DWC3_GUSB2PHYACC_ADDR(ULPI_ACCESS_EXTENDED) | \
 		DWC3_GUSB2PHYACC_EXTEND_ADDR(a) : DWC3_GUSB2PHYACC_ADDR(a))
 
-static int dwc3_ulpi_busyloop(struct dwc3 *dwc)
+#define DWC3_ULPI_BASE_DELAY	DIV_ROUND_UP(NSEC_PER_SEC, 60000000L)
+
+static int dwc3_ulpi_busyloop(struct dwc3 *dwc, u8 addr, bool read)
 {
-	unsigned count = 1000;
+	unsigned long ns = 5L * DWC3_ULPI_BASE_DELAY;
+	unsigned int count = 1000;
 	u32 reg;
 
+	if (addr >= ULPI_EXT_VENDOR_SPECIFIC)
+		ns += DWC3_ULPI_BASE_DELAY;
+
+	if (read)
+		ns += DWC3_ULPI_BASE_DELAY;
+
 	while (count--) {
+		ndelay(ns);
 		reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYACC(0));
 		if (reg & DWC3_GUSB2PHYACC_DONE)
 			return 0;
@@ -47,7 +59,7 @@ static int dwc3_ulpi_read(struct device *dev, u8 addr)
 	reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr);
 	dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
 
-	ret = dwc3_ulpi_busyloop(dwc);
+	ret = dwc3_ulpi_busyloop(dwc, addr, true);
 	if (ret)
 		return ret;
 
@@ -71,7 +83,7 @@ static int dwc3_ulpi_write(struct device *dev, u8 addr, u8 val)
 	reg |= DWC3_GUSB2PHYACC_WRITE | val;
 	dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
 
-	return dwc3_ulpi_busyloop(dwc);
+	return dwc3_ulpi_busyloop(dwc, addr, false);
 }
 
 static const struct ulpi_ops dwc3_ulpi_ops = {
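
The constants introduced above imply a 60 MHz ULPI clock: DWC3_ULPI_BASE_DELAY works out to DIV_ROUND_UP(NSEC_PER_SEC, 60000000L) = 17 ns per cycle, and the busyloop now waits five cycles per access plus one extra cycle for extended (vendor-specific) addresses and one more for reads before polling DWC3_GUSB2PHYACC_DONE. A standalone sketch of that arithmetic follows (plain userspace C, not driver code; only the two macro names are taken from the hunk above):

#include <stdio.h>

#define NSEC_PER_SEC		1000000000L
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
/* Same definition as in the hunk above: one ULPI clock cycle at 60 MHz. */
#define DWC3_ULPI_BASE_DELAY	DIV_ROUND_UP(NSEC_PER_SEC, 60000000L)

int main(void)
{
	long ns = 5L * DWC3_ULPI_BASE_DELAY;	/* base register access */

	ns += DWC3_ULPI_BASE_DELAY;		/* extended (vendor-specific) address */
	ns += DWC3_ULPI_BASE_DELAY;		/* reads take one extra cycle */

	/* Prints 119: the ndelay() used between polls for an extended read. */
	printf("extended read poll interval: %ld ns\n", ns);
	return 0;
}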

@@ -115,7 +115,6 @@ int xenbus_probe_node(struct xen_bus_type *bus,
 		       const char *type,
 		       const char *nodename);
 int xenbus_probe_devices(struct xen_bus_type *bus);
-void xenbus_probe(void);
 
 void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);

@@ -683,7 +683,7 @@ void unregister_xenstore_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
 
-void xenbus_probe(void)
+static void xenbus_probe(void)
 {
 	xenstored_ready = 1;

@@ -76,6 +76,14 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
 		if (ovl_is_private_xattr(name))
 			continue;
 
+		error = security_inode_copy_up_xattr(name);
+		if (error < 0 && error != -EOPNOTSUPP)
+			break;
+		if (error == 1) {
+			error = 0;
+			continue; /* Discard */
+		}
+
 retry:
 		size = vfs_getxattr(old, name, value, value_size);
 		if (size == -ERANGE)
@@ -99,13 +107,6 @@ retry:
 			goto retry;
 		}
 
-		error = security_inode_copy_up_xattr(name);
-		if (error < 0 && error != -EOPNOTSUPP)
-			break;
-		if (error == 1) {
-			error = 0;
-			continue; /* Discard */
-		}
 		error = vfs_setxattr(new, name, value, size, 0);
 		if (error)
 			break;

@@ -337,7 +337,9 @@ int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
 		goto out;
 
 	if (!value && !upperdentry) {
+		old_cred = ovl_override_creds(dentry->d_sb);
 		err = vfs_getxattr(realdentry, name, NULL, 0);
+		revert_creds(old_cred);
 		if (err < 0)
 			goto out_drop_write;
 	}

@@ -79,7 +79,7 @@ static void ovl_dentry_release(struct dentry *dentry)
 static struct dentry *ovl_d_real(struct dentry *dentry,
 				 const struct inode *inode)
 {
-	struct dentry *real;
+	struct dentry *real = NULL, *lower;
 
 	/* It's an overlay file */
 	if (inode && d_inode(dentry) == inode)
@@ -98,9 +98,10 @@ static struct dentry *ovl_d_real(struct dentry *dentry,
 	if (real && !inode && ovl_has_upperdata(d_inode(dentry)))
 		return real;
 
-	real = ovl_dentry_lowerdata(dentry);
-	if (!real)
+	lower = ovl_dentry_lowerdata(dentry);
+	if (!lower)
 		goto bug;
+	real = lower;
 
 	/* Handle recursion */
 	real = d_real(real, inode);
@@ -108,8 +109,10 @@ static struct dentry *ovl_d_real(struct dentry *dentry,
 	if (!inode || inode == d_inode(real))
 		return real;
 bug:
-	WARN(1, "ovl_d_real(%pd4, %s:%lu): real dentry not found\n", dentry,
-	     inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0);
+	WARN(1, "%s(%pd4, %s:%lu): real dentry (%p/%lu) not found\n",
+	     __func__, dentry, inode ? inode->i_sb->s_id : "NULL",
+	     inode ? inode->i_ino : 0, real,
+	     real && d_inode(real) ? d_inode(real)->i_ino : 0);
 	return dentry;
 }

@@ -53,6 +53,9 @@ extern char __ctors_start[], __ctors_end[];
 /* Start and end of .opd section - used for function descriptors. */
 extern char __start_opd[], __end_opd[];
 
+/* Start and end of instrumentation protected text section */
+extern char __noinstr_text_start[], __noinstr_text_end[];
+
 extern __visible const void __nosave_begin, __nosave_end;
 
 /* Function descriptor handling (if any).  Override in asm/sections.h */

@@ -396,7 +396,7 @@
 	}								\
 									\
 	/* Built-in firmware blobs */					\
-	.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {		\
+	.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) {	\
 		__start_builtin_fw = .;					\
 		KEEP(*(.builtin_fw))					\
 		__end_builtin_fw = .;					\
@@ -510,6 +510,15 @@
 #define RODATA          RO_DATA_SECTION(4096)
 #define RO_DATA(align)  RO_DATA_SECTION(align)
 
+/*
+ * Non-instrumentable text section
+ */
+#define NOINSTR_TEXT							\
+		ALIGN_FUNCTION();					\
+		__noinstr_text_start = .;				\
+		*(.noinstr.text)					\
+		__noinstr_text_end = .;
+
 /*
  * .text section. Map to function alignment to avoid address changes
  * during second ld run in second ld pass when generating System.map
@@ -524,6 +533,7 @@
 		*(TEXT_MAIN .text.fixup)				\
 		*(.text.unlikely .text.unlikely.*)			\
 		*(.text.unknown .text.unknown.*)			\
+		NOINSTR_TEXT						\
 		*(.text..refcount)					\
 		*(.ref.text)						\
 	MEM_KEEP(init.text*)						\

@@ -134,12 +134,65 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 /* Annotate a C jump table to allow objtool to follow the code flow */
 #define __annotate_jump_table __section(.rodata..c_jump_table)
 
+#ifdef CONFIG_DEBUG_ENTRY
+/* Begin/end of an instrumentation safe region */
+#define instrumentation_begin() ({					\
+	asm volatile("%c0:\n\t"						\
+		     ".pushsection .discard.instr_begin\n\t"		\
+		     ".long %c0b - .\n\t"				\
+		     ".popsection\n\t" : : "i" (__COUNTER__));		\
+})
+
+/*
+ * Because instrumentation_{begin,end}() can nest, objtool validation considers
+ * _begin() a +1 and _end() a -1 and computes a sum over the instructions.
+ * When the value is greater than 0, we consider instrumentation allowed.
+ *
+ * There is a problem with code like:
+ *
+ * noinstr void foo()
+ * {
+ *	instrumentation_begin();
+ *	...
+ *	if (cond) {
+ *		instrumentation_begin();
+ *		...
+ *		instrumentation_end();
+ *	}
+ *	bar();
+ *	instrumentation_end();
+ * }
+ *
+ * If instrumentation_end() would be an empty label, like all the other
+ * annotations, the inner _end(), which is at the end of a conditional block,
+ * would land on the instruction after the block.
+ *
+ * If we then consider the sum of the !cond path, we'll see that the call to
+ * bar() is with a 0-value, even though, we meant it to happen with a positive
+ * value.
+ *
+ * To avoid this, have _end() be a NOP instruction, this ensures it will be
+ * part of the condition block and does not escape.
+ */
+#define instrumentation_end() ({					\
+	asm volatile("%c0: nop\n\t"					\
+		     ".pushsection .discard.instr_end\n\t"		\
+		     ".long %c0b - .\n\t"				\
+		     ".popsection\n\t" : : "i" (__COUNTER__));		\
+})
+#endif /* CONFIG_DEBUG_ENTRY */
+
 #else
 #define annotate_reachable()
 #define annotate_unreachable()
 #define __annotate_jump_table
 #endif
 
+#ifndef instrumentation_begin
+#define instrumentation_begin()		do { } while(0)
+#define instrumentation_end()		do { } while(0)
+#endif
+
 #ifndef ASM_UNREACHABLE
 # define ASM_UNREACHABLE
 #endif
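
The long comment in the hunk above explains why instrumentation_end() has to emit a NOP rather than an empty label. Below is a userspace-compilable sketch of the exact nesting pattern that comment describes, with the kernel macros stubbed out as no-ops so the shape of the code can be seen and built; it is an illustration only, not kernel code:

#include <stdio.h>

/* Stubs only: in the kernel these emit records into .discard.instr_begin /
 * .discard.instr_end (plus a NOP for _end) that objtool sums up per path. */
#define instrumentation_begin()	do { } while (0)
#define instrumentation_end()	do { } while (0)
#define noinstr			/* kernel: noinline notrace, placed in .noinstr.text */

static void bar(void)
{
	puts("bar(): must run with a positive instrumentation count");
}

/* Mirrors the foo()/bar() example from the comment block: the inner
 * instrumentation_end() must stay inside the conditional block, which is
 * why the real macro is a NOP instruction rather than a bare label. */
noinstr static void foo(int cond)
{
	instrumentation_begin();
	if (cond) {
		instrumentation_begin();
		puts("slow path");
		instrumentation_end();
	}
	bar();
	instrumentation_end();
}

int main(void)
{
	foo(0);
	foo(1);
	return 0;
}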

@@ -118,6 +118,10 @@ struct ftrace_likely_data {
 #define notrace			__attribute__((__no_instrument_function__))
 #endif
 
+/* Section for code which can't be instrumented at all */
+#define noinstr								\
+	noinline notrace __attribute((__section__(".noinstr.text")))
+
 /*
  * it doesn't make sense on ARM (currently the only user of __naked)
  * to trace naked functions because then mcount is called without

@@ -4044,6 +4044,7 @@ static inline void netif_tx_disable(struct net_device *dev)
 
 	local_bh_disable();
 	cpu = smp_processor_id();
+	spin_lock(&dev->tx_global_lock);
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 
@@ -4051,6 +4052,7 @@ static inline void netif_tx_disable(struct net_device *dev)
 		netif_tx_stop_queue(txq);
 		__netif_tx_unlock(txq);
 	}
+	spin_unlock(&dev->tx_global_lock);
 	local_bh_enable();
 }

@@ -261,7 +261,13 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
 {
 	i->count = count;
 }
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, struct iov_iter *i);
+
+struct csum_state {
+	__wsum csum;
+	size_t off;
+};
+
+size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
 bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
 size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,

@@ -187,8 +187,6 @@ void xs_suspend_cancel(void);
 
 struct work_struct;
 
-void xenbus_probe(void);
-
 #define XENBUS_IS_ERR_READ(str) ({			\
 	if (!IS_ERR(str) && strlen(str) == 0) {		\
 		kfree(str);				\

@@ -112,6 +112,8 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 
 	/* hash table size must be power of 2 */
 	n_buckets = roundup_pow_of_two(attr->max_entries);
+	if (!n_buckets)
+		return ERR_PTR(-E2BIG);
 
 	cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
 	cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
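
One way to read the new check (an interpretation, not text from the patch): n_buckets is a u32, so for a max_entries above 2^31 the next power of two is 2^32, which truncates to 0 and would corrupt the cost and bucket-table sizing below. A small standalone C illustration, using a simplified stand-in for roundup_pow_of_two():

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's roundup_pow_of_two(). */
static uint64_t roundup_pow_of_two64(uint64_t n)
{
	uint64_t p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	uint32_t max_entries = 0x80000001u;	/* just above 2^31 */
	uint32_t n_buckets = (uint32_t)roundup_pow_of_two64(max_entries);

	/* Prints 0: 2^32 does not fit in a u32, hence the new !n_buckets check. */
	printf("n_buckets = %u\n", n_buckets);
	return 0;
}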

@@ -3627,6 +3627,7 @@ static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf,
 {
 	struct psi_trigger *new;
 	struct cgroup *cgrp;
+	struct psi_group *psi;
 
 	cgrp = cgroup_kn_lock_live(of->kn, false);
 	if (!cgrp)
@@ -3635,7 +3636,8 @@ static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf,
 	cgroup_get(cgrp);
 	cgroup_kn_unlock(of->kn);
 
-	new = psi_trigger_create(&cgrp->psi, buf, nbytes, res);
+	psi = cgroup_ino(cgrp) == 1 ? &psi_system : &cgrp->psi;
+	new = psi_trigger_create(psi, buf, nbytes, res);
 	if (IS_ERR(new)) {
 		cgroup_put(cgrp);
 		return PTR_ERR(new);

@@ -2498,7 +2498,7 @@ trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
 	    (entry = this_cpu_read(trace_buffered_event))) {
 		/* Try to use the per cpu buffer first */
 		val = this_cpu_inc_return(trace_buffered_event_cnt);
-		if (val == 1) {
+		if ((len < (PAGE_SIZE - sizeof(*entry))) && val == 1) {
 			trace_event_setup(entry, type, flags, pc);
 			entry->array[0] = len;
 			return entry;

@@ -1107,7 +1107,8 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
 	mutex_lock(&event_mutex);
 	list_for_each_entry(file, &tr->events, list) {
 		call = file->event_call;
-		if (!trace_event_name(call) || !call->class || !call->class->reg)
+		if ((call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) ||
+		    !trace_event_name(call) || !call->class || !call->class->reg)
 			continue;
 
 		if (system && strcmp(call->class->system, system->name) != 0)

@@ -570,12 +570,13 @@ static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
 }
 
 static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
-				__wsum *csum, struct iov_iter *i)
+					 struct csum_state *csstate,
+					 struct iov_iter *i)
 {
 	struct pipe_inode_info *pipe = i->pipe;
+	__wsum sum = csstate->csum;
+	size_t off = csstate->off;
 	size_t n, r;
-	size_t off = 0;
-	__wsum sum = *csum;
 	int idx;
 
 	if (!sanity(i))
@@ -596,7 +597,8 @@ static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
 		addr += chunk;
 	}
 	i->count -= bytes;
-	*csum = sum;
+	csstate->csum = sum;
+	csstate->off = off;
 	return bytes;
 }
@@ -1484,18 +1486,19 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
 }
 EXPORT_SYMBOL(csum_and_copy_from_iter_full);
 
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
+size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
 			     struct iov_iter *i)
 {
+	struct csum_state *csstate = _csstate;
 	const char *from = addr;
-	__wsum *csum = csump;
 	__wsum sum, next;
-	size_t off = 0;
+	size_t off;
 
 	if (unlikely(iov_iter_is_pipe(i)))
-		return csum_and_copy_to_pipe_iter(addr, bytes, csum, i);
+		return csum_and_copy_to_pipe_iter(addr, bytes, _csstate, i);
 
-	sum = *csum;
+	sum = csstate->csum;
+	off = csstate->off;
 	if (unlikely(iov_iter_is_discard(i))) {
 		WARN_ON(1);	/* for now */
 		return 0;
@@ -1524,7 +1527,8 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
 		off += v.iov_len;
 	})
 	)
-	*csum = sum;
+	csstate->csum = sum;
+	csstate->off = off;
 	return bytes;
 }
 EXPORT_SYMBOL(csum_and_copy_to_iter);
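
For background on why struct csum_state carries an offset as well as a sum (my reading of the change, not wording from the patch): in a 16-bit one's-complement checksum, a chunk whose data lands at an odd byte offset has to have its folded sum byte-swapped before it is merged, so the running offset must survive across successive csum_and_copy_to_iter() calls instead of silently restarting at zero. A self-contained userspace sketch of that property, using simplified helpers rather than the kernel's csum routines:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Fold a 32-bit accumulator down to 16 bits with end-around carry. */
static uint32_t csum_fold32(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

/* One's-complement sum of a buffer, bytes taken as big-endian 16-bit words. */
static uint32_t csum_partial(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += ((uint32_t)buf[i] << 8) | buf[i + 1];
	if (len & 1)
		sum += (uint32_t)buf[len - 1] << 8;
	return csum_fold32(sum);
}

/* Merge a chunk's folded sum at byte offset 'off': odd offsets need a swap. */
static uint32_t csum_block_add(uint32_t sum, uint32_t sum2, size_t off)
{
	if (off & 1)
		sum2 = ((sum2 & 0xff) << 8) | (sum2 >> 8);
	return csum_fold32(sum + sum2);
}

int main(void)
{
	const uint8_t data[] = "abcdefghijk";
	size_t len = strlen((const char *)data);
	size_t split = 5;			/* second chunk starts at an odd offset */
	uint32_t whole = csum_partial(data, len);
	uint32_t piecewise = csum_block_add(csum_partial(data, split),
					    csum_partial(data + split, len - split),
					    split);

	/* The two values match only because csum_block_add() honours the offset. */
	printf("whole=%04x piecewise=%04x\n", whole, piecewise);
	return 0;
}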

@@ -700,8 +700,16 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 				      struct iov_iter *to, int len,
 				      __wsum *csump)
 {
-	return __skb_datagram_iter(skb, offset, to, len, true,
-			csum_and_copy_to_iter, csump);
+	struct csum_state csdata = { .csum = *csump };
+	int ret;
+
+	ret = __skb_datagram_iter(skb, offset, to, len, true,
+				  csum_and_copy_to_iter, &csdata);
+	if (ret)
+		return ret;
+
+	*csump = csdata.csum;
+	return 0;
 }
 
 /**

@@ -5275,10 +5275,11 @@ static void gro_normal_list(struct napi_struct *napi)
 /* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
  * pass the whole batch up to the stack.
  */
-static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
+static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
 {
 	list_add_tail(&skb->list, &napi->rx_list);
-	if (++napi->rx_count >= gro_normal_batch)
+	napi->rx_count += segs;
+	if (napi->rx_count >= gro_normal_batch)
 		gro_normal_list(napi);
 }
@@ -5317,7 +5318,7 @@ static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
 	}
 
 out:
-	gro_normal_one(napi, skb);
+	gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
 	return NET_RX_SUCCESS;
 }
@@ -5608,7 +5609,7 @@ static gro_result_t napi_skb_finish(struct napi_struct *napi,
 {
 	switch (ret) {
 	case GRO_NORMAL:
-		gro_normal_one(napi, skb);
+		gro_normal_one(napi, skb, 1);
 		break;
 
 	case GRO_DROP:
@@ -5696,7 +5697,7 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi,
 		__skb_push(skb, ETH_HLEN);
 		skb->protocol = eth_type_trans(skb, skb->dev);
 		if (ret == GRO_NORMAL)
-			gro_normal_one(napi, skb);
+			gro_normal_one(napi, skb, 1);
 		break;
 
 	case GRO_DROP:

@@ -403,18 +403,21 @@ static int dsa_switch_setup(struct dsa_switch *ds)
 		ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
 		if (!ds->slave_mii_bus) {
 			err = -ENOMEM;
-			goto unregister_notifier;
+			goto teardown;
 		}
 
 		dsa_slave_mii_bus_init(ds);
 
 		err = mdiobus_register(ds->slave_mii_bus);
 		if (err < 0)
-			goto unregister_notifier;
+			goto teardown;
 	}
 
 	return 0;
 
+teardown:
+	if (ds->ops->teardown)
+		ds->ops->teardown(ds);
+
 unregister_notifier:
 	dsa_switch_unregister_notifier(ds);
 unregister_devlink:

@@ -1091,7 +1091,8 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
 		 * Let nf_ct_resolve_clash() deal with this later.
 		 */
 		if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
-				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
+				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
+		    nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL))
 			continue;
 
 		NF_CT_STAT_INC_ATOMIC(net, found);

@@ -354,7 +354,7 @@ static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
 		return -1;
 
 	tcph = (void *)(skb_network_header(skb) + thoff);
-	inet_proto_csum_replace2(&tcph->check, skb, port, new_port, true);
+	inet_proto_csum_replace2(&tcph->check, skb, port, new_port, false);
 
 	return 0;
 }
@@ -371,7 +371,7 @@ static int nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
 	udph = (void *)(skb_network_header(skb) + thoff);
 	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
 		inet_proto_csum_replace2(&udph->check, skb, port,
-					 new_port, true);
+					 new_port, false);
 		if (!udph->check)
 			udph->check = CSUM_MANGLED_0;
 	}

@@ -7696,6 +7696,17 @@ int __nft_release_basechain(struct nft_ctx *ctx)
 }
 EXPORT_SYMBOL_GPL(__nft_release_basechain);
 
+static void __nft_release_hooks(struct net *net)
+{
+	struct nft_table *table;
+	struct nft_chain *chain;
+
+	list_for_each_entry(table, &net->nft.tables, list) {
+		list_for_each_entry(chain, &table->chains, list)
+			nf_tables_unregister_hook(net, table, chain);
+	}
+}
+
 static void __nft_release_tables(struct net *net)
 {
 	struct nft_flowtable *flowtable, *nf;
@@ -7711,10 +7722,6 @@ static void __nft_release_tables(struct net *net)
 
 	list_for_each_entry_safe(table, nt, &net->nft.tables, list) {
 		ctx.family = table->family;
-
-		list_for_each_entry(chain, &table->chains, list)
-			nf_tables_unregister_hook(net, table, chain);
-		/* No packets are walking on these chains anymore. */
 		ctx.table = table;
 		list_for_each_entry(chain, &table->chains, list) {
 			ctx.chain = chain;
@@ -7762,6 +7769,11 @@ static int __net_init nf_tables_init_net(struct net *net)
 	return 0;
 }
 
+static void __net_exit nf_tables_pre_exit_net(struct net *net)
+{
+	__nft_release_hooks(net);
+}
+
 static void __net_exit nf_tables_exit_net(struct net *net)
 {
 	mutex_lock(&net->nft.commit_mutex);
@@ -7774,8 +7786,9 @@ static void __net_exit nf_tables_exit_net(struct net *net)
 }
 
 static struct pernet_operations nf_tables_net_ops = {
 	.init	= nf_tables_init_net,
-	.exit	= nf_tables_exit_net,
+	.pre_exit = nf_tables_pre_exit_net,
+	.exit	= nf_tables_exit_net,
 };
 
 static int __init nf_tables_module_init(void)

@@ -152,7 +152,8 @@ static void recent_entry_remove(struct recent_table *t, struct recent_entry *e)
 /*
  * Drop entries with timestamps older then 'time'.
  */
-static void recent_entry_reap(struct recent_table *t, unsigned long time)
+static void recent_entry_reap(struct recent_table *t, unsigned long time,
+			      struct recent_entry *working, bool update)
 {
 	struct recent_entry *e;
 
@@ -161,6 +162,12 @@ static void recent_entry_reap(struct recent_table *t, unsigned long time)
 	 */
 	e = list_entry(t->lru_list.next, struct recent_entry, lru_list);
 
+	/*
+	 * Do not reap the entry which are going to be updated.
+	 */
+	if (e == working && update)
+		return;
+
 	/*
 	 * The last time stamp is the most recent.
 	 */
@@ -303,7 +310,8 @@ recent_mt(const struct sk_buff *skb, struct xt_action_param *par)
 
 		/* info->seconds must be non-zero */
 		if (info->check_set & XT_RECENT_REAP)
-			recent_entry_reap(t, time);
+			recent_entry_reap(t, time, e,
+					  info->check_set & XT_RECENT_UPDATE && ret);
 	}
 
 	if (info->check_set & XT_RECENT_SET ||

@@ -80,6 +80,12 @@ static ssize_t qrtr_tun_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	ssize_t ret;
 	void *kbuf;
 
+	if (!len)
+		return -EINVAL;
+
+	if (len > KMALLOC_MAX_SIZE)
+		return -ENOMEM;
+
 	kbuf = kzalloc(len, GFP_KERNEL);
 	if (!kbuf)
 		return -ENOMEM;

@@ -532,6 +532,9 @@ int rds_rdma_extra_size(struct rds_rdma_args *args,
 	if (args->nr_local == 0)
 		return -EINVAL;
 
+	if (args->nr_local > UIO_MAXIOV)
+		return -EMSGSIZE;
+
 	iov->iov = kcalloc(args->nr_local,
 			   sizeof(struct rds_iovec),
 			   GFP_KERNEL);

@@ -507,8 +507,6 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
 		rxrpc_disconnect_call(call);
 	if (call->security)
 		call->security->free_call_crypto(call);
-
-	rxrpc_cleanup_ring(call);
 	_leave("");
 }

@@ -215,6 +215,12 @@ static void sctp_transport_seq_stop(struct seq_file *seq, void *v)
 {
 	struct sctp_ht_iter *iter = seq->private;
 
+	if (v && v != SEQ_START_TOKEN) {
+		struct sctp_transport *transport = v;
+
+		sctp_transport_put(transport);
+	}
+
 	sctp_transport_walk_stop(&iter->hti);
 }
@@ -222,6 +228,12 @@ static void *sctp_transport_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	struct sctp_ht_iter *iter = seq->private;
 
+	if (v && v != SEQ_START_TOKEN) {
+		struct sctp_transport *transport = v;
+
+		sctp_transport_put(transport);
+	}
+
 	++*pos;
 
 	return sctp_transport_get_next(seq_file_net(seq), &iter->hti);
@@ -277,8 +289,6 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
 		   sk->sk_rcvbuf);
 	seq_printf(seq, "\n");
 
-	sctp_transport_put(transport);
-
 	return 0;
 }
@@ -354,8 +364,6 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
 		seq_printf(seq, "\n");
 	}
 
-	sctp_transport_put(transport);
-
 	return 0;
 }

@@ -808,10 +808,12 @@ static int vsock_shutdown(struct socket *sock, int mode)
 	 */
 
 	sk = sock->sk;
+
+	lock_sock(sk);
 	if (sock->state == SS_UNCONNECTED) {
 		err = -ENOTCONN;
 		if (sk->sk_type == SOCK_STREAM)
-			return err;
+			goto out;
 	} else {
 		sock->state = SS_DISCONNECTING;
 		err = 0;
@@ -820,10 +822,8 @@ static int vsock_shutdown(struct socket *sock, int mode)
 	/* Receive and send shutdowns are treated alike. */
 	mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
 	if (mode) {
-		lock_sock(sk);
 		sk->sk_shutdown |= mode;
 		sk->sk_state_change(sk);
-		release_sock(sk);
 
 		if (sk->sk_type == SOCK_STREAM) {
 			sock_reset_flag(sk, SOCK_DONE);
@@ -831,6 +831,8 @@ static int vsock_shutdown(struct socket *sock, int mode)
 		}
 	}
 
+out:
+	release_sock(sk);
 	return err;
 }
@@ -1099,7 +1101,6 @@ static void vsock_connect_timeout(struct work_struct *work)
 {
 	struct sock *sk;
 	struct vsock_sock *vsk;
-	int cancel = 0;
 
 	vsk = container_of(work, struct vsock_sock, connect_work.work);
 	sk = sk_vsock(vsk);
@@ -1110,11 +1111,9 @@ static void vsock_connect_timeout(struct work_struct *work)
 		sk->sk_state = TCP_CLOSE;
 		sk->sk_err = ETIMEDOUT;
 		sk->sk_error_report(sk);
-		cancel = 1;
+		vsock_transport_cancel_pkt(vsk);
 	}
 	release_sock(sk);
-	if (cancel)
-		vsock_transport_cancel_pkt(vsk);
 
 	sock_put(sk);
 }

@@ -464,14 +464,10 @@ static void hvs_shutdown_lock_held(struct hvsock *hvs, int mode)
 
 static int hvs_shutdown(struct vsock_sock *vsk, int mode)
 {
-	struct sock *sk = sk_vsock(vsk);
-
 	if (!(mode & SEND_SHUTDOWN))
 		return 0;
 
-	lock_sock(sk);
 	hvs_shutdown_lock_held(vsk->trans, mode);
-	release_sock(sk);
 	return 0;
 }

@@ -1100,10 +1100,10 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
 
 	vsk = vsock_sk(sk);
 
-	space_available = virtio_transport_space_update(sk, pkt);
-
 	lock_sock(sk);
 
+	space_available = virtio_transport_space_update(sk, pkt);
+
 	/* Update CID in case it has changed after a transport reset event */
 	vsk->local_addr.svm_cid = dst.svm_cid;

@@ -960,7 +960,7 @@ static void check_section(const char *modname, struct elf_info *elf,
 
 #define DATA_SECTIONS ".data", ".data.rel"
 #define TEXT_SECTIONS ".text", ".text.unlikely", ".sched.text", \
-		".kprobes.text", ".cpuidle.text"
+		".kprobes.text", ".cpuidle.text", ".noinstr.text"
 #define OTHER_TEXT_SECTIONS ".ref.text", ".head.text", ".spinlock.text", \
 		".fixup", ".entry.text", ".exception.text", ".text.*", \
 		".coldtext"

@@ -371,10 +371,11 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
 {
 	int size, ret;
 	kuid_t kroot;
+	u32 nsmagic, magic;
 	uid_t root, mappedroot;
 	char *tmpbuf = NULL;
 	struct vfs_cap_data *cap;
-	struct vfs_ns_cap_data *nscap;
+	struct vfs_ns_cap_data *nscap = NULL;
 	struct dentry *dentry;
 	struct user_namespace *fs_ns;
@@ -396,46 +397,61 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
 	fs_ns = inode->i_sb->s_user_ns;
 	cap = (struct vfs_cap_data *) tmpbuf;
 	if (is_v2header((size_t) ret, cap)) {
-		/* If this is sizeof(vfs_cap_data) then we're ok with the
-		 * on-disk value, so return that.  */
-		if (alloc)
-			*buffer = tmpbuf;
-		else
-			kfree(tmpbuf);
-		return ret;
-	} else if (!is_v3header((size_t) ret, cap)) {
-		kfree(tmpbuf);
-		return -EINVAL;
+		root = 0;
+	} else if (is_v3header((size_t) ret, cap)) {
+		nscap = (struct vfs_ns_cap_data *) tmpbuf;
+		root = le32_to_cpu(nscap->rootid);
+	} else {
+		size = -EINVAL;
+		goto out_free;
 	}
 
-	nscap = (struct vfs_ns_cap_data *) tmpbuf;
-	root = le32_to_cpu(nscap->rootid);
 	kroot = make_kuid(fs_ns, root);
 
 	/* If the root kuid maps to a valid uid in current ns, then return
 	 * this as a nscap. */
 	mappedroot = from_kuid(current_user_ns(), kroot);
 	if (mappedroot != (uid_t)-1 && mappedroot != (uid_t)0) {
+		size = sizeof(struct vfs_ns_cap_data);
 		if (alloc) {
-			*buffer = tmpbuf;
+			if (!nscap) {
+				/* v2 -> v3 conversion */
+				nscap = kzalloc(size, GFP_ATOMIC);
+				if (!nscap) {
+					size = -ENOMEM;
+					goto out_free;
+				}
+				nsmagic = VFS_CAP_REVISION_3;
+				magic = le32_to_cpu(cap->magic_etc);
+				if (magic & VFS_CAP_FLAGS_EFFECTIVE)
+					nsmagic |= VFS_CAP_FLAGS_EFFECTIVE;
+				memcpy(&nscap->data, &cap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
+				nscap->magic_etc = cpu_to_le32(nsmagic);
+			} else {
+				/* use allocated v3 buffer */
+				tmpbuf = NULL;
+			}
 			nscap->rootid = cpu_to_le32(mappedroot);
-		} else
-			kfree(tmpbuf);
-		return size;
+			*buffer = nscap;
+		}
+		goto out_free;
 	}
 
 	if (!rootid_owns_currentns(kroot)) {
-		kfree(tmpbuf);
-		return -EOPNOTSUPP;
+		size = -EOVERFLOW;
+		goto out_free;
 	}
 
 	/* This comes from a parent namespace.  Return as a v2 capability */
 	size = sizeof(struct vfs_cap_data);
 	if (alloc) {
-		*buffer = kmalloc(size, GFP_ATOMIC);
-		if (*buffer) {
-			struct vfs_cap_data *cap = *buffer;
-			__le32 nsmagic, magic;
+		if (nscap) {
+			/* v3 -> v2 conversion */
+			cap = kzalloc(size, GFP_ATOMIC);
+			if (!cap) {
+				size = -ENOMEM;
+				goto out_free;
+			}
 			magic = VFS_CAP_REVISION_2;
 			nsmagic = le32_to_cpu(nscap->magic_etc);
 			if (nsmagic & VFS_CAP_FLAGS_EFFECTIVE)
@@ -443,9 +459,12 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
 			memcpy(&cap->data, &nscap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
 			cap->magic_etc = cpu_to_le32(magic);
 		} else {
-			size = -ENOMEM;
+			/* use unconverted v2 */
+			tmpbuf = NULL;
 		}
+		*buffer = cap;
 	}
+out_free:
 	kfree(tmpbuf);
 	return size;
 }

@@ -26,6 +26,7 @@
 #include <inttypes.h>
 #include <linux/errqueue.h>
 #include <linux/if_ether.h>
+#include <linux/if_packet.h>
 #include <linux/ipv6.h>
 #include <linux/net_tstamp.h>
 #include <netdb.h>
@@ -34,7 +35,6 @@
 #include <netinet/ip.h>
 #include <netinet/udp.h>
 #include <netinet/tcp.h>
-#include <netpacket/packet.h>
 #include <poll.h>
 #include <stdarg.h>
 #include <stdbool.h>
@@ -396,12 +396,12 @@ static void do_test(int family, unsigned int report_opt)
 	total_len = cfg_payload_len;
 	if (cfg_use_pf_packet || cfg_proto == SOCK_RAW) {
 		total_len += sizeof(struct udphdr);
-		if (cfg_use_pf_packet || cfg_ipproto == IPPROTO_RAW)
+		if (cfg_use_pf_packet || cfg_ipproto == IPPROTO_RAW) {
 			if (family == PF_INET)
 				total_len += sizeof(struct iphdr);
 			else
 				total_len += sizeof(struct ipv6hdr);
-
+		}
 		/* special case, only rawv6_sendmsg:
 		 *   pass proto in sin6_port if not connected
 		 *   also see ANK comment in net/ipv4/raw.c