This is the 5.4.125 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmDB+Z8ACgkQONu9yGCS
 aT5qig//WVut449WUeYQLKD8rAB5CUVm2Xl3509Ts8W6LSzYGHiYv1SRVeH2y1lS
 QnfCnBciopl2UyYxqXGQwoRYdY1T2E/MWUmwGUk0/qlZYOzg5xQ368Shm0lvohJI
 DsywZrYqJDUCoeyXoWJYrq/3RiAvMK30teKDcn1A2HhhWdo0nsGLp1GUX396ptcV
 3xw2ZvCVwuikwxq5jlQKUEkH59TD/ZkCzvn9gfd86FY1R0ohApLJckhGIuT3wA1c
 Tfekgvfngx1HcEWIAzWFqZPoB8mOF5pn06yZhuPdMKa8UUq78ckN7kbchERj2wJD
 cDFSQQrMI3nL9sA8ryYV1YFl3fyGX5Epm4O465whzjKWoZ9HwN+iwl6Qv+kOmX41
 YUmpUplhsPN+I7+cX1jF7Ohw583uDbFPw6XbyZ0ArZr03JVVv4Vjrv5QA9fVHR06
 OP7+zEUlBtu/g3k0Bj5MU8UKem0shXavkPqukrtB+MhrXh2VngEXEVOvKMOFgA4b
 BnBEga4SrCR/wB+SucIV4fqzV0tq4HD/cPpy67OafrWoqhwlnBsMCQUd+puxkCnM
 y+eEoRwTzRSW+U9y8KdAERW8qSR/vCyKCUoaKxOV3Jj0v8xp0Y6VHKlKmb//w5Gn
 Lk7sNjD60Um3Au53A5pJvh8qNg+OsNc46sEmGGndE4Mrada93gE=
 =O2C+
 -----END PGP SIGNATURE-----
gpgsig -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEdQaENiSDAlGTDEbB7G51OISzHs0FAmDCIf8ACgkQ7G51OISz
 Hs04IA/9HPtgSX+5Uha8T9IWUKxKwK8BwXAnBnowBkt76X50PLR7/i1wD3WmNdMc
 nVe+bScX6gTjhbqICO2toDZ+lcqWsM00cHPnjZGGwnGDFIvlbxYAYZt/dPTHxgze
 YXDu7dxY5Cb3tAYBX1Ng165Rti9gJC8QNGOLXiCOUhDSTNMepe02wi6bKR3jN/hm
 jjl02Qo9BQI70a1w3zOFHH8ffQuUdOoTFji8hq2u+cJ1tP6FuftJyPvIAm+MDLNd
 83dg1P4eg73Qk+tp93OKrSG3pnCngxgveCB+U3SQnCd2b83asNVigjxoxkrZZJ7L
 9kxq4ifyAfH9TLQJ5lo2xOdQ1ra0+KTBwYKr2X1/N5mrXnmi9OCt54tXFnkPcJLN
 S0HAP9cCf+NtoACirUfNETeZJDaISvHiYT8XhbJ+y1mr+3pbN/4DQ3P/4u1ykoyF
 XBQwuAEw6ljz22HbZBuLrsB339CSwVuJbSaFrmeuUsX7AKIA/p45lr6L5JQssTyD
 a0NWKWFMJ7rV/f10u/B24kZwcSNghx1xMuX8hfBnyPtbR4ChnlnKnSSLsF+AJQEW
 7chnIejPa0UwQAkZmd/b1qaqZq5oIct3BFRZUfcledlej8HweLNJGyy8+T5ZSu5d
 G5ku8CU2hIgnKSBaqF7AaqkcDga2fjelGJetJjqdPwgIzMJmqnE=
 =b+bL
 -----END PGP SIGNATURE-----

Merge tag 'v5.4.125' into 5.4-2.3.x-imx

This is the 5.4.125 stable release

Signed-off-by: Andrey Zhizhikin <andrey.zhizhikin@leica-geosystems.com>
commit 276aedc8f1
Author: Andrey Zhizhikin <andrey.zhizhikin@leica-geosystems.com>
Date:   2021-06-10 14:30:21 +00:00

86 changed files with 930 additions and 299 deletions

@@ -461,13 +461,15 @@ or iterations will move the index to the first index in the range.
 Each entry will only be returned once, no matter how many indices it
 occupies.
 
-Using xas_next() or xas_prev() with a multi-index xa_state
-is not supported.  Using either of these functions on a multi-index entry
-will reveal sibling entries; these should be skipped over by the caller.
+Using xas_next() or xas_prev() with a multi-index xa_state is not
+supported.  Using either of these functions on a multi-index entry will
+reveal sibling entries; these should be skipped over by the caller.
 
-Storing ``NULL`` into any index of a multi-index entry will set the entry
-at every index to ``NULL`` and dissolve the tie.  Splitting a multi-index
-entry into entries occupying smaller ranges is not yet supported.
+Storing ``NULL`` into any index of a multi-index entry will set the
+entry at every index to ``NULL`` and dissolve the tie.  A multi-index
+entry can be split into entries occupying smaller ranges by calling
+xas_split_alloc() without the xa_lock held, followed by taking the lock
+and calling xas_split().
 
 Functions and structures
 ========================
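
The new documentation describes a two-step split protocol; a minimal caller sketch follows (illustrative only, not part of the patch: the names xa, index, old_order, new_order and the helper itself are assumed, and error handling is elided):

#include <linux/xarray.h>

/* Sketch: split a multi-index entry covering 2^old_order indices into
 * entries of 2^new_order indices. xas_split_alloc() may allocate and
 * sleep, so it runs before the lock is taken; xas_split() then runs
 * under the xa_lock.
 */
static void split_example(struct xarray *xa, unsigned long index,
			  unsigned int old_order, unsigned int new_order)
{
	XA_STATE_ORDER(xas, xa, index, new_order);
	void *entry = xa_load(xa, index);

	xas_split_alloc(&xas, entry, old_order, GFP_KERNEL);
	xas_lock(&xas);
	xas_split(&xas, entry, old_order);
	xas_unlock(&xas);
}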

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 124
+SUBLEVEL = 125
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus

@@ -99,9 +99,13 @@
 	phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
 	phy-reset-duration = <20>;
 	phy-supply = <&sw2_reg>;
-	phy-handle = <&ethphy0>;
 	status = "okay";
 
+	fixed-link {
+		speed = <1000>;
+		full-duplex;
+	};
+
 	mdio {
 		#address-cells = <1>;
 		#size-cells = <0>;

@@ -408,6 +408,18 @@
 	vin-supply = <&sw1_reg>;
 };
 
+&reg_pu {
+	vin-supply = <&sw1_reg>;
+};
+
+&reg_vdd1p1 {
+	vin-supply = <&sw2_reg>;
+};
+
+&reg_vdd2p5 {
+	vin-supply = <&sw2_reg>;
+};
+
 &uart1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_uart1>;

@@ -126,7 +126,7 @@
 			compatible = "nxp,pca8574";
 			reg = <0x3a>;
 			gpio-controller;
-			#gpio-cells = <1>;
+			#gpio-cells = <2>;
 		};
 	};

@@ -193,7 +193,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc1>;
 	keep-power-in-suspend;
-	tuning-step = <2>;
+	fsl,tuning-step = <2>;
 	vmmc-supply = <&reg_3p3v>;
 	no-1-8-v;
 	broken-cd;

@@ -307,7 +307,7 @@
 	pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
 	cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
 	bus-width = <4>;
-	tuning-step = <2>;
+	fsl,tuning-step = <2>;
 	vmmc-supply = <&reg_3p3v>;
 	wakeup-source;
 	no-1-8-v;

@@ -141,8 +141,8 @@
 		ddr: memory-controller@1080000 {
 			compatible = "fsl,qoriq-memory-controller";
 			reg = <0x0 0x1080000 0x0 0x1000>;
-			interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
-			big-endian;
+			interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+			little-endian;
 		};
 
 		dcfg: syscon@1e00000 {

@@ -45,8 +45,8 @@
 	reg_12p0_main: regulator-12p0-main {
 		compatible = "regulator-fixed";
 		regulator-name = "12V_MAIN";
-		regulator-min-microvolt = <5000000>;
-		regulator-max-microvolt = <5000000>;
+		regulator-min-microvolt = <12000000>;
+		regulator-max-microvolt = <12000000>;
 		regulator-always-on;
 	};

@@ -432,14 +432,14 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
 		     struct sys_reg_params *p,
 		     const struct sys_reg_desc *rd)
 {
-	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
 
 	if (p->is_write)
 		reg_to_dbg(vcpu, p, dbg_reg);
 	else
 		dbg_to_reg(vcpu, p, dbg_reg);
 
-	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
 
 	return true;
 }
@@ -447,7 +447,7 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
 static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		const struct kvm_one_reg *reg, void __user *uaddr)
 {
-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
 
 	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
 		return -EFAULT;
@@ -457,7 +457,7 @@ static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		const struct kvm_one_reg *reg, void __user *uaddr)
 {
-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
 
 	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
 		return -EFAULT;
@@ -467,21 +467,21 @@ static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 static void reset_bvr(struct kvm_vcpu *vcpu,
 		      const struct sys_reg_desc *rd)
 {
-	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
+	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
 }
 
 static bool trap_bcr(struct kvm_vcpu *vcpu,
 		     struct sys_reg_params *p,
 		     const struct sys_reg_desc *rd)
 {
-	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
 
 	if (p->is_write)
 		reg_to_dbg(vcpu, p, dbg_reg);
 	else
 		dbg_to_reg(vcpu, p, dbg_reg);
 
-	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
 
 	return true;
 }
@@ -489,7 +489,7 @@ static bool trap_bcr(struct kvm_vcpu *vcpu,
 static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		const struct kvm_one_reg *reg, void __user *uaddr)
 {
-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
 
 	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
 		return -EFAULT;
@@ -500,7 +500,7 @@ static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		const struct kvm_one_reg *reg, void __user *uaddr)
 {
-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
 
 	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
 		return -EFAULT;
@@ -510,22 +510,22 @@ static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 static void reset_bcr(struct kvm_vcpu *vcpu,
 		      const struct sys_reg_desc *rd)
 {
-	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
+	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
 }
 
 static bool trap_wvr(struct kvm_vcpu *vcpu,
 		     struct sys_reg_params *p,
 		     const struct sys_reg_desc *rd)
 {
-	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
 
 	if (p->is_write)
 		reg_to_dbg(vcpu, p, dbg_reg);
 	else
 		dbg_to_reg(vcpu, p, dbg_reg);
 
-	trace_trap_reg(__func__, rd->reg, p->is_write,
-		vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
+	trace_trap_reg(__func__, rd->CRm, p->is_write,
+		vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);
 
 	return true;
 }
@@ -533,7 +533,7 @@ static bool trap_wvr(struct kvm_vcpu *vcpu,
 static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		const struct kvm_one_reg *reg, void __user *uaddr)
 {
-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
 
 	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
 		return -EFAULT;
@@ -543,7 +543,7 @@ static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		const struct kvm_one_reg *reg, void __user *uaddr)
 {
-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
 
 	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
 		return -EFAULT;
@@ -553,21 +553,21 @@ static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 static void reset_wvr(struct kvm_vcpu *vcpu,
 		      const struct sys_reg_desc *rd)
 {
-	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
+	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
 }
 
 static bool trap_wcr(struct kvm_vcpu *vcpu,
 		     struct sys_reg_params *p,
 		     const struct sys_reg_desc *rd)
 {
-	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
 
 	if (p->is_write)
 		reg_to_dbg(vcpu, p, dbg_reg);
 	else
 		dbg_to_reg(vcpu, p, dbg_reg);
 
-	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
 
 	return true;
 }
@@ -575,7 +575,7 @@ static bool trap_wcr(struct kvm_vcpu *vcpu,
 static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		const struct kvm_one_reg *reg, void __user *uaddr)
 {
-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
 
 	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
 		return -EFAULT;
@@ -585,7 +585,7 @@ static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		const struct kvm_one_reg *reg, void __user *uaddr)
 {
-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
 
 	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
 		return -EFAULT;
@@ -595,7 +595,7 @@ static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 static void reset_wcr(struct kvm_vcpu *vcpu,
 		      const struct sys_reg_desc *rd)
 {
-	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
+	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
 }
 
 static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)

@@ -174,6 +174,7 @@ static inline int apic_is_clustered_box(void)
 extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask);
 extern void lapic_assign_system_vectors(void);
 extern void lapic_assign_legacy_vector(unsigned int isairq, bool replace);
+extern void lapic_update_legacy_vectors(void);
 extern void lapic_online(void);
 extern void lapic_offline(void);
 extern bool apic_needs_pit(void);

@@ -6,8 +6,6 @@
 #include <asm/alternative.h>
 #include <uapi/asm/kvm_para.h>
 
-extern void kvmclock_init(void);
-
 #ifdef CONFIG_KVM_GUEST
 bool kvm_check_and_clear_guest_paused(void);
 #else
@@ -85,13 +83,14 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
 }
 
 #ifdef CONFIG_KVM_GUEST
+void kvmclock_init(void);
+void kvmclock_disable(void);
 bool kvm_para_available(void);
 unsigned int kvm_arch_para_features(void);
 unsigned int kvm_arch_para_hints(void);
 void kvm_async_pf_task_wait(u32 token, int interrupt_kernel);
 void kvm_async_pf_task_wake(u32 token);
 u32 kvm_read_and_reset_pf_reason(void);
-extern void kvm_disable_steal_time(void);
 void do_async_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
 
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
@@ -125,11 +124,6 @@ static inline u32 kvm_read_and_reset_pf_reason(void)
 {
 	return 0;
 }
-
-static inline void kvm_disable_steal_time(void)
-{
-	return;
-}
 #endif
 
 #endif /* _ASM_X86_KVM_PARA_H */

@@ -2579,6 +2579,7 @@ static void __init apic_bsp_setup(bool upmode)
 	end_local_APIC_setup();
 	irq_remap_enable_fault_handling();
 	setup_IO_APIC();
+	lapic_update_legacy_vectors();
 }
 
 #ifdef CONFIG_UP_LATE_INIT

@@ -680,6 +680,26 @@ void lapic_assign_legacy_vector(unsigned int irq, bool replace)
 	irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
 }
 
+void __init lapic_update_legacy_vectors(void)
+{
+	unsigned int i;
+
+	if (IS_ENABLED(CONFIG_X86_IO_APIC) && nr_ioapics > 0)
+		return;
+
+	/*
+	 * If the IO/APIC is disabled via config, kernel command line or
+	 * lack of enumeration then all legacy interrupts are routed
+	 * through the PIC. Make sure that they are marked as legacy
+	 * vectors. PIC_CASCADE_IRQ has already been marked in
+	 * lapic_assign_system_vectors().
+	 */
+	for (i = 0; i < nr_legacy_irqs(); i++) {
+		if (i != PIC_CASCADE_IR)
+			lapic_assign_legacy_vector(i, true);
+	}
+}
+
 void __init lapic_assign_system_vectors(void)
 {
 	unsigned int i, vector = 0;

@@ -24,6 +24,7 @@
 #include <linux/debugfs.h>
 #include <linux/nmi.h>
 #include <linux/swait.h>
+#include <linux/syscore_ops.h>
 #include <asm/timer.h>
 #include <asm/cpu.h>
 #include <asm/traps.h>
@@ -33,6 +34,7 @@
 #include <asm/apicdef.h>
 #include <asm/hypervisor.h>
 #include <asm/tlb.h>
+#include <asm/reboot.h>
 
 static int kvmapf = 1;
@@ -351,6 +353,14 @@ static void kvm_pv_disable_apf(void)
 	       smp_processor_id());
 }
 
+static void kvm_disable_steal_time(void)
+{
+	if (!has_steal_clock)
+		return;
+
+	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
+}
+
 static void kvm_pv_guest_cpu_reboot(void *unused)
 {
 	/*
@@ -393,14 +403,6 @@ static u64 kvm_steal_clock(int cpu)
 	return steal;
 }
 
-void kvm_disable_steal_time(void)
-{
-	if (!has_steal_clock)
-		return;
-
-	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
-}
-
 static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
 {
 	early_set_memory_decrypted((unsigned long) ptr, size);
@@ -428,6 +430,27 @@ static void __init sev_map_percpu_data(void)
 	}
 }
 
+static void kvm_guest_cpu_offline(bool shutdown)
+{
+	kvm_disable_steal_time();
+	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
+		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
+	kvm_pv_disable_apf();
+	if (!shutdown)
+		apf_task_wake_all();
+	kvmclock_disable();
+}
+
+static int kvm_cpu_online(unsigned int cpu)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	kvm_guest_cpu_init();
+	local_irq_restore(flags);
+	return 0;
+}
+
 #ifdef CONFIG_SMP
 
 #define KVM_IPI_CLUSTER_SIZE	(2 * BITS_PER_LONG)
@@ -547,30 +570,47 @@ static void __init kvm_smp_prepare_boot_cpu(void)
 	kvm_spinlock_init();
 }
 
-static void kvm_guest_cpu_offline(void)
-{
-	kvm_disable_steal_time();
-	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
-		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
-	kvm_pv_disable_apf();
-	apf_task_wake_all();
-}
-
-static int kvm_cpu_online(unsigned int cpu)
-{
-	local_irq_disable();
-	kvm_guest_cpu_init();
-	local_irq_enable();
-	return 0;
-}
-
 static int kvm_cpu_down_prepare(unsigned int cpu)
 {
-	local_irq_disable();
-	kvm_guest_cpu_offline();
-	local_irq_enable();
+	unsigned long flags;
+
+	local_irq_save(flags);
+	kvm_guest_cpu_offline(false);
+	local_irq_restore(flags);
 	return 0;
 }
+#endif
 
+static int kvm_suspend(void)
+{
+	kvm_guest_cpu_offline(false);
+
+	return 0;
+}
+
+static void kvm_resume(void)
+{
+	kvm_cpu_online(raw_smp_processor_id());
+}
+
+static struct syscore_ops kvm_syscore_ops = {
+	.suspend	= kvm_suspend,
+	.resume		= kvm_resume,
+};
+
+/*
+ * After a PV feature is registered, the host will keep writing to the
+ * registered memory location. If the guest happens to shutdown, this memory
+ * won't be valid. In cases like kexec, in which you install a new kernel, this
+ * means a random memory location will be kept being written.
+ */
+#ifdef CONFIG_KEXEC_CORE
+static void kvm_crash_shutdown(struct pt_regs *regs)
+{
+	kvm_guest_cpu_offline(true);
+	native_machine_crash_shutdown(regs);
+}
 #endif
 
 static void __init kvm_apf_trap_init(void)
@@ -649,6 +689,12 @@ static void __init kvm_guest_init(void)
 	kvm_guest_cpu_init();
 #endif
 
+#ifdef CONFIG_KEXEC_CORE
+	machine_ops.crash_shutdown = kvm_crash_shutdown;
+#endif
+
+	register_syscore_ops(&kvm_syscore_ops);
+
 	/*
 	 * Hard lockup detection is enabled by default. Disable it, as guests
 	 * can get false positives too easily, for example if the host is

@@ -20,7 +20,6 @@
 #include <asm/hypervisor.h>
 #include <asm/mem_encrypt.h>
 #include <asm/x86_init.h>
-#include <asm/reboot.h>
 #include <asm/kvmclock.h>
 
 static int kvmclock __initdata = 1;
@@ -197,28 +196,9 @@ static void kvm_setup_secondary_clock(void)
 }
 #endif
 
-/*
- * After the clock is registered, the host will keep writing to the
- * registered memory location. If the guest happens to shutdown, this memory
- * won't be valid. In cases like kexec, in which you install a new kernel, this
- * means a random memory location will be kept being written. So before any
- * kind of shutdown from our side, we unregister the clock by writing anything
- * that does not have the 'enable' bit set in the msr
- */
-#ifdef CONFIG_KEXEC_CORE
-static void kvm_crash_shutdown(struct pt_regs *regs)
+void kvmclock_disable(void)
 {
 	native_write_msr(msr_kvm_system_time, 0, 0);
-	kvm_disable_steal_time();
-	native_machine_crash_shutdown(regs);
-}
-#endif
-
-static void kvm_shutdown(void)
-{
-	native_write_msr(msr_kvm_system_time, 0, 0);
-	kvm_disable_steal_time();
-	native_machine_shutdown();
 }
 
 static void __init kvmclock_init_mem(void)
@@ -346,10 +326,6 @@ void __init kvmclock_init(void)
 #endif
 	x86_platform.save_sched_clock_state = kvm_save_sched_clock_state;
 	x86_platform.restore_sched_clock_state = kvm_restore_sched_clock_state;
-	machine_ops.shutdown  = kvm_shutdown;
-#ifdef CONFIG_KEXEC_CORE
-	machine_ops.crash_shutdown  = kvm_crash_shutdown;
-#endif
 	kvm_get_preset_lpj();
 
 	/*

@@ -4057,7 +4057,7 @@ static int cr_interception(struct vcpu_svm *svm)
 	err = 0;
 	if (cr >= 16) { /* mov to cr */
 		cr -= 16;
-		val = kvm_register_read(&svm->vcpu, reg);
+		val = kvm_register_readl(&svm->vcpu, reg);
 		switch (cr) {
 		case 0:
 			if (!check_selective_cr0_intercepted(svm, val))
@@ -4102,7 +4102,7 @@ static int cr_interception(struct vcpu_svm *svm)
 			kvm_queue_exception(&svm->vcpu, UD_VECTOR);
 			return 1;
 		}
-		kvm_register_write(&svm->vcpu, reg, val);
+		kvm_register_writel(&svm->vcpu, reg, val);
 	}
 	return kvm_complete_insn_gp(&svm->vcpu, err);
 }
@@ -4132,13 +4132,13 @@ static int dr_interception(struct vcpu_svm *svm)
 	if (dr >= 16) { /* mov to DRn */
 		if (!kvm_require_dr(&svm->vcpu, dr - 16))
 			return 1;
-		val = kvm_register_read(&svm->vcpu, reg);
+		val = kvm_register_readl(&svm->vcpu, reg);
 		kvm_set_dr(&svm->vcpu, dr - 16, val);
 	} else {
 		if (!kvm_require_dr(&svm->vcpu, dr))
 			return 1;
 		kvm_get_dr(&svm->vcpu, dr, &val);
-		kvm_register_write(&svm->vcpu, reg, val);
+		kvm_register_writel(&svm->vcpu, reg, val);
 	}
 
 	return kvm_skip_emulated_instruction(&svm->vcpu);

@@ -285,6 +285,14 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
 		}
 		break;
 
+	case ACPI_TYPE_LOCAL_ADDRESS_HANDLER:
+
+		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+				  "***** Address handler %p\n", object));
+
+		acpi_os_delete_mutex(object->address_space.context_mutex);
+		break;
+
 	default:
 
 		break;

@@ -1376,9 +1376,9 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
 		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
 	/* Uarts on omap4 and later */
 	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
-		   SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
 	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
-		   SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
 
 	/* Quirks that need to be set based on the module address */
 	SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,

@@ -263,8 +263,7 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
 	if (!msg || !(mem->validation_bits & CPER_MEM_VALID_MODULE_HANDLE))
 		return 0;
 
-	n = 0;
-	len = CPER_REC_LEN - 1;
+	len = CPER_REC_LEN;
 	dmi_memdev_name(mem->mem_dev_handle, &bank, &device);
 	if (bank && device)
 		n = snprintf(msg, len, "DIMM location: %s %s ", bank, device);
@@ -273,7 +272,6 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
 			      "DIMM location: not present. DMI handle: 0x%.4x ",
 			      mem->mem_dev_handle);
 
-	msg[n] = '\0';
-
 	return n;
 }

@@ -66,11 +66,6 @@ static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out)
 		return false;
 	}
 
-	if (!(in->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))) {
-		pr_warn("Entry attributes invalid: RO and XP bits both cleared\n");
-		return false;
-	}
-
 	if (PAGE_SIZE > EFI_PAGE_SIZE &&
 	    (!PAGE_ALIGNED(in->phys_addr) ||
 	     !PAGE_ALIGNED(in->num_pages << EFI_PAGE_SHIFT))) {

@@ -351,7 +351,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
 {
 	struct amdgpu_ctx *ctx;
 	struct amdgpu_ctx_mgr *mgr;
-	unsigned long ras_counter;
 
 	if (!fpriv)
 		return -EINVAL;
@@ -376,21 +375,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
 	if (atomic_read(&ctx->guilty))
 		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;
 
-	/*query ue count*/
-	ras_counter = amdgpu_ras_query_error_count(adev, false);
-	/*ras counter is monotonic increasing*/
-	if (ras_counter != ctx->ras_counter_ue) {
-		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
-		ctx->ras_counter_ue = ras_counter;
-	}
-
-	/*query ce count*/
-	ras_counter = amdgpu_ras_query_error_count(adev, true);
-	if (ras_counter != ctx->ras_counter_ce) {
-		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
-		ctx->ras_counter_ce = ras_counter;
-	}
-
 	mutex_unlock(&mgr->lock);
 	return 0;
 }

@@ -354,6 +354,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 error:
 	dma_fence_put(fence);
+	amdgpu_bo_unpin(bo);
 	amdgpu_bo_unreserve(bo);
 	amdgpu_bo_unref(&bo);
 	return r;

@@ -597,7 +597,7 @@ static int magicmouse_probe(struct hid_device *hdev,
 	if (id->vendor == USB_VENDOR_ID_APPLE &&
 	    id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
 	    hdev->type != HID_TYPE_USBMOUSE)
-		return 0;
+		return -ENODEV;
 
 	msc = devm_kzalloc(&hdev->dev, sizeof(*msc), GFP_KERNEL);
 	if (msc == NULL) {

@@ -611,9 +611,13 @@ static struct mt_report_data *mt_allocate_report_data(struct mt_device *td,
 		if (!(HID_MAIN_ITEM_VARIABLE & field->flags))
 			continue;
 
-		for (n = 0; n < field->report_count; n++) {
-			if (field->usage[n].hid == HID_DG_CONTACTID)
-				rdata->is_mt_collection = true;
+		if (field->logical == HID_DG_FINGER || td->hdev->group != HID_GROUP_MULTITOUCH_WIN_8) {
+			for (n = 0; n < field->report_count; n++) {
+				if (field->usage[n].hid == HID_DG_CONTACTID) {
+					rdata->is_mt_collection = true;
+					break;
+				}
+			}
 		}
 	}

@@ -50,6 +50,7 @@
 #define I2C_HID_QUIRK_BOGUS_IRQ			BIT(4)
 #define I2C_HID_QUIRK_RESET_ON_RESUME		BIT(5)
 #define I2C_HID_QUIRK_BAD_INPUT_SIZE		BIT(6)
+#define I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET	BIT(7)
 
 /* flags */
@@ -185,6 +186,11 @@ static const struct i2c_hid_quirks {
 		 I2C_HID_QUIRK_RESET_ON_RESUME },
 	{ USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720,
 		I2C_HID_QUIRK_BAD_INPUT_SIZE },
+	/*
+	 * Sending the wakeup after reset actually break ELAN touchscreen controller
+	 */
+	{ USB_VENDOR_ID_ELAN, HID_ANY_ID,
+		 I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET },
 	{ 0, 0 }
 };
@@ -468,7 +474,8 @@ static int i2c_hid_hwreset(struct i2c_client *client)
 	}
 
 	/* At least some SIS devices need this after reset */
-	ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+	if (!(ihid->quirks & I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET))
+		ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
 
 out_unlock:
 	mutex_unlock(&ihid->reset_lock);
@@ -1114,8 +1121,8 @@ static int i2c_hid_probe(struct i2c_client *client,
 	hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
 	hid->product = le16_to_cpu(ihid->hdesc.wProductID);
 
-	snprintf(hid->name, sizeof(hid->name), "%s %04hX:%04hX",
-		 client->name, hid->vendor, hid->product);
+	snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X",
+		 client->name, (u16)hid->vendor, (u16)hid->product);
 	strlcpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));
 
 	ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product);

@@ -1292,6 +1292,7 @@ int hid_pidff_init(struct hid_device *hid)
 
 	if (pidff->pool[PID_DEVICE_MANAGED_POOL].value &&
 	    pidff->pool[PID_DEVICE_MANAGED_POOL].value[0] == 0) {
+		error = -EPERM;
 		hid_notice(hid,
 			   "device does not support device managed pool\n");
 		goto fail;

@@ -792,10 +792,10 @@ static struct attribute *i8k_attrs[] = {
 static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
 			      int index)
 {
-	if (disallow_fan_support && index >= 8)
+	if (disallow_fan_support && index >= 20)
 		return 0;
 	if (disallow_fan_type_call &&
-	    (index == 9 || index == 12 || index == 15))
+	    (index == 21 || index == 25 || index == 28))
 		return 0;
 	if (index >= 0 && index <= 1 &&
 	    !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))

@@ -641,6 +641,14 @@ static int geni_i2c_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static void geni_i2c_shutdown(struct platform_device *pdev)
+{
+	struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
+
+	/* Make client i2c transfers start failing */
+	i2c_mark_adapter_suspended(&gi2c->adap);
+}
+
 static int __maybe_unused geni_i2c_runtime_suspend(struct device *dev)
 {
 	int ret;
@@ -677,6 +685,8 @@ static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev)
 {
 	struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
 
+	i2c_mark_adapter_suspended(&gi2c->adap);
+
 	if (!gi2c->suspended) {
 		geni_i2c_runtime_suspend(dev);
 		pm_runtime_disable(dev);
@@ -686,8 +696,16 @@ static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev)
 	return 0;
 }
 
+static int __maybe_unused geni_i2c_resume_noirq(struct device *dev)
+{
+	struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
+
+	i2c_mark_adapter_resumed(&gi2c->adap);
+	return 0;
+}
+
 static const struct dev_pm_ops geni_i2c_pm_ops = {
-	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(geni_i2c_suspend_noirq, NULL)
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(geni_i2c_suspend_noirq, geni_i2c_resume_noirq)
 	SET_RUNTIME_PM_OPS(geni_i2c_runtime_suspend, geni_i2c_runtime_resume,
 			   NULL)
 };
@@ -701,6 +719,7 @@ MODULE_DEVICE_TABLE(of, geni_i2c_dt_match);
 static struct platform_driver geni_i2c_driver = {
 	.probe  = geni_i2c_probe,
 	.remove = geni_i2c_remove,
+	.shutdown = geni_i2c_shutdown,
 	.driver = {
 		.name = "geni_i2c",
 		.pm = &geni_i2c_pm_ops,

@@ -7003,7 +7003,6 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 
 		pf->fw_fid = le16_to_cpu(resp->fid);
 		pf->port_id = le16_to_cpu(resp->port_id);
-		bp->dev->dev_port = pf->port_id;
 		memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
 		pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
 		pf->max_vfs = le16_to_cpu(resp->max_vfs);

@@ -2233,15 +2233,20 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
 	case XDP_TX:
 		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
 		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
+		if (result == I40E_XDP_CONSUMED)
+			goto out_failure;
 		break;
 	case XDP_REDIRECT:
 		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-		result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
+		if (err)
+			goto out_failure;
+		result = I40E_XDP_REDIR;
 		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
 		/* fall through */
 	case XDP_ABORTED:
+out_failure:
 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
 		/* fall through -- handle aborts by dropping packet */
 	case XDP_DROP:

@@ -212,21 +212,28 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
 	xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset);
 
+	if (likely(act == XDP_REDIRECT)) {
+		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+		if (err)
+			goto out_failure;
+		rcu_read_unlock();
+		return I40E_XDP_REDIR;
+	}
+
 	switch (act) {
 	case XDP_PASS:
 		break;
 	case XDP_TX:
 		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
 		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
-		break;
-	case XDP_REDIRECT:
-		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-		result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
+		if (result == I40E_XDP_CONSUMED)
+			goto out_failure;
 		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
 		/* fall through */
 	case XDP_ABORTED:
+out_failure:
 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
 		/* fallthrough -- handle aborts by dropping packet */
 	case XDP_DROP:

@@ -34,6 +34,7 @@
 #define PF_FW_ATQLEN_ATQOVFL_M			BIT(29)
 #define PF_FW_ATQLEN_ATQCRIT_M			BIT(30)
 #define VF_MBX_ARQLEN(_VF)			(0x0022BC00 + ((_VF) * 4))
+#define VF_MBX_ATQLEN(_VF)			(0x0022A800 + ((_VF) * 4))
 #define PF_FW_ATQLEN_ATQENABLE_M		BIT(31)
 #define PF_FW_ATQT				0x00080400
 #define PF_MBX_ARQBAH				0x0022E400

@@ -2109,6 +2109,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
 	struct ice_tx_offload_params offload = { 0 };
 	struct ice_vsi *vsi = tx_ring->vsi;
 	struct ice_tx_buf *first;
+	struct ethhdr *eth;
 	unsigned int count;
 	int tso, csum;
 
@@ -2156,7 +2157,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
 		goto out_drop;
 
 	/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
-	if (unlikely(skb->priority == TC_PRIO_CONTROL &&
+	eth = (struct ethhdr *)skb_mac_header(skb);
+	if (unlikely((skb->priority == TC_PRIO_CONTROL ||
+		      eth->h_proto == htons(ETH_P_LLDP)) &&
 		     vsi->type == ICE_VSI_PF &&
 		     vsi->port_info->is_sw_lldp))
 		offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |

@@ -384,13 +384,15 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
 	 */
 	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
 
-	/* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it
-	 * in the case of VFR. If this is done for PFR, it can mess up VF
-	 * resets because the VF driver may already have started cleanup
-	 * by the time we get here.
+	/* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
+	 * needs to clear them in the case of VFR/VFLR. If this is done for
+	 * PFR, it can mess up VF resets because the VF driver may already
+	 * have started cleanup by the time we get here.
 	 */
-	if (!is_pfr)
-		wr32(hw, VF_MBX_ARQLEN(vf_abs_id), 0);
+	if (!is_pfr) {
+		wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
+		wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0);
+	}
 
 	/* In the case of a VFLR, the HW has already reset the VF and we
 	 * just need to clean up, so don't hit the VFRTRIG register.

@@ -1079,11 +1079,14 @@ static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
 	case XDP_TX:
 		xdp_ring = adapter->xdp_ring[rx_ring->queue_index];
 		result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp);
+		if (result == IXGBEVF_XDP_CONSUMED)
+			goto out_failure;
 		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
 		/* fallthrough */
 	case XDP_ABORTED:
+out_failure:
 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
 		/* fallthrough -- handle aborts by dropping packet */
 	case XDP_DROP:

@@ -1589,6 +1589,15 @@ cdc_ncm_speed_change(struct usbnet *dev,
 	uint32_t rx_speed = le32_to_cpu(data->DLBitRRate);
 	uint32_t tx_speed = le32_to_cpu(data->ULBitRate);
 
+	/* if the speed hasn't changed, don't report it.
+	 * RTL8156 shipped before 2021 sends notification about every 32ms.
+	 */
+	if (dev->rx_speed == rx_speed && dev->tx_speed == tx_speed)
+		return;
+
+	dev->rx_speed = rx_speed;
+	dev->tx_speed = tx_speed;
+
 	/*
 	 * Currently the USB-NET API does not support reporting the actual
 	 * device speed. Do print it instead.
@@ -1629,7 +1638,8 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
 		 * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
 		 * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
 		 */
-		usbnet_link_change(dev, !!event->wValue, 0);
+		if (netif_carrier_ok(dev->net) != !!event->wValue)
+			usbnet_link_change(dev, !!event->wValue, 0);
 		break;
 
 	case USB_CDC_NOTIFY_SPEED_CHANGE:

@@ -712,7 +712,11 @@ static inline void dwc_handle_gpwrdn_disc_det(struct dwc2_hsotg *hsotg,
 	dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
 
 	hsotg->hibernated = 0;
+
+#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || \
+	IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
 	hsotg->bus_suspended = 0;
+#endif
 
 	if (gpwrdn & GPWRDN_IDSTS) {
 		hsotg->op_state = OTG_STATE_B_PERIPHERAL;

@@ -2,6 +2,7 @@
 config VFIO_PCI
 	tristate "VFIO support for PCI devices"
 	depends on VFIO && PCI && EVENTFD
+	depends on MMU
 	select VFIO_VIRQFD
 	select IRQ_BYPASS_MANAGER
 	help

@@ -1576,7 +1576,7 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev)
 			if (len == 0xFF) {
 				len = vfio_ext_cap_len(vdev, ecap, epos);
 				if (len < 0)
-					return ret;
+					return len;
 			}
 		}

@@ -289,7 +289,7 @@ err_irq:
 	vfio_platform_regions_cleanup(vdev);
 err_reg:
 	mutex_unlock(&driver_lock);
-	module_put(THIS_MODULE);
+	module_put(vdev->parent_module);
 	return ret;
 }

@@ -69,7 +69,7 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
 			     struct pci_dev *dev, int devid,
 			     publish_pci_dev_cb publish_cb)
 {
-	int err = 0, slot, func = -1;
+	int err = 0, slot, func = PCI_FUNC(dev->devfn);
 	struct pci_dev_entry *t, *dev_entry;
 	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
@@ -94,23 +94,26 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
 
 	/*
 	 * Keep multi-function devices together on the virtual PCI bus, except
-	 * virtual functions.
+	 * that we want to keep virtual functions at func 0 on their own. They
+	 * aren't multi-function devices and hence their presence at func 0
+	 * may cause guests to not scan the other functions.
 	 */
-	if (!dev->is_virtfn) {
+	if (!dev->is_virtfn || func) {
 		for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
 			if (list_empty(&vpci_dev->dev_list[slot]))
 				continue;
 
 			t = list_entry(list_first(&vpci_dev->dev_list[slot]),
 				       struct pci_dev_entry, list);
+			if (t->dev->is_virtfn && !PCI_FUNC(t->dev->devfn))
+				continue;
 
 			if (match_slot(dev, t->dev)) {
 				pr_info("vpci: %s: assign to virtual slot %d func %d\n",
 					pci_name(dev), slot,
-					PCI_FUNC(dev->devfn));
+					func);
 				list_add_tail(&dev_entry->list,
 					      &vpci_dev->dev_list[slot]);
-				func = PCI_FUNC(dev->devfn);
 				goto unlock;
 			}
 		}
@@ -123,7 +126,6 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
 				pci_name(dev), slot);
 			list_add_tail(&dev_entry->list,
 				      &vpci_dev->dev_list[slot]);
-			func = dev->is_virtfn ? 0 : PCI_FUNC(dev->devfn);
 			goto unlock;
 		}
 	}

@@ -1338,16 +1338,20 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
 	for (i = 0; i < bbio->num_stripes; i++, stripe++) {
 		u64 bytes;
 		struct request_queue *req_q;
+		struct btrfs_device *device = stripe->dev;
 
-		if (!stripe->dev->bdev) {
+		if (!device->bdev) {
 			ASSERT(btrfs_test_opt(fs_info, DEGRADED));
 			continue;
 		}
-		req_q = bdev_get_queue(stripe->dev->bdev);
+		req_q = bdev_get_queue(device->bdev);
 		if (!blk_queue_discard(req_q))
 			continue;
 
-		ret = btrfs_issue_discard(stripe->dev->bdev,
+		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
+			continue;
+
+		ret = btrfs_issue_discard(device->bdev,
 					  stripe->physical,
 					  stripe->length,
 					  &bytes);
@@ -1879,7 +1883,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
 	trace_run_delayed_ref_head(fs_info, head, 0);
 	btrfs_delayed_ref_unlock(head);
 	btrfs_put_delayed_ref_head(head);
-	return 0;
+	return ret;
 }
 
 static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(

@@ -599,7 +599,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
 	u64 end_byte = bytenr + len;
 	u64 csum_end;
 	struct extent_buffer *leaf;
-	int ret;
+	int ret = 0;
 	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
 	int blocksize_bits = fs_info->sb->s_blocksize_bits;
 
@@ -618,6 +618,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
 		path->leave_spinning = 1;
 		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
 		if (ret > 0) {
+			ret = 0;
 			if (path->slots[0] == 0)
 				break;
 			path->slots[0]--;
@@ -674,7 +675,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
 			ret = btrfs_del_items(trans, root, path,
 					      path->slots[0], del_nr);
 			if (ret)
-				goto out;
+				break;
 			if (key.offset == bytenr)
 				break;
 		} else if (key.offset < bytenr && csum_end > end_byte) {
@@ -718,8 +719,9 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
 			ret = btrfs_split_item(trans, root, path, &key, offset);
 			if (ret && ret != -EAGAIN) {
 				btrfs_abort_transaction(trans, ret);
-				goto out;
+				break;
 			}
+			ret = 0;
 
 			key.offset = end_byte - 1;
 		} else {
@@ -729,8 +731,6 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
 		}
 		btrfs_release_path(path);
 	}
-	ret = 0;
-out:
 	btrfs_free_path(path);
 	return ret;
 }

@@ -3359,6 +3359,18 @@ out:
 	if (ret || truncated) {
 		u64 start, end;
 
+		/*
+		 * If we failed to finish this ordered extent for any reason we
+		 * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
+		 * extent, and mark the inode with the error if it wasn't
+		 * already set. Any error during writeback would have already
+		 * set the mapping error, so we need to set it if we're the ones
+		 * marking this ordered extent as failed.
+		 */
+		if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR,
+					     &ordered_extent->flags))
+			mapping_set_error(ordered_extent->inode->i_mapping, -EIO);
+
 		if (truncated)
 			start = ordered_extent->file_offset + logical_len;
 		else

@@ -1285,22 +1285,14 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
 		return -EUCLEAN;
 	}
 	for (; ptr < end; ptr += sizeof(*dref)) {
-		u64 root_objectid;
-		u64 owner;
 		u64 offset;
-		u64 hash;
 
+		/*
+		 * We cannot check the extent_data_ref hash due to possible
+		 * overflow from the leaf due to hash collisions.
+		 */
 		dref = (struct btrfs_extent_data_ref *)ptr;
-		root_objectid = btrfs_extent_data_ref_root(leaf, dref);
-		owner = btrfs_extent_data_ref_objectid(leaf, dref);
 		offset = btrfs_extent_data_ref_offset(leaf, dref);
-		hash = hash_extent_data_ref(root_objectid, owner, offset);
-		if (hash != key->offset) {
-			extent_err(leaf, slot,
-	"invalid extent data ref hash, item has 0x%016llx key has 0x%016llx",
-				   hash, key->offset);
-			return -EUCLEAN;
-		}
 		if (!IS_ALIGNED(offset, leaf->fs_info->sectorsize)) {
 			extent_err(leaf, slot,
 	"invalid extent data backref offset, have %llu expect aligned to %u",

@@ -1775,6 +1775,7 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
 			break;
 
 		if (ret == 1) {
+			ret = 0;
 			if (path->slots[0] == 0)
 				break;
 			path->slots[0]--;
@@ -1787,17 +1788,19 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
 
 		ret = btrfs_del_item(trans, root, path);
 		if (ret)
-			goto out;
+			break;
 
 		btrfs_release_path(path);
 		inode = read_one_inode(root, key.offset);
-		if (!inode)
-			return -EIO;
+		if (!inode) {
+			ret = -EIO;
+			break;
+		}
 
 		ret = fixup_inode_link_count(trans, root, inode);
 		iput(inode);
 		if (ret)
-			goto out;
+			break;
 
 		/*
 		 * fixup on a directory may create new entries,
@@ -1806,8 +1809,6 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
 		 */
 		key.offset = (u64)-1;
 	}
-	ret = 0;
-out:
 	btrfs_release_path(path);
 	return ret;
 }

@@ -3378,7 +3378,10 @@ static int ext4_split_extent_at(handle_t *handle,
 	ext4_ext_mark_unwritten(ex2);
 
 	err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
-	if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
+	if (err != -ENOSPC && err != -EDQUOT)
+		goto out;
+
+	if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
 		if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
 			if (split_flag & EXT4_EXT_DATA_VALID1) {
 				err = ext4_ext_zeroout(inode, ex2);
@@ -3404,30 +3407,30 @@ static int ext4_split_extent_at(handle_t *handle,
 					     ext4_ext_pblock(&orig_ex));
 		}
 
-		if (err)
-			goto fix_extent_len;
-		/* update the extent length and mark as initialized */
-		ex->ee_len = cpu_to_le16(ee_len);
-		ext4_ext_try_to_merge(handle, inode, path, ex);
-		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
-		if (err)
-			goto fix_extent_len;
-
-		/* update extent status tree */
-		err = ext4_zeroout_es(inode, &zero_ex);
-
-		goto out;
-	} else if (err)
-		goto fix_extent_len;
+		if (!err) {
+			/* update the extent length and mark as initialized */
+			ex->ee_len = cpu_to_le16(ee_len);
+			ext4_ext_try_to_merge(handle, inode, path, ex);
+			err = ext4_ext_dirty(handle, inode, path + path->p_depth);
+			if (!err)
+				/* update extent status tree */
+				err = ext4_zeroout_es(inode, &zero_ex);
+			/* If we failed at this point, we don't know in which
+			 * state the extent tree exactly is so don't try to fix
+			 * length of the original extent as it may do even more
+			 * damage.
+			 */
+			goto out;
+		}
+	}
 
-out:
-	ext4_ext_show_leaf(inode, path);
-	return err;
-
 fix_extent_len:
 	ex->ee_len = orig_ex.ee_len;
 	ext4_ext_dirty(handle, inode, path + path->p_depth);
 	return err;
+out:
+	ext4_ext_show_leaf(inode, path);
+	return err;
 }
 
 /*

View File

@@ -1855,6 +1855,45 @@ out:
 	return ret;
 }

+/*
+ * zero out partial blocks of one cluster.
+ *
+ * start: file offset where zero starts, will be made upper block aligned.
+ * len: it will be trimmed to the end of current cluster if "start + len"
+ *      is bigger than it.
+ */
+static int ocfs2_zeroout_partial_cluster(struct inode *inode,
+					 u64 start, u64 len)
+{
+	int ret;
+	u64 start_block, end_block, nr_blocks;
+	u64 p_block, offset;
+	u32 cluster, p_cluster, nr_clusters;
+	struct super_block *sb = inode->i_sb;
+	u64 end = ocfs2_align_bytes_to_clusters(sb, start);
+
+	if (start + len < end)
+		end = start + len;
+
+	start_block = ocfs2_blocks_for_bytes(sb, start);
+	end_block = ocfs2_blocks_for_bytes(sb, end);
+	nr_blocks = end_block - start_block;
+	if (!nr_blocks)
+		return 0;
+
+	cluster = ocfs2_bytes_to_clusters(sb, start);
+	ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
+				 &nr_clusters, NULL);
+	if (ret)
+		return ret;
+	if (!p_cluster)
+		return 0;
+
+	offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
+	p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
+	return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
+}
+
 /*
  * Parts of this function taken from xfs_change_file_space()
  */
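A worked illustration of the trimming logic above (hypothetical geometry, not from this patch: 64KB clusters, 4KB blocks):

	start = 130KB, len = 100KB
	end   = start aligned up to the next cluster boundary = 192KB
	start + len = 230KB > 192KB, so end stays at 192KB
	start_block = 130KB / 4KB rounded up = 33  (i.e. 132KB)
	end_block   = 192KB / 4KB             = 48
	nr_blocks   = 15

so only 132KB..192KB - the tail of the cluster containing "start" - gets zeroed.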
@@ -1865,7 +1904,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
 {
 	int ret;
 	s64 llen;
-	loff_t size;
+	loff_t size, orig_isize;
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 	struct buffer_head *di_bh = NULL;
 	handle_t *handle;
@@ -1896,6 +1935,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
 		goto out_inode_unlock;
 	}

+	orig_isize = i_size_read(inode);
 	switch (sr->l_whence) {
 	case 0: /*SEEK_SET*/
 		break;
@@ -1903,7 +1943,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
 		sr->l_start += f_pos;
 		break;
 	case 2: /*SEEK_END*/
-		sr->l_start += i_size_read(inode);
+		sr->l_start += orig_isize;
 		break;
 	default:
 		ret = -EINVAL;
@@ -1957,6 +1997,14 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
 	default:
 		ret = -EINVAL;
 	}

+	/* zeroout eof blocks in the cluster. */
+	if (!ret && change_size && orig_isize < size) {
+		ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
+					size - orig_isize);
+		if (!ret)
+			i_size_write(inode, size);
+	}
+
 	up_write(&OCFS2_I(inode)->ip_alloc_sem);
 	if (ret) {
 		mlog_errno(ret);
@@ -1973,9 +2021,6 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
 		goto out_inode_unlock;
 	}

-	if (change_size && i_size_read(inode) < size)
-		i_size_write(inode, size);
-
 	inode->i_ctime = inode->i_mtime = current_time(inode);
 	ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
 	if (ret < 0)

@@ -231,6 +231,19 @@ static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
 	else
 		return NULL;
 }

+/**
+ * thp_order - Order of a transparent huge page.
+ * @page: Head page of a transparent huge page.
+ */
+static inline unsigned int thp_order(struct page *page)
+{
+	VM_BUG_ON_PGFLAGS(PageTail(page), page);
+	if (PageHead(page))
+		return HPAGE_PMD_ORDER;
+	return 0;
+}
+
 static inline int hpage_nr_pages(struct page *page)
 {
 	if (unlikely(PageTransHuge(page)))
@@ -290,6 +303,12 @@ static inline struct list_head *page_deferred_list(struct page *page)
 #define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
 #define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

+static inline unsigned int thp_order(struct page *page)
+{
+	VM_BUG_ON_PGFLAGS(PageTail(page), page);
+	return 0;
+}
+
 #define hpage_nr_pages(x) 1

 static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
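A minimal usage sketch (hypothetical `page`; not part of this patch): because the stub returns 0 when THP is compiled out, callers can do size arithmetic uniformly for base and huge pages:

	/* bytes and base pages covered by this page, either config */
	unsigned long bytes = PAGE_SIZE << thp_order(page);
	unsigned long pages = 1UL << thp_order(page);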

@@ -83,6 +83,8 @@ struct usbnet {
 #		define EVENT_LINK_CHANGE	11
 #		define EVENT_SET_RX_MODE	12
 #		define EVENT_NO_IP_ALIGN	13
+	u32			rx_speed;	/* in bps - NOT Mbps */
+	u32			tx_speed;	/* in bps - NOT Mbps */
 };

 static inline struct usb_driver *driver_of(struct usb_interface *intf)

@@ -1470,6 +1470,28 @@ void xas_pause(struct xa_state *);

 void xas_create_range(struct xa_state *);

+#ifdef CONFIG_XARRAY_MULTI
+int xa_get_order(struct xarray *, unsigned long index);
+void xas_split(struct xa_state *, void *entry, unsigned int order);
+void xas_split_alloc(struct xa_state *, void *entry, unsigned int order, gfp_t);
+#else
+static inline int xa_get_order(struct xarray *xa, unsigned long index)
+{
+	return 0;
+}
+
+static inline void xas_split(struct xa_state *xas, void *entry,
+		unsigned int order)
+{
+	xas_store(xas, entry);
+}
+
+static inline void xas_split_alloc(struct xa_state *xas, void *entry,
+		unsigned int order, gfp_t gfp)
+{
+}
+#endif
+
 /**
  * xas_reload() - Refetch an entry from the xarray.
  * @xas: XArray operation state.

@@ -119,7 +119,7 @@ void caif_free_client(struct cflayer *adap_layer);
 * The link_support layer is used to add any Link Layer specific
 * framing.
 */
-void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
			struct cflayer *link_support, int head_room,
			struct cflayer **layer, int (**rcv_func)(
				struct sk_buff *, struct net_device *,

@@ -62,7 +62,7 @@ void cfcnfg_remove(struct cfcnfg *cfg);
 * @fcs:	Specify if checksum is used in CAIF Framing Layer.
 * @head_room:	Head space needed by link specific protocol.
 */
-void
+int
 cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
		     struct net_device *dev, struct cflayer *phy_layer,
		     enum cfcnfg_phy_preference pref,

@@ -9,4 +9,5 @@
 #include <net/caif/caif_layer.h>

 struct cflayer *cfserl_create(int instance, bool use_stx);
+void cfserl_release(struct cflayer *layer);
 #endif

@@ -1174,7 +1174,7 @@ static noinline void __init kernel_init_freeable(void)
	 */
	set_mems_allowed(node_states[N_MEMORY]);

-	cad_pid = task_pid(current);
+	cad_pid = get_pid(task_pid(current));

	smp_prepare_cpus(setup_max_cpus);

@@ -260,7 +260,11 @@ static FORCE_INLINE int LZ4_decompress_generic(
			}
		}

-		memcpy(op, ip, length);
+		/*
+		 * supports overlapping memory regions; only matters
+		 * for in-place decompression scenarios
+		 */
+		LZ4_memmove(op, ip, length);
		ip += length;
		op += length;
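A standalone illustration of why this copy must tolerate overlap (plain C sketch, not kernel code): in-place decompression puts compressed input and decompressed output in the same buffer, so `op` can run into `ip`. memcpy() is undefined for overlapping regions, while memmove() copies as if through a temporary buffer:

	#include <string.h>

	char buf[8] = "abcdef";
	/* dst overlaps src: well-defined with memmove() */
	memmove(buf + 2, buf, 4);	/* buf is now "ababcd" */
	/* memcpy(buf + 2, buf, 4) would be undefined behaviour here */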

@@ -137,6 +137,8 @@ static FORCE_INLINE void LZ4_writeLE16(void *memPtr, U16 value)
	return put_unaligned_le16(value, memPtr);
 }

+#define LZ4_memmove(dst, src, size) __builtin_memmove(dst, src, size)
+
 static FORCE_INLINE void LZ4_copy8(void *dst, const void *src)
 {
 #if LZ4_ARCH64

@@ -1503,6 +1503,49 @@ static noinline void check_store_range(struct xarray *xa)
	}
 }

+#ifdef CONFIG_XARRAY_MULTI
+static void check_split_1(struct xarray *xa, unsigned long index,
+		unsigned int order)
+{
+	XA_STATE(xas, xa, index);
+	void *entry;
+	unsigned int i = 0;
+
+	xa_store_order(xa, index, order, xa, GFP_KERNEL);
+
+	xas_split_alloc(&xas, xa, order, GFP_KERNEL);
+	xas_lock(&xas);
+	xas_split(&xas, xa, order);
+	xas_unlock(&xas);
+
+	xa_for_each(xa, index, entry) {
+		XA_BUG_ON(xa, entry != xa);
+		i++;
+	}
+	XA_BUG_ON(xa, i != 1 << order);
+
+	xa_set_mark(xa, index, XA_MARK_0);
+	XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));
+
+	xa_destroy(xa);
+}
+
+static noinline void check_split(struct xarray *xa)
+{
+	unsigned int order;
+
+	XA_BUG_ON(xa, !xa_empty(xa));
+
+	for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) {
+		check_split_1(xa, 0, order);
+		check_split_1(xa, 1UL << order, order);
+		check_split_1(xa, 3UL << order, order);
+	}
+}
+#else
+static void check_split(struct xarray *xa) { }
+#endif
+
 static void check_align_1(struct xarray *xa, char *name)
 {
	int i;
@@ -1649,6 +1692,26 @@ static noinline void check_account(struct xarray *xa)
 #endif
 }

+static noinline void check_get_order(struct xarray *xa)
+{
+	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
+	unsigned int order;
+	unsigned long i, j;
+
+	for (i = 0; i < 3; i++)
+		XA_BUG_ON(xa, xa_get_order(xa, i) != 0);
+
+	for (order = 0; order < max_order; order++) {
+		for (i = 0; i < 10; i++) {
+			xa_store_order(xa, i << order, order,
+					xa_mk_index(i << order), GFP_KERNEL);
+			for (j = i << order; j < (i + 1) << order; j++)
+				XA_BUG_ON(xa, xa_get_order(xa, j) != order);
+			xa_erase(xa, i << order);
+		}
+	}
+}
+
 static noinline void check_destroy(struct xarray *xa)
 {
	unsigned long index;
@@ -1697,6 +1760,7 @@ static int xarray_checks(void)
	check_reserve(&array);
	check_reserve(&xa0);
	check_multi_store(&array);
+	check_get_order(&array);
	check_xa_alloc();
	check_find(&array);
	check_find_entry(&array);
@@ -1708,6 +1772,7 @@ static int xarray_checks(void)
	check_store_range(&array);
	check_store_iter(&array);
	check_align(&xa0);
+	check_split(&array);
	check_workingset(&array, 0);
	check_workingset(&array, 64);

@@ -266,13 +266,14 @@ static void xa_node_free(struct xa_node *node)
 */
 static void xas_destroy(struct xa_state *xas)
 {
-	struct xa_node *node = xas->xa_alloc;
+	struct xa_node *next, *node = xas->xa_alloc;

-	if (!node)
-		return;
-	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
-	kmem_cache_free(radix_tree_node_cachep, node);
-	xas->xa_alloc = NULL;
+	while (node) {
+		XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
+		next = rcu_dereference_raw(node->parent);
+		radix_tree_node_rcu_free(&node->rcu_head);
+		xas->xa_alloc = node = next;
+	}
 }

 /**
@@ -304,6 +305,7 @@ bool xas_nomem(struct xa_state *xas, gfp_t gfp)
	xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
	if (!xas->xa_alloc)
		return false;
+	xas->xa_alloc->parent = NULL;
	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
	xas->xa_node = XAS_RESTART;
	return true;
@@ -339,6 +341,7 @@ static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
	}
	if (!xas->xa_alloc)
		return false;
+	xas->xa_alloc->parent = NULL;
	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
	xas->xa_node = XAS_RESTART;
	return true;
@@ -403,7 +406,7 @@ static unsigned long xas_size(const struct xa_state *xas)
 /*
 * Use this to calculate the maximum index that will need to be created
 * in order to add the entry described by @xas.  Because we cannot store a
- * multiple-index entry at index 0, the calculation is a little more complex
+ * multi-index entry at index 0, the calculation is a little more complex
 * than you might expect.
 */
 static unsigned long xas_max(struct xa_state *xas)
@@ -946,6 +949,153 @@ void xas_init_marks(const struct xa_state *xas)
 }
 EXPORT_SYMBOL_GPL(xas_init_marks);

+#ifdef CONFIG_XARRAY_MULTI
+static unsigned int node_get_marks(struct xa_node *node, unsigned int offset)
+{
+	unsigned int marks = 0;
+	xa_mark_t mark = XA_MARK_0;
+
+	for (;;) {
+		if (node_get_mark(node, offset, mark))
+			marks |= 1 << (__force unsigned int)mark;
+		if (mark == XA_MARK_MAX)
+			break;
+		mark_inc(mark);
+	}
+
+	return marks;
+}
+
+static void node_set_marks(struct xa_node *node, unsigned int offset,
+			struct xa_node *child, unsigned int marks)
+{
+	xa_mark_t mark = XA_MARK_0;
+
+	for (;;) {
+		if (marks & (1 << (__force unsigned int)mark)) {
+			node_set_mark(node, offset, mark);
+			if (child)
+				node_mark_all(child, mark);
+		}
+		if (mark == XA_MARK_MAX)
+			break;
+		mark_inc(mark);
+	}
+}
+
+/**
+ * xas_split_alloc() - Allocate memory for splitting an entry.
+ * @xas: XArray operation state.
+ * @entry: New entry which will be stored in the array.
+ * @order: New entry order.
+ * @gfp: Memory allocation flags.
+ *
+ * This function should be called before calling xas_split().
+ * If necessary, it will allocate new nodes (and fill them with @entry)
+ * to prepare for the upcoming split of an entry of @order size into
+ * entries of the order stored in the @xas.
+ *
+ * Context: May sleep if @gfp flags permit.
+ */
+void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
+		gfp_t gfp)
+{
+	unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
+	unsigned int mask = xas->xa_sibs;
+
+	/* XXX: no support for splitting really large entries yet */
+	if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT < order))
+		goto nomem;
+	if (xas->xa_shift + XA_CHUNK_SHIFT > order)
+		return;
+
+	do {
+		unsigned int i;
+		void *sibling;
+		struct xa_node *node;
+
+		node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+		if (!node)
+			goto nomem;
+		node->array = xas->xa;
+		for (i = 0; i < XA_CHUNK_SIZE; i++) {
+			if ((i & mask) == 0) {
+				RCU_INIT_POINTER(node->slots[i], entry);
+				sibling = xa_mk_sibling(0);
+			} else {
+				RCU_INIT_POINTER(node->slots[i], sibling);
+			}
+		}
+		RCU_INIT_POINTER(node->parent, xas->xa_alloc);
+		xas->xa_alloc = node;
+	} while (sibs-- > 0);
+
+	return;
+nomem:
+	xas_destroy(xas);
+	xas_set_err(xas, -ENOMEM);
+}
+EXPORT_SYMBOL_GPL(xas_split_alloc);
+
+/**
+ * xas_split() - Split a multi-index entry into smaller entries.
+ * @xas: XArray operation state.
+ * @entry: New entry to store in the array.
+ * @order: New entry order.
+ *
+ * The value in the entry is copied to all the replacement entries.
+ *
+ * Context: Any context.  The caller should hold the xa_lock.
+ */
+void xas_split(struct xa_state *xas, void *entry, unsigned int order)
+{
+	unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
+	unsigned int offset, marks;
+	struct xa_node *node;
+	void *curr = xas_load(xas);
+	int values = 0;
+
+	node = xas->xa_node;
+	if (xas_top(node))
+		return;
+
+	marks = node_get_marks(node, xas->xa_offset);
+
+	offset = xas->xa_offset + sibs;
+	do {
+		if (xas->xa_shift < node->shift) {
+			struct xa_node *child = xas->xa_alloc;
+
+			xas->xa_alloc = rcu_dereference_raw(child->parent);
+			child->shift = node->shift - XA_CHUNK_SHIFT;
+			child->offset = offset;
+			child->count = XA_CHUNK_SIZE;
+			child->nr_values = xa_is_value(entry) ?
+					XA_CHUNK_SIZE : 0;
+			RCU_INIT_POINTER(child->parent, node);
+			node_set_marks(node, offset, child, marks);
+			rcu_assign_pointer(node->slots[offset],
+					xa_mk_node(child));
+			if (xa_is_value(curr))
+				values--;
+		} else {
+			unsigned int canon = offset - xas->xa_sibs;
+
+			node_set_marks(node, canon, NULL, marks);
+			rcu_assign_pointer(node->slots[canon], entry);
+			while (offset > canon)
+				rcu_assign_pointer(node->slots[offset--],
+						xa_mk_sibling(canon));
+			values += (xa_is_value(entry) - xa_is_value(curr)) *
+					(xas->xa_sibs + 1);
+		}
+	} while (offset-- > xas->xa_offset);
+
+	node->nr_values += values;
+}
+EXPORT_SYMBOL_GPL(xas_split);
+#endif
+
 /**
 * xas_pause() - Pause a walk to drop a lock.
 * @xas: XArray operation state.
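Taken together, the two contexts documented above imply the calling pattern exercised by check_split_1() earlier in this patch. A minimal sketch (`xa`, `index`, `old` and the order 9 are hypothetical):

	XA_STATE(xas, xa, index);

	/* may sleep: allocate replacement nodes without the lock held */
	xas_split_alloc(&xas, old, 9, GFP_KERNEL);
	xas_lock(&xas);
	/* rewrite the slots under the xa_lock */
	xas_split(&xas, old, 9);
	xas_unlock(&xas);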
@@ -1407,7 +1557,7 @@ EXPORT_SYMBOL(__xa_store);
 * @gfp: Memory allocation flags.
 *
 * After this function returns, loads from this index will return @entry.
- * Storing into an existing multislot entry updates the entry of every index.
+ * Storing into an existing multi-index entry updates the entry of every index.
 * The marks associated with @index are unaffected unless @entry is %NULL.
 *
 * Context: Any context.  Takes and releases the xa_lock.
@@ -1549,7 +1699,7 @@ static void xas_set_range(struct xa_state *xas, unsigned long first,
 *
 * After this function returns, loads from any index between @first and @last,
 * inclusive will return @entry.
- * Storing into an existing multislot entry updates the entry of every index.
+ * Storing into an existing multi-index entry updates the entry of every index.
 * The marks associated with @index are unaffected unless @entry is %NULL.
 *
 * Context: Process context.  Takes and releases the xa_lock.  May sleep
@@ -1592,6 +1742,46 @@ unlock:
	return xas_result(&xas, NULL);
 }
 EXPORT_SYMBOL(xa_store_range);
+
+/**
+ * xa_get_order() - Get the order of an entry.
+ * @xa: XArray.
+ * @index: Index of the entry.
+ *
+ * Return: A number between 0 and 63 indicating the order of the entry.
+ */
+int xa_get_order(struct xarray *xa, unsigned long index)
+{
+	XA_STATE(xas, xa, index);
+	void *entry;
+	int order = 0;
+
+	rcu_read_lock();
+	entry = xas_load(&xas);
+
+	if (!entry)
+		goto unlock;
+
+	if (!xas.xa_node)
+		goto unlock;
+
+	for (;;) {
+		unsigned int slot = xas.xa_offset + (1 << order);
+
+		if (slot >= XA_CHUNK_SIZE)
+			break;
+		if (!xa_is_sibling(xas.xa_node->slots[slot]))
+			break;
+		order++;
+	}
+
+	order += xas.xa_node->shift;
+unlock:
+	rcu_read_unlock();
+
+	return order;
+}
+EXPORT_SYMBOL(xa_get_order);
 #endif /* CONFIG_XARRAY_MULTI */
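Usage sketch, modeled on the check_get_order() self-test earlier in this patch (xa_store_order() is the test-suite helper, not a public API; `xa` and the values are hypothetical):

	xa_store_order(xa, 512, 9, xa_mk_index(512), GFP_KERNEL);
	xa_get_order(xa, 512);	/* returns 9 */
	xa_get_order(xa, 700);	/* any index inside the entry: still 9 */
	xa_get_order(xa, 3);	/* empty or order-0 slot: returns 0 */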
 /**

@@ -856,7 +856,6 @@ noinline int __add_to_page_cache_locked(struct page *page,
	int huge = PageHuge(page);
	struct mem_cgroup *memcg;
	int error;
-	void *old;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapBacked(page), page);
@@ -872,21 +871,41 @@ noinline int __add_to_page_cache_locked(struct page *page,
	get_page(page);
	page->mapping = mapping;
	page->index = offset;
+	gfp_mask &= GFP_RECLAIM_MASK;

	do {
+		unsigned int order = xa_get_order(xas.xa, xas.xa_index);
+		void *entry, *old = NULL;
+
+		if (order > thp_order(page))
+			xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index),
+					order, gfp_mask);
		xas_lock_irq(&xas);
-		old = xas_load(&xas);
-		if (old && !xa_is_value(old))
-			xas_set_err(&xas, -EEXIST);
+		xas_for_each_conflict(&xas, entry) {
+			old = entry;
+			if (!xa_is_value(entry)) {
+				xas_set_err(&xas, -EEXIST);
+				goto unlock;
+			}
+		}
+
+		if (old) {
+			if (shadowp)
+				*shadowp = old;
+			/* entry may have been split before we acquired lock */
+			order = xa_get_order(xas.xa, xas.xa_index);
+			if (order > thp_order(page)) {
+				xas_split(&xas, old, order);
+				xas_reset(&xas);
+			}
+		}
+
		xas_store(&xas, page);
		if (xas_error(&xas))
			goto unlock;

-		if (xa_is_value(old)) {
-			mapping->nrexceptional--;
-			if (shadowp)
-				*shadowp = old;
-		}
+		if (old)
+			mapping->nrexceptional--;
		mapping->nrpages++;

		/* hugetlb pages do not participate in page cache accounting */
@@ -894,7 +913,7 @@ noinline int __add_to_page_cache_locked(struct page *page,
		__inc_node_page_state(page, NR_FILE_PAGES);
 unlock:
		xas_unlock_irq(&xas);
-	} while (xas_nomem(&xas, gfp_mask & GFP_RECLAIM_MASK));
+	} while (xas_nomem(&xas, gfp_mask));

	if (xas_error(&xas))
		goto error;

@@ -4338,10 +4338,20 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
	struct page *page;

	if (!*pagep) {
-		ret = -ENOMEM;
+		/* If a page already exists, then it's UFFDIO_COPY for
+		 * a non-missing case. Return -EEXIST.
+		 */
+		if (vm_shared &&
+		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
+			ret = -EEXIST;
+			goto out;
+		}
+
		page = alloc_huge_page(dst_vma, dst_addr, 0);
-		if (IS_ERR(page))
+		if (IS_ERR(page)) {
+			ret = -ENOMEM;
			goto out;
+		}

		ret = copy_huge_page_from_user(page,
						(const void __user *) src_addr,
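For context, the userspace-visible effect, as a hedged sketch following the usual userfaultfd(2) conventions (`uffd`, `fault_addr`, `src_buf` and `huge_sz` are hypothetical): a racing UFFDIO_COPY on an already-populated shared hugetlb page now reports EEXIST instead of overwriting it, and the caller can treat that as already resolved:

	struct uffdio_copy copy = {
		.dst = (unsigned long)fault_addr,
		.src = (unsigned long)src_buf,
		.len = huge_sz,
		.mode = 0,
	};

	if (ioctl(uffd, UFFDIO_COPY, &copy) && errno == EEXIST)
		/* another thread resolved the fault first; nothing to do */;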

@@ -1584,8 +1584,13 @@ setup_failed:
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
-		flush_work(&hdev->cmd_work);
+
+		/* Since hci_rx_work() is possible to awake new cmd_work
+		 * it should be flushed first to avoid unexpected call of
+		 * hci_cmd_work()
+		 */
		flush_work(&hdev->rx_work);
+		flush_work(&hdev->cmd_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

@@ -755,7 +755,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
-			bh_lock_sock_nested(sk);
+			lock_sock(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
@@ -764,7 +764,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
				hci_dev_put(hdev);
			}
-			bh_unlock_sock(sk);
+			release_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}

@@ -307,7 +307,7 @@ static void dev_flowctrl(struct net_device *dev, int on)
	caifd_put(caifd);
 }

-void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
		     struct cflayer *link_support, int head_room,
		     struct cflayer **layer,
		     int (**rcv_func)(struct sk_buff *, struct net_device *,
@@ -318,11 +318,12 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
	enum cfcnfg_phy_preference pref;
	struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
	struct caif_device_entry_list *caifdevs;
+	int res;

	caifdevs = caif_device_list(dev_net(dev));
	caifd = caif_device_alloc(dev);
	if (!caifd)
-		return;
+		return -ENOMEM;
	*layer = &caifd->layer;
	spin_lock_init(&caifd->flow_lock);
@@ -343,7 +344,7 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
	strlcpy(caifd->layer.name, dev->name,
		sizeof(caifd->layer.name));
	caifd->layer.transmit = transmit;
-	cfcnfg_add_phy_layer(cfg,
+	res = cfcnfg_add_phy_layer(cfg,
				dev,
				&caifd->layer,
				pref,
@@ -353,6 +354,7 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
	mutex_unlock(&caifdevs->lock);
	if (rcv_func)
		*rcv_func = receive;
+	return res;
 }
 EXPORT_SYMBOL(caif_enroll_dev);

@@ -367,6 +369,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
	struct cflayer *layer, *link_support;
	int head_room = 0;
	struct caif_device_entry_list *caifdevs;
+	int res;

	cfg = get_cfcnfg(dev_net(dev));
	caifdevs = caif_device_list(dev_net(dev));
@@ -392,8 +395,10 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
				break;
			}
		}
-		caif_enroll_dev(dev, caifdev, link_support, head_room,
-				&layer, NULL);
+		res = caif_enroll_dev(dev, caifdev, link_support, head_room,
+				      &layer, NULL);
+		if (res)
+			cfserl_release(link_support);
		caifdev->flowctrl = dev_flowctrl;
		break;

@@ -115,6 +115,11 @@ static struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
	return (struct cflayer *) this;
 }

+static void cfusbl_release(struct cflayer *layer)
+{
+	kfree(layer);
+}
+
 static struct packet_type caif_usb_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_802_EX1),
 };
@@ -127,6 +132,7 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
	struct cflayer *layer, *link_support;
	struct usbnet *usbnet;
	struct usb_device *usbdev;
+	int res;

	/* Check whether we have a NCM device, and find its VID/PID. */
	if (!(dev->dev.parent && dev->dev.parent->driver &&
@@ -169,8 +175,11 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
	if (dev->num_tx_queues > 1)
		pr_warn("USB device uses more than one tx queue\n");

-	caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN,
-			&layer, &caif_usb_type.func);
+	res = caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN,
+			      &layer, &caif_usb_type.func);
+	if (res)
+		goto err;
+
	if (!pack_added)
		dev_add_pack(&caif_usb_type);
	pack_added = true;
@@ -178,6 +187,9 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
	strlcpy(layer->name, dev->name, sizeof(layer->name));

	return 0;
+err:
+	cfusbl_release(link_support);
+	return res;
 }

 static struct notifier_block caif_device_notifier = {

@@ -450,7 +450,7 @@ unlock:
	rcu_read_unlock();
 }

-void
+int
 cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
		     struct net_device *dev, struct cflayer *phy_layer,
		     enum cfcnfg_phy_preference pref,
@@ -459,7 +459,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
 {
	struct cflayer *frml;
	struct cfcnfg_phyinfo *phyinfo = NULL;
-	int i;
+	int i, res = 0;
	u8 phyid;

	mutex_lock(&cnfg->lock);
@@ -473,12 +473,15 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
			goto got_phyid;
	}
	pr_warn("Too many CAIF Link Layers (max 6)\n");
+	res = -EEXIST;
	goto out;

 got_phyid:
	phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
-	if (!phyinfo)
+	if (!phyinfo) {
+		res = -ENOMEM;
		goto out_err;
+	}

	phy_layer->id = phyid;
	phyinfo->pref = pref;
@@ -492,8 +495,10 @@ got_phyid:

	frml = cffrml_create(phyid, fcs);

-	if (!frml)
+	if (!frml) {
+		res = -ENOMEM;
		goto out_err;
+	}
	phyinfo->frm_layer = frml;
	layer_set_up(frml, cnfg->mux);

@@ -511,11 +516,12 @@ got_phyid:
	list_add_rcu(&phyinfo->node, &cnfg->phys);
 out:
	mutex_unlock(&cnfg->lock);
-	return;
+	return res;

 out_err:
	kfree(phyinfo);
	mutex_unlock(&cnfg->lock);
+	return res;
 }
 EXPORT_SYMBOL(cfcnfg_add_phy_layer);

@@ -31,6 +31,11 @@ static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
 static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
				int phyid);

+void cfserl_release(struct cflayer *layer)
+{
+	kfree(layer);
+}
+
 struct cflayer *cfserl_create(int instance, bool use_stx)
 {
	struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC);

@@ -239,6 +239,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
			write_lock(&n->lock);
			if ((n->nud_state == NUD_FAILED) ||
+			    (n->nud_state == NUD_NOARP) ||
			    (tbl->is_multicast &&
			     tbl->is_multicast(n->primary_key)) ||
			    time_after(tref, n->updated))

@@ -680,8 +680,10 @@ int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info)
	    nla_put_u8(msg, IEEE802154_ATTR_LLSEC_SECLEVEL, params.out_level) ||
	    nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
			be32_to_cpu(params.frame_counter)) ||
-	    ieee802154_llsec_fill_key_id(msg, &params.out_key))
+	    ieee802154_llsec_fill_key_id(msg, &params.out_key)) {
+		rc = -ENOBUFS;
		goto out_free;
+	}

	dev_put(dev);

@@ -241,8 +241,10 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
	}

	if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
-	    nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name))
+	    nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name)) {
+		rc = -EMSGSIZE;
		goto nla_put_failure;
+	}
	dev_put(dev);

	wpan_phy_put(phy);

@@ -3688,11 +3688,11 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
	if (nh) {
		if (rt->fib6_src.plen) {
			NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
-			goto out;
+			goto out_free;
		}
		if (!nexthop_get(nh)) {
			NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
-			goto out;
+			goto out_free;
		}
		rt->nh = nh;
		fib6_nh = nexthop_fib6_nh(rt->nh);
@@ -3729,6 +3729,10 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
 out:
	fib6_info_release(rt);
	return ERR_PTR(err);
+out_free:
+	ip_fib_metrics_put(rt->fib6_metrics);
+	kfree(rt);
+	return ERR_PTR(err);
 }

 int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,

@@ -1340,7 +1340,7 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
	ip_vs_addr_copy(svc->af, &svc->addr, &u->addr);
	svc->port = u->port;
	svc->fwmark = u->fwmark;
-	svc->flags = u->flags;
+	svc->flags = u->flags & ~IP_VS_SVC_F_HASHED;
	svc->timeout = u->timeout * HZ;
	svc->netmask = u->netmask;
	svc->ipvs = ipvs;

@@ -660,7 +660,7 @@ int nf_conntrack_proto_init(void)
 #if IS_ENABLED(CONFIG_IPV6)
 cleanup_sockopt:
-	nf_unregister_sockopt(&so_getorigdst6);
+	nf_unregister_sockopt(&so_getorigdst);
 #endif
	return ret;
 }

@@ -380,10 +380,14 @@ static int
 nfnl_cthelper_update(const struct nlattr * const tb[],
		     struct nf_conntrack_helper *helper)
 {
+	u32 size;
	int ret;

-	if (tb[NFCTH_PRIV_DATA_LEN])
-		return -EBUSY;
+	if (tb[NFCTH_PRIV_DATA_LEN]) {
+		size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
+		if (size != helper->data_len)
+			return -EBUSY;
+	}

	if (tb[NFCTH_POLICY]) {
		ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]);

@@ -1218,7 +1218,7 @@ static void nft_ct_expect_obj_eval(struct nft_object *obj,
	struct nf_conn *ct;

	ct = nf_ct_get(pkt->skb, &ctinfo);
-	if (!ct || ctinfo == IP_CT_UNTRACKED) {
+	if (!ct || nf_ct_is_confirmed(ct) || nf_ct_is_template(ct)) {
		regs->verdict.code = NFT_BREAK;
		return;
	}

@@ -110,6 +110,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
	if (!llcp_sock->service_name) {
		nfc_llcp_local_put(llcp_sock->local);
		llcp_sock->local = NULL;
+		llcp_sock->dev = NULL;
		ret = -ENOMEM;
		goto put_dev;
	}
@@ -119,6 +120,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
		llcp_sock->local = NULL;
		kfree(llcp_sock->service_name);
		llcp_sock->service_name = NULL;
+		llcp_sock->dev = NULL;
		ret = -EADDRINUSE;
		goto put_dev;
	}

@@ -648,9 +648,6 @@ static int tcf_ct_fill_params(struct net *net,
			   sizeof(p->zone));
	}

-	if (p->zone == NF_CT_DEFAULT_ZONE_ID)
-		return 0;
-
	nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
	tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
	if (!tmpl) {

@@ -233,7 +233,8 @@ void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest)
 */
 static int tipc_enable_bearer(struct net *net, const char *name,
			      u32 disc_domain, u32 prio,
-			      struct nlattr *attr[])
+			      struct nlattr *attr[],
+			      struct netlink_ext_ack *extack)
 {
	struct tipc_net *tn = tipc_net(net);
	struct tipc_bearer_names b_names;
@@ -244,20 +245,24 @@ static int tipc_enable_bearer(struct net *net, const char *name,
	int bearer_id = 0;
	int res = -EINVAL;
	char *errstr = "";
+	u32 i;

	if (!bearer_name_validate(name, &b_names)) {
		errstr = "illegal name";
+		NL_SET_ERR_MSG(extack, "Illegal name");
		goto rejected;
	}

	if (prio > TIPC_MAX_LINK_PRI && prio != TIPC_MEDIA_LINK_PRI) {
		errstr = "illegal priority";
+		NL_SET_ERR_MSG(extack, "Illegal priority");
		goto rejected;
	}

	m = tipc_media_find(b_names.media_name);
	if (!m) {
		errstr = "media not registered";
+		NL_SET_ERR_MSG(extack, "Media not registered");
		goto rejected;
	}
@@ -265,33 +270,43 @@ static int tipc_enable_bearer(struct net *net, const char *name,
		prio = m->priority;

	/* Check new bearer vs existing ones and find free bearer id if any */
-	while (bearer_id < MAX_BEARERS) {
-		b = rtnl_dereference(tn->bearer_list[bearer_id]);
-		if (!b)
-			break;
+	bearer_id = MAX_BEARERS;
+	i = MAX_BEARERS;
+	while (i-- != 0) {
+		b = rtnl_dereference(tn->bearer_list[i]);
+		if (!b) {
+			bearer_id = i;
+			continue;
+		}
		if (!strcmp(name, b->name)) {
			errstr = "already enabled";
+			NL_SET_ERR_MSG(extack, "Already enabled");
			goto rejected;
		}
-		bearer_id++;
-		if (b->priority != prio)
-			continue;
-		if (++with_this_prio <= 2)
-			continue;
-		pr_warn("Bearer <%s>: already 2 bearers with priority %u\n",
-			name, prio);
-		if (prio == TIPC_MIN_LINK_PRI) {
-			errstr = "cannot adjust to lower";
-			goto rejected;
+
+		if (b->priority == prio &&
+		    (++with_this_prio > 2)) {
+			pr_warn("Bearer <%s>: already 2 bearers with priority %u\n",
+				name, prio);
+
+			if (prio == TIPC_MIN_LINK_PRI) {
+				errstr = "cannot adjust to lower";
+				NL_SET_ERR_MSG(extack, "Cannot adjust to lower");
+				goto rejected;
+			}
+
+			pr_warn("Bearer <%s>: trying with adjusted priority\n",
+				name);
+			prio--;
+			bearer_id = MAX_BEARERS;
+			i = MAX_BEARERS;
+			with_this_prio = 1;
		}
-		pr_warn("Bearer <%s>: trying with adjusted priority\n", name);
-		prio--;
-		bearer_id = 0;
-		with_this_prio = 1;
	}

	if (bearer_id >= MAX_BEARERS) {
		errstr = "max 3 bearers permitted";
+		NL_SET_ERR_MSG(extack, "Max 3 bearers permitted");
		goto rejected;
	}
@@ -305,6 +320,7 @@ static int tipc_enable_bearer(struct net *net, const char *name,
	if (res) {
		kfree(b);
		errstr = "failed to enable media";
+		NL_SET_ERR_MSG(extack, "Failed to enable media");
		goto rejected;
	}
@@ -320,6 +336,7 @@ static int tipc_enable_bearer(struct net *net, const char *name,
	if (res) {
		bearer_disable(net, b);
		errstr = "failed to create discoverer";
+		NL_SET_ERR_MSG(extack, "Failed to create discoverer");
		goto rejected;
	}
@@ -862,6 +879,7 @@ int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info)
	bearer = tipc_bearer_find(net, name);
	if (!bearer) {
		err = -EINVAL;
+		NL_SET_ERR_MSG(info->extack, "Bearer not found");
		goto err_out;
	}
@@ -901,8 +919,10 @@ int __tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
	name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);

	bearer = tipc_bearer_find(net, name);
-	if (!bearer)
+	if (!bearer) {
+		NL_SET_ERR_MSG(info->extack, "Bearer not found");
		return -EINVAL;
+	}

	bearer_disable(net, bearer);
@@ -960,7 +980,8 @@ int __tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
	}

-	return tipc_enable_bearer(net, bearer, domain, prio, attrs);
+	return tipc_enable_bearer(net, bearer, domain, prio, attrs,
+				  info->extack);
 }

 int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
@@ -999,6 +1020,7 @@ int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info)
	b = tipc_bearer_find(net, name);
	if (!b) {
		rtnl_unlock();
+		NL_SET_ERR_MSG(info->extack, "Bearer not found");
		return -EINVAL;
	}
@@ -1039,8 +1061,10 @@ int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
	name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);

	b = tipc_bearer_find(net, name);
-	if (!b)
+	if (!b) {
+		NL_SET_ERR_MSG(info->extack, "Bearer not found");
		return -EINVAL;
+	}

	if (attrs[TIPC_NLA_BEARER_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
@@ -1059,12 +1083,18 @@ int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
		if (props[TIPC_NLA_PROP_WIN])
			b->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if (props[TIPC_NLA_PROP_MTU]) {
-			if (b->media->type_id != TIPC_MEDIA_TYPE_UDP)
+			if (b->media->type_id != TIPC_MEDIA_TYPE_UDP) {
+				NL_SET_ERR_MSG(info->extack,
+					       "MTU property is unsupported");
				return -EINVAL;
+			}
 #ifdef CONFIG_TIPC_MEDIA_UDP
			if (tipc_udp_mtu_bad(nla_get_u32
-					     (props[TIPC_NLA_PROP_MTU])))
+					     (props[TIPC_NLA_PROP_MTU]))) {
+				NL_SET_ERR_MSG(info->extack,
+					       "MTU value is out-of-range");
				return -EINVAL;
+			}
			b->mtu = nla_get_u32(props[TIPC_NLA_PROP_MTU]);
			tipc_node_apply_property(net, b, TIPC_NLA_PROP_MTU);
 #endif
@@ -1192,6 +1222,7 @@ int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info)
	rtnl_lock();
	media = tipc_media_find(name);
	if (!media) {
+		NL_SET_ERR_MSG(info->extack, "Media not found");
		err = -EINVAL;
		goto err_out;
	}
@@ -1228,9 +1259,10 @@ int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
	name = nla_data(attrs[TIPC_NLA_MEDIA_NAME]);

	m = tipc_media_find(name);
-	if (!m)
+	if (!m) {
+		NL_SET_ERR_MSG(info->extack, "Media not found");
		return -EINVAL;
+	}

	if (attrs[TIPC_NLA_MEDIA_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
@@ -1246,12 +1278,18 @@ int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
		if (props[TIPC_NLA_PROP_WIN])
			m->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if (props[TIPC_NLA_PROP_MTU]) {
-			if (m->type_id != TIPC_MEDIA_TYPE_UDP)
+			if (m->type_id != TIPC_MEDIA_TYPE_UDP) {
+				NL_SET_ERR_MSG(info->extack,
+					       "MTU property is unsupported");
				return -EINVAL;
+			}
 #ifdef CONFIG_TIPC_MEDIA_UDP
			if (tipc_udp_mtu_bad(nla_get_u32
-					     (props[TIPC_NLA_PROP_MTU])))
+					     (props[TIPC_NLA_PROP_MTU]))) {
+				NL_SET_ERR_MSG(info->extack,
+					       "MTU value is out-of-range");
				return -EINVAL;
+			}
			m->mtu = nla_get_u32(props[TIPC_NLA_PROP_MTU]);
 #endif
		}

@@ -433,6 +433,8 @@ void cfg80211_sme_abandon_assoc(struct wireless_dev *wdev);

 /* internal helpers */
 bool cfg80211_supported_cipher_suite(struct wiphy *wiphy, u32 cipher);
+bool cfg80211_valid_key_idx(struct cfg80211_registered_device *rdev,
+			    int key_idx, bool pairwise);
 int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
				   struct key_params *params, int key_idx,
				   bool pairwise, const u8 *mac_addr);

@@ -3979,9 +3979,6 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
	if (err)
		return err;

-	if (key.idx < 0)
-		return -EINVAL;
-
	if (info->attrs[NL80211_ATTR_MAC])
		mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);

@@ -3997,6 +3994,10 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
	    key.type != NL80211_KEYTYPE_GROUP)
		return -EINVAL;

+	if (!cfg80211_valid_key_idx(rdev, key.idx,
+				    key.type == NL80211_KEYTYPE_PAIRWISE))
+		return -EINVAL;
+
	if (!rdev->ops->del_key)
		return -EOPNOTSUPP;

@@ -229,11 +229,48 @@ bool cfg80211_supported_cipher_suite(struct wiphy *wiphy, u32 cipher)
	return false;
 }

+static bool
+cfg80211_igtk_cipher_supported(struct cfg80211_registered_device *rdev)
+{
+	struct wiphy *wiphy = &rdev->wiphy;
+	int i;
+
+	for (i = 0; i < wiphy->n_cipher_suites; i++) {
+		switch (wiphy->cipher_suites[i]) {
+		case WLAN_CIPHER_SUITE_AES_CMAC:
+		case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+			return true;
+		}
+	}
+
+	return false;
+}
+
+bool cfg80211_valid_key_idx(struct cfg80211_registered_device *rdev,
+			    int key_idx, bool pairwise)
+{
+	int max_key_idx;
+
+	if (pairwise)
+		max_key_idx = 3;
+	else if (cfg80211_igtk_cipher_supported(rdev))
+		max_key_idx = 5;
+	else
+		max_key_idx = 3;
+
+	if (key_idx < 0 || key_idx > max_key_idx)
+		return false;
+
+	return true;
+}
+
 int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
				   struct key_params *params, int key_idx,
				   bool pairwise, const u8 *mac_addr)
 {
-	if (key_idx < 0 || key_idx > 5)
+	if (!cfg80211_valid_key_idx(rdev, key_idx, pairwise))
		return -EINVAL;

	if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))

@@ -117,22 +117,27 @@ static int mdpy_fb_probe(struct pci_dev *pdev,
	if (format != DRM_FORMAT_XRGB8888) {
		pci_err(pdev, "format mismatch (0x%x != 0x%x)\n",
			format, DRM_FORMAT_XRGB8888);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto err_release_regions;
	}
	if (width < 100 || width > 10000) {
		pci_err(pdev, "width (%d) out of range\n", width);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto err_release_regions;
	}
	if (height < 100 || height > 10000) {
		pci_err(pdev, "height (%d) out of range\n", height);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto err_release_regions;
	}
	pci_info(pdev, "mdpy found: %dx%d framebuffer\n",
		 width, height);

	info = framebuffer_alloc(sizeof(struct mdpy_fb_par), &pdev->dev);
-	if (!info)
+	if (!info) {
+		ret = -ENOMEM;
		goto err_release_regions;
+	}
	pci_set_drvdata(pdev, info);
	par = info->par;

@@ -491,9 +491,10 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
		return;
	if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
		return;
+	event += 10; /* convert to SNDRV_TIMER_EVENT_MXXX */
	list_for_each_entry(ts, &ti->slave_active_head, active_list)
		if (ts->ccallback)
-			ts->ccallback(ts, event + 100, &tstamp, resolution);
+			ts->ccallback(ts, event, &tstamp, resolution);
 }

 /* start/continue a master timer */
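For reference, the master-timer event codes in the uapi header (include/uapi/sound/asound.h, abbreviated) sit exactly 10 above their base events, which is why the old `event + 100` produced event codes outside the enum:

	SNDRV_TIMER_EVENT_MSTART    = SNDRV_TIMER_EVENT_START + 10,
	SNDRV_TIMER_EVENT_MSTOP     = SNDRV_TIMER_EVENT_STOP + 10,
	SNDRV_TIMER_EVENT_MCONTINUE = SNDRV_TIMER_EVENT_CONTINUE + 10,
	SNDRV_TIMER_EVENT_MPAUSE    = SNDRV_TIMER_EVENT_PAUSE + 10,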

@@ -8062,6 +8062,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
	SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
	SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
	SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+	SND_PCI_QUIRK(0x103c, 0x841c, "HP Pavilion 15-CK0xx", ALC269_FIXUP_HP_MUTE_LED_MIC3),
	SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
	SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
	SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),

@@ -1697,7 +1697,7 @@ static struct snd_kcontrol_new snd_microii_mixer_spdif[] = {
 static int snd_microii_controls_create(struct usb_mixer_interface *mixer)
 {
	int err, i;
-	const static usb_mixer_elem_resume_func_t resume_funcs[] = {
+	static const usb_mixer_elem_resume_func_t resume_funcs[] = {
		snd_microii_spdif_default_update,
		NULL,