This is the 5.4.20 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl5HElwACgkQONu9yGCS
 aT6GQBAAxBLl+L518k3/Jm7Fv5VGFtfk7QIJmLKSdI58Gj7aLib2CulB5dJpHu0Z
 uOJXEUKQoUC739MjS6IgrAUoee/GTgyeOS1gyI49IBVvrBgjQop/3FJ4Oe4EF6Wj
 aEy7xA1k1MRUM4XWy3PiMvIuaxWNWoEn22DS703adOKPEx2yS0sPtAf6RRRpzxW+
 oWR9aJv5y+wKRi7frRvTJ8juQoeo67XHNQWBybv7v+th7KqF33EYk/faLJqTbqNd
 caJAG+DuGsu/oLcwlWEE5CZ8rP5OAOh12505J9XG5uXoqA2BrQFCTLW6okG1PUNI
 I+GugtMKWwOSP8dHkfq/jPKInG3H+mCwVW3wWzKfWBJwIi4NWokYK31SQty1BNBe
 if9ytUT97ykgkovVjVbu+X+wMnEes2JMrVyBAzY2cOK01KD2PUR/cLdZZXTil4A0
 rEKXd+tJRN7+ko+z4EJRdstzNtB030tDeEUmwJSIlJoWPRROk69it8d4/OFXe+/u
 Le4T4V6w22tcP0H/2CtDSwTntDbjNoXWpTGzqp2HO0urObqZyX99leyCI8Ee9sRz
 00B6ykAOnOMPdLmAGmpBXnhKRK89VlnfG5A/d609km4EPJuKZyX9KS6tZSwpJIAd
 3W9FWaNyr8Z79BDJyeK0ftS5BD/WNGDLux7lylLzMsPAmF7YNsI=
 =Zp/p
 -----END PGP SIGNATURE-----

Merge tag 'v5.4.20' into 5.4-1.0.0-imx

This is the 5.4.20 stable release
Gary Bisson 2020-04-15 14:30:13 +02:00
commit 73859ab789
100 changed files with 899 additions and 420 deletions


@@ -85,7 +85,7 @@ properties:
Must be the device tree identifier of the over-sampling
mode pins. As the line is active high, it should be marked
GPIO_ACTIVE_HIGH.
maxItems: 1
maxItems: 3
adi,sw-mode:
description:
@@ -128,9 +128,9 @@ examples:
adi,conversion-start-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;
reset-gpios = <&gpio 27 GPIO_ACTIVE_HIGH>;
adi,first-data-gpios = <&gpio 22 GPIO_ACTIVE_HIGH>;
adi,oversampling-ratio-gpios = <&gpio 18 GPIO_ACTIVE_HIGH
&gpio 23 GPIO_ACTIVE_HIGH
&gpio 26 GPIO_ACTIVE_HIGH>;
adi,oversampling-ratio-gpios = <&gpio 18 GPIO_ACTIVE_HIGH>,
<&gpio 23 GPIO_ACTIVE_HIGH>,
<&gpio 26 GPIO_ACTIVE_HIGH>;
standby-gpios = <&gpio 24 GPIO_ACTIVE_LOW>;
adi,sw-mode;
};


@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 19
SUBLEVEL = 20
EXTRAVERSION =
NAME = Kleptomaniac Octopus


@@ -77,6 +77,7 @@
interrupt-names = "macirq";
phy-mode = "rgmii";
snps,pbl = < 32 >;
snps,multicast-filter-bins = <256>;
clocks = <&apbclk>;
clock-names = "stmmaceth";
max-speed = <100>;


@@ -704,6 +704,60 @@
ti,bit-shift = <8>;
reg = <0x2a48>;
};
clkout1_osc_div_ck: clkout1-osc-div-ck {
#clock-cells = <0>;
compatible = "ti,divider-clock";
clocks = <&sys_clkin_ck>;
ti,bit-shift = <20>;
ti,max-div = <4>;
reg = <0x4100>;
};
clkout1_src2_mux_ck: clkout1-src2-mux-ck {
#clock-cells = <0>;
compatible = "ti,mux-clock";
clocks = <&clk_rc32k_ck>, <&sysclk_div>, <&dpll_ddr_m2_ck>,
<&dpll_per_m2_ck>, <&dpll_disp_m2_ck>,
<&dpll_mpu_m2_ck>;
reg = <0x4100>;
};
clkout1_src2_pre_div_ck: clkout1-src2-pre-div-ck {
#clock-cells = <0>;
compatible = "ti,divider-clock";
clocks = <&clkout1_src2_mux_ck>;
ti,bit-shift = <4>;
ti,max-div = <8>;
reg = <0x4100>;
};
clkout1_src2_post_div_ck: clkout1-src2-post-div-ck {
#clock-cells = <0>;
compatible = "ti,divider-clock";
clocks = <&clkout1_src2_pre_div_ck>;
ti,bit-shift = <8>;
ti,max-div = <32>;
ti,index-power-of-two;
reg = <0x4100>;
};
clkout1_mux_ck: clkout1-mux-ck {
#clock-cells = <0>;
compatible = "ti,mux-clock";
clocks = <&clkout1_osc_div_ck>, <&clk_rc32k_ck>,
<&clkout1_src2_post_div_ck>, <&dpll_extdev_m2_ck>;
ti,bit-shift = <16>;
reg = <0x4100>;
};
clkout1_ck: clkout1-ck {
#clock-cells = <0>;
compatible = "ti,gate-clock";
clocks = <&clkout1_mux_ck>;
ti,bit-shift = <23>;
reg = <0x4100>;
};
};
&prcm {


@@ -187,7 +187,7 @@
usart0 {
pinctrl_usart0: usart0-0 {
atmel,pins =
<AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_NONE
<AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
AT91_PIOB 5 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
};
@@ -221,7 +221,7 @@
usart1 {
pinctrl_usart1: usart1-0 {
atmel,pins =
<AT91_PIOB 6 AT91_PERIPH_A AT91_PINCTRL_NONE
<AT91_PIOB 6 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
AT91_PIOB 7 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
};
@@ -239,7 +239,7 @@
usart2 {
pinctrl_usart2: usart2-0 {
atmel,pins =
<AT91_PIOB 8 AT91_PERIPH_A AT91_PINCTRL_NONE
<AT91_PIOB 8 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
AT91_PIOB 9 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
};
@@ -257,7 +257,7 @@
usart3 {
pinctrl_usart3: usart3-0 {
atmel,pins =
<AT91_PIOB 10 AT91_PERIPH_A AT91_PINCTRL_NONE
<AT91_PIOB 10 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
AT91_PIOB 11 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
};
@@ -275,7 +275,7 @@
uart0 {
pinctrl_uart0: uart0-0 {
atmel,pins =
<AT91_PIOA 31 AT91_PERIPH_B AT91_PINCTRL_NONE
<AT91_PIOA 31 AT91_PERIPH_B AT91_PINCTRL_PULL_UP
AT91_PIOA 30 AT91_PERIPH_B AT91_PINCTRL_PULL_UP>;
};
};
@@ -283,7 +283,7 @@
uart1 {
pinctrl_uart1: uart1-0 {
atmel,pins =
<AT91_PIOB 12 AT91_PERIPH_A AT91_PINCTRL_NONE
<AT91_PIOB 12 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
AT91_PIOB 13 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
};
};


@@ -329,7 +329,7 @@
usart0 {
pinctrl_usart0: usart0-0 {
atmel,pins =
<AT91_PIOC 8 AT91_PERIPH_A AT91_PINCTRL_NONE>,
<AT91_PIOC 8 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
<AT91_PIOC 9 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
};
@@ -347,7 +347,7 @@
usart1 {
pinctrl_usart1: usart1-0 {
atmel,pins =
<AT91_PIOC 12 AT91_PERIPH_A AT91_PINCTRL_NONE>,
<AT91_PIOC 12 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
<AT91_PIOC 13 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
};
@@ -365,7 +365,7 @@
usart2 {
pinctrl_usart2: usart2-0 {
atmel,pins =
<AT91_PIOC 14 AT91_PERIPH_A AT91_PINCTRL_NONE>,
<AT91_PIOC 14 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
<AT91_PIOC 15 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
};


@@ -183,7 +183,7 @@
usart0 {
pinctrl_usart0: usart0-0 {
atmel,pins =
<AT91_PIOA 26 AT91_PERIPH_A AT91_PINCTRL_NONE
<AT91_PIOA 26 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
AT91_PIOA 27 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
};
@@ -201,7 +201,7 @@
usart1 {
pinctrl_usart1: usart1-0 {
atmel,pins =
<AT91_PIOD 0 AT91_PERIPH_A AT91_PINCTRL_NONE
<AT91_PIOD 0 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
AT91_PIOD 1 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
};
@@ -219,7 +219,7 @@
usart2 {
pinctrl_usart2: usart2-0 {
atmel,pins =
<AT91_PIOD 2 AT91_PERIPH_A AT91_PINCTRL_NONE
<AT91_PIOD 2 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
AT91_PIOD 3 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
};


@@ -556,7 +556,7 @@
usart0 {
pinctrl_usart0: usart0-0 {
atmel,pins =
<AT91_PIOB 19 AT91_PERIPH_A AT91_PINCTRL_NONE
<AT91_PIOB 19 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
AT91_PIOB 18 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
};
@@ -574,7 +574,7 @@
usart1 {
pinctrl_usart1: usart1-0 {
atmel,pins =
<AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_NONE
<AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
AT91_PIOB 5 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
};
@@ -592,7 +592,7 @@
usart2 {
pinctrl_usart2: usart2-0 {
atmel,pins =
<AT91_PIOB 6 AT91_PERIPH_A AT91_PINCTRL_NONE
<AT91_PIOB 6 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
AT91_PIOB 7 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
};
@@ -610,7 +610,7 @@
usart3 {
pinctrl_usart3: usart3-0 {
atmel,pins =
<AT91_PIOB 8 AT91_PERIPH_A AT91_PINCTRL_NONE
<AT91_PIOB 8 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
AT91_PIOB 9 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
};


@@ -682,7 +682,7 @@
usart0 {
pinctrl_usart0: usart0-0 {
atmel,pins =
<AT91_PIOA 6 AT91_PERIPH_A AT91_PINCTRL_NONE>,
<AT91_PIOA 6 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
<AT91_PIOA 7 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
};
@@ -721,7 +721,7 @@
usart1 {
pinctrl_usart1: usart1-0 {
atmel,pins =
<AT91_PIOA 11 AT91_PERIPH_A AT91_PINCTRL_NONE>,
<AT91_PIOA 11 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
<AT91_PIOA 12 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
};
@@ -744,7 +744,7 @@
usart2 {
pinctrl_usart2: usart2-0 {
atmel,pins =
<AT91_PIOA 13 AT91_PERIPH_A AT91_PINCTRL_NONE>,
<AT91_PIOA 13 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
<AT91_PIOA 14 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
};
@@ -767,7 +767,7 @@
usart3 {
pinctrl_usart3: usart3-0 {
atmel,pins =
<AT91_PIOB 0 AT91_PERIPH_A AT91_PINCTRL_NONE>,
<AT91_PIOB 0 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
<AT91_PIOB 1 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
};


@@ -129,8 +129,8 @@
gpu_opp_table: gpu-opp-table {
compatible = "operating-points-v2";
opp-182150000 {
opp-hz = /bits/ 64 <182150000>;
opp-182142857 {
opp-hz = /bits/ 64 <182142857>;
opp-microvolt = <1150000>;
};
opp-318750000 {


@@ -125,8 +125,8 @@
opp-hz = /bits/ 64 <255000000>;
opp-microvolt = <1100000>;
};
opp-364300000 {
opp-hz = /bits/ 64 <364300000>;
opp-364285714 {
opp-hz = /bits/ 64 <364285714>;
opp-microvolt = <1100000>;
};
opp-425000000 {


@@ -1188,49 +1188,49 @@
usart0_clk: usart0_clk {
#clock-cells = <0>;
reg = <12>;
atmel,clk-output-range = <0 66000000>;
atmel,clk-output-range = <0 83000000>;
};
usart1_clk: usart1_clk {
#clock-cells = <0>;
reg = <13>;
atmel,clk-output-range = <0 66000000>;
atmel,clk-output-range = <0 83000000>;
};
usart2_clk: usart2_clk {
#clock-cells = <0>;
reg = <14>;
atmel,clk-output-range = <0 66000000>;
atmel,clk-output-range = <0 83000000>;
};
usart3_clk: usart3_clk {
#clock-cells = <0>;
reg = <15>;
atmel,clk-output-range = <0 66000000>;
atmel,clk-output-range = <0 83000000>;
};
uart0_clk: uart0_clk {
#clock-cells = <0>;
reg = <16>;
atmel,clk-output-range = <0 66000000>;
atmel,clk-output-range = <0 83000000>;
};
twi0_clk: twi0_clk {
reg = <18>;
#clock-cells = <0>;
atmel,clk-output-range = <0 16625000>;
atmel,clk-output-range = <0 41500000>;
};
twi1_clk: twi1_clk {
#clock-cells = <0>;
reg = <19>;
atmel,clk-output-range = <0 16625000>;
atmel,clk-output-range = <0 41500000>;
};
twi2_clk: twi2_clk {
#clock-cells = <0>;
reg = <20>;
atmel,clk-output-range = <0 16625000>;
atmel,clk-output-range = <0 41500000>;
};
mci0_clk: mci0_clk {
@@ -1246,19 +1246,19 @@
spi0_clk: spi0_clk {
#clock-cells = <0>;
reg = <24>;
atmel,clk-output-range = <0 133000000>;
atmel,clk-output-range = <0 166000000>;
};
spi1_clk: spi1_clk {
#clock-cells = <0>;
reg = <25>;
atmel,clk-output-range = <0 133000000>;
atmel,clk-output-range = <0 166000000>;
};
tcb0_clk: tcb0_clk {
#clock-cells = <0>;
reg = <26>;
atmel,clk-output-range = <0 133000000>;
atmel,clk-output-range = <0 166000000>;
};
pwm_clk: pwm_clk {
@@ -1269,7 +1269,7 @@
adc_clk: adc_clk {
#clock-cells = <0>;
reg = <29>;
atmel,clk-output-range = <0 66000000>;
atmel,clk-output-range = <0 83000000>;
};
dma0_clk: dma0_clk {
@@ -1300,13 +1300,13 @@
ssc0_clk: ssc0_clk {
#clock-cells = <0>;
reg = <38>;
atmel,clk-output-range = <0 66000000>;
atmel,clk-output-range = <0 83000000>;
};
ssc1_clk: ssc1_clk {
#clock-cells = <0>;
reg = <39>;
atmel,clk-output-range = <0 66000000>;
atmel,clk-output-range = <0 83000000>;
};
sha_clk: sha_clk {


@@ -36,13 +36,13 @@
can0_clk: can0_clk {
#clock-cells = <0>;
reg = <40>;
atmel,clk-output-range = <0 66000000>;
atmel,clk-output-range = <0 83000000>;
};
can1_clk: can1_clk {
#clock-cells = <0>;
reg = <41>;
atmel,clk-output-range = <0 66000000>;
atmel,clk-output-range = <0 83000000>;
};
};
};


@@ -22,6 +22,7 @@
tcb1_clk: tcb1_clk {
#clock-cells = <0>;
reg = <27>;
atmel,clk-output-range = <0 166000000>;
};
};
};


@@ -41,13 +41,13 @@
uart0_clk: uart0_clk {
#clock-cells = <0>;
reg = <16>;
atmel,clk-output-range = <0 66000000>;
atmel,clk-output-range = <0 83000000>;
};
uart1_clk: uart1_clk {
#clock-cells = <0>;
reg = <17>;
atmel,clk-output-range = <0 66000000>;
atmel,clk-output-range = <0 83000000>;
};
};
};


@@ -691,6 +691,12 @@ static void __init at91_pm_use_default_mode(int pm_mode)
soc_pm.data.suspend_mode = AT91_PM_ULP0;
}
static const struct of_device_id atmel_shdwc_ids[] = {
{ .compatible = "atmel,sama5d2-shdwc" },
{ .compatible = "microchip,sam9x60-shdwc" },
{ /* sentinel. */ }
};
static void __init at91_pm_modes_init(void)
{
struct device_node *np;
@@ -700,7 +706,7 @@ static void __init at91_pm_modes_init(void)
!at91_is_pm_mode_active(AT91_PM_ULP1))
return;
np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-shdwc");
np = of_find_matching_node(NULL, atmel_shdwc_ids);
if (!np) {
pr_warn("%s: failed to find shdwc!\n", __func__);
goto ulp1_default;
@@ -751,6 +757,7 @@ static const struct of_device_id atmel_pmc_ids[] __initconst = {
{ .compatible = "atmel,sama5d3-pmc", .data = &pmc_infos[1] },
{ .compatible = "atmel,sama5d4-pmc", .data = &pmc_infos[1] },
{ .compatible = "atmel,sama5d2-pmc", .data = &pmc_infos[1] },
{ .compatible = "microchip,sam9x60-pmc", .data = &pmc_infos[1] },
{ /* sentinel */ },
};


@@ -323,7 +323,7 @@ static inline void poison_init_mem(void *s, size_t count)
*p++ = 0xe7fddef0;
}
static inline void
static inline void __init
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
struct page *start_pg, *end_pg;


@@ -143,6 +143,7 @@
phy-mode = "sgmii";
status = "okay";
managed = "in-band-status";
phys = <&comphy1 0>;
sfp = <&sfp_eth0>;
};
@@ -150,11 +151,14 @@
phy-mode = "sgmii";
status = "okay";
managed = "in-band-status";
phys = <&comphy0 1>;
sfp = <&sfp_eth1>;
};
&usb3 {
status = "okay";
phys = <&usb2_utmi_otg_phy>;
phy-names = "usb2-utmi-otg-phy";
};
&uart0 {


@@ -408,6 +408,8 @@
reg = <5>;
label = "cpu";
ethernet = <&cp1_eth2>;
phy-mode = "2500base-x";
managed = "in-band-status";
};
};


@@ -985,7 +985,7 @@
tcsr_mutex_regs: syscon@1f40000 {
compatible = "syscon";
reg = <0x01f40000 0x20000>;
reg = <0x01f40000 0x40000>;
};
tlmm: pinctrl@3400000 {


@@ -636,7 +636,6 @@
/* audio_clkout0/1/2/3 */
#clock-cells = <1>;
clock-frequency = <12288000 11289600>;
clkout-lr-synchronous;
status = "okay";


@@ -32,9 +32,7 @@ static unsigned long elf_hwcap __read_mostly;
#define COMPAT_ELF_HWCAP_DEFAULT \
(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
COMPAT_HWCAP_TLS|COMPAT_HWCAP_IDIV|\
COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
@@ -1385,7 +1383,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
{
/* FP/SIMD is not implemented */
.capability = ARM64_HAS_NO_FPSIMD,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
.min_field_value = 0,
.matches = has_no_fpsimd,
},
@@ -1613,6 +1611,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.match_list = list, \
}
#define HWCAP_CAP_MATCH(match, cap_type, cap) \
{ \
__HWCAP_CAP(#cap, cap_type, cap) \
.matches = match, \
}
#ifdef CONFIG_ARM64_PTR_AUTH
static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
{
@@ -1686,8 +1690,35 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
{},
};
#ifdef CONFIG_COMPAT
static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope)
{
/*
* Check that all of MVFR1_EL1.{SIMDSP, SIMDInt, SIMDLS} are available,
* in line with that of arm32 as in vfp_init(). We make sure that the
* check is future proof, by making sure value is non-zero.
*/
u32 mvfr1;
WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
if (scope == SCOPE_SYSTEM)
mvfr1 = read_sanitised_ftr_reg(SYS_MVFR1_EL1);
else
mvfr1 = read_sysreg_s(SYS_MVFR1_EL1);
return cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDSP_SHIFT) &&
cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDINT_SHIFT) &&
cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDLS_SHIFT);
}
#endif
static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
#ifdef CONFIG_COMPAT
HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON),
HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_SIMDFMAC_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4),
/* Arm v8 mandates MVFR0.FPDP == {0, 2}. So, piggy back on this for the presence of VFP support */
HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP),
HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3),
HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),


@@ -269,6 +269,7 @@ static void sve_free(struct task_struct *task)
*/
static void task_fpsimd_load(void)
{
WARN_ON(!system_supports_fpsimd());
WARN_ON(!have_cpu_fpsimd_context());
if (system_supports_sve() && test_thread_flag(TIF_SVE))
@@ -289,6 +290,7 @@ static void fpsimd_save(void)
this_cpu_ptr(&fpsimd_last_state);
/* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
WARN_ON(!system_supports_fpsimd());
WARN_ON(!have_cpu_fpsimd_context());
if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
@@ -1092,6 +1094,7 @@ void fpsimd_bind_task_to_cpu(void)
struct fpsimd_last_state_struct *last =
this_cpu_ptr(&fpsimd_last_state);
WARN_ON(!system_supports_fpsimd());
last->st = &current->thread.uw.fpsimd_state;
last->sve_state = current->thread.sve_state;
last->sve_vl = current->thread.sve_vl;
@@ -1114,6 +1117,7 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
struct fpsimd_last_state_struct *last =
this_cpu_ptr(&fpsimd_last_state);
WARN_ON(!system_supports_fpsimd());
WARN_ON(!in_softirq() && !irqs_disabled());
last->st = st;
@@ -1128,8 +1132,19 @@
*/
void fpsimd_restore_current_state(void)
{
if (!system_supports_fpsimd())
/*
* For the tasks that were created before we detected the absence of
* FP/SIMD, the TIF_FOREIGN_FPSTATE could be set via fpsimd_thread_switch(),
* e.g, init. This could be then inherited by the children processes.
* If we later detect that the system doesn't support FP/SIMD,
* we must clear the flag for all the tasks to indicate that the
* FPSTATE is clean (as we can't have one) to avoid looping for ever in
* do_notify_resume().
*/
if (!system_supports_fpsimd()) {
clear_thread_flag(TIF_FOREIGN_FPSTATE);
return;
}
get_cpu_fpsimd_context();
@@ -1148,7 +1163,7 @@ void fpsimd_restore_current_state(void)
*/
void fpsimd_update_current_state(struct user_fpsimd_state const *state)
{
if (!system_supports_fpsimd())
if (WARN_ON(!system_supports_fpsimd()))
return;
get_cpu_fpsimd_context();
@@ -1179,7 +1194,13 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state)
void fpsimd_flush_task_state(struct task_struct *t)
{
t->thread.fpsimd_cpu = NR_CPUS;
/*
* If we don't support fpsimd, bail out after we have
* reset the fpsimd_cpu for this task and clear the
* FPSTATE.
*/
if (!system_supports_fpsimd())
return;
barrier();
set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE);
@@ -1193,6 +1214,7 @@ void fpsimd_flush_task_state(struct task_struct *t)
*/
static void fpsimd_flush_cpu_state(void)
{
WARN_ON(!system_supports_fpsimd());
__this_cpu_write(fpsimd_last_state.st, NULL);
set_thread_flag(TIF_FOREIGN_FPSTATE);
}
@@ -1203,6 +1225,8 @@ static void fpsimd_flush_cpu_state(void)
*/
void fpsimd_save_and_flush_cpu_state(void)
{
if (!system_supports_fpsimd())
return;
WARN_ON(preemptible());
__get_cpu_fpsimd_context();
fpsimd_save();


@@ -615,6 +615,13 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
return 0;
}
static int fpr_active(struct task_struct *target, const struct user_regset *regset)
{
if (!system_supports_fpsimd())
return -ENODEV;
return regset->n;
}
/*
* TODO: update fp accessors for lazy context switching (sync/flush hwstate)
*/
@@ -637,6 +644,9 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
if (!system_supports_fpsimd())
return -EINVAL;
if (target == current)
fpsimd_preserve_current_state();
@@ -676,6 +686,9 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
{
int ret;
if (!system_supports_fpsimd())
return -EINVAL;
ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
if (ret)
return ret;
@@ -1134,6 +1147,7 @@ static const struct user_regset aarch64_regsets[] = {
*/
.size = sizeof(u32),
.align = sizeof(u32),
.active = fpr_active,
.get = fpr_get,
.set = fpr_set
},
@@ -1348,6 +1362,9 @@ static int compat_vfp_get(struct task_struct *target,
compat_ulong_t fpscr;
int ret, vregs_end_pos;
if (!system_supports_fpsimd())
return -EINVAL;
uregs = &target->thread.uw.fpsimd_state;
if (target == current)
@@ -1381,6 +1398,9 @@ static int compat_vfp_set(struct task_struct *target,
compat_ulong_t fpscr;
int ret, vregs_end_pos;
if (!system_supports_fpsimd())
return -EINVAL;
uregs = &target->thread.uw.fpsimd_state;
vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
@@ -1438,6 +1458,7 @@ static const struct user_regset aarch32_regsets[] = {
.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
.size = sizeof(compat_ulong_t),
.align = sizeof(compat_ulong_t),
.active = fpr_active,
.get = compat_vfp_get,
.set = compat_vfp_set
},


@@ -28,7 +28,15 @@
/* Check whether the FP regs were dirtied while in the host-side run loop: */
static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
/*
* When the system doesn't support FP/SIMD, we cannot rely on
* the _TIF_FOREIGN_FPSTATE flag. However, we always inject an
* abort on the very first access to FP and thus we should never
* see KVM_ARM64_FP_ENABLED. For added safety, make sure we always
* trap the accesses.
*/
if (!system_supports_fpsimd() ||
vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
KVM_ARM64_FP_HOST);


@@ -371,7 +371,7 @@ config PPC_PTDUMP
config PPC_DEBUG_WX
bool "Warn on W+X mappings at boot"
depends on PPC_PTDUMP
depends on PPC_PTDUMP && STRICT_KERNEL_RWX
help
Generate a warning if any W+X mappings are found at boot.


@@ -221,6 +221,7 @@ void mark_rodata_ro(void)
if (v_block_mapped((unsigned long)_sinittext)) {
mmu_mark_rodata_ro();
ptdump_check_wx();
return;
}


@@ -36,7 +36,6 @@
#include <asm/udbg.h>
#include <asm/mmzone.h>
#include <asm/plpar_wrappers.h>
#include <asm/svm.h>
#include "pseries.h"
@@ -133,10 +132,10 @@ static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
return be64_to_cpu(*tcep);
}
static void tce_free_pSeriesLP(struct iommu_table*, long, long);
static void tce_free_pSeriesLP(unsigned long liobn, long, long);
static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);
static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
static int tce_build_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
long npages, unsigned long uaddr,
enum dma_data_direction direction,
unsigned long attrs)
@@ -147,25 +146,25 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
int ret = 0;
long tcenum_start = tcenum, npages_start = npages;
rpn = __pa(uaddr) >> TCE_SHIFT;
rpn = __pa(uaddr) >> tceshift;
proto_tce = TCE_PCI_READ;
if (direction != DMA_TO_DEVICE)
proto_tce |= TCE_PCI_WRITE;
while (npages--) {
tce = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, tce);
tce = proto_tce | (rpn & TCE_RPN_MASK) << tceshift;
rc = plpar_tce_put((u64)liobn, (u64)tcenum << tceshift, tce);
if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
ret = (int)rc;
tce_free_pSeriesLP(tbl, tcenum_start,
tce_free_pSeriesLP(liobn, tcenum_start,
(npages_start - (npages + 1)));
break;
}
if (rc && printk_ratelimit()) {
printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
printk("\tindex = 0x%llx\n", (u64)liobn);
printk("\ttcenum = 0x%llx\n", (u64)tcenum);
printk("\ttce val = 0x%llx\n", tce );
dump_stack();
@@ -194,7 +193,8 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
unsigned long flags;
if ((npages == 1) || !firmware_has_feature(FW_FEATURE_MULTITCE)) {
return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
return tce_build_pSeriesLP(tbl->it_index, tcenum,
tbl->it_page_shift, npages, uaddr,
direction, attrs);
}
@@ -210,8 +210,9 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
/* If allocation fails, fall back to the loop implementation */
if (!tcep) {
local_irq_restore(flags);
return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
direction, attrs);
return tce_build_pSeriesLP(tbl->it_index, tcenum,
tbl->it_page_shift,
npages, uaddr, direction, attrs);
}
__this_cpu_write(tce_page, tcep);
}
@@ -262,16 +263,16 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
return ret;
}
static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long npages)
{
u64 rc;
while (npages--) {
rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0);
rc = plpar_tce_put((u64)liobn, (u64)tcenum << 12, 0);
if (rc && printk_ratelimit()) {
printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
printk("\tindex = 0x%llx\n", (u64)liobn);
printk("\ttcenum = 0x%llx\n", (u64)tcenum);
dump_stack();
}
@@ -286,7 +287,7 @@ static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long n
u64 rc;
if (!firmware_has_feature(FW_FEATURE_MULTITCE))
return tce_free_pSeriesLP(tbl, tcenum, npages);
return tce_free_pSeriesLP(tbl->it_index, tcenum, npages);
rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);
@@ -401,6 +402,19 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
u64 rc = 0;
long l, limit;
if (!firmware_has_feature(FW_FEATURE_MULTITCE)) {
unsigned long tceshift = be32_to_cpu(maprange->tce_shift);
unsigned long dmastart = (start_pfn << PAGE_SHIFT) +
be64_to_cpu(maprange->dma_base);
unsigned long tcenum = dmastart >> tceshift;
unsigned long npages = num_pfn << PAGE_SHIFT >> tceshift;
void *uaddr = __va(start_pfn << PAGE_SHIFT);
return tce_build_pSeriesLP(be32_to_cpu(maprange->liobn),
tcenum, tceshift, npages, (unsigned long) uaddr,
DMA_BIDIRECTIONAL, 0);
}
local_irq_disable(); /* to protect tcep and the page behind it */
tcep = __this_cpu_read(tce_page);
@@ -1320,15 +1334,7 @@ void iommu_init_early_pSeries(void)
of_reconfig_notifier_register(&iommu_reconfig_nb);
register_memory_notifier(&iommu_mem_nb);
/*
* Secure guest memory is inacessible to devices so regular DMA isn't
* possible.
*
* In that case keep devices' dma_map_ops as NULL so that the generic
* DMA code path will use SWIOTLB to bounce buffers for DMA.
*/
if (!is_secure_guest())
set_pci_dma_ops(&dma_iommu_ops);
set_pci_dma_ops(&dma_iommu_ops);
}
static int __init disable_multitce(char *str)


@@ -342,6 +342,7 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
p->bus = nvdimm_bus_register(NULL, &p->bus_desc);
if (!p->bus) {
dev_err(dev, "Error creating nvdimm bus %pOF\n", p->dn);
kfree(p->bus_desc.provider_name);
return -ENXIO;
}
@@ -498,6 +499,7 @@ static int papr_scm_remove(struct platform_device *pdev)
nvdimm_bus_unregister(p->bus);
drc_pmem_unbind(p);
kfree(p->bus_desc.provider_name);
kfree(p);
return 0;


@@ -1176,6 +1176,8 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
if (tbl == NULL)
return NULL;
kref_init(&tbl->it_kref);
of_parse_dma_window(dev->dev.of_node, dma_window,
&tbl->it_index, &offset, &size);


@@ -393,7 +393,13 @@ int count_immovable_mem_regions(void)
table = table_addr + sizeof(struct acpi_table_srat);
while (table + sizeof(struct acpi_subtable_header) < table_end) {
sub_table = (struct acpi_subtable_header *)table;
if (!sub_table->length) {
debug_putstr("Invalid zero length SRAT subtable.\n");
return 0;
}
if (sub_table->type == ACPI_SRAT_TYPE_MEMORY_AFFINITY) {
struct acpi_srat_mem_affinity *ma;


@@ -2110,6 +2110,7 @@ static void generate_random_aead_testvec(struct aead_request *req,
* If the key or authentication tag size couldn't be set, no need to
* continue to encrypt.
*/
vec->crypt_error = 0;
if (vec->setkey_error || vec->setauthsize_error)
goto done;
@@ -2253,10 +2254,12 @@ static int test_aead_vs_generic_impl(const char *driver,
req, tsgls);
if (err)
goto out;
err = test_aead_vec_cfg(driver, DECRYPT, &vec, vec_name, cfg,
req, tsgls);
if (err)
goto out;
if (vec.crypt_error == 0) {
err = test_aead_vec_cfg(driver, DECRYPT, &vec, vec_name,
cfg, req, tsgls);
if (err)
goto out;
}
cond_resched();
}
err = 0;
@@ -2907,6 +2910,15 @@ static void generate_random_cipher_testvec(struct skcipher_request *req,
skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
skcipher_request_set_crypt(req, &src, &dst, vec->len, iv);
vec->crypt_error = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
if (vec->crypt_error != 0) {
/*
* The only acceptable error here is for an invalid length, so
* skcipher decryption should fail with the same error too.
* We'll test for this. But to keep the API usage well-defined,
* explicitly initialize the ciphertext buffer too.
*/
memset((u8 *)vec->ctext, 0, vec->len);
}
done:
snprintf(name, max_namelen, "\"random: len=%u klen=%u\"",
vec->len, vec->klen);


@@ -1486,11 +1486,18 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
WARN_ON(!map->bus);
/* Check for unwritable registers before we start */
for (i = 0; i < val_len / map->format.val_bytes; i++)
if (!regmap_writeable(map,
reg + regmap_get_offset(map, i)))
return -EINVAL;
/* Check for unwritable or noinc registers in range
* before we start
*/
if (!regmap_writeable_noinc(map, reg)) {
for (i = 0; i < val_len / map->format.val_bytes; i++) {
unsigned int element =
reg + regmap_get_offset(map, i);
if (!regmap_writeable(map, element) ||
regmap_writeable_noinc(map, element))
return -EINVAL;
}
}
if (!map->cache_bypass && map->format.parse_val) {
unsigned int ival;


@@ -4692,6 +4692,7 @@ static struct clk_regmap *const g12a_clk_regmaps[] = {
&g12a_bt656,
&g12a_usb1_to_ddr,
&g12a_mmc_pclk,
&g12a_uart2,
&g12a_vpu_intr,
&g12a_gic,
&g12a_sd_emmc_a_clk0,


@@ -1918,12 +1918,7 @@ static int atmel_sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
{
struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
if (atmel_sha_hmac_key_set(&hmac->hkey, key, keylen)) {
crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return 0;
return atmel_sha_hmac_key_set(&hmac->hkey, key, keylen);
}
static int atmel_sha_hmac_init(struct ahash_request *req)


@@ -1251,7 +1251,7 @@ static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key,
if (len != 16 && len != 24 && len != 32) {
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -1;
return -EINVAL;
}
ctx->key_length = len;


@@ -830,6 +830,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
struct dma_device *dma_dev;
struct axi_dmac *dmac;
struct resource *res;
struct regmap *regmap;
int ret;
dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
@@ -921,10 +922,17 @@ static int axi_dmac_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dmac);
devm_regmap_init_mmio(&pdev->dev, dmac->base, &axi_dmac_regmap_config);
regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base,
&axi_dmac_regmap_config);
if (IS_ERR(regmap)) {
ret = PTR_ERR(regmap);
goto err_free_irq;
}
return 0;
err_free_irq:
free_irq(dmac->irq, dmac);
err_unregister_of:
of_dma_controller_free(pdev->dev.of_node);
err_unregister_device:


@@ -139,7 +139,7 @@ int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
if (ib_nl_is_good_ip_resp(nlh))
ib_nl_process_good_ip_rsep(nlh);
return skb->len;
return 0;
}
static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr,


@@ -3091,6 +3091,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
atomic_inc(&id_priv->refcount);
cma_init_resolve_addr_work(work, id_priv);
queue_work(cma_wq, &work->work);
return 0;
@@ -3117,6 +3118,7 @@ static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
&(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));
atomic_inc(&id_priv->refcount);
cma_init_resolve_addr_work(work, id_priv);
queue_work(cma_wq, &work->work);
return 0;


@@ -1068,7 +1068,7 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
}
settimeout_out:
return skb->len;
return 0;
}
static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
@@ -1139,7 +1139,7 @@ int ib_nl_handle_resolve_resp(struct sk_buff *skb,
}
resp_out:
return skb->len;
return 0;
}
static void free_sm_ah(struct kref *kref)


@@ -166,10 +166,13 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
* for any address.
*/
mask |= (sg_dma_address(sg) + pgoff) ^ va;
if (i && i != (umem->nmap - 1))
/* restrict by length as well for interior SGEs */
mask |= sg_dma_len(sg);
va += sg_dma_len(sg) - pgoff;
/* Except for the last entry, the ending iova alignment sets
* the maximum possible page size as the low bits of the iova
* must be zero when starting the next chunk.
*/
if (i != (umem->nmap - 1))
mask |= va;
pgoff = 0;
}
best_pg_bit = rdma_find_pg_bit(mask, pgsz_bitmap);


@@ -220,7 +220,6 @@ void ib_uverbs_release_file(struct kref *ref)
}
static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
struct ib_uverbs_file *uverbs_file,
struct file *filp, char __user *buf,
size_t count, loff_t *pos,
size_t eventsz)
@@ -238,19 +237,16 @@ static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
if (wait_event_interruptible(ev_queue->poll_wait,
(!list_empty(&ev_queue->event_list) ||
/* The barriers built into wait_event_interruptible()
* and wake_up() guarentee this will see the null set
* without using RCU
*/
!uverbs_file->device->ib_dev)))
ev_queue->is_closed)))
return -ERESTARTSYS;
/* If device was disassociated and no event exists set an error */
if (list_empty(&ev_queue->event_list) &&
!uverbs_file->device->ib_dev)
return -EIO;
spin_lock_irq(&ev_queue->lock);
/* If device was disassociated and no event exists set an error */
if (list_empty(&ev_queue->event_list) && ev_queue->is_closed) {
spin_unlock_irq(&ev_queue->lock);
return -EIO;
}
}
event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list);
@@ -285,8 +281,7 @@ static ssize_t ib_uverbs_async_event_read(struct file *filp, char __user *buf,
{
struct ib_uverbs_async_event_file *file = filp->private_data;
return ib_uverbs_event_read(&file->ev_queue, file->uverbs_file, filp,
buf, count, pos,
return ib_uverbs_event_read(&file->ev_queue, filp, buf, count, pos,
sizeof(struct ib_uverbs_async_event_desc));
}
@@ -296,9 +291,8 @@ static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf,
struct ib_uverbs_completion_event_file *comp_ev_file =
filp->private_data;
return ib_uverbs_event_read(&comp_ev_file->ev_queue,
comp_ev_file->uobj.ufile, filp,
buf, count, pos,
return ib_uverbs_event_read(&comp_ev_file->ev_queue, filp, buf, count,
pos,
sizeof(struct ib_uverbs_comp_event_desc));
}
@@ -321,7 +315,9 @@ static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
static __poll_t ib_uverbs_async_event_poll(struct file *filp,
struct poll_table_struct *wait)
{
return ib_uverbs_event_poll(filp->private_data, filp, wait);
struct ib_uverbs_async_event_file *file = filp->private_data;
return ib_uverbs_event_poll(&file->ev_queue, filp, wait);
}
static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
@@ -335,9 +331,9 @@ static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
static int ib_uverbs_async_event_fasync(int fd, struct file *filp, int on)
{
struct ib_uverbs_event_queue *ev_queue = filp->private_data;
struct ib_uverbs_async_event_file *file = filp->private_data;
return fasync_helper(fd, filp, on, &ev_queue->async_queue);
return fasync_helper(fd, filp, on, &file->ev_queue.async_queue);
}
static int ib_uverbs_comp_event_fasync(int fd, struct file *filp, int on)


@@ -1225,6 +1225,8 @@ static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev)
const struct in_ifaddr *ifa;
idev = in_dev_get(dev);
if (!idev)
continue;
in_dev_for_each_ifa_rtnl(ifa, idev) {
i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
"IP=%pI4, vlan_id=%d, MAC=%pM\n", &ifa->ifa_address,


@@ -186,23 +186,6 @@ out:
kfree(ent);
}
static void id_map_find_del(struct ib_device *ibdev, int pv_cm_id)
{
struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
struct rb_root *sl_id_map = &sriov->sl_id_map;
struct id_map_entry *ent, *found_ent;
spin_lock(&sriov->id_map_lock);
ent = xa_erase(&sriov->pv_id_table, pv_cm_id);
if (!ent)
goto out;
found_ent = id_map_find_by_sl_id(ibdev, ent->slave_id, ent->sl_cm_id);
if (found_ent && found_ent == ent)
rb_erase(&found_ent->node, sl_id_map);
out:
spin_unlock(&sriov->id_map_lock);
}
static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
{
struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
@@ -294,7 +277,7 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
spin_lock(&sriov->id_map_lock);
spin_lock_irqsave(&sriov->going_down_lock, flags);
/*make sure that there is no schedule inside the scheduled work.*/
if (!sriov->is_going_down) {
if (!sriov->is_going_down && !id->scheduled_delete) {
id->scheduled_delete = 1;
schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
}
@@ -341,9 +324,6 @@ cont:
if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
schedule_delayed(ibdev, id);
else if (mad->mad_hdr.attr_id == CM_DREP_ATTR_ID)
id_map_find_del(ibdev, pv_cm_id);
return 0;
}
@@ -382,12 +362,9 @@ int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
*slave = id->slave_id;
set_remote_comm_id(mad, id->sl_cm_id);
if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID ||
mad->mad_hdr.attr_id == CM_REJ_ATTR_ID)
schedule_delayed(ibdev, id);
else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
mad->mad_hdr.attr_id == CM_DREP_ATTR_ID) {
id_map_find_del(ibdev, (int) pv_cm_id);
}
return 0;
}


@@ -246,6 +246,13 @@ static int mlx4_ib_update_gids(struct gid_entry *gids,
return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
}
static void free_gid_entry(struct gid_entry *entry)
{
memset(&entry->gid, 0, sizeof(entry->gid));
kfree(entry->ctx);
entry->ctx = NULL;
}
static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
{
struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
@@ -306,6 +313,8 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
GFP_ATOMIC);
if (!gids) {
ret = -ENOMEM;
*context = NULL;
free_gid_entry(&port_gid_table->gids[free]);
} else {
for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
@@ -317,6 +326,12 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
if (!ret && hw_update) {
ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
if (ret) {
spin_lock_bh(&iboe->lock);
*context = NULL;
free_gid_entry(&port_gid_table->gids[free]);
spin_unlock_bh(&iboe->lock);
}
kfree(gids);
}
@@ -346,10 +361,7 @@ static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
if (!ctx->refcount) {
unsigned int real_index = ctx->real_index;
memset(&port_gid_table->gids[real_index].gid, 0,
sizeof(port_gid_table->gids[real_index].gid));
kfree(port_gid_table->gids[real_index].ctx);
port_gid_table->gids[real_index].ctx = NULL;
free_gid_entry(&port_gid_table->gids[real_index]);
hw_update = 1;
}
}


@@ -2536,7 +2536,8 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
if (lrsp->opcode == SRP_LOGIN_RSP) {
ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
ch->use_imm_data = lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP;
ch->use_imm_data = srp_use_imm_data &&
(lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP);
ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
ch->use_imm_data);
WARN_ON_ONCE(ch->max_it_iu_len >


@@ -856,6 +856,7 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_RANGE, 31);
break;
case CMDQ_OP_TLBI_NH_VA:
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;


@@ -417,10 +417,14 @@ err:
/* Journalling */
#define nr_to_fifo_front(p, front_p, mask) (((p) - (front_p)) & (mask))
static void btree_flush_write(struct cache_set *c)
{
struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
unsigned int i, n;
unsigned int i, nr, ref_nr;
atomic_t *fifo_front_p, *now_fifo_front_p;
size_t mask;
if (c->journal.btree_flushing)
return;
@@ -433,12 +437,50 @@ static void btree_flush_write(struct cache_set *c)
c->journal.btree_flushing = true;
spin_unlock(&c->journal.flush_write_lock);
/* get the oldest journal entry and check its refcount */
spin_lock(&c->journal.lock);
fifo_front_p = &fifo_front(&c->journal.pin);
ref_nr = atomic_read(fifo_front_p);
if (ref_nr <= 0) {
/*
* do nothing if no btree node references
* the oldest journal entry
*/
spin_unlock(&c->journal.lock);
goto out;
}
spin_unlock(&c->journal.lock);
mask = c->journal.pin.mask;
nr = 0;
atomic_long_inc(&c->flush_write);
memset(btree_nodes, 0, sizeof(btree_nodes));
n = 0;
mutex_lock(&c->bucket_lock);
list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
/*
* It is safe to get now_fifo_front_p without holding
* c->journal.lock here, because we don't need to know
* the exactly accurate value, just check whether the
* front pointer of c->journal.pin is changed.
*/
now_fifo_front_p = &fifo_front(&c->journal.pin);
/*
* If the oldest journal entry is reclaimed and front
* pointer of c->journal.pin changes, it is unnecessary
* to scan c->btree_cache anymore, just quit the loop and
* flush out what we have already.
*/
if (now_fifo_front_p != fifo_front_p)
break;
/*
* quit this loop if all matching btree nodes are
* scanned and record in btree_nodes[] already.
*/
ref_nr = atomic_read(fifo_front_p);
if (nr >= ref_nr)
break;
if (btree_node_journal_flush(b))
pr_err("BUG: flush_write bit should not be set here!");
@@ -454,17 +496,44 @@ static void btree_flush_write(struct cache_set *c)
continue;
}
/*
* Only select the btree node which exactly references
* the oldest journal entry.
*
* If the journal entry pointed by fifo_front_p is
* reclaimed in parallel, don't worry:
* - the list_for_each_xxx loop will quit when checking
* next now_fifo_front_p.
* - If there are matched nodes recorded in btree_nodes[],
* they are clean now (this is why and how the oldest
* journal entry can be reclaimed). These selected nodes
* will be ignored and skipped in the folowing for-loop.
*/
if (nr_to_fifo_front(btree_current_write(b)->journal,
fifo_front_p,
mask) != 0) {
mutex_unlock(&b->write_lock);
continue;
}
set_btree_node_journal_flush(b);
mutex_unlock(&b->write_lock);
btree_nodes[n++] = b;
if (n == BTREE_FLUSH_NR)
btree_nodes[nr++] = b;
/*
* To avoid holding c->bucket_lock too long time,
* only scan for BTREE_FLUSH_NR matched btree nodes
* at most. If there are more btree nodes reference
* the oldest journal entry, try to flush them next
* time when btree_flush_write() is called.
*/
if (nr == BTREE_FLUSH_NR)
break;
}
mutex_unlock(&c->bucket_lock);
for (i = 0; i < n; i++) {
for (i = 0; i < nr; i++) {
b = btree_nodes[i];
if (!b) {
pr_err("BUG: btree_nodes[%d] is NULL", i);
@@ -497,6 +566,7 @@ static void btree_flush_write(struct cache_set *c)
mutex_unlock(&b->write_lock);
}
out:
spin_lock(&c->journal.flush_write_lock);
c->journal.btree_flushing = false;
spin_unlock(&c->journal.flush_write_lock);


@@ -394,10 +394,10 @@ int adv748x_write_block(struct adv748x_state *state, int client_page,
#define io_read(s, r) adv748x_read(s, ADV748X_PAGE_IO, r)
#define io_write(s, r, v) adv748x_write(s, ADV748X_PAGE_IO, r, v)
#define io_clrset(s, r, m, v) io_write(s, r, (io_read(s, r) & ~m) | v)
#define io_clrset(s, r, m, v) io_write(s, r, (io_read(s, r) & ~(m)) | (v))
#define hdmi_read(s, r) adv748x_read(s, ADV748X_PAGE_HDMI, r)
#define hdmi_read16(s, r, m) (((hdmi_read(s, r) << 8) | hdmi_read(s, r+1)) & m)
#define hdmi_read16(s, r, m) (((hdmi_read(s, r) << 8) | hdmi_read(s, (r)+1)) & (m))
#define hdmi_write(s, r, v) adv748x_write(s, ADV748X_PAGE_HDMI, r, v)
#define repeater_read(s, r) adv748x_read(s, ADV748X_PAGE_REPEATER, r)
@@ -405,11 +405,11 @@ int adv748x_write_block(struct adv748x_state *state, int client_page,
#define sdp_read(s, r) adv748x_read(s, ADV748X_PAGE_SDP, r)
#define sdp_write(s, r, v) adv748x_write(s, ADV748X_PAGE_SDP, r, v)
#define sdp_clrset(s, r, m, v) sdp_write(s, r, (sdp_read(s, r) & ~m) | v)
#define sdp_clrset(s, r, m, v) sdp_write(s, r, (sdp_read(s, r) & ~(m)) | (v))
#define cp_read(s, r) adv748x_read(s, ADV748X_PAGE_CP, r)
#define cp_write(s, r, v) adv748x_write(s, ADV748X_PAGE_CP, r, v)
#define cp_clrset(s, r, m, v) cp_write(s, r, (cp_read(s, r) & ~m) | v)
#define cp_clrset(s, r, m, v) cp_write(s, r, (cp_read(s, r) & ~(m)) | (v))
#define tx_read(t, r) adv748x_read(t->state, t->page, r)
#define tx_write(t, r, v) adv748x_write(t->state, t->page, r, v)


@@ -773,6 +773,7 @@ config MFD_MAX77650
depends on OF || COMPILE_TEST
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
help
Say Y here to add support for Maxim Semiconductor MAX77650 and
MAX77651 Power Management ICs. This is the core multifunction


@@ -1248,44 +1248,44 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
stats = mtd->ecc_stats;
/* Read-while-load method */
/* Read-while-load method */
/* Do first load to bufferRAM */
if (read < len) {
if (!onenand_check_bufferram(mtd, from)) {
/* Do first load to bufferRAM */
if (read < len) {
if (!onenand_check_bufferram(mtd, from)) {
this->command(mtd, ONENAND_CMD_READ, from, writesize);
ret = this->wait(mtd, FL_READING);
onenand_update_bufferram(mtd, from, !ret);
ret = this->wait(mtd, FL_READING);
onenand_update_bufferram(mtd, from, !ret);
if (mtd_is_eccerr(ret))
ret = 0;
}
}
}
}
thislen = min_t(int, writesize, len - read);
column = from & (writesize - 1);
if (column + thislen > writesize)
thislen = writesize - column;
while (!ret) {
/* If there is more to load then start next load */
from += thislen;
if (read + thislen < len) {
while (!ret) {
/* If there is more to load then start next load */
from += thislen;
if (read + thislen < len) {
this->command(mtd, ONENAND_CMD_READ, from, writesize);
/*
* Chip boundary handling in DDP
* Now we issued chip 1 read and pointed chip 1
/*
* Chip boundary handling in DDP
* Now we issued chip 1 read and pointed chip 1
* bufferram so we have to point chip 0 bufferram.
*/
if (ONENAND_IS_DDP(this) &&
unlikely(from == (this->chipsize >> 1))) {
this->write_word(ONENAND_DDP_CHIP0, this->base + ONENAND_REG_START_ADDRESS2);
boundary = 1;
} else
boundary = 0;
ONENAND_SET_PREV_BUFFERRAM(this);
}
/* While load is going, read from last bufferRAM */
this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
*/
if (ONENAND_IS_DDP(this) &&
unlikely(from == (this->chipsize >> 1))) {
this->write_word(ONENAND_DDP_CHIP0, this->base + ONENAND_REG_START_ADDRESS2);
boundary = 1;
} else
boundary = 0;
ONENAND_SET_PREV_BUFFERRAM(this);
}
/* While load is going, read from last bufferRAM */
this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
/* Read oob area if needed */
if (oobbuf) {
@@ -1301,24 +1301,24 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
oobcolumn = 0;
}
/* See if we are done */
read += thislen;
if (read == len)
break;
/* Set up for next read from bufferRAM */
if (unlikely(boundary))
this->write_word(ONENAND_DDP_CHIP1, this->base + ONENAND_REG_START_ADDRESS2);
ONENAND_SET_NEXT_BUFFERRAM(this);
buf += thislen;
/* See if we are done */
read += thislen;
if (read == len)
break;
/* Set up for next read from bufferRAM */
if (unlikely(boundary))
this->write_word(ONENAND_DDP_CHIP1, this->base + ONENAND_REG_START_ADDRESS2);
ONENAND_SET_NEXT_BUFFERRAM(this);
buf += thislen;
thislen = min_t(int, writesize, len - read);
column = 0;
cond_resched();
/* Now wait for load */
ret = this->wait(mtd, FL_READING);
onenand_update_bufferram(mtd, from, !ret);
column = 0;
cond_resched();
/* Now wait for load */
ret = this->wait(mtd, FL_READING);
onenand_update_bufferram(mtd, from, !ret);
if (mtd_is_eccerr(ret))
ret = 0;
}
}
/*
* Return success, if no ECC failures, else -EBADMSG


@@ -165,10 +165,10 @@ static int sharpsl_nand_get_logical_num(u8 *oob)
static int sharpsl_nand_init_ftl(struct mtd_info *mtd, struct sharpsl_ftl *ftl)
{
unsigned int block_num, log_num, phymax;
unsigned int block_num, phymax;
int i, ret, log_num;
loff_t block_adr;
u8 *oob;
int i, ret;
oob = kzalloc(mtd->oobsize, GFP_KERNEL);
if (!oob)


@@ -1604,11 +1604,22 @@ static int ath10k_pci_dump_memory_reg(struct ath10k *ar,
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
u32 i;
int ret;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH10K_STATE_ON) {
ath10k_warn(ar, "Skipping pci_dump_memory_reg invalid state\n");
ret = -EIO;
goto done;
}
for (i = 0; i < region->len; i += 4)
*(u32 *)(buf + i) = ioread32(ar_pci->mem + region->start + i);
return region->len;
ret = region->len;
done:
mutex_unlock(&ar->conf_mutex);
return ret;
}
/* if an error happened returns < 0, otherwise the length */
@@ -1704,7 +1715,11 @@ static void ath10k_pci_dump_memory(struct ath10k *ar,
count = ath10k_pci_dump_memory_sram(ar, current_region, buf);
break;
case ATH10K_MEM_REGION_TYPE_IOREG:
count = ath10k_pci_dump_memory_reg(ar, current_region, buf);
ret = ath10k_pci_dump_memory_reg(ar, current_region, buf);
if (ret < 0)
break;
count = ret;
break;
default:
ret = ath10k_pci_dump_memory_generic(ar, current_region, buf);


@@ -8,6 +8,7 @@
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* Copyright (C) 2018 Intel Corporation
* Copyright (C) 2019 Intel Corporation
* Copyright (C) 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* Copyright (C) 2018 Intel Corporation
* Copyright (C) 2019 Intel Corporation
* Copyright (C) 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -389,6 +391,8 @@ void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req)
if (req != mvm->ftm_initiator.req)
return;
iwl_mvm_ftm_reset(mvm);
if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RANGE_ABORT_CMD,
LOCATION_GROUP, 0),
0, sizeof(cmd), &cmd))
@@ -502,7 +506,6 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
lockdep_assert_held(&mvm->mutex);
if (!mvm->ftm_initiator.req) {
IWL_ERR(mvm, "Got FTM response but have no request?\n");
return;
}


@@ -1785,6 +1785,8 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
rates_max = rates_eid[1];
if (rates_max > MAX_RATES) {
lbs_deb_join("invalid rates");
rcu_read_unlock();
ret = -EINVAL;
goto out;
}
rates = cmd.bss.rates;


@@ -2886,6 +2886,13 @@ mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv,
vs_param_set->header.len =
cpu_to_le16((((u16) priv->vs_ie[id].ie[1])
& 0x00FF) + 2);
if (le16_to_cpu(vs_param_set->header.len) >
MWIFIEX_MAX_VSIE_LEN) {
mwifiex_dbg(priv->adapter, ERROR,
"Invalid param length!\n");
break;
}
memcpy(vs_param_set->ie, priv->vs_ie[id].ie,
le16_to_cpu(vs_param_set->header.len));
*buffer += le16_to_cpu(vs_param_set->header.len) +


@@ -970,6 +970,10 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
"WMM Parameter Set Count: %d\n",
wmm_param_ie->qos_info_bitmap & mask);
if (wmm_param_ie->vend_hdr.len + 2 >
sizeof(struct ieee_types_wmm_parameter))
break;
memcpy((u8 *) &priv->curr_bss_params.bss_descriptor.
wmm_ie, wmm_param_ie,
wmm_param_ie->vend_hdr.len + 2);


@@ -2499,7 +2499,6 @@ static const struct tegra_pcie_soc tegra20_pcie = {
.num_ports = 2,
.ports = tegra20_pcie_ports,
.msi_base_shift = 0,
.afi_pex2_ctrl = 0x128,
.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
.pads_refclk_cfg0 = 0xfa5cfa5c,
@@ -2528,6 +2527,7 @@ static const struct tegra_pcie_soc tegra30_pcie = {
.num_ports = 3,
.ports = tegra30_pcie_ports,
.msi_base_shift = 8,
.afi_pex2_ctrl = 0x128,
.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
.pads_refclk_cfg0 = 0xfa5cfa5c,


@@ -187,10 +187,10 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
sprintf(buf, "virtfn%u", id);
rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
if (rc)
goto failed2;
goto failed1;
rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
if (rc)
goto failed3;
goto failed2;
kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
@@ -198,11 +198,10 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
return 0;
failed3:
sysfs_remove_link(&dev->dev.kobj, buf);
failed2:
pci_stop_and_remove_bus_device(virtfn);
sysfs_remove_link(&dev->dev.kobj, buf);
failed1:
pci_stop_and_remove_bus_device(virtfn);
pci_dev_put(dev);
failed0:
virtfn_remove_bus(dev->bus, bus);


@@ -1387,6 +1387,7 @@ static int aer_probe(struct pcie_device *dev)
return -ENOMEM;
rpc->rpd = port;
INIT_KFIFO(rpc->aer_fifo);
set_service_data(dev, rpc);
status = devm_request_threaded_irq(device, dev->irq, aer_irq, aer_isr,


@@ -1785,12 +1785,18 @@ again:
/* Restore size and flags */
list_for_each_entry(fail_res, &fail_head, list) {
struct resource *res = fail_res->res;
int idx;
res->start = fail_res->start;
res->end = fail_res->end;
res->flags = fail_res->flags;
if (fail_res->dev->subordinate)
res->flags = 0;
if (pci_is_bridge(fail_res->dev)) {
idx = res - &fail_res->dev->resource[0];
if (idx >= PCI_BRIDGE_RESOURCES &&
idx <= PCI_BRIDGE_RESOURCE_END)
res->flags = 0;
}
}
free_list(&fail_head);
@@ -2037,12 +2043,18 @@ again:
/* Restore size and flags */
list_for_each_entry(fail_res, &fail_head, list) {
struct resource *res = fail_res->res;
int idx;
res->start = fail_res->start;
res->end = fail_res->end;
res->flags = fail_res->flags;
if (fail_res->dev->subordinate)
res->flags = 0;
if (pci_is_bridge(fail_res->dev)) {
idx = res - &fail_res->dev->resource[0];
if (idx >= PCI_BRIDGE_RESOURCES &&
idx <= PCI_BRIDGE_RESOURCE_END)
res->flags = 0;
}
}
free_list(&fail_head);


@@ -1276,7 +1276,7 @@ static int switchtec_init_isr(struct switchtec_dev *stdev)
if (nvecs < 0)
return nvecs;
event_irq = ioread32(&stdev->mmio_part_cfg->vep_vector_number);
event_irq = ioread16(&stdev->mmio_part_cfg->vep_vector_number);
if (event_irq < 0 || event_irq >= nvecs)
return -EFAULT;
@@ -1349,7 +1349,7 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,
if (rc)
return rc;
rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc)
return rc;


@@ -2305,7 +2305,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_ATAG0_A, 0, FN_REMOCON_B, 0,
/* IP0_11_8 [4] */
FN_SD1_DAT2_A, FN_MMC_D2, 0, FN_BS,
FN_ATADIR0_A, 0, FN_SDSELF_B, 0,
FN_ATADIR0_A, 0, FN_SDSELF_A, 0,
FN_PWM4_B, 0, 0, 0,
0, 0, 0, 0,
/* IP0_7_5 [3] */
@@ -2349,7 +2349,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_TS_SDAT0_A, 0, 0, 0,
0, 0, 0, 0,
/* IP1_10_8 [3] */
FN_SD1_CLK_B, FN_MMC_D6, 0, FN_A24,
FN_SD1_CD_A, FN_MMC_D6, 0, FN_A24,
FN_DREQ1_A, 0, FN_HRX0_B, FN_TS_SPSYNC0_A,
/* IP1_7_5 [3] */
FN_A23, FN_HTX0_B, FN_TX2_B, FN_DACK2_A,


@@ -5984,7 +5984,7 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
{ PIN_DU_DOTCLKIN1, 0, 2 }, /* DU_DOTCLKIN1 */
} },
{ PINMUX_DRIVE_REG("DRVCTRL12", 0xe6060330) {
{ PIN_DU_DOTCLKIN3, 28, 2 }, /* DU_DOTCLKIN3 */
{ PIN_DU_DOTCLKIN3, 24, 2 }, /* DU_DOTCLKIN3 */
{ PIN_FSCLKST, 20, 2 }, /* FSCLKST */
{ PIN_TMS, 4, 2 }, /* TMS */
} },
@@ -6240,8 +6240,8 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
[31] = PIN_DU_DOTCLKIN1, /* DU_DOTCLKIN1 */
} },
{ PINMUX_BIAS_REG("PUEN3", 0xe606040c, "PUD3", 0xe606044c) {
[ 0] = PIN_DU_DOTCLKIN3, /* DU_DOTCLKIN3 */
[ 1] = SH_PFC_PIN_NONE,
[ 0] = SH_PFC_PIN_NONE,
[ 1] = PIN_DU_DOTCLKIN3, /* DU_DOTCLKIN3 */
[ 2] = PIN_FSCLKST, /* FSCLKST */
[ 3] = PIN_EXTALR, /* EXTALR*/
[ 4] = PIN_TRST_N, /* TRST# */


@@ -146,9 +146,10 @@ static int mid_pb_probe(struct platform_device *pdev)
input_set_capability(input, EV_KEY, KEY_POWER);
ddata = (struct mid_pb_ddata *)id->driver_data;
ddata = devm_kmemdup(&pdev->dev, (void *)id->driver_data,
sizeof(*ddata), GFP_KERNEL);
if (!ddata)
return -ENODATA;
return -ENOMEM;
ddata->dev = &pdev->dev;
ddata->irq = irq;


@@ -850,7 +850,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
rtc_cmos_int_handler = cmos_interrupt;
retval = request_irq(rtc_irq, rtc_cmos_int_handler,
IRQF_SHARED, dev_name(&cmos_rtc.rtc->dev),
0, dev_name(&cmos_rtc.rtc->dev),
cmos_rtc.rtc);
if (retval < 0) {
dev_dbg(dev, "IRQ %d is already in use\n", rtc_irq);

View File

@ -97,7 +97,7 @@ static int hym8563_rtc_read_time(struct device *dev, struct rtc_time *tm)
if (!hym8563->valid) {
dev_warn(&client->dev, "no valid clock/calendar values available\n");
return -EPERM;
return -EINVAL;
}
ret = i2c_smbus_read_i2c_block_data(client, HYM8563_SEC, 7, buf);

View File

@ -6953,7 +6953,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
ufshcd_init_icc_levels(hba);
/* Add required well known logical units to scsi mid layer */
if (ufshcd_scsi_add_wlus(hba))
ret = ufshcd_scsi_add_wlus(hba);
if (ret)
goto out;
/* Initialize devfreq after UFS device is detected */

View File

@ -93,6 +93,7 @@ static struct rpmhpd sdm845_mx = {
static struct rpmhpd sdm845_mx_ao = {
.pd = { .name = "mx_ao", },
.active_only = true,
.peer = &sdm845_mx,
.res_name = "mx.lvl",
};
@ -107,6 +108,7 @@ static struct rpmhpd sdm845_cx = {
static struct rpmhpd sdm845_cx_ao = {
.pd = { .name = "cx_ao", },
.active_only = true,
.peer = &sdm845_cx,
.parent = &sdm845_mx_ao.pd,
.res_name = "cx.lvl",

View File

@ -246,7 +246,7 @@ static int qcom_wdt_probe(struct platform_device *pdev)
}
/* check if there is pretimeout support */
irq = platform_get_irq(pdev, 0);
irq = platform_get_irq_optional(pdev, 0);
if (irq > 0) {
ret = devm_request_irq(dev, irq, qcom_wdt_isr,
IRQF_TRIGGER_RISING,

View File

@ -262,6 +262,24 @@ static int stm32_iwdg_probe(struct platform_device *pdev)
watchdog_set_nowayout(wdd, WATCHDOG_NOWAYOUT);
watchdog_init_timeout(wdd, 0, dev);
/*
* If CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED is set (i.e. U-Boot or
* another bootloader left the watchdog running), we must decide
* here how to prevent any side effects before the user-space
* daemon takes over. Given that there is no way to read values
* back from the hardware, the best option is to enforce running
* the watchdog with deterministic values.
*/
if (IS_ENABLED(CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED)) {
ret = stm32_iwdg_start(wdd);
if (ret)
return ret;
/* Make sure the watchdog is serviced */
set_bit(WDOG_HW_RUNNING, &wdd->status);
}
ret = devm_watchdog_register_device(dev, wdd);
if (ret)
return ret;
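
A behavioral note on the pattern above, hedged since it restates watchdog-core behavior rather than this patch:

/* With WDOG_HW_RUNNING set and no user-space opener, the watchdog core
 * schedules a kernel worker that keeps servicing the hardware before its
 * timeout expires, so the board does not reset while the user-space
 * daemon is still coming up. */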

View File

@ -90,7 +90,7 @@ config NFS_V4
config NFS_SWAP
bool "Provide swap over NFS support"
default n
depends on NFS_FS
depends on NFS_FS && SWAP
select SUNRPC_SWAP
help
This option enables swapon to work on files located on NFS mounts.

View File

@ -245,10 +245,10 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
data->ds_commit_index);
/* verifier not set so always fail */
if (verfp->committed < 0)
if (verfp->committed < 0 || data->res.verf->committed <= NFS_UNSTABLE)
return 1;
return nfs_direct_cmp_verf(verfp, &data->verf);
return nfs_direct_cmp_verf(verfp, data->res.verf);
}
/**

View File

@ -2338,6 +2338,7 @@ static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
void *data)
{
struct nfs_commitres *result = data;
struct nfs_writeverf *verf = result->verf;
enum nfs_stat status;
int error;
@ -2350,7 +2351,9 @@ static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
result->op_status = status;
if (status != NFS3_OK)
goto out_status;
error = decode_writeverf3(xdr, &result->verf->verifier);
error = decode_writeverf3(xdr, &verf->verifier);
if (!error)
verf->committed = NFS_FILE_SYNC;
out:
return error;
out_status:

View File

@ -439,9 +439,7 @@ extern void nfs4_schedule_state_renewal(struct nfs_client *);
extern void nfs4_renewd_prepare_shutdown(struct nfs_server *);
extern void nfs4_kill_renewd(struct nfs_client *);
extern void nfs4_renew_state(struct work_struct *);
extern void nfs4_set_lease_period(struct nfs_client *clp,
unsigned long lease,
unsigned long lastrenewed);
extern void nfs4_set_lease_period(struct nfs_client *clp, unsigned long lease);
/* nfs4state.c */

View File

@ -3187,6 +3187,11 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir,
exception.retry = 1;
continue;
}
if (status == -NFS4ERR_EXPIRED) {
nfs4_schedule_lease_recovery(server->nfs_client);
exception.retry = 1;
continue;
}
if (status == -EAGAIN) {
/* We must have found a delegation */
exception.retry = 1;
@ -5019,16 +5024,13 @@ static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, str
struct nfs4_exception exception = {
.interruptible = true,
};
unsigned long now = jiffies;
int err;
do {
err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
if (err == 0) {
nfs4_set_lease_period(server->nfs_client,
fsinfo->lease_time * HZ,
now);
nfs4_set_lease_period(server->nfs_client, fsinfo->lease_time * HZ);
break;
}
err = nfs4_handle_exception(server, err, &exception);
@ -6084,6 +6086,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
.callback_data = &setclientid,
.flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN,
};
unsigned long now = jiffies;
int status;
/* nfs_client_id4 */
@ -6116,6 +6119,9 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
put_rpccred(setclientid.sc_cred);
}
if (status == 0)
do_renew_lease(clp, now);
out:
trace_nfs4_setclientid(clp, status);
dprintk("NFS reply setclientid: %d\n", status);
@ -8199,6 +8205,7 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cre
struct rpc_task *task;
struct nfs41_exchange_id_args *argp;
struct nfs41_exchange_id_res *resp;
unsigned long now = jiffies;
int status;
task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL);
@ -8219,6 +8226,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cre
if (status != 0)
goto out;
do_renew_lease(clp, now);
clp->cl_clientid = resp->clientid;
clp->cl_exchange_flags = resp->flags;
clp->cl_seqid = resp->seqid;

View File

@ -138,15 +138,12 @@ nfs4_kill_renewd(struct nfs_client *clp)
*
* @clp: pointer to nfs_client
* @lease: new value for lease period
* @lastrenewed: time at which lease was last renewed
*/
void nfs4_set_lease_period(struct nfs_client *clp,
unsigned long lease,
unsigned long lastrenewed)
unsigned long lease)
{
spin_lock(&clp->cl_lock);
clp->cl_lease_time = lease;
clp->cl_last_renewal = lastrenewed;
spin_unlock(&clp->cl_lock);
/* Cap maximum reconnect timeout at 1/2 lease period */

View File

@ -91,17 +91,15 @@ static int nfs4_setup_state_renewal(struct nfs_client *clp)
{
int status;
struct nfs_fsinfo fsinfo;
unsigned long now;
if (!test_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state)) {
nfs4_schedule_state_renewal(clp);
return 0;
}
now = jiffies;
status = nfs4_proc_get_lease_time(clp, &fsinfo);
if (status == 0) {
nfs4_set_lease_period(clp, fsinfo.lease_time * HZ, now);
nfs4_set_lease_period(clp, fsinfo.lease_time * HZ);
nfs4_schedule_state_renewal(clp);
}

View File

@ -352,7 +352,7 @@ DECLARE_EVENT_CLASS(nfs4_clientid_event,
),
TP_fast_assign(
__entry->error = error;
__entry->error = error < 0 ? -error : 0;
__assign_str(dstaddr, clp->cl_hostname);
),
@ -432,7 +432,8 @@ TRACE_EVENT(nfs4_sequence_done,
__entry->target_highest_slotid =
res->sr_target_highest_slotid;
__entry->status_flags = res->sr_status_flags;
__entry->error = res->sr_status;
__entry->error = res->sr_status < 0 ?
-res->sr_status : 0;
),
TP_printk(
"error=%ld (%s) session=0x%08x slot_nr=%u seq_nr=%u "
@ -566,7 +567,7 @@ TRACE_EVENT(nfs4_xdr_status,
TP_PROTO(
const struct xdr_stream *xdr,
u32 op,
int error
u32 error
),
TP_ARGS(xdr, op, error),
@ -756,7 +757,7 @@ TRACE_EVENT(nfs4_close,
__entry->fileid = NFS_FILEID(inode);
__entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
__entry->fmode = (__force unsigned int)state->state;
__entry->error = error;
__entry->error = error < 0 ? -error : 0;
__entry->stateid_seq =
be32_to_cpu(args->stateid.seqid);
__entry->stateid_hash =
@ -821,7 +822,7 @@ DECLARE_EVENT_CLASS(nfs4_lock_event,
TP_fast_assign(
const struct inode *inode = state->inode;
__entry->error = error;
__entry->error = error < 0 ? -error : 0;
__entry->cmd = cmd;
__entry->type = request->fl_type;
__entry->start = request->fl_start;
@ -893,7 +894,7 @@ TRACE_EVENT(nfs4_set_lock,
TP_fast_assign(
const struct inode *inode = state->inode;
__entry->error = error;
__entry->error = error < 0 ? -error : 0;
__entry->cmd = cmd;
__entry->type = request->fl_type;
__entry->start = request->fl_start;
@ -989,7 +990,7 @@ TRACE_EVENT(nfs4_delegreturn_exit,
TP_fast_assign(
__entry->dev = res->server->s_dev;
__entry->fhandle = nfs_fhandle_hash(args->fhandle);
__entry->error = error;
__entry->error = error < 0 ? -error : 0;
__entry->stateid_seq =
be32_to_cpu(args->stateid->seqid);
__entry->stateid_hash =
@ -1029,7 +1030,7 @@ DECLARE_EVENT_CLASS(nfs4_test_stateid_event,
TP_fast_assign(
const struct inode *inode = state->inode;
__entry->error = error;
__entry->error = error < 0 ? -error : 0;
__entry->dev = inode->i_sb->s_dev;
__entry->fileid = NFS_FILEID(inode);
__entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
@ -1131,7 +1132,7 @@ TRACE_EVENT(nfs4_lookupp,
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = NFS_FILEID(inode);
__entry->error = error;
__entry->error = error < 0 ? -error : 0;
),
TP_printk(
@ -1167,7 +1168,7 @@ TRACE_EVENT(nfs4_rename,
__entry->dev = olddir->i_sb->s_dev;
__entry->olddir = NFS_FILEID(olddir);
__entry->newdir = NFS_FILEID(newdir);
__entry->error = error;
__entry->error = error < 0 ? -error : 0;
__assign_str(oldname, oldname->name);
__assign_str(newname, newname->name);
),
@ -1258,7 +1259,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_event,
__entry->dev = inode->i_sb->s_dev;
__entry->fileid = NFS_FILEID(inode);
__entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
__entry->error = error;
__entry->error = error < 0 ? -error : 0;
__entry->stateid_seq =
be32_to_cpu(stateid->seqid);
__entry->stateid_hash =
@ -1314,7 +1315,7 @@ DECLARE_EVENT_CLASS(nfs4_getattr_event,
__entry->valid = fattr->valid;
__entry->fhandle = nfs_fhandle_hash(fhandle);
__entry->fileid = (fattr->valid & NFS_ATTR_FATTR_FILEID) ? fattr->fileid : 0;
__entry->error = error;
__entry->error = error < 0 ? -error : 0;
),
TP_printk(
@ -1361,7 +1362,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event,
),
TP_fast_assign(
__entry->error = error;
__entry->error = error < 0 ? -error : 0;
__entry->fhandle = nfs_fhandle_hash(fhandle);
if (!IS_ERR_OR_NULL(inode)) {
__entry->fileid = NFS_FILEID(inode);
@ -1418,7 +1419,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
),
TP_fast_assign(
__entry->error = error;
__entry->error = error < 0 ? -error : 0;
__entry->fhandle = nfs_fhandle_hash(fhandle);
if (!IS_ERR_OR_NULL(inode)) {
__entry->fileid = NFS_FILEID(inode);
@ -1721,7 +1722,7 @@ TRACE_EVENT(nfs4_layoutget,
__entry->iomode = args->iomode;
__entry->offset = args->offset;
__entry->count = args->length;
__entry->error = error;
__entry->error = error < 0 ? -error : 0;
__entry->stateid_seq =
be32_to_cpu(state->stateid.seqid);
__entry->stateid_hash =

View File

@ -4316,11 +4316,14 @@ static int decode_write_verifier(struct xdr_stream *xdr, struct nfs_write_verifi
static int decode_commit(struct xdr_stream *xdr, struct nfs_commitres *res)
{
struct nfs_writeverf *verf = res->verf;
int status;
status = decode_op_hdr(xdr, OP_COMMIT);
if (!status)
status = decode_write_verifier(xdr, &res->verf->verifier);
status = decode_write_verifier(xdr, &verf->verifier);
if (!status)
verf->committed = NFS_FILE_SYNC;
return status;
}

View File

@ -1425,7 +1425,7 @@ retry:
/* lo ref dropped in pnfs_roc_release() */
layoutreturn = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
/* If the creds don't match, we can't compound the layoutreturn */
if (!layoutreturn || cred != lo->plh_lc_cred)
if (!layoutreturn || cred_fscmp(cred, lo->plh_lc_cred) != 0)
goto out_noroc;
roc = layoutreturn;

View File

@ -31,12 +31,11 @@ EXPORT_SYMBOL_GPL(pnfs_generic_rw_release);
/* Fake up some data that will cause nfs_commit_release to retry the writes. */
void pnfs_generic_prepare_to_resend_writes(struct nfs_commit_data *data)
{
struct nfs_page *first = nfs_list_entry(data->pages.next);
struct nfs_writeverf *verf = data->res.verf;
data->task.tk_status = 0;
memcpy(&data->verf.verifier, &first->wb_verf,
sizeof(data->verf.verifier));
data->verf.verifier.data[0]++; /* ensure verifier mismatch */
memset(&verf->verifier, 0, sizeof(verf->verifier));
verf->committed = NFS_UNSTABLE;
}
EXPORT_SYMBOL_GPL(pnfs_generic_prepare_to_resend_writes);

View File

@ -243,7 +243,15 @@ out:
/* A writeback failed: mark the page as bad, and invalidate the page cache */
static void nfs_set_pageerror(struct address_space *mapping)
{
struct inode *inode = mapping->host;
nfs_zap_mapping(mapping->host, mapping);
/* Force file size revalidation */
spin_lock(&inode->i_lock);
NFS_I(inode)->cache_validity |= NFS_INO_REVAL_FORCED |
NFS_INO_REVAL_PAGECACHE |
NFS_INO_INVALID_SIZE;
spin_unlock(&inode->i_lock);
}
static void nfs_mapping_set_error(struct page *page, int error)
@ -1829,6 +1837,7 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata)
static void nfs_commit_release_pages(struct nfs_commit_data *data)
{
const struct nfs_writeverf *verf = data->res.verf;
struct nfs_page *req;
int status = data->task.tk_status;
struct nfs_commit_info cinfo;
@ -1856,7 +1865,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
/* Okay, COMMIT succeeded, apparently. Check the verifier
* returned by the server against all stored verfs. */
if (!nfs_write_verifier_cmp(&req->wb_verf, &data->verf.verifier)) {
if (verf->committed > NFS_UNSTABLE &&
!nfs_write_verifier_cmp(&req->wb_verf, &verf->verifier)) {
/* We have a match */
if (req->wb_page)
nfs_inode_remove_request(req);
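
The interaction with the pNFS resend path above can be summarized as a small decision table, derived from this check and the NFS_UNSTABLE < NFS_DATA_SYNC < NFS_FILE_SYNC ordering of the stable_how values:

/* Outcome of the commit verifier check (illustrative):
 *   verf->committed == NFS_UNSTABLE (e.g. faked by the resend path,
 *   or never filled in by a decoder)            -> treat as mismatch,
 *                                                  rewrite the request;
 *   verf->committed >  NFS_UNSTABLE, verifiers equal  -> request done;
 *   verf->committed >  NFS_UNSTABLE, verifiers differ -> rewrite.
 */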

View File

@ -4252,6 +4252,9 @@ static inline int ib_check_mr_access(int flags)
!(flags & IB_ACCESS_LOCAL_WRITE))
return -EINVAL;
if (flags & ~IB_ACCESS_SUPPORTED)
return -EINVAL;
return 0;
}

View File

@ -7090,6 +7090,12 @@ static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
if (parent)
sched_online_group(tg, parent);
#ifdef CONFIG_UCLAMP_TASK_GROUP
/* Propagate the effective uclamp value for the new group */
cpu_util_update_eff(css);
#endif
return 0;
}

View File

@ -643,9 +643,10 @@ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
return ERR_PTR(-ENOMEM);
bpf_map_init_from_attr(&smap->map, attr);
nbuckets = roundup_pow_of_two(num_possible_cpus());
/* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
smap->bucket_log = max_t(u32, 1, ilog2(roundup_pow_of_two(num_possible_cpus())));
nbuckets = 1U << smap->bucket_log;
nbuckets = max_t(u32, 2, nbuckets);
smap->bucket_log = ilog2(nbuckets);
cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap);
ret = bpf_map_charge_init(&smap->map.memory, cost);
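
A worked example of the new bucket arithmetic (illustrative values only):

/* On a 1-CPU system:
 *   roundup_pow_of_two(1) == 1
 *   max_t(u32, 2, 1)      == 2    -> never fewer than two buckets
 *   ilog2(2)              == 1    -> bucket_log
 * On an 8-CPU system: roundup_pow_of_two(8) == 8, max(2, 8) == 8,
 * ilog2(8) == 3. */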

View File

@ -234,7 +234,6 @@ static void sock_map_free(struct bpf_map *map)
int i;
synchronize_rcu();
rcu_read_lock();
raw_spin_lock_bh(&stab->lock);
for (i = 0; i < stab->map.max_entries; i++) {
struct sock **psk = &stab->sks[i];
@ -243,13 +242,15 @@ static void sock_map_free(struct bpf_map *map)
sk = xchg(psk, NULL);
if (sk) {
lock_sock(sk);
rcu_read_lock();
sock_map_unref(sk, psk);
rcu_read_unlock();
release_sock(sk);
}
}
raw_spin_unlock_bh(&stab->lock);
rcu_read_unlock();
/* wait for psock readers accessing its map link */
synchronize_rcu();
bpf_map_area_free(stab->sks);
@ -416,14 +417,16 @@ static int sock_map_update_elem(struct bpf_map *map, void *key,
ret = -EINVAL;
goto out;
}
if (!sock_map_sk_is_suitable(sk) ||
sk->sk_state != TCP_ESTABLISHED) {
if (!sock_map_sk_is_suitable(sk)) {
ret = -EOPNOTSUPP;
goto out;
}
sock_map_sk_acquire(sk);
ret = sock_map_update_common(map, idx, sk, flags);
if (sk->sk_state != TCP_ESTABLISHED)
ret = -EOPNOTSUPP;
else
ret = sock_map_update_common(map, idx, sk, flags);
sock_map_sk_release(sk);
out:
fput(sock->file);
@ -739,14 +742,16 @@ static int sock_hash_update_elem(struct bpf_map *map, void *key,
ret = -EINVAL;
goto out;
}
if (!sock_map_sk_is_suitable(sk) ||
sk->sk_state != TCP_ESTABLISHED) {
if (!sock_map_sk_is_suitable(sk)) {
ret = -EOPNOTSUPP;
goto out;
}
sock_map_sk_acquire(sk);
ret = sock_hash_update_common(map, key, sk, flags);
if (sk->sk_state != TCP_ESTABLISHED)
ret = -EOPNOTSUPP;
else
ret = sock_hash_update_common(map, key, sk, flags);
sock_map_sk_release(sk);
out:
fput(sock->file);
@ -859,19 +864,22 @@ static void sock_hash_free(struct bpf_map *map)
int i;
synchronize_rcu();
rcu_read_lock();
for (i = 0; i < htab->buckets_num; i++) {
bucket = sock_hash_select_bucket(htab, i);
raw_spin_lock_bh(&bucket->lock);
hlist_for_each_entry_safe(elem, node, &bucket->head, node) {
hlist_del_rcu(&elem->node);
lock_sock(elem->sk);
rcu_read_lock();
sock_map_unref(elem->sk, elem);
rcu_read_unlock();
release_sock(elem->sk);
}
raw_spin_unlock_bh(&bucket->lock);
}
rcu_read_unlock();
/* wait for psock readers accessing its map link */
synchronize_rcu();
bpf_map_area_free(htab->buckets);
kfree(htab);
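
Both free paths follow the same locking rule after this rework, stated here as a hedged summary rather than patch text:

/* lock_sock() may sleep, so it must not be taken under a raw spinlock or
 * across a whole RCU read-side section; the RCU read lock is now held
 * only around the non-sleeping sock_map_unref(), and the final
 * synchronize_rcu() waits out any psock readers of the map link before
 * the backing memory is freed. */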

View File

@ -136,28 +136,15 @@ struct hvsock {
****************************************************************************
* The only valid Service GUIDs, from the perspectives of both the host and *
* Linux VM, that can be connected by the other end, must conform to this *
* format: <port>-facb-11e6-bd58-64006a7986d3, and the "port" must be in *
* this range [0, 0x7FFFFFFF]. *
* format: <port>-facb-11e6-bd58-64006a7986d3. *
****************************************************************************
*
* When we write apps on the host to connect(), the GUID ServiceID is used.
* When we write apps in Linux VM to connect(), we only need to specify the
* port and the driver will form the GUID and use that to request the host.
*
* From the perspective of Linux VM:
* 1. the local ephemeral port (i.e. the local auto-bound port when we call
* connect() without explicit bind()) is generated by __vsock_bind_stream(),
* and the range is [1024, 0xFFFFFFFF).
* 2. the remote ephemeral port (i.e. the auto-generated remote port for
* a connect request initiated by the host's connect()) is generated by
* hvs_remote_addr_init() and the range is [0x80000000, 0xFFFFFFFF).
*/
#define MAX_LISTEN_PORT ((u32)0x7FFFFFFF)
#define MAX_VM_LISTEN_PORT MAX_LISTEN_PORT
#define MAX_HOST_LISTEN_PORT MAX_LISTEN_PORT
#define MIN_HOST_EPHEMERAL_PORT (MAX_HOST_LISTEN_PORT + 1)
/* 00000000-facb-11e6-bd58-64006a7986d3 */
static const guid_t srv_id_template =
GUID_INIT(0x00000000, 0xfacb, 0x11e6, 0xbd, 0x58,
@ -180,33 +167,6 @@ static void hvs_addr_init(struct sockaddr_vm *addr, const guid_t *svr_id)
vsock_addr_init(addr, VMADDR_CID_ANY, port);
}
static void hvs_remote_addr_init(struct sockaddr_vm *remote,
struct sockaddr_vm *local)
{
static u32 host_ephemeral_port = MIN_HOST_EPHEMERAL_PORT;
struct sock *sk;
vsock_addr_init(remote, VMADDR_CID_ANY, VMADDR_PORT_ANY);
while (1) {
/* Wrap around ? */
if (host_ephemeral_port < MIN_HOST_EPHEMERAL_PORT ||
host_ephemeral_port == VMADDR_PORT_ANY)
host_ephemeral_port = MIN_HOST_EPHEMERAL_PORT;
remote->svm_port = host_ephemeral_port++;
sk = vsock_find_connected_socket(remote, local);
if (!sk) {
/* Found an available ephemeral port */
return;
}
/* Release refcnt got in vsock_find_connected_socket */
sock_put(sk);
}
}
static void hvs_set_channel_pending_send_size(struct vmbus_channel *chan)
{
set_channel_pending_send_size(chan,
@ -336,12 +296,7 @@ static void hvs_open_connection(struct vmbus_channel *chan)
if_type = &chan->offermsg.offer.if_type;
if_instance = &chan->offermsg.offer.if_instance;
conn_from_host = chan->offermsg.offer.u.pipe.user_def[0];
/* The host or the VM should only listen on a port in
* [0, MAX_LISTEN_PORT]
*/
if (!is_valid_srv_id(if_type) ||
get_port_by_srv_id(if_type) > MAX_LISTEN_PORT)
if (!is_valid_srv_id(if_type))
return;
hvs_addr_init(&addr, conn_from_host ? if_type : if_instance);
@ -365,6 +320,13 @@ static void hvs_open_connection(struct vmbus_channel *chan)
new->sk_state = TCP_SYN_SENT;
vnew = vsock_sk(new);
hvs_addr_init(&vnew->local_addr, if_type);
/* Remote peer is always the host */
vsock_addr_init(&vnew->remote_addr,
VMADDR_CID_HOST, VMADDR_PORT_ANY);
vnew->remote_addr.svm_port = get_port_by_srv_id(if_instance);
hvs_new = vnew->trans;
hvs_new->chan = chan;
} else {
@ -429,8 +391,6 @@ static void hvs_open_connection(struct vmbus_channel *chan)
sk->sk_ack_backlog++;
hvs_addr_init(&vnew->local_addr, if_type);
hvs_remote_addr_init(&vnew->remote_addr, &vnew->local_addr);
hvs_new->vm_srv_id = *if_type;
hvs_new->host_srv_id = *if_instance;
@ -753,16 +713,6 @@ static bool hvs_stream_is_active(struct vsock_sock *vsk)
static bool hvs_stream_allow(u32 cid, u32 port)
{
/* The host's port range [MIN_HOST_EPHEMERAL_PORT, 0xFFFFFFFF) is
* reserved as ephemeral ports, which are used as the host's ports
* when the host initiates connections.
*
* Perform this check in the guest so an immediate error is produced
* instead of a timeout.
*/
if (port > MAX_HOST_LISTEN_PORT)
return false;
if (cid == VMADDR_CID_HOST)
return true;
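
To illustrate the <port>-facb-11e6-bd58-64006a7986d3 convention described above, a hedged sketch of forming a service GUID from a vsock port (hvs_format_srv_id is a hypothetical name; it assumes the little-endian byte layout GUID_INIT uses for the first 32-bit field):

static void hvs_format_srv_id(guid_t *id, u32 port)
{
	__le32 p = cpu_to_le32(port);

	*id = srv_id_template;			/* xxxxxxxx-facb-11e6-... */
	memcpy(&id->b[0], &p, sizeof(p));	/* splice the port into field 1 */
}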

View File

@ -424,7 +424,7 @@ static inline int avc_xperms_audit(struct selinux_state *state,
if (likely(!audited))
return 0;
return slow_avc_audit(state, ssid, tsid, tclass, requested,
audited, denied, result, ad, 0);
audited, denied, result, ad);
}
static void avc_node_free(struct rcu_head *rhead)
@ -758,8 +758,7 @@ static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
noinline int slow_avc_audit(struct selinux_state *state,
u32 ssid, u32 tsid, u16 tclass,
u32 requested, u32 audited, u32 denied, int result,
struct common_audit_data *a,
unsigned int flags)
struct common_audit_data *a)
{
struct common_audit_data stack_data;
struct selinux_audit_data sad;
@ -772,17 +771,6 @@ noinline int slow_avc_audit(struct selinux_state *state,
a->type = LSM_AUDIT_DATA_NONE;
}
/*
* When in a RCU walk do the audit on the RCU retry. This is because
* the collection of the dname in an inode audit message is not RCU
* safe. Note this may drop some audits when the situation changes
* during retry. However this is logically just as if the operation
* happened a little later.
*/
if ((a->type == LSM_AUDIT_DATA_INODE) &&
(flags & MAY_NOT_BLOCK))
return -ECHILD;
sad.tclass = tclass;
sad.requested = requested;
sad.ssid = ssid;
@ -855,15 +843,14 @@ static int avc_update_node(struct selinux_avc *avc,
/*
* If we are in a non-blocking code path, e.g. VFS RCU walk,
* then we must not add permissions to a cache entry
* because we cannot safely audit the denial. Otherwise,
* because we will not audit the denial. Otherwise,
* during the subsequent blocking retry (e.g. VFS ref walk), we
* will find the permissions already granted in the cache entry
* and won't audit anything at all, leading to silent denials in
* permissive mode that only appear when in enforcing mode.
*
* See the corresponding handling in slow_avc_audit(), and the
* logic in selinux_inode_permission for the MAY_NOT_BLOCK flag,
* which is transliterated into AVC_NONBLOCKING.
* See the corresponding handling of MAY_NOT_BLOCK in avc_audit()
* and selinux_inode_permission().
*/
if (flags & AVC_NONBLOCKING)
return 0;
@ -1205,6 +1192,25 @@ int avc_has_perm(struct selinux_state *state, u32 ssid, u32 tsid, u16 tclass,
return rc;
}
int avc_has_perm_flags(struct selinux_state *state,
u32 ssid, u32 tsid, u16 tclass, u32 requested,
struct common_audit_data *auditdata,
int flags)
{
struct av_decision avd;
int rc, rc2;
rc = avc_has_perm_noaudit(state, ssid, tsid, tclass, requested,
(flags & MAY_NOT_BLOCK) ? AVC_NONBLOCKING : 0,
&avd);
rc2 = avc_audit(state, ssid, tsid, tclass, requested, &avd, rc,
auditdata, flags);
if (rc2)
return rc2;
return rc;
}
u32 avc_policy_seqno(struct selinux_state *state)
{
return state->avc->avc_cache.latest_notif;
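
Usage of the new entry point appears in the hooks change below; shown here in isolation (mirroring selinux_inode_follow_link(), with rcu indicating an RCU path walk):

rc = avc_has_perm_flags(&selinux_state, sid, isec->sid, isec->sclass,
			FILE__READ, &ad, rcu ? MAY_NOT_BLOCK : 0);
/* On an RCU walk that would need an audit record, the AVC now returns
 * -ECHILD so the VFS retries with ref-walk instead of auditing in a
 * non-blocking context. */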

View File

@ -2766,6 +2766,14 @@ static int selinux_mount(const char *dev_name,
return path_has_perm(cred, path, FILE__MOUNTON);
}
static int selinux_move_mount(const struct path *from_path,
const struct path *to_path)
{
const struct cred *cred = current_cred();
return path_has_perm(cred, to_path, FILE__MOUNTON);
}
static int selinux_umount(struct vfsmount *mnt, int flags)
{
const struct cred *cred = current_cred();
@ -3008,14 +3016,14 @@ static int selinux_inode_follow_link(struct dentry *dentry, struct inode *inode,
if (IS_ERR(isec))
return PTR_ERR(isec);
return avc_has_perm(&selinux_state,
sid, isec->sid, isec->sclass, FILE__READ, &ad);
return avc_has_perm_flags(&selinux_state,
sid, isec->sid, isec->sclass, FILE__READ, &ad,
rcu ? MAY_NOT_BLOCK : 0);
}
static noinline int audit_inode_permission(struct inode *inode,
u32 perms, u32 audited, u32 denied,
int result,
unsigned flags)
int result)
{
struct common_audit_data ad;
struct inode_security_struct *isec = selinux_inode(inode);
@ -3026,7 +3034,7 @@ static noinline int audit_inode_permission(struct inode *inode,
rc = slow_avc_audit(&selinux_state,
current_sid(), isec->sid, isec->sclass, perms,
audited, denied, result, &ad, flags);
audited, denied, result, &ad);
if (rc)
return rc;
return 0;
@ -3073,7 +3081,11 @@ static int selinux_inode_permission(struct inode *inode, int mask)
if (likely(!audited))
return rc;
rc2 = audit_inode_permission(inode, perms, audited, denied, rc, flags);
/* fall back to ref-walk if we have to generate audit */
if (flags & MAY_NOT_BLOCK)
return -ECHILD;
rc2 = audit_inode_permission(inode, perms, audited, denied, rc);
if (rc2)
return rc2;
return rc;
@ -6834,6 +6846,8 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(sb_clone_mnt_opts, selinux_sb_clone_mnt_opts),
LSM_HOOK_INIT(sb_add_mnt_opt, selinux_add_mnt_opt),
LSM_HOOK_INIT(move_mount, selinux_move_mount),
LSM_HOOK_INIT(dentry_init_security, selinux_dentry_init_security),
LSM_HOOK_INIT(dentry_create_files_as, selinux_dentry_create_files_as),

View File

@ -100,8 +100,7 @@ static inline u32 avc_audit_required(u32 requested,
int slow_avc_audit(struct selinux_state *state,
u32 ssid, u32 tsid, u16 tclass,
u32 requested, u32 audited, u32 denied, int result,
struct common_audit_data *a,
unsigned flags);
struct common_audit_data *a);
/**
* avc_audit - Audit the granting or denial of permissions.
@ -135,9 +134,12 @@ static inline int avc_audit(struct selinux_state *state,
audited = avc_audit_required(requested, avd, result, 0, &denied);
if (likely(!audited))
return 0;
/* fall back to ref-walk if we have to generate audit */
if (flags & MAY_NOT_BLOCK)
return -ECHILD;
return slow_avc_audit(state, ssid, tsid, tclass,
requested, audited, denied, result,
a, flags);
a);
}
#define AVC_STRICT 1 /* Ignore permissive mode. */
@ -153,6 +155,11 @@ int avc_has_perm(struct selinux_state *state,
u32 ssid, u32 tsid,
u16 tclass, u32 requested,
struct common_audit_data *auditdata);
int avc_has_perm_flags(struct selinux_state *state,
u32 ssid, u32 tsid,
u16 tclass, u32 requested,
struct common_audit_data *auditdata,
int flags);
int avc_has_extended_perms(struct selinux_state *state,
u32 ssid, u32 tsid, u16 tclass, u32 requested,

View File

@ -2297,42 +2297,81 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
}
EXPORT_SYMBOL_GPL(dpcm_be_dai_trigger);
static int dpcm_dai_trigger_fe_be(struct snd_pcm_substream *substream,
int cmd, bool fe_first)
{
struct snd_soc_pcm_runtime *fe = substream->private_data;
int ret;
/* call trigger on the frontend before the backend. */
if (fe_first) {
dev_dbg(fe->dev, "ASoC: pre trigger FE %s cmd %d\n",
fe->dai_link->name, cmd);
ret = soc_pcm_trigger(substream, cmd);
if (ret < 0)
return ret;
ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
return ret;
}
/* call trigger on the frontend after the backend. */
ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
if (ret < 0)
return ret;
dev_dbg(fe->dev, "ASoC: post trigger FE %s cmd %d\n",
fe->dai_link->name, cmd);
ret = soc_pcm_trigger(substream, cmd);
return ret;
}
static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct snd_soc_pcm_runtime *fe = substream->private_data;
int stream = substream->stream, ret;
int stream = substream->stream;
int ret = 0;
enum snd_soc_dpcm_trigger trigger = fe->dai_link->trigger[stream];
fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
switch (trigger) {
case SND_SOC_DPCM_TRIGGER_PRE:
/* call trigger on the frontend before the backend. */
dev_dbg(fe->dev, "ASoC: pre trigger FE %s cmd %d\n",
fe->dai_link->name, cmd);
ret = soc_pcm_trigger(substream, cmd);
if (ret < 0) {
dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret);
goto out;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
ret = dpcm_dai_trigger_fe_be(substream, cmd, true);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
ret = dpcm_dai_trigger_fe_be(substream, cmd, false);
break;
default:
ret = -EINVAL;
break;
}
ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
break;
case SND_SOC_DPCM_TRIGGER_POST:
/* call trigger on the frontend after the backend. */
ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
if (ret < 0) {
dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret);
goto out;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
ret = dpcm_dai_trigger_fe_be(substream, cmd, false);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
ret = dpcm_dai_trigger_fe_be(substream, cmd, true);
break;
default:
ret = -EINVAL;
break;
}
dev_dbg(fe->dev, "ASoC: post trigger FE %s cmd %d\n",
fe->dai_link->name, cmd);
ret = soc_pcm_trigger(substream, cmd);
break;
case SND_SOC_DPCM_TRIGGER_BESPOKE:
/* bespoke trigger() - handles both FE and BEs */
@ -2341,10 +2380,6 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
fe->dai_link->name, cmd);
ret = soc_pcm_bespoke_trigger(substream, cmd);
if (ret < 0) {
dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret);
goto out;
}
break;
default:
dev_err(fe->dev, "ASoC: invalid trigger cmd %d for %s\n", cmd,
@ -2353,6 +2388,12 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
goto out;
}
if (ret < 0) {
dev_err(fe->dev, "ASoC: trigger FE cmd: %d failed: %d\n",
cmd, ret);
goto out;
}
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
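
The net FE/BE ordering after this refactor, derived from the two switches above:

/*   TRIGGER_PRE  + START/RESUME/PAUSE_RELEASE -> FE first, then BE
 *   TRIGGER_PRE  + STOP/SUSPEND/PAUSE_PUSH    -> BE first, then FE
 *   TRIGGER_POST + START/RESUME/PAUSE_RELEASE -> BE first, then FE
 *   TRIGGER_POST + STOP/SUSPEND/PAUSE_PUSH    -> FE first, then BE
 */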

View File

@ -500,7 +500,7 @@ static int do_dump(int argc, char **argv)
buf = (unsigned char *)(info->jited_prog_insns);
member_len = info->jited_prog_len;
} else { /* DUMP_XLATED */
if (info->xlated_prog_len == 0) {
if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) {
p_err("error retrieving insn dump: kernel.kptr_restrict set?");
goto err_free;
}

View File

@ -15,7 +15,7 @@ include $(srctree)/../../scripts/Makefile.include
OUTPUT=$(srctree)/
ifeq ("$(origin O)", "command line")
OUTPUT := $(O)/power/acpi/
OUTPUT := $(O)/tools/power/acpi/
endif
#$(info Determined 'OUTPUT' to be $(OUTPUT))

View File

@ -0,0 +1,74 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Cloudflare
#include "test_progs.h"
static int connected_socket_v4(void)
{
struct sockaddr_in addr = {
.sin_family = AF_INET,
.sin_port = htons(80),
.sin_addr = { inet_addr("127.0.0.1") },
};
socklen_t len = sizeof(addr);
int s, repair, err;
s = socket(AF_INET, SOCK_STREAM, 0);
if (CHECK_FAIL(s == -1))
goto error;
repair = TCP_REPAIR_ON;
err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
if (CHECK_FAIL(err))
goto error;
err = connect(s, (struct sockaddr *)&addr, len);
if (CHECK_FAIL(err))
goto error;
repair = TCP_REPAIR_OFF_NO_WP;
err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
if (CHECK_FAIL(err))
goto error;
return s;
error:
perror(__func__);
close(s);
return -1;
}
/* Create a map, populate it with one socket, and free the map. */
static void test_sockmap_create_update_free(enum bpf_map_type map_type)
{
const int zero = 0;
int s, map, err;
s = connected_socket_v4();
if (CHECK_FAIL(s == -1))
return;
map = bpf_create_map(map_type, sizeof(int), sizeof(int), 1, 0);
if (CHECK_FAIL(map == -1)) {
perror("bpf_create_map");
goto out;
}
err = bpf_map_update_elem(map, &zero, &s, BPF_NOEXIST);
if (CHECK_FAIL(err)) {
perror("bpf_map_update");
goto out;
}
out:
close(map);
close(s);
}
void test_sockmap_basic(void)
{
if (test__start_subtest("sockmap create_update_free"))
test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKMAP);
if (test__start_subtest("sockhash create_update_free"))
test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKHASH);
}

View File

@ -15,6 +15,10 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#define DFSR_FSC_EXTABT_LPAE 0x10
#define DFSR_FSC_EXTABT_nLPAE 0x08
#define DFSR_LPAE BIT(9)
/*
* Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
*/
@ -181,10 +185,12 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
/* Give the guest an IMPLEMENTATION DEFINED exception */
is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
if (is_lpae)
*fsr = 1 << 9 | 0x34;
else
*fsr = 0x14;
if (is_lpae) {
*fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
} else {
/* no need to shuffle FS[4] into DFSR[10] as its 0 */
*fsr = DFSR_FSC_EXTABT_nLPAE;
}
}
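
The resulting register values, computed from the constants above (illustrative arithmetic only):

/*   LPAE:     DFSR_LPAE | DFSR_FSC_EXTABT_LPAE = BIT(9) | 0x10 = 0x210
 *             (previously 1 << 9 | 0x34 = 0x234)
 *   non-LPAE: DFSR_FSC_EXTABT_nLPAE = 0x08 (previously 0x14)
 */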
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)

View File

@ -805,6 +805,7 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
switch (treg) {
case TIMER_REG_TVAL:
val = timer->cnt_cval - kvm_phys_timer_read() + timer->cntvoff;
val &= lower_32_bits(val);
break;
case TIMER_REG_CTL:
@ -850,7 +851,7 @@ static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
{
switch (treg) {
case TIMER_REG_TVAL:
timer->cnt_cval = kvm_phys_timer_read() - timer->cntvoff + val;
timer->cnt_cval = kvm_phys_timer_read() - timer->cntvoff + (s32)val;
break;
case TIMER_REG_CTL:
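
A worked example of why the (s32) cast matters, on the assumption (from the architecture) that TVAL is a signed 32-bit register:

/* Writing TVAL = 0xffffffff means "-1 tick":
 *   with the cast:    cnt_cval = now - cntvoff + (s32)0xffffffff
 *                              = now - cntvoff - 1
 *   without the cast: cnt_cval = now - cntvoff + 4294967295,
 *                     i.e. off by 2^32.
 * The read side masks with lower_32_bits() for the same reason. */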

View File

@ -2193,7 +2193,8 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
if (!kvm->arch.pgd)
return 0;
trace_kvm_test_age_hva(hva);
return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE,
kvm_test_age_hva_handler, NULL);
}
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)

View File

@ -480,25 +480,45 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
*/
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
struct kvm_pmu *pmu = &vcpu->arch.pmu;
int i;
u64 type, enable, reg;
if (val == 0)
if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
return;
enable = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
/* Weed out disabled counters */
val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
u64 type, reg;
if (!(val & BIT(i)))
continue;
type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
& ARMV8_PMU_EVTYPE_EVENT;
if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
&& (enable & BIT(i))) {
reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
/* PMSWINC only applies to ... SW_INC! */
type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
type &= ARMV8_PMU_EVTYPE_EVENT;
if (type != ARMV8_PMUV3_PERFCTR_SW_INCR)
continue;
/* increment this SW_INCR event counter */
reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
reg = lower_32_bits(reg);
__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
if (reg) /* no overflow on the low part */
continue;
if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) {
/* increment the high counter */
reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1;
reg = lower_32_bits(reg);
__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
if (!reg)
__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg;
if (!reg) /* mark overflow on the high counter */
__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1);
} else {
/* mark overflow on low counter */
__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
}
}
}
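
A worked pass through the chained case, assuming counter i currently holds 0xffffffff (illustrative only):

/* low word:  0xffffffff + 1 -> lower_32_bits() -> 0, so the low counter
 *            wrapped and the chain state decides what happens next;
 * chained:   the high counter i+1 is incremented; only if it wraps too
 *            is the overflow flagged, on PMOVSSET bit (i + 1);
 * unchained: the overflow is flagged directly on PMOVSSET bit i. */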

View File

@ -2472,7 +2472,8 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
target_addr = (u32)(val >> KVM_ITS_CTE_RDBASE_SHIFT);
coll_id = val & KVM_ITS_CTE_ICID_MASK;
if (target_addr >= atomic_read(&kvm->online_vcpus))
if (target_addr != COLLECTION_NOT_MAPPED &&
target_addr >= atomic_read(&kvm->online_vcpus))
return -EINVAL;
collection = find_collection(its, coll_id);