Merge 4.4-rc5 into usb-next as we want those fixes here for testing

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Commit 252ca494ac by Greg Kroah-Hartman, 2015-12-13 19:20:27 -08:00
603 changed files with 6707 additions and 2949 deletions


@ -11,6 +11,10 @@ Required properties:
0 = active high
1 = active low
Optional properties:
- little-endian : GPIO registers are used as little endian. If not
present, registers are used as big endian by default.
Example:
gpio0: gpio@1100 {
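The example above is truncated by the diff context. As a rough sketch only (the compatible string, unit address, and register values below are illustrative assumptions, not taken from this commit), a node using the new optional property could look like:

gpio0: gpio@1100 {
	compatible = "fsl,qoriq-gpio";	/* assumed compatible, shown for illustration */
	reg = <0x1100 0x1000>;
	gpio-controller;
	#gpio-cells = <2>;
	interrupt-controller;
	#interrupt-cells = <2>;
	little-endian;	/* registers are accessed as little endian */
};

Omitting little-endian keeps the default big-endian register access described above.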


@ -8,6 +8,11 @@ Required properties:
- phy-mode: See ethernet.txt file in the same directory
- clocks: a pointer to the reference clock for this device.
Optional properties:
- tx-csum-limit: maximum MTU supported by the port that allows TX checksum.
Value is presented in bytes. If not used, 1600B is set by default for
"marvell,armada-370-neta" and 9800B for others.
Example:
ethernet@d0070000 {
@ -15,6 +20,7 @@ ethernet@d0070000 {
reg = <0xd0070000 0x2500>;
interrupts = <8>;
clocks = <&gate_clk 4>;
tx-csum-limit = <9800>;
status = "okay";
phy = <&phy0>;
phy-mode = "rgmii-id";


@ -318,7 +318,7 @@ M: Zhang Rui <rui.zhang@intel.com>
L: linux-acpi@vger.kernel.org
W: https://01.org/linux-acpi
S: Supported
F: drivers/acpi/video.c
F: drivers/acpi/acpi_video.c
ACPI WMI DRIVER
L: platform-driver-x86@vger.kernel.org
@ -1847,7 +1847,7 @@ S: Supported
F: drivers/net/wireless/ath/ath6kl/
WILOCITY WIL6210 WIRELESS DRIVER
M: Vladimir Kondratiev <qca_vkondrat@qca.qualcomm.com>
M: Maya Erez <qca_merez@qca.qualcomm.com>
L: linux-wireless@vger.kernel.org
L: wil6210@qca.qualcomm.com
S: Supported
@ -2975,6 +2975,7 @@ F: kernel/cpuset.c
CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
M: Johannes Weiner <hannes@cmpxchg.org>
M: Michal Hocko <mhocko@kernel.org>
M: Vladimir Davydov <vdavydov@virtuozzo.com>
L: cgroups@vger.kernel.org
L: linux-mm@kvack.org
S: Maintained
@ -8286,7 +8287,7 @@ F: include/linux/delayacct.h
F: kernel/delayacct.c
PERFORMANCE EVENTS SUBSYSTEM
M: Peter Zijlstra <a.p.zijlstra@chello.nl>
M: Peter Zijlstra <peterz@infradead.org>
M: Ingo Molnar <mingo@redhat.com>
M: Arnaldo Carvalho de Melo <acme@kernel.org>
L: linux-kernel@vger.kernel.org
@ -9427,8 +9428,10 @@ F: include/scsi/sg.h
SCSI SUBSYSTEM
M: "James E.J. Bottomley" <JBottomley@odin.com>
L: linux-scsi@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git
M: "Martin K. Petersen" <martin.petersen@oracle.com>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git
L: linux-scsi@vger.kernel.org
S: Maintained
F: drivers/scsi/
F: include/scsi/
@ -10903,9 +10906,9 @@ S: Maintained
F: drivers/media/tuners/tua9001*
TULIP NETWORK DRIVERS
M: Grant Grundler <grundler@parisc-linux.org>
L: netdev@vger.kernel.org
S: Maintained
L: linux-parisc@vger.kernel.org
S: Orphan
F: drivers/net/ethernet/dec/tulip/
TUN/TAP driver


@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc5
NAME = Blurry Fish Butt
# *DOCUMENTATION*


@ -74,7 +74,7 @@
reg = <0x48240200 0x100>;
interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
clocks = <&dpll_mpu_m2_ck>;
clocks = <&mpu_periphclk>;
};
local_timer: timer@48240600 {
@ -82,7 +82,7 @@
reg = <0x48240600 0x100>;
interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
clocks = <&dpll_mpu_m2_ck>;
clocks = <&mpu_periphclk>;
};
l2-cache-controller@48242000 {


@ -259,6 +259,14 @@
ti,invert-autoidle-bit;
};
mpu_periphclk: mpu_periphclk {
#clock-cells = <0>;
compatible = "fixed-factor-clock";
clocks = <&dpll_mpu_m2_ck>;
clock-mult = <1>;
clock-div = <2>;
};
dpll_ddr_ck: dpll_ddr_ck {
#clock-cells = <0>;
compatible = "ti,am3-dpll-clock";


@ -498,6 +498,7 @@
reg = <0x70000 0x4000>;
interrupts-extended = <&mpic 8>;
clocks = <&gateclk 4>;
tx-csum-limit = <9800>;
status = "disabled";
};


@ -184,6 +184,7 @@
regulator-name = "VDD_SDHC_1V8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-always-on;
};
};
};


@ -118,7 +118,8 @@
sdhci0: sdhci@ab0000 {
compatible = "mrvl,pxav3-mmc";
reg = <0xab0000 0x200>;
clocks = <&chip_clk CLKID_SDIO1XIN>;
clocks = <&chip_clk CLKID_SDIO1XIN>, <&chip_clk CLKID_SDIO>;
clock-names = "io", "core";
interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
@ -126,7 +127,8 @@
sdhci1: sdhci@ab0800 {
compatible = "mrvl,pxav3-mmc";
reg = <0xab0800 0x200>;
clocks = <&chip_clk CLKID_SDIO1XIN>;
clocks = <&chip_clk CLKID_SDIO1XIN>, <&chip_clk CLKID_SDIO>;
clock-names = "io", "core";
interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
@ -135,7 +137,7 @@
compatible = "mrvl,pxav3-mmc";
reg = <0xab1000 0x200>;
interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&chip_clk CLKID_NFC_ECC>, <&chip_clk CLKID_NFC>;
clocks = <&chip_clk CLKID_NFC_ECC>, <&chip_clk CLKID_SDIO>;
clock-names = "io", "core";
status = "disabled";
};


@ -218,6 +218,7 @@
reg = <0x480c8000 0x2000>;
interrupts = <77>;
ti,hwmods = "mailbox";
#mbox-cells = <1>;
ti,mbox-num-users = <4>;
ti,mbox-num-fifos = <12>;
mbox_dsp: mbox_dsp {
@ -279,8 +280,11 @@
ti,spi-num-cs = <4>;
ti,hwmods = "mcspi1";
dmas = <&edma 16 &edma 17
&edma 18 &edma 19>;
dma-names = "tx0", "rx0", "tx1", "rx1";
&edma 18 &edma 19
&edma 20 &edma 21
&edma 22 &edma 23>;
dma-names = "tx0", "rx0", "tx1", "rx1",
"tx2", "rx2", "tx3", "rx3";
};
mmc1: mmc@48060000 {


@ -18,8 +18,3 @@
reg = <0x80000000 0x10000000>;
};
};
&L2 {
arm,data-latency = <2 1 2>;
arm,tag-latency = <3 2 3>;
};


@ -19,7 +19,7 @@
reg = <0x40006000 0x1000>;
cache-unified;
cache-level = <2>;
arm,data-latency = <1 1 1>;
arm,data-latency = <3 3 3>;
arm,tag-latency = <2 2 2>;
};
};


@ -178,8 +178,10 @@
compatible = "fsl,vf610-sai";
reg = <0x40031000 0x1000>;
interrupts = <86 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks VF610_CLK_SAI2>;
clock-names = "sai";
clocks = <&clks VF610_CLK_SAI2>,
<&clks VF610_CLK_SAI2_DIV>,
<&clks 0>, <&clks 0>;
clock-names = "bus", "mclk1", "mclk2", "mclk3";
dma-names = "tx", "rx";
dmas = <&edma0 0 21>,
<&edma0 0 20>;


@ -21,6 +21,7 @@
#ifndef __ASSEMBLY__
#include <linux/io.h>
#include <asm/barrier.h>
#define __ACCESS_CP15(CRn, Op1, CRm, Op2) p15, Op1, %0, CRn, CRm, Op2
#define __ACCESS_CP15_64(Op1, CRm) p15, Op1, %Q0, %R0, CRm


@ -28,6 +28,18 @@
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
u8 reg_num)
{
return *vcpu_reg(vcpu, reg_num);
}
static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
unsigned long val)
{
*vcpu_reg(vcpu, reg_num) = val;
}
bool kvm_condition_valid(struct kvm_vcpu *vcpu);
void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
void kvm_inject_undefined(struct kvm_vcpu *vcpu);


@ -115,7 +115,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
data);
data = vcpu_data_host_to_guest(vcpu, data, len);
*vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt) = data;
vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
}
return 0;
@ -186,7 +186,8 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
rt = vcpu->arch.mmio_decode.rt;
if (is_write) {
data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), len);
data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
len);
trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
mmio_write_buf(data_buf, len, data);


@ -218,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
kvm_tlb_flush_vmid_ipa(kvm, addr);
/* No need to invalidate the cache for device mappings */
if (!kvm_is_device_pfn(__phys_to_pfn(addr)))
if (!kvm_is_device_pfn(pte_pfn(old_pte)))
kvm_flush_dcache_pte(old_pte);
put_page(virt_to_page(pte));
@ -310,7 +310,7 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
pte = pte_offset_kernel(pmd, addr);
do {
if (!pte_none(*pte) && !kvm_is_device_pfn(__phys_to_pfn(addr)))
if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
kvm_flush_dcache_pte(*pte);
} while (pte++, addr += PAGE_SIZE, addr != end);
}


@ -75,7 +75,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
unsigned long context_id;
phys_addr_t target_pc;
cpu_id = *vcpu_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
if (vcpu_mode_is_32bit(source_vcpu))
cpu_id &= ~((u32) 0);
@ -94,8 +94,8 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
return PSCI_RET_INVALID_PARAMS;
}
target_pc = *vcpu_reg(source_vcpu, 2);
context_id = *vcpu_reg(source_vcpu, 3);
target_pc = vcpu_get_reg(source_vcpu, 2);
context_id = vcpu_get_reg(source_vcpu, 3);
kvm_reset_vcpu(vcpu);
@ -114,7 +114,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
* NOTE: We always update r0 (or x0) because for PSCI v0.1
* the general purpose registers are undefined upon CPU_ON.
*/
*vcpu_reg(vcpu, 0) = context_id;
vcpu_set_reg(vcpu, 0, context_id);
vcpu->arch.power_off = false;
smp_mb(); /* Make sure the above is visible */
@ -134,8 +134,8 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
struct kvm *kvm = vcpu->kvm;
struct kvm_vcpu *tmp;
target_affinity = *vcpu_reg(vcpu, 1);
lowest_affinity_level = *vcpu_reg(vcpu, 2);
target_affinity = vcpu_get_reg(vcpu, 1);
lowest_affinity_level = vcpu_get_reg(vcpu, 2);
/* Determine target affinity mask */
target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
@ -209,7 +209,7 @@ int kvm_psci_version(struct kvm_vcpu *vcpu)
static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
int ret = 1;
unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
unsigned long val;
switch (psci_fn) {
@ -273,13 +273,13 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
break;
}
*vcpu_reg(vcpu, 0) = val;
vcpu_set_reg(vcpu, 0, val);
return ret;
}
static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
unsigned long val;
switch (psci_fn) {
@ -295,7 +295,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
break;
}
*vcpu_reg(vcpu, 0) = val;
vcpu_set_reg(vcpu, 0, val);
return 1;
}


@ -4,7 +4,6 @@ menuconfig ARCH_AT91
select ARCH_REQUIRE_GPIOLIB
select COMMON_CLK_AT91
select PINCTRL
select PINCTRL_AT91
select SOC_BUS
if ARCH_AT91
@ -17,6 +16,7 @@ config SOC_SAMA5D2
select HAVE_AT91_USB_CLK
select HAVE_AT91_H32MX
select HAVE_AT91_GENERATED_CLK
select PINCTRL_AT91PIO4
help
Select this if you are using one of Atmel's SAMA5D2 family SoC.
@ -27,6 +27,7 @@ config SOC_SAMA5D3
select HAVE_AT91_UTMI
select HAVE_AT91_SMD
select HAVE_AT91_USB_CLK
select PINCTRL_AT91
help
Select this if you are using one of Atmel's SAMA5D3 family SoC.
This support covers SAMA5D31, SAMA5D33, SAMA5D34, SAMA5D35, SAMA5D36.
@ -40,6 +41,7 @@ config SOC_SAMA5D4
select HAVE_AT91_SMD
select HAVE_AT91_USB_CLK
select HAVE_AT91_H32MX
select PINCTRL_AT91
help
Select this if you are using one of Atmel's SAMA5D4 family SoC.
@ -50,6 +52,7 @@ config SOC_AT91RM9200
select CPU_ARM920T
select HAVE_AT91_USB_CLK
select MIGHT_HAVE_PCI
select PINCTRL_AT91
select SOC_SAM_V4_V5
select SRAM if PM
help
@ -65,6 +68,7 @@ config SOC_AT91SAM9
select HAVE_AT91_UTMI
select HAVE_FB_ATMEL
select MEMORY
select PINCTRL_AT91
select SOC_SAM_V4_V5
select SRAM if PM
help


@ -41,8 +41,10 @@
* implementation should be moved down into the pinctrl driver and get
* called as part of the generic suspend/resume path.
*/
#ifdef CONFIG_PINCTRL_AT91
extern void at91_pinctrl_gpio_suspend(void);
extern void at91_pinctrl_gpio_resume(void);
#endif
static struct {
unsigned long uhp_udp_mask;
@ -151,8 +153,9 @@ static void at91_pm_suspend(suspend_state_t state)
static int at91_pm_enter(suspend_state_t state)
{
#ifdef CONFIG_PINCTRL_AT91
at91_pinctrl_gpio_suspend();
#endif
switch (state) {
/*
* Suspend-to-RAM is like STANDBY plus slow clock mode, so
@ -192,7 +195,9 @@ static int at91_pm_enter(suspend_state_t state)
error:
target_state = PM_SUSPEND_ON;
#ifdef CONFIG_PINCTRL_AT91
at91_pinctrl_gpio_resume();
#endif
return 0;
}


@ -748,8 +748,12 @@ static void exynos5_powerdown_conf(enum sys_powerdown mode)
void exynos_sys_powerdown_conf(enum sys_powerdown mode)
{
unsigned int i;
const struct exynos_pmu_data *pmu_data;
const struct exynos_pmu_data *pmu_data = pmu_context->pmu_data;
if (!pmu_context)
return;
pmu_data = pmu_context->pmu_data;
if (pmu_data->powerdown_conf)
pmu_data->powerdown_conf(mode);


@ -143,7 +143,7 @@ static inline void __indirect_writesl(volatile void __iomem *bus_addr,
writel(*vaddr++, bus_addr);
}
static inline unsigned char __indirect_readb(const volatile void __iomem *p)
static inline u8 __indirect_readb(const volatile void __iomem *p)
{
u32 addr = (u32)p;
u32 n, byte_enables, data;
@ -166,7 +166,7 @@ static inline void __indirect_readsb(const volatile void __iomem *bus_addr,
*vaddr++ = readb(bus_addr);
}
static inline unsigned short __indirect_readw(const volatile void __iomem *p)
static inline u16 __indirect_readw(const volatile void __iomem *p)
{
u32 addr = (u32)p;
u32 n, byte_enables, data;
@ -189,7 +189,7 @@ static inline void __indirect_readsw(const volatile void __iomem *bus_addr,
*vaddr++ = readw(bus_addr);
}
static inline unsigned long __indirect_readl(const volatile void __iomem *p)
static inline u32 __indirect_readl(const volatile void __iomem *p)
{
u32 addr = (__force u32)p;
u32 data;
@ -350,7 +350,7 @@ static inline void insl(u32 io_addr, void *p, u32 count)
((unsigned long)p <= (PIO_MASK + PIO_OFFSET)))
#define ioread8(p) ioread8(p)
static inline unsigned int ioread8(const void __iomem *addr)
static inline u8 ioread8(const void __iomem *addr)
{
unsigned long port = (unsigned long __force)addr;
if (__is_io_address(port))
@ -378,7 +378,7 @@ static inline void ioread8_rep(const void __iomem *addr, void *vaddr, u32 count)
}
#define ioread16(p) ioread16(p)
static inline unsigned int ioread16(const void __iomem *addr)
static inline u16 ioread16(const void __iomem *addr)
{
unsigned long port = (unsigned long __force)addr;
if (__is_io_address(port))
@ -407,7 +407,7 @@ static inline void ioread16_rep(const void __iomem *addr, void *vaddr,
}
#define ioread32(p) ioread32(p)
static inline unsigned int ioread32(const void __iomem *addr)
static inline u32 ioread32(const void __iomem *addr)
{
unsigned long port = (unsigned long __force)addr;
if (__is_io_address(port))


@ -121,6 +121,7 @@ config ARCH_OMAP2PLUS_TYPICAL
select NEON if CPU_V7
select PM
select REGULATOR
select REGULATOR_FIXED_VOLTAGE
select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4
select TWL4030_POWER if ARCH_OMAP3 || ARCH_OMAP4
select VFP
@ -201,7 +202,6 @@ config MACH_OMAP3_PANDORA
depends on ARCH_OMAP3
default y
select OMAP_PACKAGE_CBB
select REGULATOR_FIXED_VOLTAGE if REGULATOR
config MACH_NOKIA_N810
bool


@ -889,6 +889,7 @@ static void __init e680_init(void)
pxa_set_keypad_info(&e680_keypad_platform_data);
pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
platform_add_devices(ARRAY_AND_SIZE(e680_devices));
}
@ -956,6 +957,7 @@ static void __init a1200_init(void)
pxa_set_keypad_info(&a1200_keypad_platform_data);
pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
platform_add_devices(ARRAY_AND_SIZE(a1200_devices));
}
@ -1148,6 +1150,7 @@ static void __init a910_init(void)
platform_device_register(&a910_camera);
}
pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
platform_add_devices(ARRAY_AND_SIZE(a910_devices));
}
@ -1215,6 +1218,7 @@ static void __init e6_init(void)
pxa_set_keypad_info(&e6_keypad_platform_data);
pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
platform_add_devices(ARRAY_AND_SIZE(e6_devices));
}
@ -1256,6 +1260,7 @@ static void __init e2_init(void)
pxa_set_keypad_info(&e2_keypad_platform_data);
pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
platform_add_devices(ARRAY_AND_SIZE(e2_devices));
}


@ -20,7 +20,7 @@
#include <plat/cpu.h>
#include <plat/cpu-freq-core.h>
static struct cpufreq_frequency_table s3c2440_plls_12[] __initdata = {
static struct cpufreq_frequency_table s3c2440_plls_12[] = {
{ .frequency = 75000000, .driver_data = PLLVAL(0x75, 3, 3), }, /* FVco 600.000000 */
{ .frequency = 80000000, .driver_data = PLLVAL(0x98, 4, 3), }, /* FVco 640.000000 */
{ .frequency = 90000000, .driver_data = PLLVAL(0x70, 2, 3), }, /* FVco 720.000000 */


@ -20,7 +20,7 @@
#include <plat/cpu.h>
#include <plat/cpu-freq-core.h>
static struct cpufreq_frequency_table s3c2440_plls_169344[] __initdata = {
static struct cpufreq_frequency_table s3c2440_plls_169344[] = {
{ .frequency = 78019200, .driver_data = PLLVAL(121, 5, 3), }, /* FVco 624.153600 */
{ .frequency = 84067200, .driver_data = PLLVAL(131, 5, 3), }, /* FVco 672.537600 */
{ .frequency = 90115200, .driver_data = PLLVAL(141, 5, 3), }, /* FVco 720.921600 */


@ -269,6 +269,7 @@
clock-frequency = <0>; /* Updated by bootloader */
voltage-ranges = <1800 1800 3300 3300>;
sdhci,auto-cmd12;
little-endian;
bus-width = <4>;
};
@ -277,6 +278,7 @@
reg = <0x0 0x2300000 0x0 0x10000>;
interrupts = <0 36 0x4>; /* Level high type */
gpio-controller;
little-endian;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
@ -287,6 +289,7 @@
reg = <0x0 0x2310000 0x0 0x10000>;
interrupts = <0 36 0x4>; /* Level high type */
gpio-controller;
little-endian;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
@ -297,6 +300,7 @@
reg = <0x0 0x2320000 0x0 0x10000>;
interrupts = <0 37 0x4>; /* Level high type */
gpio-controller;
little-endian;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
@ -307,6 +311,7 @@
reg = <0x0 0x2330000 0x0 0x10000>;
interrupts = <0 37 0x4>; /* Level high type */
gpio-controller;
little-endian;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;


@ -77,6 +77,7 @@
#ifndef __ASSEMBLY__
#include <linux/stringify.h>
#include <asm/barrier.h>
/*
* Low-level accessors


@ -100,13 +100,21 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
}
/*
* vcpu_reg should always be passed a register number coming from a
* read of ESR_EL2. Otherwise, it may give the wrong result on AArch32
* with banked registers.
* vcpu_get_reg and vcpu_set_reg should always be passed a register number
* coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
* AArch32 with banked registers.
*/
static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
u8 reg_num)
{
return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}
static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
unsigned long val)
{
if (reg_num != 31)
vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
}
/* Get vcpu SPSR for current mode */


@ -276,10 +276,14 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
* hardware updates of the pte (ptep_set_access_flags safely changes
* valid ptes without going through an invalid entry).
*/
if (IS_ENABLED(CONFIG_DEBUG_VM) && IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
pte_valid(*ptep)) {
BUG_ON(!pte_young(pte));
BUG_ON(pte_write(*ptep) && !pte_dirty(pte));
if (IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
pte_valid(*ptep) && pte_valid(pte)) {
VM_WARN_ONCE(!pte_young(pte),
"%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
__func__, pte_val(*ptep), pte_val(pte));
VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(pte),
"%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
__func__, pte_val(*ptep), pte_val(pte));
}
set_pte(ptep, pte);


@ -5,6 +5,7 @@
*/
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/kernel-pgtable.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
@ -140,7 +141,7 @@ SECTIONS
ARM_EXIT_KEEP(EXIT_DATA)
}
PERCPU_SECTION(64)
PERCPU_SECTION(L1_CACHE_BYTES)
. = ALIGN(PAGE_SIZE);
__init_end = .;
@ -158,7 +159,7 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
_data = .;
_sdata = .;
RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
PECOFF_EDATA_PADDING
_edata = .;


@ -37,7 +37,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
int ret;
trace_kvm_hvc_arm64(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
kvm_vcpu_hvc_get_imm(vcpu));
ret = kvm_psci_call(vcpu);


@ -78,7 +78,7 @@ static u32 get_ccsidr(u32 csselr)
* See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
*/
static bool access_dcsw(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
if (!p->is_write)
@ -94,21 +94,19 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
* sys_regs and leave it in complete control of the caches.
*/
static bool access_vm_reg(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
unsigned long val;
bool was_enabled = vcpu_has_cache_enabled(vcpu);
BUG_ON(!p->is_write);
val = *vcpu_reg(vcpu, p->Rt);
if (!p->is_aarch32) {
vcpu_sys_reg(vcpu, r->reg) = val;
vcpu_sys_reg(vcpu, r->reg) = p->regval;
} else {
if (!p->is_32bit)
vcpu_cp15_64_high(vcpu, r->reg) = val >> 32;
vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval);
vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval);
}
kvm_toggle_cache(vcpu, was_enabled);
@ -122,22 +120,19 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
* for both AArch64 and AArch32 accesses.
*/
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
u64 val;
if (!p->is_write)
return read_from_write_only(vcpu, p);
val = *vcpu_reg(vcpu, p->Rt);
vgic_v3_dispatch_sgi(vcpu, val);
vgic_v3_dispatch_sgi(vcpu, p->regval);
return true;
}
static bool trap_raz_wi(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
if (p->is_write)
@ -147,19 +142,19 @@ static bool trap_raz_wi(struct kvm_vcpu *vcpu,
}
static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
if (p->is_write) {
return ignore_write(vcpu, p);
} else {
*vcpu_reg(vcpu, p->Rt) = (1 << 3);
p->regval = (1 << 3);
return true;
}
}
static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
if (p->is_write) {
@ -167,7 +162,7 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
} else {
u32 val;
asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
*vcpu_reg(vcpu, p->Rt) = val;
p->regval = val;
return true;
}
}
@ -200,17 +195,17 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
* now use the debug registers.
*/
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
if (p->is_write) {
vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
vcpu_sys_reg(vcpu, r->reg) = p->regval;
vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
} else {
*vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
p->regval = vcpu_sys_reg(vcpu, r->reg);
}
trace_trap_reg(__func__, r->reg, p->is_write, *vcpu_reg(vcpu, p->Rt));
trace_trap_reg(__func__, r->reg, p->is_write, p->regval);
return true;
}
@ -225,10 +220,10 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu,
* hyp.S code switches between host and guest values in future.
*/
static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
struct sys_reg_params *p,
u64 *dbg_reg)
{
u64 val = *vcpu_reg(vcpu, p->Rt);
u64 val = p->regval;
if (p->is_32bit) {
val &= 0xffffffffUL;
@ -240,19 +235,16 @@ static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
}
static inline void dbg_to_reg(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
struct sys_reg_params *p,
u64 *dbg_reg)
{
u64 val = *dbg_reg;
p->regval = *dbg_reg;
if (p->is_32bit)
val &= 0xffffffffUL;
*vcpu_reg(vcpu, p->Rt) = val;
p->regval &= 0xffffffffUL;
}
static inline bool trap_bvr(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
@ -294,7 +286,7 @@ static inline void reset_bvr(struct kvm_vcpu *vcpu,
}
static inline bool trap_bcr(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
@ -337,7 +329,7 @@ static inline void reset_bcr(struct kvm_vcpu *vcpu,
}
static inline bool trap_wvr(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
@ -380,7 +372,7 @@ static inline void reset_wvr(struct kvm_vcpu *vcpu,
}
static inline bool trap_wcr(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
@ -687,7 +679,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
};
static bool trap_dbgidr(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
if (p->is_write) {
@ -697,23 +689,23 @@ static bool trap_dbgidr(struct kvm_vcpu *vcpu,
u64 pfr = read_system_reg(SYS_ID_AA64PFR0_EL1);
u32 el3 = !!cpuid_feature_extract_field(pfr, ID_AA64PFR0_EL3_SHIFT);
*vcpu_reg(vcpu, p->Rt) = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
(((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
(((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) |
(6 << 16) | (el3 << 14) | (el3 << 12));
p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
(((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
(((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
| (6 << 16) | (el3 << 14) | (el3 << 12));
return true;
}
}
static bool trap_debug32(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
if (p->is_write) {
vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
vcpu_cp14(vcpu, r->reg) = p->regval;
vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
} else {
*vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg);
p->regval = vcpu_cp14(vcpu, r->reg);
}
return true;
@ -731,7 +723,7 @@ static bool trap_debug32(struct kvm_vcpu *vcpu,
*/
static inline bool trap_xvr(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
@ -740,12 +732,12 @@ static inline bool trap_xvr(struct kvm_vcpu *vcpu,
u64 val = *dbg_reg;
val &= 0xffffffffUL;
val |= *vcpu_reg(vcpu, p->Rt) << 32;
val |= p->regval << 32;
*dbg_reg = val;
vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
} else {
*vcpu_reg(vcpu, p->Rt) = *dbg_reg >> 32;
p->regval = *dbg_reg >> 32;
}
trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
@ -991,7 +983,7 @@ int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
* Return 0 if the access has been handled, and -1 if not.
*/
static int emulate_cp(struct kvm_vcpu *vcpu,
const struct sys_reg_params *params,
struct sys_reg_params *params,
const struct sys_reg_desc *table,
size_t num)
{
@ -1062,12 +1054,12 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
{
struct sys_reg_params params;
u32 hsr = kvm_vcpu_get_hsr(vcpu);
int Rt = (hsr >> 5) & 0xf;
int Rt2 = (hsr >> 10) & 0xf;
params.is_aarch32 = true;
params.is_32bit = false;
params.CRm = (hsr >> 1) & 0xf;
params.Rt = (hsr >> 5) & 0xf;
params.is_write = ((hsr & 1) == 0);
params.Op0 = 0;
@ -1076,15 +1068,12 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
params.CRn = 0;
/*
* Massive hack here. Store Rt2 in the top 32bits so we only
* have one register to deal with. As we use the same trap
* Make a 64-bit value out of Rt and Rt2. As we use the same trap
* backends between AArch32 and AArch64, we get away with it.
*/
if (params.is_write) {
u64 val = *vcpu_reg(vcpu, params.Rt);
val &= 0xffffffff;
val |= *vcpu_reg(vcpu, Rt2) << 32;
*vcpu_reg(vcpu, params.Rt) = val;
params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
}
if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
@ -1095,11 +1084,10 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
unhandled_cp_access(vcpu, &params);
out:
/* Do the opposite hack for the read side */
/* Split up the value between registers for the read side */
if (!params.is_write) {
u64 val = *vcpu_reg(vcpu, params.Rt);
val >>= 32;
*vcpu_reg(vcpu, Rt2) = val;
vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
}
return 1;
@ -1118,21 +1106,24 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
{
struct sys_reg_params params;
u32 hsr = kvm_vcpu_get_hsr(vcpu);
int Rt = (hsr >> 5) & 0xf;
params.is_aarch32 = true;
params.is_32bit = true;
params.CRm = (hsr >> 1) & 0xf;
params.Rt = (hsr >> 5) & 0xf;
params.regval = vcpu_get_reg(vcpu, Rt);
params.is_write = ((hsr & 1) == 0);
params.CRn = (hsr >> 10) & 0xf;
params.Op0 = 0;
params.Op1 = (hsr >> 14) & 0x7;
params.Op2 = (hsr >> 17) & 0x7;
if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
return 1;
if (!emulate_cp(vcpu, &params, global, nr_global))
if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
!emulate_cp(vcpu, &params, global, nr_global)) {
if (!params.is_write)
vcpu_set_reg(vcpu, Rt, params.regval);
return 1;
}
unhandled_cp_access(vcpu, &params);
return 1;
@ -1175,7 +1166,7 @@ int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
}
static int emulate_sys_reg(struct kvm_vcpu *vcpu,
const struct sys_reg_params *params)
struct sys_reg_params *params)
{
size_t num;
const struct sys_reg_desc *table, *r;
@ -1230,6 +1221,8 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
struct sys_reg_params params;
unsigned long esr = kvm_vcpu_get_hsr(vcpu);
int Rt = (esr >> 5) & 0x1f;
int ret;
trace_kvm_handle_sys_reg(esr);
@ -1240,10 +1233,14 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
params.CRn = (esr >> 10) & 0xf;
params.CRm = (esr >> 1) & 0xf;
params.Op2 = (esr >> 17) & 0x7;
params.Rt = (esr >> 5) & 0x1f;
params.regval = vcpu_get_reg(vcpu, Rt);
params.is_write = !(esr & 1);
return emulate_sys_reg(vcpu, &params);
ret = emulate_sys_reg(vcpu, &params);
if (!params.is_write)
vcpu_set_reg(vcpu, Rt, params.regval);
return ret;
}
/******************************************************************************


@ -28,7 +28,7 @@ struct sys_reg_params {
u8 CRn;
u8 CRm;
u8 Op2;
u8 Rt;
u64 regval;
bool is_write;
bool is_aarch32;
bool is_32bit; /* Only valid if is_aarch32 is true */
@ -44,7 +44,7 @@ struct sys_reg_desc {
/* Trapped access from guest, if non-NULL. */
bool (*access)(struct kvm_vcpu *,
const struct sys_reg_params *,
struct sys_reg_params *,
const struct sys_reg_desc *);
/* Initialization for vcpu. */
@ -77,9 +77,9 @@ static inline bool ignore_write(struct kvm_vcpu *vcpu,
}
static inline bool read_zero(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p)
struct sys_reg_params *p)
{
*vcpu_reg(vcpu, p->Rt) = 0;
p->regval = 0;
return true;
}


@ -31,13 +31,13 @@
#include "sys_regs.h"
static bool access_actlr(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
if (p->is_write)
return ignore_write(vcpu, p);
*vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, ACTLR_EL1);
p->regval = vcpu_sys_reg(vcpu, ACTLR_EL1);
return true;
}


@ -139,6 +139,12 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
/* Stack must be multiples of 16B */
#define STACK_ALIGN(sz) (((sz) + 15) & ~15)
#define _STACK_SIZE \
(MAX_BPF_STACK \
+ 4 /* extra for skb_copy_bits buffer */)
#define STACK_SIZE STACK_ALIGN(_STACK_SIZE)
static void build_prologue(struct jit_ctx *ctx)
{
const u8 r6 = bpf2a64[BPF_REG_6];
@ -150,10 +156,6 @@ static void build_prologue(struct jit_ctx *ctx)
const u8 rx = bpf2a64[BPF_REG_X];
const u8 tmp1 = bpf2a64[TMP_REG_1];
const u8 tmp2 = bpf2a64[TMP_REG_2];
int stack_size = MAX_BPF_STACK;
stack_size += 4; /* extra for skb_copy_bits buffer */
stack_size = STACK_ALIGN(stack_size);
/*
* BPF prog stack layout
@ -165,12 +167,13 @@ static void build_prologue(struct jit_ctx *ctx)
* | ... | callee saved registers
* +-----+
* | | x25/x26
* BPF fp register => -80:+-----+
* BPF fp register => -80:+-----+ <= (BPF_FP)
* | |
* | ... | BPF prog stack
* | |
* | |
* current A64_SP => +-----+
* +-----+ <= (BPF_FP - MAX_BPF_STACK)
* |RSVD | JIT scratchpad
* current A64_SP => +-----+ <= (BPF_FP - STACK_SIZE)
* | |
* | ... | Function call stack
* | |
@ -196,7 +199,7 @@ static void build_prologue(struct jit_ctx *ctx)
emit(A64_MOV(1, fp, A64_SP), ctx);
/* Set up function call stack */
emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx);
emit(A64_SUB_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);
/* Clear registers A and X */
emit_a64_mov_i64(ra, 0, ctx);
@ -213,13 +216,9 @@ static void build_epilogue(struct jit_ctx *ctx)
const u8 fp = bpf2a64[BPF_REG_FP];
const u8 tmp1 = bpf2a64[TMP_REG_1];
const u8 tmp2 = bpf2a64[TMP_REG_2];
int stack_size = MAX_BPF_STACK;
stack_size += 4; /* extra for skb_copy_bits buffer */
stack_size = STACK_ALIGN(stack_size);
/* We're done with BPF stack */
emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx);
emit(A64_ADD_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);
/* Restore fs (x25) and x26 */
emit(A64_POP(fp, A64_R(26), A64_SP), ctx);
@ -591,7 +590,25 @@ emit_cond_jmp:
case BPF_ST | BPF_MEM | BPF_H:
case BPF_ST | BPF_MEM | BPF_B:
case BPF_ST | BPF_MEM | BPF_DW:
goto notyet;
/* Load imm to a register then store it */
ctx->tmp_used = 1;
emit_a64_mov_i(1, tmp2, off, ctx);
emit_a64_mov_i(1, tmp, imm, ctx);
switch (BPF_SIZE(code)) {
case BPF_W:
emit(A64_STR32(tmp, dst, tmp2), ctx);
break;
case BPF_H:
emit(A64_STRH(tmp, dst, tmp2), ctx);
break;
case BPF_B:
emit(A64_STRB(tmp, dst, tmp2), ctx);
break;
case BPF_DW:
emit(A64_STR64(tmp, dst, tmp2), ctx);
break;
}
break;
/* STX: *(size *)(dst + off) = src */
case BPF_STX | BPF_MEM | BPF_W:
@ -658,7 +675,7 @@ emit_cond_jmp:
return -EINVAL;
}
emit_a64_mov_i64(r3, size, ctx);
emit(A64_ADD_I(1, r4, fp, MAX_BPF_STACK), ctx);
emit(A64_SUB_I(1, r4, fp, STACK_SIZE), ctx);
emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
emit(A64_MOV(1, A64_FP, A64_SP), ctx);


@ -14,7 +14,7 @@
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
* Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
*
* ppc:


@ -145,7 +145,7 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
gfp = massage_gfp_flags(dev, gfp);
if (IS_ENABLED(CONFIG_DMA_CMA) && !(gfp & GFP_ATOMIC))
if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
page = dma_alloc_from_contiguous(dev,
count, get_order(size));
if (!page)


@ -1,6 +1,7 @@
config MN10300
def_bool y
select HAVE_OPROFILE
select HAVE_UID16
select GENERIC_IRQ_SHOW
select ARCH_WANT_IPC_PARSE_VERSION
select HAVE_ARCH_TRACEHOOK
@ -37,9 +38,6 @@ config HIGHMEM
config NUMA
def_bool n
config UID16
def_bool y
config RWSEM_GENERIC_SPINLOCK
def_bool y


@ -372,7 +372,8 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
*/
#ifdef CONFIG_HUGETLB_PAGE
#define pte_huge(pte) (pte_val(pte) & _PAGE_HUGE)
#define pte_mkhuge(pte) (__pte(pte_val(pte) | _PAGE_HUGE))
#define pte_mkhuge(pte) (__pte(pte_val(pte) | \
(parisc_requires_coherency() ? 0 : _PAGE_HUGE)))
#else
#define pte_huge(pte) (0)
#define pte_mkhuge(pte) (pte)

View File

@ -360,8 +360,9 @@
#define __NR_execveat (__NR_Linux + 342)
#define __NR_membarrier (__NR_Linux + 343)
#define __NR_userfaultfd (__NR_Linux + 344)
#define __NR_mlock2 (__NR_Linux + 345)
#define __NR_Linux_syscalls (__NR_userfaultfd + 1)
#define __NR_Linux_syscalls (__NR_mlock2 + 1)
#define __IGNORE_select /* newselect */


@ -171,24 +171,6 @@ void pcibios_set_master(struct pci_dev *dev)
}
void __init pcibios_init_bus(struct pci_bus *bus)
{
struct pci_dev *dev = bus->self;
unsigned short bridge_ctl;
/* We deal only with pci controllers and pci-pci bridges. */
if (!dev || (dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
return;
/* PCI-PCI bridge - set the cache line and default latency
(32) for primary and secondary buses. */
pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 32);
pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bridge_ctl);
bridge_ctl |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR;
pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bridge_ctl);
}
/*
* pcibios align resources() is called every time generic PCI code
* wants to generate a new address. The process of looking for


@ -440,6 +440,7 @@
ENTRY_COMP(execveat)
ENTRY_SAME(membarrier)
ENTRY_SAME(userfaultfd)
ENTRY_SAME(mlock2) /* 345 */
.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))


@ -227,23 +227,15 @@
reg = <0x520 0x20>;
phy0: ethernet-phy@1f {
interrupt-parent = <&mpic>;
interrupts = <10 1>;
reg = <0x1f>;
};
phy1: ethernet-phy@0 {
interrupt-parent = <&mpic>;
interrupts = <10 1>;
reg = <0>;
};
phy2: ethernet-phy@1 {
interrupt-parent = <&mpic>;
interrupts = <10 1>;
reg = <1>;
};
phy3: ethernet-phy@2 {
interrupt-parent = <&mpic>;
interrupts = <10 1>;
reg = <2>;
};
tbi0: tbi-phy@11 {


@ -590,16 +590,10 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
eeh_ops->configure_bridge(pe);
eeh_pe_restore_bars(pe);
/*
* If it's PHB PE, the frozen state on all available PEs should have
* been cleared by the PHB reset. Otherwise, we unfreeze the PE and its
* child PEs because they might be in frozen state.
*/
if (!(pe->type & EEH_PE_PHB)) {
rc = eeh_clear_pe_frozen_state(pe, false);
if (rc)
return rc;
}
/* Clear frozen state */
rc = eeh_clear_pe_frozen_state(pe, false);
if (rc)
return rc;
/* Give the system 5 seconds to finish running the user-space
* hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,


@ -43,11 +43,34 @@ static unsigned int opal_irq_count;
static unsigned int *opal_irqs;
static void opal_handle_irq_work(struct irq_work *work);
static __be64 last_outstanding_events;
static u64 last_outstanding_events;
static struct irq_work opal_event_irq_work = {
.func = opal_handle_irq_work,
};
void opal_handle_events(uint64_t events)
{
int virq, hwirq = 0;
u64 mask = opal_event_irqchip.mask;
if (!in_irq() && (events & mask)) {
last_outstanding_events = events;
irq_work_queue(&opal_event_irq_work);
return;
}
while (events & mask) {
hwirq = fls64(events) - 1;
if (BIT_ULL(hwirq) & mask) {
virq = irq_find_mapping(opal_event_irqchip.domain,
hwirq);
if (virq)
generic_handle_irq(virq);
}
events &= ~BIT_ULL(hwirq);
}
}
static void opal_event_mask(struct irq_data *d)
{
clear_bit(d->hwirq, &opal_event_irqchip.mask);
@ -55,12 +78,12 @@ static void opal_event_mask(struct irq_data *d)
static void opal_event_unmask(struct irq_data *d)
{
__be64 events;
set_bit(d->hwirq, &opal_event_irqchip.mask);
opal_poll_events(&last_outstanding_events);
if (last_outstanding_events & opal_event_irqchip.mask)
/* Need to retrigger the interrupt */
irq_work_queue(&opal_event_irq_work);
opal_poll_events(&events);
opal_handle_events(be64_to_cpu(events));
}
static int opal_event_set_type(struct irq_data *d, unsigned int flow_type)
@ -96,29 +119,6 @@ static int opal_event_map(struct irq_domain *d, unsigned int irq,
return 0;
}
void opal_handle_events(uint64_t events)
{
int virq, hwirq = 0;
u64 mask = opal_event_irqchip.mask;
if (!in_irq() && (events & mask)) {
last_outstanding_events = events;
irq_work_queue(&opal_event_irq_work);
return;
}
while (events & mask) {
hwirq = fls64(events) - 1;
if (BIT_ULL(hwirq) & mask) {
virq = irq_find_mapping(opal_event_irqchip.domain,
hwirq);
if (virq)
generic_handle_irq(virq);
}
events &= ~BIT_ULL(hwirq);
}
}
static irqreturn_t opal_interrupt(int irq, void *data)
{
__be64 events;
@ -131,7 +131,7 @@ static irqreturn_t opal_interrupt(int irq, void *data)
static void opal_handle_irq_work(struct irq_work *work)
{
opal_handle_events(be64_to_cpu(last_outstanding_events));
opal_handle_events(last_outstanding_events);
}
static int opal_event_match(struct irq_domain *h, struct device_node *node,


@ -278,7 +278,7 @@
#define __NR_fsetxattr 256
#define __NR_getxattr 257
#define __NR_lgetxattr 258
#define __NR_fgetxattr 269
#define __NR_fgetxattr 259
#define __NR_listxattr 260
#define __NR_llistxattr 261
#define __NR_flistxattr 262


@ -10,7 +10,7 @@
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
* Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
*
* ppc:


@ -9,7 +9,7 @@
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
*/
#include <linux/perf_event.h>


@ -21,7 +21,7 @@
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
* Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
* Copyright (C) 2009 Google, Inc., Stephane Eranian
*/


@ -131,7 +131,7 @@ export LDS_ELF_FORMAT := $(ELF_FORMAT)
# The wrappers will select whether using "malloc" or the kernel allocator.
LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc
LD_FLAGS_CMDLINE = $(foreach opt,$(LDFLAGS),-Wl,$(opt)) -lrt
LD_FLAGS_CMDLINE = $(foreach opt,$(LDFLAGS),-Wl,$(opt))
# Used by link-vmlinux.sh which has special support for um link
export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE)


@ -249,21 +249,23 @@ void close_addr(unsigned char *addr, unsigned char *netmask, void *arg)
char *split_if_spec(char *str, ...)
{
char **arg, *end;
char **arg, *end, *ret = NULL;
va_list ap;
va_start(ap, str);
while ((arg = va_arg(ap, char **)) != NULL) {
if (*str == '\0')
return NULL;
goto out;
end = strchr(str, ',');
if (end != str)
*arg = str;
if (end == NULL)
return NULL;
goto out;
*end++ = '\0';
str = end;
}
ret = str;
out:
va_end(ap);
return str;
return ret;
}


@ -69,7 +69,7 @@ void do_signal(struct pt_regs *regs)
struct ksignal ksig;
int handled_sig = 0;
while (get_signal(&ksig)) {
if (get_signal(&ksig)) {
handled_sig = 1;
/* Whee! Actually deliver the signal. */
handle_signal(&ksig, regs);


@ -23,7 +23,6 @@
#include <stdarg.h>
#include <linux/types.h>
#include <linux/edd.h>
#include <asm/boot.h>
#include <asm/setup.h>
#include "bitops.h"
#include "ctype.h"


@ -19,6 +19,8 @@
#include "video.h"
#include "vesa.h"
#include <uapi/asm/boot.h>
/*
* Common variables
*/


@ -13,6 +13,8 @@
* Select video mode
*/
#include <uapi/asm/boot.h>
#include "boot.h"
#include "video.h"
#include "vesa.h"


@ -509,6 +509,17 @@ END(irq_entries_start)
* tracking that we're in kernel mode.
*/
SWAPGS
/*
* We need to tell lockdep that IRQs are off. We can't do this until
* we fix gsbase, and we should do it before enter_from_user_mode
* (which can take locks). Since TRACE_IRQS_OFF is idempotent,
* the simplest way to handle it is to just call it twice if
* we enter from user mode. There's no reason to optimize this since
* TRACE_IRQS_OFF is a no-op if lockdep is off.
*/
TRACE_IRQS_OFF
#ifdef CONFIG_CONTEXT_TRACKING
call enter_from_user_mode
#endif
@ -1049,12 +1060,18 @@ ENTRY(error_entry)
SWAPGS
.Lerror_entry_from_usermode_after_swapgs:
/*
* We need to tell lockdep that IRQs are off. We can't do this until
* we fix gsbase, and we should do it before enter_from_user_mode
* (which can take locks).
*/
TRACE_IRQS_OFF
#ifdef CONFIG_CONTEXT_TRACKING
call enter_from_user_mode
#endif
ret
.Lerror_entry_done:
TRACE_IRQS_OFF
ret


@ -9,20 +9,22 @@
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#define __PHYSICAL_MASK ((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1))
#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
/* Cast PAGE_MASK to a signed type so that it is sign-extended if
virtual addresses are 32-bits but physical addresses are larger
(ie, 32-bit PAE). */
#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
#define __PHYSICAL_MASK ((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1))
#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
/* Cast *PAGE_MASK to a signed type so that it is sign-extended if
virtual addresses are 32-bits but physical addresses are larger
(ie, 32-bit PAE). */
#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
#define PHYSICAL_PMD_PAGE_MASK (((signed long)PMD_PAGE_MASK) & __PHYSICAL_MASK)
#define PHYSICAL_PUD_PAGE_MASK (((signed long)PUD_PAGE_MASK) & __PHYSICAL_MASK)
#define HPAGE_SHIFT PMD_SHIFT
#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))


@ -279,17 +279,14 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
static inline pudval_t pud_pfn_mask(pud_t pud)
{
if (native_pud_val(pud) & _PAGE_PSE)
return PUD_PAGE_MASK & PHYSICAL_PAGE_MASK;
return PHYSICAL_PUD_PAGE_MASK;
else
return PTE_PFN_MASK;
}
static inline pudval_t pud_flags_mask(pud_t pud)
{
if (native_pud_val(pud) & _PAGE_PSE)
return ~(PUD_PAGE_MASK & (pudval_t)PHYSICAL_PAGE_MASK);
else
return ~PTE_PFN_MASK;
return ~pud_pfn_mask(pud);
}
static inline pudval_t pud_flags(pud_t pud)
@ -300,17 +297,14 @@ static inline pudval_t pud_flags(pud_t pud)
static inline pmdval_t pmd_pfn_mask(pmd_t pmd)
{
if (native_pmd_val(pmd) & _PAGE_PSE)
return PMD_PAGE_MASK & PHYSICAL_PAGE_MASK;
return PHYSICAL_PMD_PAGE_MASK;
else
return PTE_PFN_MASK;
}
static inline pmdval_t pmd_flags_mask(pmd_t pmd)
{
if (native_pmd_val(pmd) & _PAGE_PSE)
return ~(PMD_PAGE_MASK & (pmdval_t)PHYSICAL_PAGE_MASK);
else
return ~PTE_PFN_MASK;
return ~pmd_pfn_mask(pmd);
}
static inline pmdval_t pmd_flags(pmd_t pmd)


@ -1,7 +1,6 @@
#ifndef _ASM_X86_PLATFORM_H
#define _ASM_X86_PLATFORM_H
#include <asm/pgtable_types.h>
#include <asm/bootparam.h>
struct mpc_bus;


@ -698,3 +698,4 @@ int __init microcode_init(void)
return error;
}
late_initcall(microcode_init);


@ -5,7 +5,7 @@
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
* Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
* Copyright (C) 2009 Google, Inc., Stephane Eranian
*


@ -5,7 +5,7 @@
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
* Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
* Copyright (C) 2009 Google, Inc., Stephane Eranian
*
@ -387,7 +387,7 @@ struct cpu_hw_events {
/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
__EVENT_CONSTRAINT(code, n, \
INTEL_ARCH_EVENT_MASK|INTEL_ARCH_EVENT_MASK, \
INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)
@ -627,6 +627,7 @@ struct x86_perf_task_context {
u64 lbr_from[MAX_LBR_ENTRIES];
u64 lbr_to[MAX_LBR_ENTRIES];
u64 lbr_info[MAX_LBR_ENTRIES];
int tos;
int lbr_callstack_users;
int lbr_stack_state;
};


@ -232,7 +232,7 @@ static struct event_constraint intel_hsw_event_constraints[] = {
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.* */
INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */


@ -298,7 +298,7 @@ static bool __match_event(struct perf_event *a, struct perf_event *b)
static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event)
{
if (event->attach_state & PERF_ATTACH_TASK)
return perf_cgroup_from_task(event->hw.target);
return perf_cgroup_from_task(event->hw.target, event->ctx);
return event->cgrp;
}


@ -239,7 +239,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
}
mask = x86_pmu.lbr_nr - 1;
tos = intel_pmu_lbr_tos();
tos = task_ctx->tos;
for (i = 0; i < tos; i++) {
lbr_idx = (tos - i) & mask;
wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
@ -247,6 +247,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
}
wrmsrl(x86_pmu.lbr_tos, tos);
task_ctx->lbr_stack_state = LBR_NONE;
}
@ -270,6 +271,7 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
}
task_ctx->tos = tos;
task_ctx->lbr_stack_state = LBR_VALID;
}


@ -1,7 +1,7 @@
/*
* x86 specific code for irq_work
*
* Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
*/
#include <linux/kernel.h>


@ -4,10 +4,22 @@
*/
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/ioport.h>
static int found(u64 start, u64 end, void *data)
{
return 1;
}
static __init int register_e820_pmem(void)
{
char *pmem = "Persistent Memory (legacy)";
struct platform_device *pdev;
int rc;
rc = walk_iomem_res(pmem, IORESOURCE_MEM, 0, -1, NULL, found);
if (rc <= 0)
return 0;
/*
* See drivers/nvdimm/e820.c for the implementation, this is


@ -1250,8 +1250,6 @@ void __init setup_arch(char **cmdline_p)
if (efi_enabled(EFI_BOOT))
efi_apply_memmap_quirks();
#endif
microcode_init();
}
#ifdef CONFIG_X86_32


@ -690,12 +690,15 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
signal_setup_done(failed, ksig, stepping);
}
#ifdef CONFIG_X86_32
#define NR_restart_syscall __NR_restart_syscall
#else /* !CONFIG_X86_32 */
#define NR_restart_syscall \
test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall
#endif /* CONFIG_X86_32 */
static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
{
#if defined(CONFIG_X86_32) || !defined(CONFIG_X86_64)
return __NR_restart_syscall;
#else /* !CONFIG_X86_32 && CONFIG_X86_64 */
return test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall :
__NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
#endif /* CONFIG_X86_32 || !CONFIG_X86_64 */
}
/*
* Note that 'init' is a special process: it doesn't get signals it doesn't
@ -724,7 +727,7 @@ void do_signal(struct pt_regs *regs)
break;
case -ERESTART_RESTARTBLOCK:
regs->ax = NR_restart_syscall;
regs->ax = get_nr_restart_syscall(regs);
regs->ip -= 2;
break;
}


@ -509,7 +509,7 @@ void __inquire_remote_apic(int apicid)
*/
#define UDELAY_10MS_DEFAULT 10000
static unsigned int init_udelay = INT_MAX;
static unsigned int init_udelay = UINT_MAX;
static int __init cpu_init_udelay(char *str)
{
@ -522,14 +522,15 @@ early_param("cpu_init_udelay", cpu_init_udelay);
static void __init smp_quirk_init_udelay(void)
{
/* if cmdline changed it from default, leave it alone */
if (init_udelay != INT_MAX)
if (init_udelay != UINT_MAX)
return;
/* if modern processor, use no delay */
if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF)))
((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
init_udelay = 0;
return;
}
/* else, use legacy delay */
init_udelay = UDELAY_10MS_DEFAULT;
}


@ -101,19 +101,19 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
switch (type) {
case REG_TYPE_RM:
regno = X86_MODRM_RM(insn->modrm.value);
if (X86_REX_B(insn->rex_prefix.value) == 1)
if (X86_REX_B(insn->rex_prefix.value))
regno += 8;
break;
case REG_TYPE_INDEX:
regno = X86_SIB_INDEX(insn->sib.value);
if (X86_REX_X(insn->rex_prefix.value) == 1)
if (X86_REX_X(insn->rex_prefix.value))
regno += 8;
break;
case REG_TYPE_BASE:
regno = X86_SIB_BASE(insn->sib.value);
if (X86_REX_B(insn->rex_prefix.value) == 1)
if (X86_REX_B(insn->rex_prefix.value))
regno += 8;
break;


@ -50,18 +50,9 @@ void x86_pci_root_bus_resources(int bus, struct list_head *resources)
if (!found)
pci_add_resource(resources, &info->busn);
list_for_each_entry(root_res, &info->resources, list) {
struct resource *res;
struct resource *root;
list_for_each_entry(root_res, &info->resources, list)
pci_add_resource(resources, &root_res->res);
res = &root_res->res;
pci_add_resource(resources, res);
if (res->flags & IORESOURCE_IO)
root = &ioport_resource;
else
root = &iomem_resource;
insert_resource(root, res);
}
return;
default_resources:


@ -211,7 +211,7 @@ static int copy_sc_from_user(struct pt_regs *regs,
if (err)
return 1;
err = convert_fxsr_from_user(&fpx, sc.fpstate);
err = convert_fxsr_from_user(&fpx, (void *)sc.fpstate);
if (err)
return 1;
@ -227,7 +227,7 @@ static int copy_sc_from_user(struct pt_regs *regs,
{
struct user_i387_struct fp;
err = copy_from_user(&fp, sc.fpstate,
err = copy_from_user(&fp, (void *)sc.fpstate,
sizeof(struct user_i387_struct));
if (err)
return 1;
@ -291,7 +291,7 @@ static int copy_sc_to_user(struct sigcontext __user *to,
#endif
#undef PUTREG
sc.oldmask = mask;
sc.fpstate = to_fp;
sc.fpstate = (unsigned long)to_fp;
err = copy_to_user(to, &sc, sizeof(struct sigcontext));
if (err)
@ -468,12 +468,10 @@ long sys_sigreturn(void)
struct sigframe __user *frame = (struct sigframe __user *)(sp - 8);
sigset_t set;
struct sigcontext __user *sc = &frame->sc;
unsigned long __user *oldmask = &sc->oldmask;
unsigned long __user *extramask = frame->extramask;
int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long);
if (copy_from_user(&set.sig[0], oldmask, sizeof(set.sig[0])) ||
copy_from_user(&set.sig[1], extramask, sig_size))
if (copy_from_user(&set.sig[0], (void *)sc->oldmask, sizeof(set.sig[0])) ||
copy_from_user(&set.sig[1], frame->extramask, sig_size))
goto segfault;
set_current_blocked(&set);
@ -505,6 +503,7 @@ int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
{
struct rt_sigframe __user *frame;
int err = 0, sig = ksig->sig;
unsigned long fp_to;
frame = (struct rt_sigframe __user *)
round_down(stack_top - sizeof(struct rt_sigframe), 16);
@ -526,7 +525,10 @@ int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
err |= __save_altstack(&frame->uc.uc_stack, PT_REGS_SP(regs));
err |= copy_sc_to_user(&frame->uc.uc_mcontext, &frame->fpstate, regs,
set->sig[0]);
err |= __put_user(&frame->fpstate, &frame->uc.uc_mcontext.fpstate);
fp_to = (unsigned long)&frame->fpstate;
err |= __put_user(fp_to, &frame->uc.uc_mcontext.fpstate);
if (sizeof(*set) == 16) {
err |= __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
err |= __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);

View File

@ -1127,15 +1127,15 @@ void blkcg_exit_queue(struct request_queue *q)
* of the main cic data structures. For now we allow a task to change
* its cgroup only if it's the only owner of its ioc.
*/
static int blkcg_can_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
static int blkcg_can_attach(struct cgroup_taskset *tset)
{
struct task_struct *task;
struct cgroup_subsys_state *dst_css;
struct io_context *ioc;
int ret = 0;
/* task_lock() is needed to avoid races with exit_io_context() */
cgroup_taskset_for_each(task, tset) {
cgroup_taskset_for_each(task, dst_css, tset) {
task_lock(task);
ioc = task->io_context;
if (ioc && atomic_read(&ioc->nr_tasks) > 1)
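The blkcg hunk above moves blkcg_can_attach() to the taskset-only calling convention, with the destination css supplied by the iterator instead of being passed in. As a rough, hedged sketch of what a controller callback looks like under that convention — the controller name, the PF_KTHREAD check, and the error choice are illustrative assumptions, not taken from this diff:
/* Minimal sketch of a can_attach callback using the taskset-only
 * signature shown above; names and the rejection rule are hypothetical. */
static int example_can_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *dst_css;

	cgroup_taskset_for_each(task, dst_css, tset) {
		/* dst_css is the css each task is migrating to; returning a
		 * negative errno here vetoes the whole migration. */
		if (task->flags & PF_KTHREAD)
			return -EINVAL;
	}
	return 0;
}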

View File

@ -2114,7 +2114,8 @@ blk_qc_t submit_bio(int rw, struct bio *bio)
EXPORT_SYMBOL(submit_bio);
/**
* blk_rq_check_limits - Helper function to check a request for the queue limit
* blk_cloned_rq_check_limits - Helper function to check a cloned request
* for the new queue limits
* @q: the queue
* @rq: the request being checked
*
@ -2125,20 +2126,13 @@ EXPORT_SYMBOL(submit_bio);
* after it is inserted to @q, it should be checked against @q before
* the insertion using this generic function.
*
* This function should also be useful for request stacking drivers
* in some cases below, so export this function.
* Request stacking drivers like request-based dm may change the queue
* limits while requests are in the queue (e.g. dm's table swapping).
* Such request stacking drivers should check those requests against
* the new queue limits again when they dispatch those requests,
* although such checkings are also done against the old queue limits
* when submitting requests.
* limits when retrying requests on other queues. Those requests need
* to be checked against the new queue limits again during dispatch.
*/
int blk_rq_check_limits(struct request_queue *q, struct request *rq)
static int blk_cloned_rq_check_limits(struct request_queue *q,
struct request *rq)
{
if (!rq_mergeable(rq))
return 0;
if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) {
printk(KERN_ERR "%s: over max size limit.\n", __func__);
return -EIO;
@ -2158,7 +2152,6 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq)
return 0;
}
EXPORT_SYMBOL_GPL(blk_rq_check_limits);
/**
* blk_insert_cloned_request - Helper for stacking drivers to submit a request
@ -2170,7 +2163,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
unsigned long flags;
int where = ELEVATOR_INSERT_BACK;
if (blk_rq_check_limits(q, rq))
if (blk_cloned_rq_check_limits(q, rq))
return -EIO;
if (rq->rq_disk &&
@ -3412,6 +3405,9 @@ int blk_pre_runtime_suspend(struct request_queue *q)
{
int ret = 0;
if (!q->dev)
return ret;
spin_lock_irq(q->queue_lock);
if (q->nr_pending) {
ret = -EBUSY;
@ -3439,6 +3435,9 @@ EXPORT_SYMBOL(blk_pre_runtime_suspend);
*/
void blk_post_runtime_suspend(struct request_queue *q, int err)
{
if (!q->dev)
return;
spin_lock_irq(q->queue_lock);
if (!err) {
q->rpm_status = RPM_SUSPENDED;
@ -3463,6 +3462,9 @@ EXPORT_SYMBOL(blk_post_runtime_suspend);
*/
void blk_pre_runtime_resume(struct request_queue *q)
{
if (!q->dev)
return;
spin_lock_irq(q->queue_lock);
q->rpm_status = RPM_RESUMING;
spin_unlock_irq(q->queue_lock);
@ -3485,6 +3487,9 @@ EXPORT_SYMBOL(blk_pre_runtime_resume);
*/
void blk_post_runtime_resume(struct request_queue *q, int err)
{
if (!q->dev)
return;
spin_lock_irq(q->queue_lock);
if (!err) {
q->rpm_status = RPM_ACTIVE;
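Per the kerneldoc rewritten above, the limit re-check for cloned requests now happens inside blk_insert_cloned_request() rather than through the previously exported blk_rq_check_limits(). A hedged sketch of how a request-stacking driver might rely on that — the function and queue names are hypothetical; only the blk_insert_cloned_request() call and its -EIO failure mode come from the hunk:
/* Sketch: dispatching a cloned request to an underlying queue.  The
 * check against that queue's current limits is done internally by
 * blk_insert_cloned_request(), which fails with -EIO if the clone no
 * longer fits (e.g. after a dm table swap shrank the limits). */
static int example_dispatch_clone(struct request_queue *underlying_q,
				  struct request *clone)
{
	int ret = blk_insert_cloned_request(underlying_q, clone);

	if (ret)
		/* clone exceeds the queue's current limits; the stacking
		 * driver can fail the request or requeue it elsewhere */
		return ret;
	return 0;
}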

View File

@ -103,6 +103,9 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
bvprv = bv;
bvprvp = &bvprv;
sectors += bv.bv_len >> 9;
if (nsegs == 1 && seg_size > front_seg_size)
front_seg_size = seg_size;
continue;
}
new_segment:

View File

@ -91,7 +91,8 @@ void blk_set_default_limits(struct queue_limits *lim)
lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
lim->virt_boundary_mask = 0;
lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
lim->max_sectors = lim->max_dev_sectors = lim->max_hw_sectors =
BLK_SAFE_MAX_SECTORS;
lim->chunk_sectors = 0;
lim->max_write_same_sectors = 0;
lim->max_discard_sectors = 0;
@ -127,6 +128,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
lim->max_hw_sectors = UINT_MAX;
lim->max_segment_size = UINT_MAX;
lim->max_sectors = UINT_MAX;
lim->max_dev_sectors = UINT_MAX;
lim->max_write_same_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
@ -214,8 +216,8 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
EXPORT_SYMBOL(blk_queue_bounce_limit);
/**
* blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
* @limits: the queue limits
* blk_queue_max_hw_sectors - set max sectors for a request for this queue
* @q: the request queue for the device
* @max_hw_sectors: max hardware sectors in the usual 512b unit
*
* Description:
@ -224,13 +226,19 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
* the device driver based upon the capabilities of the I/O
* controller.
*
* max_dev_sectors is a hard limit imposed by the storage device for
* READ/WRITE requests. It is set by the disk driver.
*
* max_sectors is a soft limit imposed by the block layer for
* filesystem type requests. This value can be overridden on a
* per-device basis in /sys/block/<device>/queue/max_sectors_kb.
* The soft limit can not exceed max_hw_sectors.
**/
void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
struct queue_limits *limits = &q->limits;
unsigned int max_sectors;
if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
printk(KERN_INFO "%s: set to minimum %d\n",
@ -238,22 +246,9 @@ void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_
}
limits->max_hw_sectors = max_hw_sectors;
limits->max_sectors = min_t(unsigned int, max_hw_sectors,
BLK_DEF_MAX_SECTORS);
}
EXPORT_SYMBOL(blk_limits_max_hw_sectors);
/**
* blk_queue_max_hw_sectors - set max sectors for a request for this queue
* @q: the request queue for the device
* @max_hw_sectors: max hardware sectors in the usual 512b unit
*
* Description:
* See description for blk_limits_max_hw_sectors().
**/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
limits->max_sectors = max_sectors;
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
@ -527,6 +522,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
t->max_write_same_sectors = min(t->max_write_same_sectors,
b->max_write_same_sectors);
t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
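The comment block above distinguishes the three transfer-size limits: max_hw_sectors (controller ceiling, set by the driver), max_dev_sectors (device-imposed ceiling, set by the disk driver), and max_sectors (the soft limit used for filesystem I/O, derived from the other two and overridable via /sys/block/<device>/queue/max_sectors_kb). A minimal sketch of the driver-facing side — the probe helper and the 32768-sector value are illustrative assumptions; only blk_queue_max_hw_sectors() itself comes from this file:
/* Sketch: a low-level driver reporting its controller ceiling at probe
 * time.  With the change above, max_sectors is then derived as
 * min_not_zero(max_hw_sectors, max_dev_sectors) clamped to
 * BLK_DEF_MAX_SECTORS; users may lower it further through sysfs. */
static void example_report_limits(struct request_queue *q)
{
	blk_queue_max_hw_sectors(q, 32768);	/* 16 MiB, arbitrary example */
}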

View File

@ -205,6 +205,9 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
if (ret < 0)
return ret;
max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
q->limits.max_dev_sectors >> 1);
if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
return -EINVAL;

View File

@ -397,7 +397,7 @@ static int drop_partitions(struct gendisk *disk, struct block_device *bdev)
struct hd_struct *part;
int res;
if (bdev->bd_part_count)
if (bdev->bd_part_count || bdev->bd_super)
return -EBUSY;
res = invalidate_partition(disk, 0);
if (res)

View File

@ -125,7 +125,7 @@ static int aead_wait_for_data(struct sock *sk, unsigned flags)
if (flags & MSG_DONTWAIT)
return -EAGAIN;
set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
for (;;) {
if (signal_pending(current))
@ -139,7 +139,7 @@ static int aead_wait_for_data(struct sock *sk, unsigned flags)
}
finish_wait(sk_sleep(sk), &wait);
clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
return err;
}

View File

@ -212,7 +212,7 @@ static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
if (flags & MSG_DONTWAIT)
return -EAGAIN;
set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
for (;;) {
if (signal_pending(current))
@ -258,7 +258,7 @@ static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
return -EAGAIN;
}
set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
for (;;) {
if (signal_pending(current))
@ -272,7 +272,7 @@ static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
}
finish_wait(sk_sleep(sk), &wait);
clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
return err;
}

View File

@ -58,10 +58,10 @@ config ACPI_CCA_REQUIRED
bool
config ACPI_DEBUGGER
bool "In-kernel debugger (EXPERIMENTAL)"
bool "AML debugger interface (EXPERIMENTAL)"
select ACPI_DEBUG
help
Enable in-kernel debugging facilities: statistics, internal
Enable in-kernel debugging of AML facilities: statistics, internal
object dump, single step control method execution.
This is still under development; currently, enabling this only
results in the compilation of the ACPICA debugger files.

View File

@ -233,11 +233,12 @@ static bool add_spa(struct acpi_nfit_desc *acpi_desc,
struct nfit_table_prev *prev,
struct acpi_nfit_system_address *spa)
{
size_t length = min_t(size_t, sizeof(*spa), spa->header.length);
struct device *dev = acpi_desc->dev;
struct nfit_spa *nfit_spa;
list_for_each_entry(nfit_spa, &prev->spas, list) {
if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
if (memcmp(nfit_spa->spa, spa, length) == 0) {
list_move_tail(&nfit_spa->list, &acpi_desc->spas);
return true;
}
@ -259,11 +260,12 @@ static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
struct nfit_table_prev *prev,
struct acpi_nfit_memory_map *memdev)
{
size_t length = min_t(size_t, sizeof(*memdev), memdev->header.length);
struct device *dev = acpi_desc->dev;
struct nfit_memdev *nfit_memdev;
list_for_each_entry(nfit_memdev, &prev->memdevs, list)
if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
if (memcmp(nfit_memdev->memdev, memdev, length) == 0) {
list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
return true;
}
@ -284,11 +286,12 @@ static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
struct nfit_table_prev *prev,
struct acpi_nfit_control_region *dcr)
{
size_t length = min_t(size_t, sizeof(*dcr), dcr->header.length);
struct device *dev = acpi_desc->dev;
struct nfit_dcr *nfit_dcr;
list_for_each_entry(nfit_dcr, &prev->dcrs, list)
if (memcmp(nfit_dcr->dcr, dcr, sizeof(*dcr)) == 0) {
if (memcmp(nfit_dcr->dcr, dcr, length) == 0) {
list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
return true;
}
@ -308,11 +311,12 @@ static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
struct nfit_table_prev *prev,
struct acpi_nfit_data_region *bdw)
{
size_t length = min_t(size_t, sizeof(*bdw), bdw->header.length);
struct device *dev = acpi_desc->dev;
struct nfit_bdw *nfit_bdw;
list_for_each_entry(nfit_bdw, &prev->bdws, list)
if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
if (memcmp(nfit_bdw->bdw, bdw, length) == 0) {
list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
return true;
}
@ -332,11 +336,12 @@ static bool add_idt(struct acpi_nfit_desc *acpi_desc,
struct nfit_table_prev *prev,
struct acpi_nfit_interleave *idt)
{
size_t length = min_t(size_t, sizeof(*idt), idt->header.length);
struct device *dev = acpi_desc->dev;
struct nfit_idt *nfit_idt;
list_for_each_entry(nfit_idt, &prev->idts, list)
if (memcmp(nfit_idt->idt, idt, sizeof(*idt)) == 0) {
if (memcmp(nfit_idt->idt, idt, length) == 0) {
list_move_tail(&nfit_idt->list, &acpi_desc->idts);
return true;
}
@ -356,11 +361,12 @@ static bool add_flush(struct acpi_nfit_desc *acpi_desc,
struct nfit_table_prev *prev,
struct acpi_nfit_flush_address *flush)
{
size_t length = min_t(size_t, sizeof(*flush), flush->header.length);
struct device *dev = acpi_desc->dev;
struct nfit_flush *nfit_flush;
list_for_each_entry(nfit_flush, &prev->flushes, list)
if (memcmp(nfit_flush->flush, flush, sizeof(*flush)) == 0) {
if (memcmp(nfit_flush->flush, flush, length) == 0) {
list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
return true;
}
@ -655,7 +661,7 @@ static ssize_t revision_show(struct device *dev,
struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
return sprintf(buf, "%d\n", acpi_desc->nfit->header.revision);
return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);
@ -1652,7 +1658,6 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
data = (u8 *) acpi_desc->nfit;
end = data + sz;
data += sizeof(struct acpi_table_nfit);
while (!IS_ERR_OR_NULL(data))
data = add_table(acpi_desc, &prev, data, end);
@ -1748,13 +1753,29 @@ static int acpi_nfit_add(struct acpi_device *adev)
return PTR_ERR(acpi_desc);
}
acpi_desc->nfit = (struct acpi_table_nfit *) tbl;
/*
* Save the acpi header for later and then skip it,
* making nfit point to the first nfit table header.
*/
acpi_desc->acpi_header = *tbl;
acpi_desc->nfit = (void *) tbl + sizeof(struct acpi_table_nfit);
sz -= sizeof(struct acpi_table_nfit);
/* Evaluate _FIT and override with that if present */
status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
if (ACPI_SUCCESS(status) && buf.length > 0) {
acpi_desc->nfit = (struct acpi_table_nfit *)buf.pointer;
sz = buf.length;
union acpi_object *obj;
/*
* Adjust for the acpi_object header of the _FIT
*/
obj = buf.pointer;
if (obj->type == ACPI_TYPE_BUFFER) {
acpi_desc->nfit =
(struct acpi_nfit_header *)obj->buffer.pointer;
sz = obj->buffer.length;
} else
dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
__func__, (int) obj->type);
}
rc = acpi_nfit_init(acpi_desc, sz);
@ -1777,7 +1798,8 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_table_nfit *nfit_saved;
struct acpi_nfit_header *nfit_saved;
union acpi_object *obj;
struct device *dev = &adev->dev;
acpi_status status;
int ret;
@ -1808,12 +1830,19 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
}
nfit_saved = acpi_desc->nfit;
acpi_desc->nfit = (struct acpi_table_nfit *)buf.pointer;
ret = acpi_nfit_init(acpi_desc, buf.length);
if (!ret) {
/* Merge failed, restore old nfit, and exit */
acpi_desc->nfit = nfit_saved;
dev_err(dev, "failed to merge updated NFIT\n");
obj = buf.pointer;
if (obj->type == ACPI_TYPE_BUFFER) {
acpi_desc->nfit =
(struct acpi_nfit_header *)obj->buffer.pointer;
ret = acpi_nfit_init(acpi_desc, obj->buffer.length);
if (ret) {
/* Merge failed, restore old nfit, and exit */
acpi_desc->nfit = nfit_saved;
dev_err(dev, "failed to merge updated NFIT\n");
}
} else {
/* Bad _FIT, restore old nfit */
dev_err(dev, "Invalid _FIT\n");
}
kfree(buf.pointer);

View File

@ -96,7 +96,8 @@ struct nfit_mem {
struct acpi_nfit_desc {
struct nvdimm_bus_descriptor nd_desc;
struct acpi_table_nfit *nfit;
struct acpi_table_header acpi_header;
struct acpi_nfit_header *nfit;
struct mutex spa_map_mutex;
struct mutex init_mutex;
struct list_head spa_maps;

View File

@ -768,6 +768,13 @@ static void pci_acpi_root_add_resources(struct acpi_pci_root_info *info)
else
continue;
/*
* Some legacy x86 host bridge drivers use iomem_resource and
* ioport_resource as default resource pool, skip it.
*/
if (res == root)
continue;
conflict = insert_resource_conflict(root, res);
if (conflict) {
dev_info(&info->bridge->dev,

View File

@ -314,16 +314,6 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
@ -350,10 +340,22 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */
{ PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
{ PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
{ PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */
{ PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
{ PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,

View File

@ -62,6 +62,7 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
}
#ifdef CONFIG_PM_SLEEP
static int ahci_mvebu_suspend(struct platform_device *pdev, pm_message_t state)
{
return ahci_platform_suspend_host(&pdev->dev);
@ -81,6 +82,10 @@ static int ahci_mvebu_resume(struct platform_device *pdev)
return ahci_platform_resume_host(&pdev->dev);
}
#else
#define ahci_mvebu_suspend NULL
#define ahci_mvebu_resume NULL
#endif
static const struct ata_port_info ahci_mvebu_port_info = {
.flags = AHCI_FLAG_COMMON,

View File

@ -1273,6 +1273,15 @@ static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
ata_tf_to_fis(tf, pmp, is_cmd, fis);
ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
/* set port value for softreset of Port Multiplier */
if (pp->fbs_enabled && pp->fbs_last_dev != pmp) {
tmp = readl(port_mmio + PORT_FBS);
tmp &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
tmp |= pmp << PORT_FBS_DEV_OFFSET;
writel(tmp, port_mmio + PORT_FBS);
pp->fbs_last_dev = pmp;
}
/* issue & wait */
writel(1, port_mmio + PORT_CMD_ISSUE);

View File

@ -1505,12 +1505,20 @@ static const char *ata_err_string(unsigned int err_mask)
unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
u8 page, void *buf, unsigned int sectors)
{
unsigned long ap_flags = dev->link->ap->flags;
struct ata_taskfile tf;
unsigned int err_mask;
bool dma = false;
DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
/*
* Return error without actually issuing the command on controllers
* which e.g. lock up on a read log page.
*/
if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
return AC_ERR_DEV;
retry:
ata_tf_init(dev, &tf);
if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&

View File

@ -45,7 +45,8 @@ enum {
SATA_FSL_MAX_PRD_DIRECT = 16, /* Direct PRDT entries */
SATA_FSL_HOST_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
ATA_FLAG_PMP | ATA_FLAG_NCQ | ATA_FLAG_AN),
ATA_FLAG_PMP | ATA_FLAG_NCQ |
ATA_FLAG_AN | ATA_FLAG_NO_LOG_PAGE),
SATA_FSL_MAX_CMDS = SATA_FSL_QUEUE_DEPTH,
SATA_FSL_CMD_HDR_SIZE = 16, /* 4 DWORDS */

View File

@ -630,6 +630,9 @@ static void sil_dev_config(struct ata_device *dev)
unsigned int n, quirks = 0;
unsigned char model_num[ATA_ID_PROD_LEN + 1];
/* This controller doesn't support trim */
dev->horkage |= ATA_HORKAGE_NOTRIM;
ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
for (n = 0; sil_blacklist[n].product; n++)

View File

@ -303,6 +303,10 @@ static int memory_subsys_offline(struct device *dev)
if (mem->state == MEM_OFFLINE)
return 0;
/* Can't offline block with non-present sections */
if (mem->section_count != sections_per_block)
return -EINVAL;
return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
}

View File

@ -1775,10 +1775,10 @@ int genpd_dev_pm_attach(struct device *dev)
}
pd = of_genpd_get_from_provider(&pd_args);
of_node_put(pd_args.np);
if (IS_ERR(pd)) {
dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
__func__, PTR_ERR(pd));
of_node_put(dev->of_node);
return -EPROBE_DEFER;
}
@ -1796,7 +1796,6 @@ int genpd_dev_pm_attach(struct device *dev)
if (ret < 0) {
dev_err(dev, "failed to add to PM domain %s: %d",
pd->name, ret);
of_node_put(dev->of_node);
goto out;
}

View File

@ -160,9 +160,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
struct gpd_timing_data *td;
s64 constraint_ns;
if (!pdd->dev->driver)
continue;
/*
* Check if the device is allowed to be off long enough for the
* domain to turn off and on (that's how much time it will

View File

@ -18,6 +18,7 @@ struct nullb_cmd {
struct bio *bio;
unsigned int tag;
struct nullb_queue *nq;
struct hrtimer timer;
};
struct nullb_queue {
@ -49,17 +50,6 @@ static int null_major;
static int nullb_indexes;
static struct kmem_cache *ppa_cache;
struct completion_queue {
struct llist_head list;
struct hrtimer timer;
};
/*
* These are per-cpu for now, they will need to be configured by the
* complete_queues parameter and appropriately mapped.
*/
static DEFINE_PER_CPU(struct completion_queue, completion_queues);
enum {
NULL_IRQ_NONE = 0,
NULL_IRQ_SOFTIRQ = 1,
@ -142,8 +132,8 @@ static const struct kernel_param_ops null_irqmode_param_ops = {
device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");
static int completion_nsec = 10000;
module_param(completion_nsec, int, S_IRUGO);
static unsigned long completion_nsec = 10000;
module_param(completion_nsec, ulong, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");
static int hw_queue_depth = 64;
@ -180,6 +170,8 @@ static void free_cmd(struct nullb_cmd *cmd)
put_tag(cmd->nq, cmd->tag);
}
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);
static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
struct nullb_cmd *cmd;
@ -190,6 +182,11 @@ static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
cmd = &nq->cmds[tag];
cmd->tag = tag;
cmd->nq = nq;
if (irqmode == NULL_IRQ_TIMER) {
hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
cmd->timer.function = null_cmd_timer_expired;
}
return cmd;
}
@ -220,6 +217,8 @@ static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
static void end_cmd(struct nullb_cmd *cmd)
{
struct request_queue *q = NULL;
switch (queue_mode) {
case NULL_Q_MQ:
blk_mq_end_request(cmd->rq, 0);
@ -230,55 +229,37 @@ static void end_cmd(struct nullb_cmd *cmd)
break;
case NULL_Q_BIO:
bio_endio(cmd->bio);
break;
goto free_cmd;
}
if (cmd->rq)
q = cmd->rq->q;
/* Restart queue if needed, as we are freeing a tag */
if (q && !q->mq_ops && blk_queue_stopped(q)) {
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
if (blk_queue_stopped(q))
blk_start_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
free_cmd:
free_cmd(cmd);
}
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
struct completion_queue *cq;
struct llist_node *entry;
struct nullb_cmd *cmd;
cq = &per_cpu(completion_queues, smp_processor_id());
while ((entry = llist_del_all(&cq->list)) != NULL) {
entry = llist_reverse_order(entry);
do {
struct request_queue *q = NULL;
cmd = container_of(entry, struct nullb_cmd, ll_list);
entry = entry->next;
if (cmd->rq)
q = cmd->rq->q;
end_cmd(cmd);
if (q && !q->mq_ops && blk_queue_stopped(q)) {
spin_lock(q->queue_lock);
if (blk_queue_stopped(q))
blk_start_queue(q);
spin_unlock(q->queue_lock);
}
} while (entry);
}
end_cmd(container_of(timer, struct nullb_cmd, timer));
return HRTIMER_NORESTART;
}
static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
struct completion_queue *cq = &per_cpu(completion_queues, get_cpu());
ktime_t kt = ktime_set(0, completion_nsec);
cmd->ll_list.next = NULL;
if (llist_add(&cmd->ll_list, &cq->list)) {
ktime_t kt = ktime_set(0, completion_nsec);
hrtimer_start(&cq->timer, kt, HRTIMER_MODE_REL_PINNED);
}
put_cpu();
hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}
static void null_softirq_done_fn(struct request *rq)
@ -376,6 +357,10 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
{
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
if (irqmode == NULL_IRQ_TIMER) {
hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
cmd->timer.function = null_cmd_timer_expired;
}
cmd->rq = bd->rq;
cmd->nq = hctx->driver_data;
@ -459,8 +444,9 @@ static void null_lnvm_end_io(struct request *rq, int error)
blk_put_request(rq);
}
static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
struct request_queue *q = dev->q;
struct request *rq;
struct bio *bio = rqd->bio;
@ -485,7 +471,7 @@ static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
return 0;
}
static int null_lnvm_id(struct request_queue *q, struct nvm_id *id)
static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
{
sector_t size = gb * 1024 * 1024 * 1024ULL;
sector_t blksize;
@ -538,7 +524,7 @@ static int null_lnvm_id(struct request_queue *q, struct nvm_id *id)
return 0;
}
static void *null_lnvm_create_dma_pool(struct request_queue *q, char *name)
static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name)
{
mempool_t *virtmem_pool;
@ -556,7 +542,7 @@ static void null_lnvm_destroy_dma_pool(void *pool)
mempool_destroy(pool);
}
static void *null_lnvm_dev_dma_alloc(struct request_queue *q, void *pool,
static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
gfp_t mem_flags, dma_addr_t *dma_handler)
{
return mempool_alloc(pool, mem_flags);
@ -780,7 +766,9 @@ out:
static int __init null_init(void)
{
int ret = 0;
unsigned int i;
struct nullb *nullb;
if (bs > PAGE_SIZE) {
pr_warn("null_blk: invalid block size\n");
@ -813,19 +801,6 @@ static int __init null_init(void)
mutex_init(&lock);
/* Initialize a separate list for each CPU for issuing softirqs */
for_each_possible_cpu(i) {
struct completion_queue *cq = &per_cpu(completion_queues, i);
init_llist_head(&cq->list);
if (irqmode != NULL_IRQ_TIMER)
continue;
hrtimer_init(&cq->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
cq->timer.function = null_cmd_timer_expired;
}
null_major = register_blkdev(0, "nullb");
if (null_major < 0)
return null_major;
@ -835,22 +810,29 @@ static int __init null_init(void)
0, 0, NULL);
if (!ppa_cache) {
pr_err("null_blk: unable to create ppa cache\n");
return -ENOMEM;
}
}
for (i = 0; i < nr_devices; i++) {
if (null_add_dev()) {
unregister_blkdev(null_major, "nullb");
ret = -ENOMEM;
goto err_ppa;
}
}
for (i = 0; i < nr_devices; i++) {
ret = null_add_dev();
if (ret)
goto err_dev;
}
pr_info("null: module loaded\n");
return 0;
err_ppa:
err_dev:
while (!list_empty(&nullb_list)) {
nullb = list_entry(nullb_list.next, struct nullb, list);
null_del_dev(nullb);
}
kmem_cache_destroy(ppa_cache);
return -EINVAL;
err_ppa:
unregister_blkdev(null_major, "nullb");
return ret;
}
static void __exit null_exit(void)
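The null_blk hunks above drop the shared per-CPU completion queues and embed an hrtimer directly in each command, armed per request with the completion_nsec delay. A self-contained sketch of that per-object hrtimer pattern — the structure and function names are hypothetical; only the hrtimer calls mirror the driver code:
#include <linux/hrtimer.h>
#include <linux/kernel.h>
#include <linux/ktime.h>

struct example_cmd {
	struct hrtimer timer;
	/* ... per-command state ... */
};

static enum hrtimer_restart example_cmd_expired(struct hrtimer *timer)
{
	struct example_cmd *cmd = container_of(timer, struct example_cmd, timer);

	/* complete the command here; one-shot timer, so never restart */
	(void)cmd;
	return HRTIMER_NORESTART;
}

static void example_arm_completion(struct example_cmd *cmd, unsigned long nsec)
{
	hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	cmd->timer.function = example_cmd_expired;
	hrtimer_start(&cmd->timer, ktime_set(0, nsec), HRTIMER_MODE_REL);
}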

View File

@ -3442,6 +3442,7 @@ static void rbd_queue_workfn(struct work_struct *work)
goto err_rq;
}
img_request->rq = rq;
snapc = NULL; /* img_request consumes a ref */
if (op_type == OBJ_OP_DISCARD)
result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,

View File

@ -1230,14 +1230,14 @@ static int smi_start_processing(void *send_info,
new_smi->intf = intf;
/* Try to claim any interrupts. */
if (new_smi->irq_setup)
new_smi->irq_setup(new_smi);
/* Set up the timer that drives the interface. */
setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
/* Try to claim any interrupts. */
if (new_smi->irq_setup)
new_smi->irq_setup(new_smi);
/*
* Check if the user forcefully enabled the daemon.
*/

View File

@ -209,6 +209,8 @@ EXPORT_SYMBOL_GPL(clk_register_gpio_mux);
struct clk_gpio_delayed_register_data {
const char *gpio_name;
int num_parents;
const char **parent_names;
struct device_node *node;
struct mutex lock;
struct clk *clk;
@ -222,8 +224,6 @@ static struct clk *of_clk_gpio_delayed_register_get(
{
struct clk_gpio_delayed_register_data *data = _data;
struct clk *clk;
const char **parent_names;
int i, num_parents;
int gpio;
enum of_gpio_flags of_flags;
@ -248,26 +248,14 @@ static struct clk *of_clk_gpio_delayed_register_get(
return ERR_PTR(gpio);
}
num_parents = of_clk_get_parent_count(data->node);
parent_names = kcalloc(num_parents, sizeof(char *), GFP_KERNEL);
if (!parent_names) {
clk = ERR_PTR(-ENOMEM);
goto out;
}
for (i = 0; i < num_parents; i++)
parent_names[i] = of_clk_get_parent_name(data->node, i);
clk = data->clk_register_get(data->node->name, parent_names,
num_parents, gpio, of_flags & OF_GPIO_ACTIVE_LOW);
clk = data->clk_register_get(data->node->name, data->parent_names,
data->num_parents, gpio, of_flags & OF_GPIO_ACTIVE_LOW);
if (IS_ERR(clk))
goto out;
data->clk = clk;
out:
mutex_unlock(&data->lock);
kfree(parent_names);
return clk;
}
@ -296,11 +284,24 @@ static void __init of_gpio_clk_setup(struct device_node *node,
unsigned gpio, bool active_low))
{
struct clk_gpio_delayed_register_data *data;
const char **parent_names;
int i, num_parents;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return;
num_parents = of_clk_get_parent_count(node);
parent_names = kcalloc(num_parents, sizeof(char *), GFP_KERNEL);
if (!parent_names)
return;
for (i = 0; i < num_parents; i++)
parent_names[i] = of_clk_get_parent_name(node, i);
data->num_parents = num_parents;
data->parent_names = parent_names;
data->node = node;
data->gpio_name = gpio_name;
data->clk_register_get = clk_register_get;

View File

@ -778,8 +778,10 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
*/
clksel = (cg_in(cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;
div = get_pll_div(cg, hwc, clksel);
if (!div)
if (!div) {
kfree(hwc);
return NULL;
}
pct80_rate = clk_get_rate(div->clk);
pct80_rate *= 8;

Some files were not shown because too many files have changed in this diff.