This is the 5.4.102 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmBAqDoACgkQONu9yGCS
 aT7R4A//RC4/R+Uc+cX8I2al+B017epRXRtfMDz7cd/dO1SAAhgDi4zrebAxs1XP
 6g/t37NuDZ0rjKxMBRzATSwizDLP9gKpeWCVQTtvlHGf+tm/5sn2bt7pckoPvXvo
 GqXPT4YgUgZQSHE+YG5Rhjtv0xMcOEu9yNTsPNZJU6BDdYJylQX/D97MPVjJjbXJ
 Sz+U98wHt0zIbwkg13/2FZvPMdEKL0z8Ub/SIKDaXfFSPJMDYb/5UcEfdnDctSbI
 B3i2i1/IXa97EmNG/MNDi1zPI2l9+PtRrtIzpfLASRNx3ySceiC25EyDk0mp5JnZ
 czxXJ0NxG9z9Pk9X6Isvaz6X5Nqv70LORTFeZRBEp0ohYbsxH/yBuPZ0T8bukjgU
 MA/uZDQryfeNgBN1aEJlTRCAmGyyD6NIICsNPnetmmowgqYxhHXt0tVafMvWpH9F
 vbM3eHcOfOfNejoQiPqTj5vX7NF0BZGQYa5LywKHeGe5q2nwaMj++Kffj9ERCo49
 OZFylFPiQVdEjse07JJb5vGWQkvvTv1FDB+zb7GVgHwJNnb9Lswv2VQbjdZBS++h
 YUuDSxkhEYR+vdKKLcFBbjAYkJXrpiSeXzywjR5N0c90OJdaBX1kpAbBHHXYiwo1
 P39l5/hsxWljQ1ZJqbeFWr2ef27xDiEz7aPojLUlyjBRgBC4eYc=
 =JSQX
 -----END PGP SIGNATURE-----
gpgsig -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEdQaENiSDAlGTDEbB7G51OISzHs0FAmBBQn0ACgkQ7G51OISz
 Hs1bUw/8C8T64CcvBDZhx2MyFqP5Wmc3h2li2/mAGYBPXnq9bQqhHVwNxBoeOY+d
 8jmPuao6Rb5xWWDCK/7bpshOyM7M4oBKDwJK6tgg9+ntPhllL29H8NYnH+hd45Zm
 oeM9gzKk4DU2jxZMt9BOi/ByrCApVKxcbiddPKmgXqaz568JQg4P10E7D2ceRudq
 uGlZbtEVqw6ftAyygzqiVmioGCwdgd/5BC+HPGcSaodF63z5Dc5LPOG+uefAVxq/
 oXawWdqScGqJXsbbkncgbvNSkIlYdVJIPqnMYrYnD2saAq4qeisrEkUzTUJwRCYT
 CeKxKL6nP4vqzgc21YGyxhtxJf+Sv2pU9QMgN/qfuhG9Nnz+7hQ3TntEjviPf6kC
 73a7cyQeLCVM2uECVN1b3fXweXRvllzkYCsomO5hU2J8mHHcUjJhwyYqsyE80Vpl
 w8qmZcyR0J0BmKXnYISGfe/S77Ze2nqLhOkGmP/PQWiTrULtLR/7wDEOap2xZbmz
 LPqK7nIrTD9lntyksNg1C1Az+So/tZQLh2hCX0C2efGqRyP2UbzoR6HgaI84LWf7
 L6qG9zVjl88lHLPZSO6WpOlxoR8RpmMprHs0bHJS79ucU+XzD0o9LyWhMBm6++qE
 WuDsSYYJtKPl7Qe04uBJNfO8cbmY9YcorYkgAcUz/Ar+QgCT0js=
 =aicv
 -----END PGP SIGNATURE-----

Merge tag 'v5.4.102' into 5.4-2.3.x-imx

This is the 5.4.102 stable release

Signed-off-by: Andrey Zhizhikin <andrey.zhizhikin@leica-geosystems.com>
Andrey Zhizhikin 2021-03-04 20:26:33 +00:00
commit d51b217cf8
336 changed files with 2486 additions and 1254 deletions


@ -192,6 +192,12 @@ between the calls to start() and stop(), so holding a lock during that time
is a reasonable thing to do. The seq_file code will also avoid taking any
other locks while the iterator is active.
The iterator value returned by start() or next() is guaranteed to be
passed to a subsequent next() or stop() call. This allows resources
such as locks that were taken to be reliably released. There is *no*
guarantee that the iterator will be passed to show(), though in practice
it often will be.
Formatted output
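
The guarantee documented above is what makes the common pattern of taking a lock in start() and releasing it in stop() safe. A minimal, hypothetical sketch of that pattern (the example_* names are illustrative only and not part of this patch):

/* Hypothetical seq_file iterator relying on the start()/stop() guarantee. */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>

static DEFINE_MUTEX(example_lock);
static LIST_HEAD(example_list);

static void *example_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&example_lock);                 /* taken here ... */
	return seq_list_start(&example_list, *pos);
}

static void *example_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &example_list, pos);
}

static void example_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&example_lock);               /* ... and always released here */
}

static int example_show(struct seq_file *m, void *v)
{
	/* show() may be skipped for a given iterator value; stop() is not. */
	seq_printf(m, "%p\n", v);
	return 0;
}

static const struct seq_operations example_seq_ops = {
	.start = example_start,
	.next  = example_next,
	.stop  = example_stop,
	.show  = example_show,
};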


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 101
SUBLEVEL = 102
EXTRAVERSION =
NAME = Kleptomaniac Octopus


@ -1142,9 +1142,9 @@ __armv4_mmu_cache_off:
__armv7_mmu_cache_off:
mrc p15, 0, r0, c1, c0
#ifdef CONFIG_MMU
bic r0, r0, #0x000d
bic r0, r0, #0x0005
#else
bic r0, r0, #0x000c
bic r0, r0, #0x0004
#endif
mcr p15, 0, r0, c1, c0 @ turn MMU and cache off
mov r12, lr


@ -70,6 +70,9 @@
system-leds {
compatible = "gpio-leds";
pinctrl-names = "default";
pinctrl-0 = <&helios_system_led_pins>;
status-led {
label = "helios4:green:status";
gpios = <&gpio0 24 GPIO_ACTIVE_LOW>;
@ -86,6 +89,9 @@
io-leds {
compatible = "gpio-leds";
pinctrl-names = "default";
pinctrl-0 = <&helios_io_led_pins>;
sata1-led {
label = "helios4:green:ata1";
gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
@ -121,11 +127,15 @@
fan1: j10-pwm {
compatible = "pwm-fan";
pwms = <&gpio1 9 40000>; /* Target freq:25 kHz */
pinctrl-names = "default";
pinctrl-0 = <&helios_fan1_pins>;
};
fan2: j17-pwm {
compatible = "pwm-fan";
pwms = <&gpio1 23 40000>; /* Target freq:25 kHz */
pinctrl-names = "default";
pinctrl-0 = <&helios_fan2_pins>;
};
usb2_phy: usb2-phy {
@ -291,16 +301,22 @@
"mpp39", "mpp40";
marvell,function = "sd0";
};
helios_led_pins: helios-led-pins {
marvell,pins = "mpp24", "mpp25",
"mpp49", "mpp50",
helios_system_led_pins: helios-system-led-pins {
marvell,pins = "mpp24", "mpp25";
marvell,function = "gpio";
};
helios_io_led_pins: helios-io-led-pins {
marvell,pins = "mpp49", "mpp50",
"mpp52", "mpp53",
"mpp54";
marvell,function = "gpio";
};
helios_fan_pins: helios-fan-pins {
marvell,pins = "mpp41", "mpp43",
"mpp48", "mpp55";
helios_fan1_pins: helios_fan1_pins {
marvell,pins = "mpp41", "mpp43";
marvell,function = "gpio";
};
helios_fan2_pins: helios_fan2_pins {
marvell,pins = "mpp48", "mpp55";
marvell,function = "gpio";
};
microsom_spi1_cs_pins: spi1-cs-pins {


@ -371,6 +371,7 @@
compatible = "aspeed,ast2400-ibt-bmc";
reg = <0xc0 0x18>;
interrupts = <8>;
clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
status = "disabled";
};
};


@ -464,6 +464,7 @@
compatible = "aspeed,ast2500-ibt-bmc";
reg = <0xc0 0x18>;
interrupts = <8>;
clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
status = "disabled";
};
};


@ -75,7 +75,7 @@
s2mps14_pmic@66 {
compatible = "samsung,s2mps14-pmic";
interrupt-parent = <&gpx3>;
interrupts = <5 IRQ_TYPE_NONE>;
interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&s2mps14_irq>;
reg = <0x66>;


@ -195,7 +195,7 @@
s2mps14_pmic@66 {
compatible = "samsung,s2mps14-pmic";
interrupt-parent = <&gpx0>;
interrupts = <7 IRQ_TYPE_NONE>;
interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
reg = <0x66>;
wakeup-source;


@ -260,7 +260,7 @@
s2mps14_pmic@66 {
compatible = "samsung,s2mps14-pmic";
interrupt-parent = <&gpx0>;
interrupts = <7 IRQ_TYPE_NONE>;
interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
reg = <0x66>;
wakeup-source;


@ -108,7 +108,7 @@
compatible = "samsung,s5m8767-pmic";
reg = <0x66>;
interrupt-parent = <&gpx3>;
interrupts = <2 IRQ_TYPE_NONE>;
interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&s5m8767_irq &s5m8767_dvs &s5m8767_ds>;
wakeup-source;


@ -349,7 +349,7 @@
reg = <0x66>;
interrupt-parent = <&gpx3>;
interrupts = <2 IRQ_TYPE_EDGE_FALLING>;
interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&s2mps11_irq>;


@ -141,7 +141,7 @@
samsung,s2mps11-acokb-ground;
interrupt-parent = <&gpx0>;
interrupts = <4 IRQ_TYPE_EDGE_FALLING>;
interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&s2mps11_irq>;


@ -33,10 +33,12 @@
};
ocp {
/* 4430 has only gpio_86 tshut and no talert interrupt */
bandgap: bandgap@4a002260 {
reg = <0x4a002260 0x4
0x4a00232C 0x4>;
compatible = "ti,omap4430-bandgap";
gpios = <&gpio3 22 GPIO_ACTIVE_HIGH>;
#thermal-sensor-cells = <0>;
};


@ -13,7 +13,6 @@ config MACH_IXP4XX_OF
select I2C
select I2C_IOP3XX
select PCI
select TIMER_OF
select USE_OF
help
Say 'Y' here to support Device Tree-based IXP4xx platforms.


@ -489,7 +489,7 @@ config ARM64_ERRATUM_1024718
help
This option adds a workaround for ARM Cortex-A55 Erratum 1024718.
Affected Cortex-A55 cores (r0p0, r0p1, r1p0) could cause incorrect
Affected Cortex-A55 cores (all revisions) could cause incorrect
update of the hardware dirty bit when the DBM/AP bits are updated
without a break-before-make. The workaround is to disable the usage
of hardware DBM locally on the affected cores. CPUs not affected by


@ -103,8 +103,6 @@
};
&ehci0 {
phys = <&usbphy 0>;
phy-names = "usb";
status = "okay";
};
@ -142,6 +140,7 @@
pinctrl-0 = <&mmc2_pins>, <&mmc2_ds_pin>;
vmmc-supply = <&reg_dcdc1>;
vqmmc-supply = <&reg_eldo1>;
max-frequency = <200000000>;
bus-width = <8>;
non-removable;
cap-mmc-hw-reset;
@ -150,8 +149,6 @@
};
&ohci0 {
phys = <&usbphy 0>;
phy-names = "usb";
status = "okay";
};


@ -55,7 +55,6 @@
pinctrl-names = "default";
pinctrl-0 = <&mmc0_pins>;
vmmc-supply = <&reg_dcdc1>;
non-removable;
disable-wp;
bus-width = <4>;
cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */


@ -476,7 +476,7 @@
resets = <&ccu RST_BUS_MMC2>;
reset-names = "ahb";
interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
max-frequency = <200000000>;
max-frequency = <150000000>;
status = "disabled";
#address-cells = <1>;
#size-cells = <0>;
@ -530,6 +530,8 @@
<&ccu CLK_USB_OHCI0>;
resets = <&ccu RST_BUS_OHCI0>,
<&ccu RST_BUS_EHCI0>;
phys = <&usbphy 0>;
phy-names = "usb";
status = "disabled";
};
@ -540,6 +542,8 @@
clocks = <&ccu CLK_BUS_OHCI0>,
<&ccu CLK_USB_OHCI0>;
resets = <&ccu RST_BUS_OHCI0>;
phys = <&usbphy 0>;
phy-names = "usb";
status = "disabled";
};


@ -332,6 +332,7 @@
interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&mmc0_pins>;
max-frequency = <150000000>;
status = "disabled";
#address-cells = <1>;
#size-cells = <0>;
@ -348,6 +349,7 @@
interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&mmc1_pins>;
max-frequency = <150000000>;
status = "disabled";
#address-cells = <1>;
#size-cells = <0>;
@ -364,6 +366,7 @@
interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&mmc2_pins>;
max-frequency = <150000000>;
status = "disabled";
#address-cells = <1>;
#size-cells = <0>;
@ -533,6 +536,8 @@
<&ccu CLK_USB_OHCI0>;
resets = <&ccu RST_BUS_OHCI0>,
<&ccu RST_BUS_EHCI0>;
phys = <&usb2phy 0>;
phy-names = "usb";
status = "disabled";
};
@ -543,6 +548,8 @@
clocks = <&ccu CLK_BUS_OHCI0>,
<&ccu CLK_USB_OHCI0>;
resets = <&ccu RST_BUS_OHCI0>;
phys = <&usb2phy 0>;
phy-names = "usb";
status = "disabled";
};


@ -389,7 +389,7 @@
s2mps13-pmic@66 {
compatible = "samsung,s2mps13-pmic";
interrupt-parent = <&gpa0>;
interrupts = <7 IRQ_TYPE_NONE>;
interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
reg = <0x66>;
samsung,s2mps11-wrstbi-ground;


@ -90,7 +90,7 @@
s2mps15_pmic@66 {
compatible = "samsung,s2mps15-pmic";
reg = <0x66>;
interrupts = <2 IRQ_TYPE_NONE>;
interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
interrupt-parent = <&gpa0>;
pinctrl-names = "default";
pinctrl-0 = <&pmic_irq>;


@ -202,7 +202,7 @@
};
partition@20000 {
label = "u-boot";
label = "a53-firmware";
reg = <0x20000 0x160000>;
};


@ -698,6 +698,8 @@
clocks = <&pericfg CLK_PERI_MSDC30_1_PD>,
<&topckgen CLK_TOP_AXI_SEL>;
clock-names = "source", "hclk";
resets = <&pericfg MT7622_PERI_MSDC1_SW_RST>;
reset-names = "hrst";
status = "disabled";
};


@ -53,7 +53,7 @@
no-map;
};
reserved@8668000 {
reserved@86680000 {
reg = <0x0 0x86680000 0x0 0x80000>;
no-map;
};
@ -66,7 +66,7 @@
qcom,client-id = <1>;
};
rfsa@867e00000 {
rfsa@867e0000 {
reg = <0x0 0x867e0000 0x0 0x20000>;
no-map;
};


@ -55,7 +55,7 @@ MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
#define aes_mac_update neon_aes_mac_update
MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 NEON");
#endif
#if defined(USE_V8_CRYPTO_EXTENSIONS) || !defined(CONFIG_CRYPTO_AES_ARM64_BS)
#if defined(USE_V8_CRYPTO_EXTENSIONS) || !IS_ENABLED(CONFIG_CRYPTO_AES_ARM64_BS)
MODULE_ALIAS_CRYPTO("ecb(aes)");
MODULE_ALIAS_CRYPTO("cbc(aes)");
MODULE_ALIAS_CRYPTO("ctr(aes)");
@ -668,7 +668,7 @@ static int __maybe_unused xts_decrypt(struct skcipher_request *req)
}
static struct skcipher_alg aes_algs[] = { {
#if defined(USE_V8_CRYPTO_EXTENSIONS) || !defined(CONFIG_CRYPTO_AES_ARM64_BS)
#if defined(USE_V8_CRYPTO_EXTENSIONS) || !IS_ENABLED(CONFIG_CRYPTO_AES_ARM64_BS)
.base = {
.cra_name = "__ecb(aes)",
.cra_driver_name = "__ecb-aes-" MODE,


@ -19,6 +19,7 @@
MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("sha1");
struct sha1_ce_state {
struct sha1_state sst;


@ -19,6 +19,8 @@
MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("sha224");
MODULE_ALIAS_CRYPTO("sha256");
struct sha256_ce_state {
struct sha256_state sst;


@ -23,6 +23,10 @@
MODULE_DESCRIPTION("SHA3 secure hash using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("sha3-224");
MODULE_ALIAS_CRYPTO("sha3-256");
MODULE_ALIAS_CRYPTO("sha3-384");
MODULE_ALIAS_CRYPTO("sha3-512");
asmlinkage void sha3_ce_transform(u64 *st, const u8 *data, int blocks,
int md_len);


@ -23,6 +23,8 @@
MODULE_DESCRIPTION("SHA-384/SHA-512 secure hash using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("sha384");
MODULE_ALIAS_CRYPTO("sha512");
asmlinkage void sha512_ce_transform(struct sha512_state *sst, u8 const *src,
int blocks);


@ -1110,7 +1110,7 @@ static bool cpu_has_broken_dbm(void)
/* List of CPUs which have broken DBM support. */
static const struct midr_range cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_1024718
MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0), // A55 r0p0 -r1p0
MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
#endif
{},
};


@ -970,6 +970,7 @@ __primary_switch:
tlbi vmalle1 // Remove any stale TLB entries
dsb nsh
isb
msr sctlr_el1, x19 // re-enable the MMU
isb


@ -150,8 +150,10 @@ static int create_dtb(struct kimage *image,
/* duplicate a device tree blob */
ret = fdt_open_into(initial_boot_params, buf, buf_size);
if (ret)
if (ret) {
vfree(buf);
return -EINVAL;
}
ret = setup_dtb(image, initrd_load_addr, initrd_len,
cmdline, buf);


@ -38,7 +38,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
/* TODO: Currently we do not support AARCH32 instruction probing */
if (mm->context.flags & MMCF_AARCH32)
return -ENOTSUPP;
return -EOPNOTSUPP;
else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE))
return -EINVAL;


@ -20,10 +20,27 @@
#include <asm/sgidefs.h>
#include <asm/asm-eva.h>
#ifndef __VDSO__
/*
* Emit CFI data in .debug_frame sections, not .eh_frame sections.
* We don't do DWARF unwinding at runtime, so only the offline DWARF
* information is useful to anyone. Note we should change this if we
* ever decide to enable DWARF unwinding at runtime.
*/
#define CFI_SECTIONS .cfi_sections .debug_frame
#else
/*
* For the vDSO, emit both runtime unwind information and debug
* symbols for the .dbg file.
*/
#define CFI_SECTIONS
#endif
/*
* LEAF - declare leaf routine
*/
#define LEAF(symbol) \
CFI_SECTIONS; \
.globl symbol; \
.align 2; \
.type symbol, @function; \
@ -36,6 +53,7 @@ symbol: .frame sp, 0, ra; \
* NESTED - declare nested routine entry point
*/
#define NESTED(symbol, framesize, rpc) \
CFI_SECTIONS; \
.globl symbol; \
.align 2; \
.type symbol, @function; \


@ -93,6 +93,7 @@ SECTIONS
INIT_TASK_DATA(THREAD_SIZE)
NOSAVE_DATA
PAGE_ALIGNED_DATA(PAGE_SIZE)
CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
DATA_DATA
@ -225,6 +226,5 @@ SECTIONS
*(.options)
*(.pdr)
*(.reginfo)
*(.eh_frame)
}
}


@ -302,7 +302,7 @@ static void ltq_hw_irq_handler(struct irq_desc *desc)
generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));
/* if this is a EBU irq, we need to ack it or get a deadlock */
if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
if (irq == LTQ_ICU_EBU_IRQ && !module && LTQ_EBU_PCC_ISTAT != 0)
ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
LTQ_EBU_PCC_ISTAT);
}


@ -1560,7 +1560,7 @@ static int probe_scache(void)
return 1;
}
static void __init loongson2_sc_init(void)
static void loongson2_sc_init(void)
{
struct cpuinfo_mips *c = &current_cpu_data;


@ -723,7 +723,7 @@ config PPC_64K_PAGES
config PPC_256K_PAGES
bool "256k page size"
depends on 44x && !STDBINUTILS
depends on 44x && !STDBINUTILS && !PPC_47x
help
Make the page size 256k.


@ -336,6 +336,9 @@ trace_syscall_entry_irq_off:
.globl transfer_to_syscall
transfer_to_syscall:
#ifdef CONFIG_PPC_BOOK3S_32
kuep_lock r11, r12
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
andi. r12,r9,MSR_EE
beq- trace_syscall_entry_irq_off


@ -191,7 +191,7 @@ SystemCall:
/* On the MPC8xx, this is a software emulation interrupt. It occurs
* for all unimplemented and illegal instructions.
*/
EXCEPTION(0x1000, SoftEmu, program_check_exception, EXC_XFER_STD)
EXCEPTION(0x1000, SoftEmu, emulation_assist_interrupt, EXC_XFER_STD)
/* Called from DataStoreTLBMiss when perf TLB misses events are activated */
#ifdef CONFIG_PERF_EVENTS


@ -1305,14 +1305,10 @@ static void __init prom_check_platform_support(void)
if (prop_len > sizeof(vec))
prom_printf("WARNING: ibm,arch-vec-5-platform-support longer than expected (len: %d)\n",
prop_len);
prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support",
&vec, sizeof(vec));
for (i = 0; i < sizeof(vec); i += 2) {
prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2
, vec[i]
, vec[i + 1]);
prom_parse_platform_support(vec[i], vec[i + 1],
&supported);
prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", &vec, sizeof(vec));
for (i = 0; i < prop_len; i += 2) {
prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2, vec[i], vec[i + 1]);
prom_parse_platform_support(vec[i], vec[i + 1], &supported);
}
}


@ -1513,7 +1513,7 @@ int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
return emulated;
}
int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
union kvmppc_one_reg reg;
int vmx_offset = 0;
@ -1531,7 +1531,7 @@ int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
return result;
}
int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
{
union kvmppc_one_reg reg;
int vmx_offset = 0;
@ -1549,7 +1549,7 @@ int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
return result;
}
int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
union kvmppc_one_reg reg;
int vmx_offset = 0;
@ -1567,7 +1567,7 @@ int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
return result;
}
int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
{
union kvmppc_one_reg reg;
int vmx_offset = 0;


@ -127,7 +127,6 @@ void dlpar_free_cc_nodes(struct device_node *dn)
#define NEXT_PROPERTY 3
#define PREV_PARENT 4
#define MORE_MEMORY 5
#define CALL_AGAIN -2
#define ERR_CFG_USE -9003
struct device_node *dlpar_configure_connector(__be32 drc_index,
@ -168,6 +167,9 @@ struct device_node *dlpar_configure_connector(__be32 drc_index,
spin_unlock(&rtas_data_buf_lock);
if (rtas_busy_delay(rc))
continue;
switch (rc) {
case COMPLETE:
break;
@ -216,9 +218,6 @@ struct device_node *dlpar_configure_connector(__be32 drc_index,
last_dn = last_dn->parent;
break;
case CALL_AGAIN:
break;
case MORE_MEMORY:
case ERR_CFG_USE:
default:


@ -136,7 +136,8 @@ static int do_account_vtime(struct task_struct *tsk)
" stck %1" /* Store current tod clock value */
#endif
: "=Q" (S390_lowcore.last_update_timer),
"=Q" (S390_lowcore.last_update_clock));
"=Q" (S390_lowcore.last_update_clock)
: : "cc");
clock = S390_lowcore.last_update_clock - clock;
timer -= S390_lowcore.last_update_timer;


@ -524,7 +524,7 @@ config COMPAT
bool
depends on SPARC64
default y
select COMPAT_BINFMT_ELF
select COMPAT_BINFMT_ELF if BINFMT_ELF
select HAVE_UID16
select ARCH_WANT_OLD_COMPAT_IPC
select COMPAT_OLD_SIGACTION


@ -142,6 +142,7 @@ __bzero:
ZERO_LAST_BLOCKS(%o0, 0x48, %g2)
ZERO_LAST_BLOCKS(%o0, 0x08, %g2)
13:
EXT(12b, 13b, 21f)
be 8f
andcc %o1, 4, %g0


@ -126,6 +126,9 @@ static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
struct host_vm_op *last;
int fd = -1, ret = 0;
if (virt + len > STUB_START && virt < STUB_END)
return -EINVAL;
if (hvc->userspace)
fd = phys_mapping(phys, &offset);
else
@ -163,7 +166,7 @@ static int add_munmap(unsigned long addr, unsigned long len,
struct host_vm_op *last;
int ret = 0;
if ((addr >= STUB_START) && (addr < STUB_END))
if (addr + len > STUB_START && addr < STUB_END)
return -EINVAL;
if (hvc->index != 0) {
@ -193,6 +196,9 @@ static int add_mprotect(unsigned long addr, unsigned long len,
struct host_vm_op *last;
int ret = 0;
if (addr + len > STUB_START && addr < STUB_END)
return -EINVAL;
if (hvc->index != 0) {
last = &hvc->ops[hvc->index - 1];
if ((last->type == MPROTECT) &&
@ -433,6 +439,10 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
struct mm_id *mm_id;
address &= PAGE_MASK;
if (address >= STUB_START && address < STUB_END)
goto kill;
pgd = pgd_offset(mm, address);
if (!pgd_present(*pgd))
goto kill;


@ -707,7 +707,8 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
unsigned long auth_tag_len = crypto_aead_authsize(tfm);
const struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm;
struct gcm_context_data data AESNI_ALIGN_ATTR;
u8 databuf[sizeof(struct gcm_context_data) + (AESNI_ALIGN - 8)] __aligned(8);
struct gcm_context_data *data = PTR_ALIGN((void *)databuf, AESNI_ALIGN);
struct scatter_walk dst_sg_walk = {};
unsigned long left = req->cryptlen;
unsigned long len, srclen, dstlen;
@ -760,8 +761,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
}
kernel_fpu_begin();
gcm_tfm->init(aes_ctx, &data, iv,
hash_subkey, assoc, assoclen);
gcm_tfm->init(aes_ctx, data, iv, hash_subkey, assoc, assoclen);
if (req->src != req->dst) {
while (left) {
src = scatterwalk_map(&src_sg_walk);
@ -771,10 +771,10 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
len = min(srclen, dstlen);
if (len) {
if (enc)
gcm_tfm->enc_update(aes_ctx, &data,
gcm_tfm->enc_update(aes_ctx, data,
dst, src, len);
else
gcm_tfm->dec_update(aes_ctx, &data,
gcm_tfm->dec_update(aes_ctx, data,
dst, src, len);
}
left -= len;
@ -792,10 +792,10 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
len = scatterwalk_clamp(&src_sg_walk, left);
if (len) {
if (enc)
gcm_tfm->enc_update(aes_ctx, &data,
gcm_tfm->enc_update(aes_ctx, data,
src, src, len);
else
gcm_tfm->dec_update(aes_ctx, &data,
gcm_tfm->dec_update(aes_ctx, data,
src, src, len);
}
left -= len;
@ -804,7 +804,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
scatterwalk_done(&src_sg_walk, 1, left);
}
}
gcm_tfm->finalize(aes_ctx, &data, authTag, auth_tag_len);
gcm_tfm->finalize(aes_ctx, data, authTag, auth_tag_len);
kernel_fpu_end();
if (!assocmem)
@ -853,7 +853,8 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
void *aes_ctx = &(ctx->aes_key_expanded);
u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
unsigned int i;
__be32 counter = cpu_to_be32(1);
@ -880,7 +881,8 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
void *aes_ctx = &(ctx->aes_key_expanded);
u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
unsigned int i;
if (unlikely(req->assoclen != 16 && req->assoclen != 20))
@ -1010,7 +1012,8 @@ static int generic_gcmaes_encrypt(struct aead_request *req)
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
void *aes_ctx = &(ctx->aes_key_expanded);
u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
__be32 counter = cpu_to_be32(1);
memcpy(iv, req->iv, 12);
@ -1026,7 +1029,8 @@ static int generic_gcmaes_decrypt(struct aead_request *req)
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
void *aes_ctx = &(ctx->aes_key_expanded);
u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
memcpy(iv, req->iv, 12);
*((__be32 *)(iv+12)) = counter;


@ -30,15 +30,22 @@ static inline int cpu_has_vmx(void)
}
/** Disable VMX on the current CPU
/**
* cpu_vmxoff() - Disable VMX on the current CPU
*
* vmxoff causes a undefined-opcode exception if vmxon was not run
* on the CPU previously. Only call this function if you know VMX
* is enabled.
* Disable VMX and clear CR4.VMXE (even if VMXOFF faults)
*
* Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to
* atomically track post-VMXON state, e.g. this may be called in NMI context.
* Eat all faults as all other faults on VMXOFF faults are mode related, i.e.
* faults are guaranteed to be due to the !post-VMXON check unless the CPU is
* magically in RM, VM86, compat mode, or at CPL>0.
*/
static inline void cpu_vmxoff(void)
{
asm volatile ("vmxoff");
asm_volatile_goto("1: vmxoff\n\t"
_ASM_EXTABLE(1b, %l[fault]) :::: fault);
fault:
cr4_clear_bits(X86_CR4_VMXE);
}


@ -538,29 +538,20 @@ static void emergency_vmx_disable_all(void)
local_irq_disable();
/*
* We need to disable VMX on all CPUs before rebooting, otherwise
* we risk hanging up the machine, because the CPU ignore INIT
* signals when VMX is enabled.
* Disable VMX on all CPUs before rebooting, otherwise we risk hanging
* the machine, because the CPU blocks INIT when it's in VMX root.
*
* We can't take any locks and we may be on an inconsistent
* state, so we use NMIs as IPIs to tell the other CPUs to disable
* VMX and halt.
* We can't take any locks and we may be on an inconsistent state, so
* use NMIs as IPIs to tell the other CPUs to exit VMX root and halt.
*
* For safety, we will avoid running the nmi_shootdown_cpus()
* stuff unnecessarily, but we don't have a way to check
* if other CPUs have VMX enabled. So we will call it only if the
* CPU we are running on has VMX enabled.
*
* We will miss cases where VMX is not enabled on all CPUs. This
* shouldn't do much harm because KVM always enable VMX on all
* CPUs anyway. But we can miss it on the small window where KVM
* is still enabling VMX.
* Do the NMI shootdown even if VMX if off on _this_ CPU, as that
* doesn't prevent a different CPU from being in VMX root operation.
*/
if (cpu_has_vmx() && cpu_vmx_enabled()) {
/* Disable VMX on this CPU. */
cpu_vmxoff();
if (cpu_has_vmx()) {
/* Safely force _this_ CPU out of VMX root operation. */
__cpu_emergency_vmxoff();
/* Halt and disable VMX on the other CPUs */
/* Halt and exit VMX root operation on the other CPUs. */
nmi_shootdown_cpus(vmxoff_nmi);
}


@ -4327,7 +4327,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
!guest_has_spec_ctrl_msr(vcpu))
return 1;
if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
if (kvm_spec_ctrl_test_value(data))
return 1;
svm->spec_ctrl = data;


@ -1974,7 +1974,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
!guest_has_spec_ctrl_msr(vcpu))
return 1;
if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
if (kvm_spec_ctrl_test_value(data))
return 1;
vmx->spec_ctrl = data;


@ -10374,28 +10374,32 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu)
int kvm_spec_ctrl_test_value(u64 value)
{
uint64_t bits = SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD;
/*
* test that setting IA32_SPEC_CTRL to given value
* is allowed by the host processor
*/
/* The STIBP bit doesn't fault even if it's not advertised */
if (!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
!guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
bits &= ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP);
if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL) &&
!boot_cpu_has(X86_FEATURE_AMD_IBRS))
bits &= ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP);
u64 saved_value;
unsigned long flags;
int ret = 0;
if (!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL_SSBD) &&
!guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
bits &= ~SPEC_CTRL_SSBD;
if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
!boot_cpu_has(X86_FEATURE_AMD_SSBD))
bits &= ~SPEC_CTRL_SSBD;
local_irq_save(flags);
return bits;
if (rdmsrl_safe(MSR_IA32_SPEC_CTRL, &saved_value))
ret = 1;
else if (wrmsrl_safe(MSR_IA32_SPEC_CTRL, value))
ret = 1;
else
wrmsrl(MSR_IA32_SPEC_CTRL, saved_value);
local_irq_restore(flags);
return ret;
}
EXPORT_SYMBOL_GPL(kvm_spec_ctrl_valid_bits);
EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);


@ -368,6 +368,6 @@ static inline bool kvm_pat_valid(u64 data)
void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu);
void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu);
u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu);
int kvm_spec_ctrl_test_value(u64 value);
#endif


@ -1132,12 +1132,14 @@ static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
kfree(v);
++*pos;
return memtype_get_idx(*pos);
}
static void memtype_seq_stop(struct seq_file *seq, void *v)
{
kfree(v);
}
static int memtype_seq_show(struct seq_file *seq, void *v)
@ -1146,7 +1148,6 @@ static int memtype_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
print_entry->start, print_entry->end);
kfree(print_entry);
return 0;
}


@ -2937,6 +2937,7 @@ static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
}
bfqd->in_service_queue = bfqq;
bfqd->in_serv_last_pos = 0;
}
/*


@ -473,6 +473,14 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
}
EXPORT_SYMBOL(blk_queue_stack_limits);
static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
sectors = PAGE_SIZE >> SECTOR_SHIFT;
return sectors;
}
/**
* blk_stack_limits - adjust queue_limits for stacked devices
* @t: the stacking driver limits (top device)
@ -586,6 +594,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
ret = -1;
}
t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);
/* Discard alignment and granularity */
if (b->discard_granularity) {
alignment = queue_limit_discard_alignment(b, start);


@ -157,8 +157,10 @@ static int bsg_sg_io(struct request_queue *q, fmode_t mode, void __user *uarg)
return PTR_ERR(rq);
ret = q->bsg_dev.ops->fill_hdr(rq, &hdr, mode);
if (ret)
if (ret) {
blk_put_request(rq);
return ret;
}
rq->timeout = msecs_to_jiffies(hdr.timeout);
if (!rq->timeout)


@ -153,7 +153,7 @@ static int __init blacklist_init(void)
KEY_USR_VIEW | KEY_USR_READ |
KEY_USR_SEARCH,
KEY_ALLOC_NOT_IN_QUOTA |
KEY_FLAG_KEEP,
KEY_ALLOC_SET_KEEP,
NULL, NULL);
if (IS_ERR(blacklist_keyring))
panic("Can't allocate system blacklist keyring\n");


@ -67,6 +67,9 @@ int crypto_ecdh_decode_key(const char *buf, unsigned int len,
if (secret.type != CRYPTO_KPP_SECRET_TYPE_ECDH)
return -EINVAL;
if (unlikely(len < secret.len))
return -EINVAL;
ptr = ecdh_unpack_data(&params->curve_id, ptr, sizeof(params->curve_id));
ptr = ecdh_unpack_data(&params->key_size, ptr, sizeof(params->key_size));
if (secret.len != crypto_ecdh_key_len(params))


@ -267,7 +267,12 @@ static int __init acpi_configfs_init(void)
acpi_table_group = configfs_register_default_group(root, "table",
&acpi_tables_type);
return PTR_ERR_OR_ZERO(acpi_table_group);
if (IS_ERR(acpi_table_group)) {
configfs_unregister_subsystem(&acpi_configfs);
return PTR_ERR(acpi_table_group);
}
return 0;
}
module_init(acpi_configfs_init);


@ -794,9 +794,6 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data,
const union acpi_object *obj;
int ret;
if (!val)
return -EINVAL;
if (proptype >= DEV_PROP_U8 && proptype <= DEV_PROP_U64) {
ret = acpi_data_get_property(data, propname, ACPI_TYPE_INTEGER, &obj);
if (ret)
@ -806,28 +803,43 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data,
case DEV_PROP_U8:
if (obj->integer.value > U8_MAX)
return -EOVERFLOW;
*(u8 *)val = obj->integer.value;
if (val)
*(u8 *)val = obj->integer.value;
break;
case DEV_PROP_U16:
if (obj->integer.value > U16_MAX)
return -EOVERFLOW;
*(u16 *)val = obj->integer.value;
if (val)
*(u16 *)val = obj->integer.value;
break;
case DEV_PROP_U32:
if (obj->integer.value > U32_MAX)
return -EOVERFLOW;
*(u32 *)val = obj->integer.value;
if (val)
*(u32 *)val = obj->integer.value;
break;
default:
*(u64 *)val = obj->integer.value;
if (val)
*(u64 *)val = obj->integer.value;
break;
}
if (!val)
return 1;
} else if (proptype == DEV_PROP_STRING) {
ret = acpi_data_get_property(data, propname, ACPI_TYPE_STRING, &obj);
if (ret)
return ret;
*(char **)val = obj->string.pointer;
if (val)
*(char **)val = obj->string.pointer;
return 1;
} else {
@ -841,7 +853,7 @@ int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname,
{
int ret;
if (!adev)
if (!adev || !val)
return -EINVAL;
ret = acpi_data_prop_read_single(&adev->data, propname, proptype, val);
@ -935,10 +947,20 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
const union acpi_object *items;
int ret;
if (val && nval == 1) {
if (nval == 1 || !val) {
ret = acpi_data_prop_read_single(data, propname, proptype, val);
if (ret >= 0)
/*
* The overflow error means that the property is there and it is
* single-value, but its type does not match, so return.
*/
if (ret >= 0 || ret == -EOVERFLOW)
return ret;
/*
* Reading this property as a single-value one failed, but its
* value may still be represented as one-element array, so
* continue.
*/
}
ret = acpi_data_get_property_array(data, propname, ACPI_TYPE_ANY, &obj);


@ -299,10 +299,11 @@ static int amba_remove(struct device *dev)
{
struct amba_device *pcdev = to_amba_device(dev);
struct amba_driver *drv = to_amba_driver(dev->driver);
int ret;
int ret = 0;
pm_runtime_get_sync(dev);
ret = drv->remove(pcdev);
if (drv->remove)
ret = drv->remove(pcdev);
pm_runtime_put_noidle(dev);
/* Undo the runtime PM settings in amba_probe() */
@ -319,7 +320,9 @@ static int amba_remove(struct device *dev)
static void amba_shutdown(struct device *dev)
{
struct amba_driver *drv = to_amba_driver(dev->driver);
drv->shutdown(to_amba_device(dev));
if (drv->shutdown)
drv->shutdown(to_amba_device(dev));
}
/**
@ -332,12 +335,13 @@ static void amba_shutdown(struct device *dev)
*/
int amba_driver_register(struct amba_driver *drv)
{
drv->drv.bus = &amba_bustype;
if (!drv->probe)
return -EINVAL;
#define SETFN(fn) if (drv->fn) drv->drv.fn = amba_##fn
SETFN(probe);
SETFN(remove);
SETFN(shutdown);
drv->drv.bus = &amba_bustype;
drv->drv.probe = amba_probe;
drv->drv.remove = amba_remove;
drv->drv.shutdown = amba_shutdown;
return driver_register(&drv->drv);
}


@ -361,6 +361,10 @@ static int brcm_ahci_resume(struct device *dev)
if (ret)
return ret;
ret = ahci_platform_enable_regulators(hpriv);
if (ret)
goto out_disable_clks;
brcm_sata_init(priv);
brcm_sata_phys_enable(priv);
brcm_sata_alpm_init(hpriv);
@ -390,6 +394,8 @@ out_disable_platform_phys:
ahci_platform_disable_phys(hpriv);
out_disable_phys:
brcm_sata_phys_disable(priv);
ahci_platform_disable_regulators(hpriv);
out_disable_clks:
ahci_platform_disable_clks(hpriv);
return ret;
}
@ -463,6 +469,10 @@ static int brcm_ahci_probe(struct platform_device *pdev)
if (ret)
goto out_reset;
ret = ahci_platform_enable_regulators(hpriv);
if (ret)
goto out_disable_clks;
/* Must be first so as to configure endianness including that
* of the standard AHCI register space.
*/
@ -472,7 +482,7 @@ static int brcm_ahci_probe(struct platform_device *pdev)
priv->port_mask = brcm_ahci_get_portmask(hpriv, priv);
if (!priv->port_mask) {
ret = -ENODEV;
goto out_disable_clks;
goto out_disable_regulators;
}
/* Must be done before ahci_platform_enable_phys() */
@ -497,6 +507,8 @@ out_disable_platform_phys:
ahci_platform_disable_phys(hpriv);
out_disable_phys:
brcm_sata_phys_disable(priv);
out_disable_regulators:
ahci_platform_disable_regulators(hpriv);
out_disable_clks:
ahci_platform_disable_clks(hpriv);
out_reset:


@ -117,8 +117,7 @@ static void ht16k33_fb_queue(struct ht16k33_priv *priv)
{
struct ht16k33_fbdev *fbdev = &priv->fbdev;
schedule_delayed_work(&fbdev->work,
msecs_to_jiffies(HZ / fbdev->refresh_rate));
schedule_delayed_work(&fbdev->work, HZ / fbdev->refresh_rate);
}
/*


@ -12,7 +12,7 @@ static int regmap_sdw_write(void *context, unsigned int reg, unsigned int val)
struct device *dev = context;
struct sdw_slave *slave = dev_to_sdw_dev(dev);
return sdw_write(slave, reg, val);
return sdw_write_no_pm(slave, reg, val);
}
static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val)
@ -21,7 +21,7 @@ static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val)
struct sdw_slave *slave = dev_to_sdw_dev(dev);
int read;
read = sdw_read(slave, reg);
read = sdw_read_no_pm(slave, reg);
if (read < 0)
return read;


@ -534,14 +534,18 @@ software_node_get_next_child(const struct fwnode_handle *fwnode,
struct swnode *c = to_swnode(child);
if (!p || list_empty(&p->children) ||
(c && list_is_last(&c->entry, &p->children)))
(c && list_is_last(&c->entry, &p->children))) {
fwnode_handle_put(child);
return NULL;
}
if (c)
c = list_next_entry(c, entry);
else
c = list_first_entry(&p->children, struct swnode, entry);
return &c->fwnode;
fwnode_handle_put(child);
return fwnode_handle_get(&c->fwnode);
}
static struct fwnode_handle *


@ -4063,21 +4063,22 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
if (UFDCS->rawcmd == 1)
UFDCS->rawcmd = 2;
if (!(mode & FMODE_NDELAY)) {
if (mode & (FMODE_READ|FMODE_WRITE)) {
UDRS->last_checked = 0;
clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
check_disk_change(bdev);
if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
goto out;
if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
goto out;
}
res = -EROFS;
if ((mode & FMODE_WRITE) &&
!test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
if (mode & (FMODE_READ|FMODE_WRITE)) {
UDRS->last_checked = 0;
clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
check_disk_change(bdev);
if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
goto out;
if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
goto out;
}
res = -EROFS;
if ((mode & FMODE_WRITE) &&
!test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
goto out;
mutex_unlock(&open_lock);
mutex_unlock(&floppy_mutex);
return 0;


@ -142,12 +142,16 @@ static int btqcomsmd_probe(struct platform_device *pdev)
btq->cmd_channel = qcom_wcnss_open_channel(wcnss, "APPS_RIVA_BT_CMD",
btqcomsmd_cmd_callback, btq);
if (IS_ERR(btq->cmd_channel))
return PTR_ERR(btq->cmd_channel);
if (IS_ERR(btq->cmd_channel)) {
ret = PTR_ERR(btq->cmd_channel);
goto destroy_acl_channel;
}
hdev = hci_alloc_dev();
if (!hdev)
return -ENOMEM;
if (!hdev) {
ret = -ENOMEM;
goto destroy_cmd_channel;
}
hci_set_drvdata(hdev, btq);
btq->hdev = hdev;
@ -161,14 +165,21 @@ static int btqcomsmd_probe(struct platform_device *pdev)
hdev->set_bdaddr = qca_set_bdaddr_rome;
ret = hci_register_dev(hdev);
if (ret < 0) {
hci_free_dev(hdev);
return ret;
}
if (ret < 0)
goto hci_free_dev;
platform_set_drvdata(pdev, btq);
return 0;
hci_free_dev:
hci_free_dev(hdev);
destroy_cmd_channel:
rpmsg_destroy_ept(btq->cmd_channel);
destroy_acl_channel:
rpmsg_destroy_ept(btq->acl_channel);
return ret;
}
static int btqcomsmd_remove(struct platform_device *pdev)


@ -2569,7 +2569,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
skb = bt_skb_alloc(HCI_WMT_MAX_EVENT_SIZE, GFP_ATOMIC);
if (!skb) {
hdev->stat.err_rx++;
goto err_out;
return;
}
hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
@ -2587,13 +2587,18 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
*/
if (test_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags)) {
data->evt_skb = skb_clone(skb, GFP_ATOMIC);
if (!data->evt_skb)
goto err_out;
if (!data->evt_skb) {
kfree_skb(skb);
return;
}
}
err = hci_recv_frame(hdev, skb);
if (err < 0)
goto err_free_skb;
if (err < 0) {
kfree_skb(data->evt_skb);
data->evt_skb = NULL;
return;
}
if (test_and_clear_bit(BTUSB_TX_WAIT_VND_EVT,
&data->flags)) {
@ -2602,11 +2607,6 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
wake_up_bit(&data->flags,
BTUSB_TX_WAIT_VND_EVT);
}
err_out:
return;
err_free_skb:
kfree_skb(data->evt_skb);
data->evt_skb = NULL;
return;
} else if (urb->status == -ENOENT) {
/* Avoid suspend failed when usb_kill_urb */


@ -127,10 +127,9 @@ int hci_uart_tx_wakeup(struct hci_uart *hu)
if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
goto no_schedule;
if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) {
set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state))
goto no_schedule;
}
BT_DBG("");
@ -174,10 +173,10 @@ restart:
kfree_skb(skb);
}
clear_bit(HCI_UART_SENDING, &hu->tx_state);
if (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state))
goto restart;
clear_bit(HCI_UART_SENDING, &hu->tx_state);
wake_up_bit(&hu->tx_state, HCI_UART_SENDING);
}


@ -85,9 +85,9 @@ static void hci_uart_write_work(struct work_struct *work)
hci_uart_tx_complete(hu, hci_skb_pkt_type(skb));
kfree_skb(skb);
}
} while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state));
clear_bit(HCI_UART_SENDING, &hu->tx_state);
clear_bit(HCI_UART_SENDING, &hu->tx_state);
} while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state));
}
/* ------- Interface to HCI layer ------ */


@ -69,7 +69,7 @@ static int timeriomem_rng_read(struct hwrng *hwrng, void *data,
*/
if (retval > 0)
usleep_range(period_us,
period_us + min(1, period_us / 100));
period_us + max(1, period_us / 100));
*(u32 *)data = readl(priv->io_base);
retval += sizeof(u32);


@ -2149,7 +2149,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return -EPERM;
if (crng_init < 2)
return -ENODATA;
crng_reseed(&primary_crng, NULL);
crng_reseed(&primary_crng, &input_pool);
crng_global_init_time = jiffies - 1;
return 0;
default:


@ -125,7 +125,8 @@ static bool check_locality(struct tpm_chip *chip, int l)
if (rc < 0)
return false;
if ((access & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
if ((access & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID
| TPM_ACCESS_REQUEST_USE)) ==
(TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) {
priv->locality = l;
return true;
@ -134,58 +135,13 @@ static bool check_locality(struct tpm_chip *chip, int l)
return false;
}
static bool locality_inactive(struct tpm_chip *chip, int l)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
int rc;
u8 access;
rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access);
if (rc < 0)
return false;
if ((access & (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY))
== TPM_ACCESS_VALID)
return true;
return false;
}
static int release_locality(struct tpm_chip *chip, int l)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
unsigned long stop, timeout;
long rc;
tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
stop = jiffies + chip->timeout_a;
if (chip->flags & TPM_CHIP_FLAG_IRQ) {
again:
timeout = stop - jiffies;
if ((long)timeout <= 0)
return -1;
rc = wait_event_interruptible_timeout(priv->int_queue,
(locality_inactive(chip, l)),
timeout);
if (rc > 0)
return 0;
if (rc == -ERESTARTSYS && freezing(current)) {
clear_thread_flag(TIF_SIGPENDING);
goto again;
}
} else {
do {
if (locality_inactive(chip, l))
return 0;
tpm_msleep(TPM_TIMEOUT);
} while (time_before(jiffies, stop));
}
return -1;
return 0;
}
static int request_locality(struct tpm_chip *chip, int l)


@ -17,7 +17,8 @@
#define ASPEED_G6_NUM_CLKS 67
#define ASPEED_G6_SILICON_REV 0x004
#define ASPEED_G6_SILICON_REV 0x014
#define CHIP_REVISION_ID GENMASK(23, 16)
#define ASPEED_G6_RESET_CTRL 0x040
#define ASPEED_G6_RESET_CTRL2 0x050
@ -189,18 +190,34 @@ static struct clk_hw *ast2600_calc_pll(const char *name, u32 val)
static struct clk_hw *ast2600_calc_apll(const char *name, u32 val)
{
unsigned int mult, div;
u32 chip_id = readl(scu_g6_base + ASPEED_G6_SILICON_REV);
if (val & BIT(20)) {
/* Pass through mode */
mult = div = 1;
if (((chip_id & CHIP_REVISION_ID) >> 16) >= 2) {
if (val & BIT(24)) {
/* Pass through mode */
mult = div = 1;
} else {
/* F = 25Mhz * [(m + 1) / (n + 1)] / (p + 1) */
u32 m = val & 0x1fff;
u32 n = (val >> 13) & 0x3f;
u32 p = (val >> 19) & 0xf;
mult = (m + 1);
div = (n + 1) * (p + 1);
}
} else {
/* F = 25Mhz * (2-od) * [(m + 2) / (n + 1)] */
u32 m = (val >> 5) & 0x3f;
u32 od = (val >> 4) & 0x1;
u32 n = val & 0xf;
if (val & BIT(20)) {
/* Pass through mode */
mult = div = 1;
} else {
/* F = 25Mhz * (2-od) * [(m + 2) / (n + 1)] */
u32 m = (val >> 5) & 0x3f;
u32 od = (val >> 4) & 0x1;
u32 n = val & 0xf;
mult = (2 - od) * (m + 2);
div = n + 1;
mult = (2 - od) * (m + 2);
div = n + 1;
}
}
return clk_hw_register_fixed_factor(NULL, name, "clkin", 0,
mult, div);


@ -363,13 +363,14 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
unsigned int enabled, m, n, frac = 0, ret;
unsigned int enabled, m, n, frac = 0;
unsigned long old_rate;
int ret;
if (parent_rate == 0 || rate == 0)
return -EINVAL;
old_rate = rate;
old_rate = clk_hw_get_rate(hw);
ret = meson_clk_get_pll_settings(rate, parent_rate, &m, &n, pll);
if (ret)
@ -391,7 +392,8 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
if (!enabled)
return 0;
if (meson_clk_pll_enable(hw)) {
ret = meson_clk_pll_enable(hw);
if (ret) {
pr_warn("%s: pll did not lock, trying to restore old rate %lu\n",
__func__, old_rate);
/*
@ -403,7 +405,7 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
meson_clk_pll_set_rate(hw, old_rate, parent_rate);
}
return 0;
return ret;
}
/*


@ -135,7 +135,7 @@ static struct pll_vco fabia_vco[] = {
static struct clk_alpha_pll gpll0 = {
.offset = 0x0,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.vco_table = fabia_vco,
.num_vco = ARRAY_SIZE(fabia_vco),
.clkr = {
@ -145,58 +145,58 @@ static struct clk_alpha_pll gpll0 = {
.name = "gpll0",
.parent_names = (const char *[]){ "xo" },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
.ops = &clk_alpha_pll_fixed_fabia_ops,
}
},
};
static struct clk_alpha_pll_postdiv gpll0_out_even = {
.offset = 0x0,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_even",
.parent_names = (const char *[]){ "gpll0" },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll0_out_main = {
.offset = 0x0,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_main",
.parent_names = (const char *[]){ "gpll0" },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll0_out_odd = {
.offset = 0x0,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_odd",
.parent_names = (const char *[]){ "gpll0" },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll0_out_test = {
.offset = 0x0,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_test",
.parent_names = (const char *[]){ "gpll0" },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll gpll1 = {
.offset = 0x1000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.vco_table = fabia_vco,
.num_vco = ARRAY_SIZE(fabia_vco),
.clkr = {
@ -206,58 +206,58 @@ static struct clk_alpha_pll gpll1 = {
.name = "gpll1",
.parent_names = (const char *[]){ "xo" },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
.ops = &clk_alpha_pll_fixed_fabia_ops,
}
},
};
static struct clk_alpha_pll_postdiv gpll1_out_even = {
.offset = 0x1000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll1_out_even",
.parent_names = (const char *[]){ "gpll1" },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll1_out_main = {
.offset = 0x1000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll1_out_main",
.parent_names = (const char *[]){ "gpll1" },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll1_out_odd = {
.offset = 0x1000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll1_out_odd",
.parent_names = (const char *[]){ "gpll1" },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll1_out_test = {
.offset = 0x1000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll1_out_test",
.parent_names = (const char *[]){ "gpll1" },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll gpll2 = {
.offset = 0x2000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.vco_table = fabia_vco,
.num_vco = ARRAY_SIZE(fabia_vco),
.clkr = {
@ -267,58 +267,58 @@ static struct clk_alpha_pll gpll2 = {
.name = "gpll2",
.parent_names = (const char *[]){ "xo" },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
.ops = &clk_alpha_pll_fixed_fabia_ops,
}
},
};
static struct clk_alpha_pll_postdiv gpll2_out_even = {
.offset = 0x2000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll2_out_even",
.parent_names = (const char *[]){ "gpll2" },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll2_out_main = {
.offset = 0x2000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll2_out_main",
.parent_names = (const char *[]){ "gpll2" },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll2_out_odd = {
.offset = 0x2000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll2_out_odd",
.parent_names = (const char *[]){ "gpll2" },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll2_out_test = {
.offset = 0x2000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll2_out_test",
.parent_names = (const char *[]){ "gpll2" },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll gpll3 = {
.offset = 0x3000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.vco_table = fabia_vco,
.num_vco = ARRAY_SIZE(fabia_vco),
.clkr = {
@ -328,58 +328,58 @@ static struct clk_alpha_pll gpll3 = {
.name = "gpll3",
.parent_names = (const char *[]){ "xo" },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
.ops = &clk_alpha_pll_fixed_fabia_ops,
}
},
};
static struct clk_alpha_pll_postdiv gpll3_out_even = {
.offset = 0x3000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll3_out_even",
.parent_names = (const char *[]){ "gpll3" },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll3_out_main = {
.offset = 0x3000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll3_out_main",
.parent_names = (const char *[]){ "gpll3" },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll3_out_odd = {
.offset = 0x3000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll3_out_odd",
.parent_names = (const char *[]){ "gpll3" },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll3_out_test = {
.offset = 0x3000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll3_out_test",
.parent_names = (const char *[]){ "gpll3" },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll gpll4 = {
.offset = 0x77000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.vco_table = fabia_vco,
.num_vco = ARRAY_SIZE(fabia_vco),
.clkr = {
@ -389,52 +389,52 @@ static struct clk_alpha_pll gpll4 = {
.name = "gpll4",
.parent_names = (const char *[]){ "xo" },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
.ops = &clk_alpha_pll_fixed_fabia_ops,
}
},
};
static struct clk_alpha_pll_postdiv gpll4_out_even = {
.offset = 0x77000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll4_out_even",
.parent_names = (const char *[]){ "gpll4" },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll4_out_main = {
.offset = 0x77000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll4_out_main",
.parent_names = (const char *[]){ "gpll4" },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll4_out_odd = {
.offset = 0x77000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll4_out_odd",
.parent_names = (const char *[]){ "gpll4" },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};
static struct clk_alpha_pll_postdiv gpll4_out_test = {
.offset = 0x77000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll4_out_test",
.parent_names = (const char *[]){ "gpll4" },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ops,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};


@ -228,7 +228,7 @@ static const char * const psi_ahb1_ahb2_parents[] = { "osc24M", "osc32k",
static SUNXI_CCU_MP_WITH_MUX(psi_ahb1_ahb2_clk, "psi-ahb1-ahb2",
psi_ahb1_ahb2_parents,
0x510,
0, 5, /* M */
0, 2, /* M */
8, 2, /* P */
24, 2, /* mux */
0);
@ -237,19 +237,19 @@ static const char * const ahb3_apb1_apb2_parents[] = { "osc24M", "osc32k",
"psi-ahb1-ahb2",
"pll-periph0" };
static SUNXI_CCU_MP_WITH_MUX(ahb3_clk, "ahb3", ahb3_apb1_apb2_parents, 0x51c,
0, 5, /* M */
0, 2, /* M */
8, 2, /* P */
24, 2, /* mux */
0);
static SUNXI_CCU_MP_WITH_MUX(apb1_clk, "apb1", ahb3_apb1_apb2_parents, 0x520,
0, 5, /* M */
0, 2, /* M */
8, 2, /* P */
24, 2, /* mux */
0);
static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", ahb3_apb1_apb2_parents, 0x524,
0, 5, /* M */
0, 2, /* M */
8, 2, /* P */
24, 2, /* mux */
0);
@ -673,7 +673,7 @@ static struct ccu_mux hdmi_cec_clk = {
.common = {
.reg = 0xb10,
.features = CCU_FEATURE_VARIABLE_PREDIV,
.features = CCU_FEATURE_FIXED_PREDIV,
.hw.init = CLK_HW_INIT_PARENTS("hdmi-cec",
hdmi_cec_parents,
&ccu_mux_ops,

View File

@ -79,6 +79,7 @@ config IXP4XX_TIMER
bool "Intel XScale IXP4xx timer driver" if COMPILE_TEST
depends on HAS_IOMEM
select CLKSRC_MMIO
select TIMER_OF if OF
help
Enables support for the Intel XScale IXP4xx SoC timer.

View File

@ -138,10 +138,7 @@ static void mxs_irq_clear(char *state)
/* Clear pending interrupt */
timrot_irq_acknowledge();
#ifdef DEBUG
pr_info("%s: changing mode to %s\n", __func__, state)
#endif /* DEBUG */
pr_debug("%s: changing mode to %s\n", __func__, state);
}
static int mxs_shutdown(struct clock_event_device *evt)

View File

@ -566,6 +566,16 @@ unmap_base:
return ret;
}
static void brcm_avs_prepare_uninit(struct platform_device *pdev)
{
struct private_data *priv;
priv = platform_get_drvdata(pdev);
iounmap(priv->avs_intr_base);
iounmap(priv->base);
}
static int brcm_avs_cpufreq_init(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *freq_table;
@ -701,21 +711,21 @@ static int brcm_avs_cpufreq_probe(struct platform_device *pdev)
brcm_avs_driver.driver_data = pdev;
return cpufreq_register_driver(&brcm_avs_driver);
ret = cpufreq_register_driver(&brcm_avs_driver);
if (ret)
brcm_avs_prepare_uninit(pdev);
return ret;
}
static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
{
struct private_data *priv;
int ret;
ret = cpufreq_unregister_driver(&brcm_avs_driver);
if (ret)
return ret;
WARN_ON(ret);
priv = platform_get_drvdata(pdev);
iounmap(priv->base);
iounmap(priv->avs_intr_base);
brcm_avs_prepare_uninit(pdev);
return 0;
}

View File

@ -1566,11 +1566,9 @@ static void intel_pstate_max_within_limits(struct cpudata *cpu)
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
cpu->pstate.min_pstate = pstate_funcs.get_min();
cpu->pstate.max_pstate = pstate_funcs.get_max();
cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
cpu->pstate.scaling = pstate_funcs.get_scaling();
cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
if (hwp_active && !hwp_mode_bdw) {
unsigned int phy_max, current_max;
@ -1578,9 +1576,12 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
cpu->pstate.turbo_pstate = phy_max;
cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(READ_ONCE(cpu->hwp_cap_cached));
} else {
cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
cpu->pstate.max_pstate = pstate_funcs.get_max();
}
cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
if (pstate_funcs.get_aperf_mperf_shift)
cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();

View File

@ -41,7 +41,7 @@
/* ================= Device Structure ================== */
struct device_private iproc_priv;
struct bcm_device_private iproc_priv;
/* ==================== Parameters ===================== */

View File

@ -418,7 +418,7 @@ struct spu_hw {
u32 num_chan;
};
struct device_private {
struct bcm_device_private {
struct platform_device *pdev;
struct spu_hw spu;
@ -465,6 +465,6 @@ struct device_private {
struct mbox_chan **mbox;
};
extern struct device_private iproc_priv;
extern struct bcm_device_private iproc_priv;
#endif

View File

@ -348,7 +348,7 @@ char *spu_alg_name(enum spu_cipher_alg alg, enum spu_cipher_mode mode)
static ssize_t spu_debugfs_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *offp)
{
struct device_private *ipriv;
struct bcm_device_private *ipriv;
char *buf;
ssize_t ret, out_offset, out_count;
int i;

View File

@ -50,9 +50,6 @@
#define MIN_RCV_WND (24 * 1024U)
#define LOOPBACK(x) (((x) & htonl(0xff000000)) == htonl(0x7f000000))
/* ulp_mem_io + ulptx_idata + payload + padding */
#define MAX_IMM_ULPTX_WR_LEN (32 + 8 + 256 + 8)
/* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
#define TX_HEADER_LEN \
(sizeof(struct fw_ofld_tx_data_wr) + sizeof(struct sge_opaque_hdr))

View File

@ -30,6 +30,8 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
unsigned int ileft = areq->cryptlen;
unsigned int oleft = areq->cryptlen;
unsigned int todo;
unsigned long pi = 0, po = 0; /* progress for in and out */
bool miter_err;
struct sg_mapping_iter mi, mo;
unsigned int oi, oo; /* offset for in and out */
unsigned long flags;
@ -44,50 +46,62 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
spin_lock_irqsave(&ss->slock, flags);
for (i = 0; i < op->keylen; i += 4)
writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
for (i = 0; i < op->keylen / 4; i++)
writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);
if (areq->iv) {
for (i = 0; i < 4 && i < ivsize / 4; i++) {
v = *(u32 *)(areq->iv + i * 4);
writel(v, ss->base + SS_IV0 + i * 4);
writesl(ss->base + SS_IV0 + i * 4, &v, 1);
}
}
writel(mode, ss->base + SS_CTL);
sg_miter_start(&mi, areq->src, sg_nents(areq->src),
SG_MITER_FROM_SG | SG_MITER_ATOMIC);
sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
SG_MITER_TO_SG | SG_MITER_ATOMIC);
sg_miter_next(&mi);
sg_miter_next(&mo);
if (!mi.addr || !mo.addr) {
dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
err = -EINVAL;
goto release_ss;
}
ileft = areq->cryptlen / 4;
oleft = areq->cryptlen / 4;
oi = 0;
oo = 0;
do {
todo = min(rx_cnt, ileft);
todo = min_t(size_t, todo, (mi.length - oi) / 4);
if (todo) {
ileft -= todo;
writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
oi += todo * 4;
}
if (oi == mi.length) {
sg_miter_next(&mi);
oi = 0;
if (ileft) {
sg_miter_start(&mi, areq->src, sg_nents(areq->src),
SG_MITER_FROM_SG | SG_MITER_ATOMIC);
if (pi)
sg_miter_skip(&mi, pi);
miter_err = sg_miter_next(&mi);
if (!miter_err || !mi.addr) {
dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
err = -EINVAL;
goto release_ss;
}
todo = min(rx_cnt, ileft);
todo = min_t(size_t, todo, (mi.length - oi) / 4);
if (todo) {
ileft -= todo;
writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
oi += todo * 4;
}
if (oi == mi.length) {
pi += mi.length;
oi = 0;
}
sg_miter_stop(&mi);
}
spaces = readl(ss->base + SS_FCSR);
rx_cnt = SS_RXFIFO_SPACES(spaces);
tx_cnt = SS_TXFIFO_SPACES(spaces);
sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
SG_MITER_TO_SG | SG_MITER_ATOMIC);
if (po)
sg_miter_skip(&mo, po);
miter_err = sg_miter_next(&mo);
if (!miter_err || !mo.addr) {
dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
err = -EINVAL;
goto release_ss;
}
todo = min(tx_cnt, oleft);
todo = min_t(size_t, todo, (mo.length - oo) / 4);
if (todo) {
@ -96,9 +110,10 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
oo += todo * 4;
}
if (oo == mo.length) {
sg_miter_next(&mo);
oo = 0;
po += mo.length;
}
sg_miter_stop(&mo);
} while (oleft);
if (areq->iv) {
@ -109,8 +124,6 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
}
release_ss:
sg_miter_stop(&mi);
sg_miter_stop(&mo);
writel(0, ss->base + SS_CTL);
spin_unlock_irqrestore(&ss->slock, flags);
return err;
@ -164,12 +177,14 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
unsigned int oleft = areq->cryptlen;
unsigned int todo;
struct sg_mapping_iter mi, mo;
unsigned long pi = 0, po = 0; /* progress for in and out */
bool miter_err;
unsigned int oi, oo; /* offset for in and out */
unsigned int ob = 0; /* offset in buf */
unsigned int obo = 0; /* offset in bufo*/
unsigned int obl = 0; /* length of data in bufo */
unsigned long flags;
bool need_fallback;
bool need_fallback = false;
if (!areq->cryptlen)
return 0;
@ -188,12 +203,12 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
* we can use the SS optimized function
*/
while (in_sg && no_chunk == 1) {
if (in_sg->length % 4)
if ((in_sg->length | in_sg->offset) & 3u)
no_chunk = 0;
in_sg = sg_next(in_sg);
}
while (out_sg && no_chunk == 1) {
if (out_sg->length % 4)
if ((out_sg->length | out_sg->offset) & 3u)
no_chunk = 0;
out_sg = sg_next(out_sg);
}
@ -206,28 +221,17 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
spin_lock_irqsave(&ss->slock, flags);
for (i = 0; i < op->keylen; i += 4)
writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
for (i = 0; i < op->keylen / 4; i++)
writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);
if (areq->iv) {
for (i = 0; i < 4 && i < ivsize / 4; i++) {
v = *(u32 *)(areq->iv + i * 4);
writel(v, ss->base + SS_IV0 + i * 4);
writesl(ss->base + SS_IV0 + i * 4, &v, 1);
}
}
writel(mode, ss->base + SS_CTL);
sg_miter_start(&mi, areq->src, sg_nents(areq->src),
SG_MITER_FROM_SG | SG_MITER_ATOMIC);
sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
SG_MITER_TO_SG | SG_MITER_ATOMIC);
sg_miter_next(&mi);
sg_miter_next(&mo);
if (!mi.addr || !mo.addr) {
dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
err = -EINVAL;
goto release_ss;
}
ileft = areq->cryptlen;
oleft = areq->cryptlen;
oi = 0;
@ -235,8 +239,16 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
while (oleft) {
if (ileft) {
char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
sg_miter_start(&mi, areq->src, sg_nents(areq->src),
SG_MITER_FROM_SG | SG_MITER_ATOMIC);
if (pi)
sg_miter_skip(&mi, pi);
miter_err = sg_miter_next(&mi);
if (!miter_err || !mi.addr) {
dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
err = -EINVAL;
goto release_ss;
}
/*
* todo is the number of consecutive 4byte word that we
* can read from current SG
@ -258,52 +270,57 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
*/
todo = min(rx_cnt * 4 - ob, ileft);
todo = min_t(size_t, todo, mi.length - oi);
memcpy(buf + ob, mi.addr + oi, todo);
memcpy(ss->buf + ob, mi.addr + oi, todo);
ileft -= todo;
oi += todo;
ob += todo;
if (!(ob % 4)) {
writesl(ss->base + SS_RXFIFO, buf,
writesl(ss->base + SS_RXFIFO, ss->buf,
ob / 4);
ob = 0;
}
}
if (oi == mi.length) {
sg_miter_next(&mi);
pi += mi.length;
oi = 0;
}
sg_miter_stop(&mi);
}
spaces = readl(ss->base + SS_FCSR);
rx_cnt = SS_RXFIFO_SPACES(spaces);
tx_cnt = SS_TXFIFO_SPACES(spaces);
dev_dbg(ss->dev,
"%x %u/%zu %u/%u cnt=%u %u/%zu %u/%u cnt=%u %u\n",
mode,
oi, mi.length, ileft, areq->cryptlen, rx_cnt,
oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob);
if (!tx_cnt)
continue;
sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
SG_MITER_TO_SG | SG_MITER_ATOMIC);
if (po)
sg_miter_skip(&mo, po);
miter_err = sg_miter_next(&mo);
if (!miter_err || !mo.addr) {
dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
err = -EINVAL;
goto release_ss;
}
/* todo in 4bytes word */
todo = min(tx_cnt, oleft / 4);
todo = min_t(size_t, todo, (mo.length - oo) / 4);
if (todo) {
readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
oleft -= todo * 4;
oo += todo * 4;
if (oo == mo.length) {
sg_miter_next(&mo);
po += mo.length;
oo = 0;
}
} else {
char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
/*
* read obl bytes in bufo, we read at maximum for
* emptying the device
*/
readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
readsl(ss->base + SS_TXFIFO, ss->bufo, tx_cnt);
obl = tx_cnt * 4;
obo = 0;
do {
@ -315,17 +332,19 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
*/
todo = min_t(size_t,
mo.length - oo, obl - obo);
memcpy(mo.addr + oo, bufo + obo, todo);
memcpy(mo.addr + oo, ss->bufo + obo, todo);
oleft -= todo;
obo += todo;
oo += todo;
if (oo == mo.length) {
po += mo.length;
sg_miter_next(&mo);
oo = 0;
}
} while (obo < obl);
/* bufo must be fully used here */
}
sg_miter_stop(&mo);
}
if (areq->iv) {
for (i = 0; i < 4 && i < ivsize / 4; i++) {
@ -335,8 +354,6 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
}
release_ss:
sg_miter_stop(&mi);
sg_miter_stop(&mo);
writel(0, ss->base + SS_CTL);
spin_unlock_irqrestore(&ss->slock, flags);

View File

@ -138,6 +138,8 @@ struct sun4i_ss_ctx {
struct reset_control *reset;
struct device *dev;
struct resource *res;
char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
spinlock_t slock; /* control the use of the device */
#ifdef CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG
u32 seed[SS_SEED_LEN / BITS_PER_LONG];

View File

@ -1097,11 +1097,12 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
*/
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
unsigned int offset, int datalen, int elen,
struct talitos_ptr *link_tbl_ptr)
struct talitos_ptr *link_tbl_ptr, int align)
{
int n_sg = elen ? sg_count + 1 : sg_count;
int count = 0;
int cryptlen = datalen + elen;
int padding = ALIGN(cryptlen, align) - cryptlen;
while (cryptlen && sg && n_sg--) {
unsigned int len = sg_dma_len(sg);
@ -1125,7 +1126,7 @@ static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
offset += datalen;
}
to_talitos_ptr(link_tbl_ptr + count,
sg_dma_address(sg) + offset, len, 0);
sg_dma_address(sg) + offset, sg_next(sg) ? len : len + padding, 0);
to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
count++;
cryptlen -= len;
@ -1148,10 +1149,11 @@ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
unsigned int len, struct talitos_edesc *edesc,
struct talitos_ptr *ptr, int sg_count,
unsigned int offset, int tbl_off, int elen,
bool force)
bool force, int align)
{
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
int aligned_len = ALIGN(len, align);
if (!src) {
to_talitos_ptr(ptr, 0, 0, is_sec1);
@ -1159,22 +1161,22 @@ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
}
to_talitos_ptr_ext_set(ptr, elen, is_sec1);
if (sg_count == 1 && !force) {
to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
to_talitos_ptr(ptr, sg_dma_address(src) + offset, aligned_len, is_sec1);
return sg_count;
}
if (is_sec1) {
to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, aligned_len, is_sec1);
return sg_count;
}
sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
&edesc->link_tbl[tbl_off]);
&edesc->link_tbl[tbl_off], align);
if (sg_count == 1 && !force) {
/* Only one segment now, so no link tbl needed*/
copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
return sg_count;
}
to_talitos_ptr(ptr, edesc->dma_link_tbl +
tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
tbl_off * sizeof(struct talitos_ptr), aligned_len, is_sec1);
to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
return sg_count;
@ -1186,7 +1188,7 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
unsigned int offset, int tbl_off)
{
return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
tbl_off, 0, false);
tbl_off, 0, false, 1);
}
/*
@ -1255,7 +1257,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
sg_count, areq->assoclen, tbl_off, elen,
false);
false, 1);
if (ret > 1) {
tbl_off += ret;
@ -1275,7 +1277,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
elen = 0;
ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
sg_count, areq->assoclen, tbl_off, elen,
is_ipsec_esp && !encrypt);
is_ipsec_esp && !encrypt, 1);
tbl_off += ret;
if (!encrypt && is_ipsec_esp) {
@ -1583,6 +1585,8 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
bool sync_needed = false;
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
bool is_ctr = (desc->hdr & DESC_HDR_SEL0_MASK) == DESC_HDR_SEL0_AESU &&
(desc->hdr & DESC_HDR_MODE0_AESU_MASK) == DESC_HDR_MODE0_AESU_CTR;
/* first DWORD empty */
@ -1603,8 +1607,8 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
/*
* cipher in
*/
sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
&desc->ptr[3], sg_count, 0, 0);
sg_count = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[3],
sg_count, 0, 0, 0, false, is_ctr ? 16 : 1);
if (sg_count > 1)
sync_needed = true;

View File

@ -344,6 +344,7 @@ static inline bool has_ftr_sec1(struct talitos_private *priv)
/* primary execution unit mode (MODE0) and derivatives */
#define DESC_HDR_MODE0_ENCRYPT cpu_to_be32(0x00100000)
#define DESC_HDR_MODE0_AESU_MASK cpu_to_be32(0x00600000)
#define DESC_HDR_MODE0_AESU_CBC cpu_to_be32(0x00200000)
#define DESC_HDR_MODE0_AESU_CTR cpu_to_be32(0x00600000)
#define DESC_HDR_MODE0_DEU_CBC cpu_to_be32(0x00400000)

View File

@ -1214,6 +1214,7 @@ static int fsldma_of_probe(struct platform_device *op)
{
struct fsldma_device *fdev;
struct device_node *child;
unsigned int i;
int err;
fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
@ -1292,6 +1293,10 @@ static int fsldma_of_probe(struct platform_device *op)
return 0;
out_free_fdev:
for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
if (fdev->chan[i])
fsl_dma_chan_remove(fdev->chan[i]);
}
irq_dispose_mapping(fdev->irq);
iounmap(fdev->regs);
out_free:
@ -1314,6 +1319,7 @@ static int fsldma_of_remove(struct platform_device *op)
if (fdev->chan[i])
fsl_dma_chan_remove(fdev->chan[i]);
}
irq_dispose_mapping(fdev->irq);
iounmap(fdev->regs);
kfree(fdev);

View File

@ -26,22 +26,12 @@
static irqreturn_t hsu_pci_irq(int irq, void *dev)
{
struct hsu_dma_chip *chip = dev;
struct pci_dev *pdev = to_pci_dev(chip->dev);
u32 dmaisr;
u32 status;
unsigned short i;
int ret = 0;
int err;
/*
* On Intel Tangier B0 and Anniedale the interrupt line, disregarding
* to have different numbers, is shared between HSU DMA and UART IPs.
* Thus on such SoCs we are expecting that IRQ handler is called in
* UART driver only.
*/
if (pdev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA)
return IRQ_HANDLED;
dmaisr = readl(chip->regs + HSU_PCI_DMAISR);
for (i = 0; i < chip->hsu->nr_channels; i++) {
if (dmaisr & 0x1) {
@ -105,6 +95,17 @@ static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto err_register_irq;
/*
* On Intel Tangier B0 and Anniedale the interrupt line, disregarding
* to have different numbers, is shared between HSU DMA and UART IPs.
* Thus on such SoCs we are expecting that IRQ handler is called in
* UART driver only. Instead of handling the spurious interrupt
* from HSU DMA here and waste CPU time and delay HSU UART interrupt
* handling, disable the interrupt entirely.
*/
if (pdev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA)
disable_irq_nosync(chip->irq);
pci_set_drvdata(pdev, chip);
return 0;

View File

@ -1201,6 +1201,7 @@ static int owl_dma_remove(struct platform_device *pdev)
owl_dma_free(od);
clk_disable_unprepare(od->clk);
dma_pool_destroy(od->lli_pool);
return 0;
}

View File

@ -332,7 +332,7 @@ static int pcf857x_probe(struct i2c_client *client,
* reset state. Otherwise it flags pins to be driven low.
*/
gpio->out = ~n_latch;
gpio->status = gpio->out;
gpio->status = gpio->read(gpio->client);
status = devm_gpiochip_add_data(&client->dev, &gpio->chip, gpio);
if (status < 0)

View File

@ -870,7 +870,7 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
{
int ret;
long level;
unsigned long level;
char *sub_str = NULL;
char *tmp;
char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
@ -886,8 +886,8 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
while (tmp[0]) {
sub_str = strsep(&tmp, delimiter);
if (strlen(sub_str)) {
ret = kstrtol(sub_str, 0, &level);
if (ret)
ret = kstrtoul(sub_str, 0, &level);
if (ret || level > 31)
return -EINVAL;
*mask |= 1 << level;
} else

View File

@ -21,7 +21,7 @@
*
*/
#if !defined(_AMDGPU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#if !defined(_AMDGPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _AMDGPU_TRACE_H_
#include <linux/stringify.h>

View File

@ -276,6 +276,8 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
u32 reference_clock = adev->clock.spll.reference_freq;
if (adev->asic_type == CHIP_RENOIR)
return 10000;
if (adev->asic_type == CHIP_RAVEN)
return reference_clock / 4;

View File

@ -245,6 +245,23 @@ static enum bp_result encoder_control_digx_v3(
cntl->enable_dp_audio);
params.ucLaneNum = (uint8_t)(cntl->lanes_number);
switch (cntl->color_depth) {
case COLOR_DEPTH_888:
params.ucBitPerColor = PANEL_8BIT_PER_COLOR;
break;
case COLOR_DEPTH_101010:
params.ucBitPerColor = PANEL_10BIT_PER_COLOR;
break;
case COLOR_DEPTH_121212:
params.ucBitPerColor = PANEL_12BIT_PER_COLOR;
break;
case COLOR_DEPTH_161616:
params.ucBitPerColor = PANEL_16BIT_PER_COLOR;
break;
default:
break;
}
if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
result = BP_RESULT_OK;
@ -274,6 +291,23 @@ static enum bp_result encoder_control_digx_v4(
cntl->enable_dp_audio));
params.ucLaneNum = (uint8_t)(cntl->lanes_number);
switch (cntl->color_depth) {
case COLOR_DEPTH_888:
params.ucBitPerColor = PANEL_8BIT_PER_COLOR;
break;
case COLOR_DEPTH_101010:
params.ucBitPerColor = PANEL_10BIT_PER_COLOR;
break;
case COLOR_DEPTH_121212:
params.ucBitPerColor = PANEL_12BIT_PER_COLOR;
break;
case COLOR_DEPTH_161616:
params.ucBitPerColor = PANEL_16BIT_PER_COLOR;
break;
default:
break;
}
if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
result = BP_RESULT_OK;
@ -1057,6 +1091,19 @@ static enum bp_result set_pixel_clock_v5(
* driver choose program it itself, i.e. here we program it
* to 888 by default.
*/
if (bp_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A)
switch (bp_params->color_depth) {
case TRANSMITTER_COLOR_DEPTH_30:
/* yes this is correct, the atom define is wrong */
clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_32BPP;
break;
case TRANSMITTER_COLOR_DEPTH_36:
/* yes this is correct, the atom define is wrong */
clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
break;
default:
break;
}
if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
result = BP_RESULT_OK;
@ -1135,6 +1182,20 @@ static enum bp_result set_pixel_clock_v6(
* driver choose program it itself, i.e. here we pass required
* target rate that includes deep color.
*/
if (bp_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A)
switch (bp_params->color_depth) {
case TRANSMITTER_COLOR_DEPTH_30:
clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6;
break;
case TRANSMITTER_COLOR_DEPTH_36:
clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6;
break;
case TRANSMITTER_COLOR_DEPTH_48:
clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
break;
default:
break;
}
if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
result = BP_RESULT_OK;

View File

@ -871,6 +871,20 @@ static bool dce110_program_pix_clk(
bp_pc_params.flags.SET_EXTERNAL_REF_DIV_SRC =
pll_settings->use_external_clk;
switch (pix_clk_params->color_depth) {
case COLOR_DEPTH_101010:
bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_30;
break;
case COLOR_DEPTH_121212:
bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_36;
break;
case COLOR_DEPTH_161616:
bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_48;
break;
default:
break;
}
if (clk_src->bios->funcs->set_pixel_clock(
clk_src->bios, &bp_pc_params) != BP_RESULT_OK)
return false;

Some files were not shown because too many files have changed in this diff.