This is the 5.4.59 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl88xTwACgkQONu9yGCS
 aT60Lw/9HV3JZ0KDYKHRRS/n4UWh2w7jHe5hwFTfX8/KibexzGzgV3gTaXsyQx75
 Bj2ruJTGZKDvy+a9/IsjTe1v1/HPwPydDyfABNl/Rn8vQuyHzyyGzQo2owqGb57w
 4wVuoqhPASn0n2QAMG05caAx1uqPlXcT/I4H8vpzieNEHbB/2TbwFiWvrDC2d+nw
 mw1PkRmaFp2GSMOHSb68n2oYUv19EBHDtxF9WkwyftdKALQYz5x4O/n4lhpzMSmO
 OeiUg/dpLi0bptL1SQAfaY/Lxa1gFb62YvP47WbWp/P6zespOInk2n8kmpoHxol3
 MMZelRCdl9NtIKVXMEOFEv/asXPlF2wARGmNejGCYLaRqal1bH67yG4eja7biIJe
 gr5OBOFVPol83sPAnb6V4P2SEs650WR95hshYpRL9lbmcHY4q5zoXVm9ul/Hp49R
 qjjfxHDNQQoQgX4ZhEJLp2a5AEJUM4Kz28pZXHyczKYc2R6/AZWqHfnbQVURf+au
 0xJ/tOFxn3yqRtTkmuuifkZcUTJ5iRnzxUUYiGyhTqZOTNZTD0FgiI+S7gckI//K
 idoWGTszHZb6YkNBEwz4WJFtxCGpO95xgD95jOpJVxFcJ5f5PpfZE7RkXUDLR5UO
 XzJopB3nOT1h3EoI3xq7aPDF1hKbkp+VRMWcVrBeP9KgcpLisS4=
 =Tk2c
 -----END PGP SIGNATURE-----

Merge tag 'v5.4.59' into 5.4.x+fslc

This is the 5.4.59 stable release

Signed-off-by: Andrey Zhizhikin <andrey.zhizhikin@leica-geosystems.com>
Commit: 62ac342430
Committed by: Andrey Zhizhikin, 2020-08-23 20:47:30 +00:00
292 changed files with 2123 additions and 1210 deletions

View File

@ -1566,7 +1566,8 @@ What: /sys/bus/iio/devices/iio:deviceX/in_concentrationX_voc_raw
KernelVersion: 4.3
Contact: linux-iio@vger.kernel.org
Description:
Raw (unscaled no offset etc.) percentage reading of a substance.
Raw (unscaled no offset etc.) reading of a substance. Units
after application of scale and offset are percents.
What: /sys/bus/iio/devices/iio:deviceX/in_resistance_raw
What: /sys/bus/iio/devices/iio:deviceX/in_resistanceX_raw

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 58
SUBLEVEL = 59
EXTRAVERSION =
NAME = Kleptomaniac Octopus
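
With this hunk applied, the version fields compose to KERNELVERSION = 5.4.59 ($(VERSION).$(PATCHLEVEL).$(SUBLEVEL)), matching the v5.4.59 tag this merge brings in.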

View File

@ -339,7 +339,7 @@
reg = <0x20>;
remote = <&vin1>;
port {
ports {
#address-cells = <1>;
#size-cells = <0>;
@ -399,7 +399,7 @@
interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
default-input = <0>;
port {
ports {
#address-cells = <1>;
#size-cells = <0>;

View File

@ -16,15 +16,27 @@
regulator-type = "voltage";
regulator-boot-on;
regulator-always-on;
regulator-min-microvolt = <1100000>;
regulator-max-microvolt = <1300000>;
regulator-min-microvolt = <1108475>;
regulator-max-microvolt = <1308475>;
regulator-ramp-delay = <50>; /* 4ms */
gpios = <&r_pio 0 1 GPIO_ACTIVE_HIGH>; /* PL1 */
gpios-states = <0x1>;
states = <1100000 0>, <1300000 1>;
states = <1108475 0>, <1308475 1>;
};
};
&cpu0 {
cpu-supply = <&reg_vdd_cpux>;
};
&cpu1 {
cpu-supply = <&reg_vdd_cpux>;
};
&cpu2 {
cpu-supply = <&reg_vdd_cpux>;
};
&cpu3 {
cpu-supply = <&reg_vdd_cpux>;
};

View File

@ -22,6 +22,19 @@
* A simple function epilogue looks like this:
* ldm sp, {fp, sp, pc}
*
* When compiled with clang, pc and sp are not pushed. A simple function
* prologue looks like this when built with clang:
*
* stmdb {..., fp, lr}
* add fp, sp, #x
* sub sp, sp, #y
*
* A simple function epilogue looks like this when built with clang:
*
* sub sp, fp, #x
* ldm {..., fp, pc}
*
*
* Note that with framepointer enabled, even the leaf functions have the same
* prologue and epilogue, therefore we can ignore the LR value in this case.
*/
@ -34,6 +47,16 @@ int notrace unwind_frame(struct stackframe *frame)
low = frame->sp;
high = ALIGN(low, THREAD_SIZE);
#ifdef CONFIG_CC_IS_CLANG
/* check current frame pointer is within bounds */
if (fp < low + 4 || fp > high - 4)
return -EINVAL;
frame->sp = frame->fp;
frame->fp = *(unsigned long *)(fp);
frame->pc = frame->lr;
frame->lr = *(unsigned long *)(fp + 4);
#else
/* check current frame pointer is within bounds */
if (fp < low + 12 || fp > high - 4)
return -EINVAL;
@ -42,6 +65,7 @@ int notrace unwind_frame(struct stackframe *frame)
frame->fp = *(unsigned long *)(fp - 12);
frame->sp = *(unsigned long *)(fp - 8);
frame->pc = *(unsigned long *)(fp - 4);
#endif
return 0;
}
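
A hedged restatement of the two frame layouts the hunk distinguishes, as standalone C (read_word() is a hypothetical bounds-checked stack read, not a kernel API; the offsets mirror the code above):

/* One unwind step per layout; offsets follow the prologue
 * comments above.
 */
unsigned long read_word(unsigned long addr);	/* hypothetical */

struct frame { unsigned long fp, sp, lr, pc; };

static void step_gcc(struct frame *f)	/* gcc: fp[-12..-4] = {fp, sp, pc} */
{
	unsigned long fp = f->fp;

	f->fp = read_word(fp - 12);	/* caller's fp */
	f->sp = read_word(fp - 8);	/* caller's sp */
	f->pc = read_word(fp - 4);	/* return address */
}

static void step_clang(struct frame *f)	/* clang: only {fp, lr} pushed */
{
	unsigned long fp = f->fp;

	f->sp = fp;			/* old fp doubles as the new sp */
	f->fp = read_word(fp);		/* caller's fp */
	f->pc = f->lr;			/* this frame's return address */
	f->lr = read_word(fp + 4);	/* caller's saved lr */
}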

View File

@ -592,13 +592,13 @@ static void __init at91_pm_sram_init(void)
sram_pool = gen_pool_get(&pdev->dev, NULL);
if (!sram_pool) {
pr_warn("%s: sram pool unavailable!\n", __func__);
return;
goto out_put_device;
}
sram_base = gen_pool_alloc(sram_pool, at91_pm_suspend_in_sram_sz);
if (!sram_base) {
pr_warn("%s: unable to alloc sram!\n", __func__);
return;
goto out_put_device;
}
sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base);
@ -606,12 +606,17 @@ static void __init at91_pm_sram_init(void)
at91_pm_suspend_in_sram_sz, false);
if (!at91_suspend_sram_fn) {
pr_warn("SRAM: Could not map\n");
return;
goto out_put_device;
}
/* Copy the pm suspend handler to SRAM */
at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn,
&at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz);
return;
out_put_device:
put_device(&pdev->dev);
return;
}
static bool __init at91_is_pm_mode_active(int pm_mode)
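
The shape of this fix is a common kernel idiom: the platform device found during the SRAM lookup holds a reference that every early exit must drop. A minimal sketch of the idiom with hypothetical helpers (lookup_pdev() and map_and_copy_sram() stand in for of_find_device_by_node() and the gen_pool steps):

/* Sketch, not the driver code. */
struct platform_device *lookup_pdev(void);	/* takes a device ref */
int map_and_copy_sram(struct platform_device *pdev);

static void __init sram_init_sketch(void)
{
	struct platform_device *pdev = lookup_pdev();

	if (!pdev)
		return;

	if (map_and_copy_sram(pdev) < 0)
		goto out_put_device;	/* any failure: drop the ref */

	return;				/* success: mapping stays in use */

out_put_device:
	put_device(&pdev->dev);
}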

View File

@ -26,6 +26,7 @@
#define EXYNOS5420_USE_L2_COMMON_UP_STATE BIT(30)
static void __iomem *ns_sram_base_addr __ro_after_init;
static bool secure_firmware __ro_after_init;
/*
* The common v7_exit_coherency_flush API could not be used because of the
@ -58,15 +59,16 @@ static void __iomem *ns_sram_base_addr __ro_after_init;
static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster)
{
unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
bool state;
pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
if (cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
cluster >= EXYNOS5420_NR_CLUSTERS)
return -EINVAL;
if (!exynos_cpu_power_state(cpunr)) {
exynos_cpu_power_up(cpunr);
state = exynos_cpu_power_state(cpunr);
exynos_cpu_power_up(cpunr);
if (!state && secure_firmware) {
/*
* This assumes the cluster number of the big cores(Cortex A15)
* is 0 and the Little cores(Cortex A7) is 1.
@ -258,6 +260,8 @@ static int __init exynos_mcpm_init(void)
return -ENOMEM;
}
secure_firmware = exynos_secure_firmware_available();
/*
* To increase the stability of KFC reset we need to program
* the PMU SPARE3 register

View File

@ -49,14 +49,14 @@ static int socfpga_setup_ocram_self_refresh(void)
if (!ocram_pool) {
pr_warn("%s: ocram pool unavailable!\n", __func__);
ret = -ENODEV;
goto put_node;
goto put_device;
}
ocram_base = gen_pool_alloc(ocram_pool, socfpga_sdram_self_refresh_sz);
if (!ocram_base) {
pr_warn("%s: unable to alloc ocram!\n", __func__);
ret = -ENOMEM;
goto put_node;
goto put_device;
}
ocram_pbase = gen_pool_virt_to_phys(ocram_pool, ocram_base);
@ -67,7 +67,7 @@ static int socfpga_setup_ocram_self_refresh(void)
if (!suspend_ocram_base) {
pr_warn("%s: __arm_ioremap_exec failed!\n", __func__);
ret = -ENOMEM;
goto put_node;
goto put_device;
}
/* Copy the code that puts DDR in self refresh to ocram */
@ -81,6 +81,8 @@ static int socfpga_setup_ocram_self_refresh(void)
if (!socfpga_sdram_self_refresh_in_ocram)
ret = -EFAULT;
put_device:
put_device(&pdev->dev);
put_node:
of_node_put(np);

View File

@ -157,6 +157,7 @@
regulator-min-microvolt = <700000>;
regulator-max-microvolt = <1150000>;
regulator-enable-ramp-delay = <125>;
regulator-always-on;
};
ldo8_reg: LDO8 {

View File

@ -530,6 +530,17 @@
status = "ok";
compatible = "adi,adv7533";
reg = <0x39>;
adi,dsi-lanes = <4>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
};
port@1 {
reg = <1>;
};
};
};
};

View File

@ -516,7 +516,7 @@
reg = <0x39>;
interrupt-parent = <&gpio1>;
interrupts = <1 2>;
pd-gpio = <&gpio0 4 0>;
pd-gpios = <&gpio0 4 0>;
adi,dsi-lanes = <4>;
#sound-dai-cells = <0>;

View File

@ -508,7 +508,7 @@
pins = "gpio63", "gpio64", "gpio65", "gpio66",
"gpio67", "gpio68";
drive-strength = <8>;
bias-pull-none;
bias-disable;
};
};
cdc_pdm_lines_sus: pdm_lines_off {
@ -537,7 +537,7 @@
pins = "gpio113", "gpio114", "gpio115",
"gpio116";
drive-strength = <8>;
bias-pull-none;
bias-disable;
};
};
@ -565,7 +565,7 @@
pinconf {
pins = "gpio110";
drive-strength = <8>;
bias-pull-none;
bias-disable;
};
};
@ -591,7 +591,7 @@
pinconf {
pins = "gpio116";
drive-strength = <8>;
bias-pull-none;
bias-disable;
};
};
ext_mclk_tlmm_lines_sus: mclk_lines_off {
@ -619,7 +619,7 @@
pins = "gpio112", "gpio117", "gpio118",
"gpio119";
drive-strength = <8>;
bias-pull-none;
bias-disable;
};
};
ext_sec_tlmm_lines_sus: tlmm_lines_off {

View File

@ -156,7 +156,7 @@
pinctrl-0 = <&rgmii_pins>;
snps,reset-active-low;
snps,reset-delays-us = <0 10000 50000>;
snps,reset-gpio = <&gpio3 RK_PB3 GPIO_ACTIVE_HIGH>;
snps,reset-gpio = <&gpio3 RK_PB3 GPIO_ACTIVE_LOW>;
tx_delay = <0x10>;
rx_delay = <0x10>;
status = "okay";

View File

@ -101,7 +101,7 @@
vcc5v0_host: vcc5v0-host-regulator {
compatible = "regulator-fixed";
gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_HIGH>;
gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_LOW>;
enable-active-low;
pinctrl-names = "default";
pinctrl-0 = <&vcc5v0_host_en>;
@ -157,7 +157,7 @@
phy-mode = "rgmii";
pinctrl-names = "default";
pinctrl-0 = <&rgmii_pins>;
snps,reset-gpio = <&gpio3 RK_PC0 GPIO_ACTIVE_HIGH>;
snps,reset-gpio = <&gpio3 RK_PC0 GPIO_ACTIVE_LOW>;
snps,reset-active-low;
snps,reset-delays-us = <0 10000 50000>;
tx_delay = <0x10>;

View File

@ -183,7 +183,7 @@ static __inline__ void iop_writeb(volatile struct mac_iop *iop, __u16 addr, __u8
static __inline__ void iop_stop(volatile struct mac_iop *iop)
{
iop->status_ctrl &= ~IOP_RUN;
iop->status_ctrl = IOP_AUTOINC;
}
static __inline__ void iop_start(volatile struct mac_iop *iop)
@ -191,14 +191,9 @@ static __inline__ void iop_start(volatile struct mac_iop *iop)
iop->status_ctrl = IOP_RUN | IOP_AUTOINC;
}
static __inline__ void iop_bypass(volatile struct mac_iop *iop)
{
iop->status_ctrl |= IOP_BYPASS;
}
static __inline__ void iop_interrupt(volatile struct mac_iop *iop)
{
iop->status_ctrl |= IOP_IRQ;
iop->status_ctrl = IOP_IRQ | IOP_RUN | IOP_AUTOINC;
}
static int iop_alive(volatile struct mac_iop *iop)
@ -244,7 +239,6 @@ void __init iop_preinit(void)
} else {
iop_base[IOP_NUM_SCC] = (struct mac_iop *) SCC_IOP_BASE_QUADRA;
}
iop_base[IOP_NUM_SCC]->status_ctrl = 0x87;
iop_scc_present = 1;
} else {
iop_base[IOP_NUM_SCC] = NULL;
@ -256,7 +250,7 @@ void __init iop_preinit(void)
} else {
iop_base[IOP_NUM_ISM] = (struct mac_iop *) ISM_IOP_BASE_QUADRA;
}
iop_base[IOP_NUM_ISM]->status_ctrl = 0;
iop_stop(iop_base[IOP_NUM_ISM]);
iop_ism_present = 1;
} else {
iop_base[IOP_NUM_ISM] = NULL;
@ -416,7 +410,8 @@ static void iop_handle_send(uint iop_num, uint chan)
msg->status = IOP_MSGSTATUS_UNUSED;
msg = msg->next;
iop_send_queue[iop_num][chan] = msg;
if (msg) iop_do_send(msg);
if (msg && iop_readb(iop, IOP_ADDR_SEND_STATE + chan) == IOP_MSG_IDLE)
iop_do_send(msg);
}
/*
@ -490,16 +485,12 @@ int iop_send_message(uint iop_num, uint chan, void *privdata,
if (!(q = iop_send_queue[iop_num][chan])) {
iop_send_queue[iop_num][chan] = msg;
iop_do_send(msg);
} else {
while (q->next) q = q->next;
q->next = msg;
}
if (iop_readb(iop_base[iop_num],
IOP_ADDR_SEND_STATE + chan) == IOP_MSG_IDLE) {
iop_do_send(msg);
}
return 0;
}

View File

@ -518,6 +518,7 @@ static int __init dwc3_octeon_device_init(void)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
put_device(&pdev->dev);
dev_err(&pdev->dev, "No memory resources\n");
return -ENXIO;
}
@ -529,8 +530,10 @@ static int __init dwc3_octeon_device_init(void)
* know the difference.
*/
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
if (IS_ERR(base)) {
put_device(&pdev->dev);
return PTR_ERR(base);
}
mutex_lock(&dwc3_octeon_clocks_mutex);
dwc3_octeon_clocks_start(&pdev->dev, (u64)base);

View File

@ -539,6 +539,7 @@ err_free_resource:
pci_free_resource_list(&host->windows);
err_remove_domain:
irq_domain_remove(domain);
irq_domain_free_fwnode(fn);
return err;
}
@ -546,8 +547,10 @@ static int bridge_remove(struct platform_device *pdev)
{
struct pci_bus *bus = platform_get_drvdata(pdev);
struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
struct fwnode_handle *fn = bc->domain->fwnode;
irq_domain_remove(bc->domain);
irq_domain_free_fwnode(fn);
pci_lock_rescan_remove();
pci_stop_root_bus(bus);
pci_remove_root_bus(bus);

View File

@ -26,6 +26,67 @@
#define __smp_rmb() mb()
#define __smp_wmb() mb()
#define __smp_store_release(p, v) \
do { \
typeof(p) __p = (p); \
union { typeof(*p) __val; char __c[1]; } __u = \
{ .__val = (__force typeof(*p)) (v) }; \
compiletime_assert_atomic_type(*p); \
switch (sizeof(*p)) { \
case 1: \
asm volatile("stb,ma %0,0(%1)" \
: : "r"(*(__u8 *)__u.__c), "r"(__p) \
: "memory"); \
break; \
case 2: \
asm volatile("sth,ma %0,0(%1)" \
: : "r"(*(__u16 *)__u.__c), "r"(__p) \
: "memory"); \
break; \
case 4: \
asm volatile("stw,ma %0,0(%1)" \
: : "r"(*(__u32 *)__u.__c), "r"(__p) \
: "memory"); \
break; \
case 8: \
if (IS_ENABLED(CONFIG_64BIT)) \
asm volatile("std,ma %0,0(%1)" \
: : "r"(*(__u64 *)__u.__c), "r"(__p) \
: "memory"); \
break; \
} \
} while (0)
#define __smp_load_acquire(p) \
({ \
union { typeof(*p) __val; char __c[1]; } __u; \
typeof(p) __p = (p); \
compiletime_assert_atomic_type(*p); \
switch (sizeof(*p)) { \
case 1: \
asm volatile("ldb,ma 0(%1),%0" \
: "=r"(*(__u8 *)__u.__c) : "r"(__p) \
: "memory"); \
break; \
case 2: \
asm volatile("ldh,ma 0(%1),%0" \
: "=r"(*(__u16 *)__u.__c) : "r"(__p) \
: "memory"); \
break; \
case 4: \
asm volatile("ldw,ma 0(%1),%0" \
: "=r"(*(__u32 *)__u.__c) : "r"(__p) \
: "memory"); \
break; \
case 8: \
if (IS_ENABLED(CONFIG_64BIT)) \
asm volatile("ldd,ma 0(%1),%0" \
: "=r"(*(__u64 *)__u.__c) : "r"(__p) \
: "memory"); \
break; \
} \
__u.__val; \
})
#include <asm-generic/barrier.h>
#endif /* !__ASSEMBLY__ */
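
A usage sketch of the generic pair these arch macros back; the guarantees are the standard smp_store_release()/smp_load_acquire() contract (kernel context assumed, the macros come in via <asm/barrier.h>):

/* Producer/consumer pairing served by the macros above. */
struct msg {
	int payload;
	int ready;
};

static void produce(struct msg *m, int v)
{
	m->payload = v;			/* plain store */
	smp_store_release(&m->ready, 1);/* all prior stores visible first */
}

static int consume(struct msg *m)
{
	if (!smp_load_acquire(&m->ready)) /* later loads cannot hoist above */
		return -EAGAIN;
	return m->payload;		/* guaranteed to observe v */
}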

View File

@ -37,12 +37,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *x)
volatile unsigned int *a;
a = __ldcw_align(x);
#ifdef CONFIG_SMP
(void) __ldcw(a);
#else
mb();
#endif
*a = 1;
/* Release with ordered store. */
__asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
}
static inline int arch_spin_trylock(arch_spinlock_t *x)

View File

@ -454,7 +454,6 @@
nop
LDREG 0(\ptp),\pte
bb,<,n \pte,_PAGE_PRESENT_BIT,3f
LDCW 0(\tmp),\tmp1
b \fault
stw \spc,0(\tmp)
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
@ -464,23 +463,26 @@
3:
.endm
/* Release pa_tlb_lock lock without reloading lock address. */
.macro tlb_unlock0 spc,tmp,tmp1
/* Release pa_tlb_lock lock without reloading lock address.
Note that the values in the register spc are limited to
NR_SPACE_IDS (262144). Thus, the stw instruction always
stores a nonzero value even when register spc is 64 bits.
We use an ordered store to ensure all prior accesses are
performed prior to releasing the lock. */
.macro tlb_unlock0 spc,tmp
#ifdef CONFIG_SMP
98: or,COND(=) %r0,\spc,%r0
LDCW 0(\tmp),\tmp1
or,COND(=) %r0,\spc,%r0
stw \spc,0(\tmp)
stw,ma \spc,0(\tmp)
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
.endm
/* Release pa_tlb_lock lock. */
.macro tlb_unlock1 spc,tmp,tmp1
.macro tlb_unlock1 spc,tmp
#ifdef CONFIG_SMP
98: load_pa_tlb_lock \tmp
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
tlb_unlock0 \spc,\tmp,\tmp1
tlb_unlock0 \spc,\tmp
#endif
.endm
@ -1163,7 +1165,7 @@ dtlb_miss_20w:
idtlbt pte,prot
tlb_unlock1 spc,t0,t1
tlb_unlock1 spc,t0
rfir
nop
@ -1189,7 +1191,7 @@ nadtlb_miss_20w:
idtlbt pte,prot
tlb_unlock1 spc,t0,t1
tlb_unlock1 spc,t0
rfir
nop
@ -1223,7 +1225,7 @@ dtlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
tlb_unlock1 spc,t0,t1
tlb_unlock1 spc,t0
rfir
nop
@ -1256,7 +1258,7 @@ nadtlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
tlb_unlock1 spc,t0,t1
tlb_unlock1 spc,t0
rfir
nop
@ -1285,7 +1287,7 @@ dtlb_miss_20:
idtlbt pte,prot
tlb_unlock1 spc,t0,t1
tlb_unlock1 spc,t0
rfir
nop
@ -1313,7 +1315,7 @@ nadtlb_miss_20:
idtlbt pte,prot
tlb_unlock1 spc,t0,t1
tlb_unlock1 spc,t0
rfir
nop
@ -1420,7 +1422,7 @@ itlb_miss_20w:
iitlbt pte,prot
tlb_unlock1 spc,t0,t1
tlb_unlock1 spc,t0
rfir
nop
@ -1444,7 +1446,7 @@ naitlb_miss_20w:
iitlbt pte,prot
tlb_unlock1 spc,t0,t1
tlb_unlock1 spc,t0
rfir
nop
@ -1478,7 +1480,7 @@ itlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
tlb_unlock1 spc,t0,t1
tlb_unlock1 spc,t0
rfir
nop
@ -1502,7 +1504,7 @@ naitlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
tlb_unlock1 spc,t0,t1
tlb_unlock1 spc,t0
rfir
nop
@ -1532,7 +1534,7 @@ itlb_miss_20:
iitlbt pte,prot
tlb_unlock1 spc,t0,t1
tlb_unlock1 spc,t0
rfir
nop
@ -1552,7 +1554,7 @@ naitlb_miss_20:
iitlbt pte,prot
tlb_unlock1 spc,t0,t1
tlb_unlock1 spc,t0
rfir
nop
@ -1582,7 +1584,7 @@ dbit_trap_20w:
idtlbt pte,prot
tlb_unlock0 spc,t0,t1
tlb_unlock0 spc,t0
rfir
nop
#else
@ -1608,7 +1610,7 @@ dbit_trap_11:
mtsp t1, %sr1 /* Restore sr1 */
tlb_unlock0 spc,t0,t1
tlb_unlock0 spc,t0
rfir
nop
@ -1628,7 +1630,7 @@ dbit_trap_20:
idtlbt pte,prot
tlb_unlock0 spc,t0,t1
tlb_unlock0 spc,t0
rfir
nop
#endif

View File

@ -640,11 +640,7 @@ cas_action:
sub,<> %r28, %r25, %r0
2: stw %r24, 0(%r26)
/* Free lock */
#ifdef CONFIG_SMP
98: LDCW 0(%sr2,%r20), %r1 /* Barrier */
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
stw %r20, 0(%sr2,%r20)
stw,ma %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
/* Clear thread register indicator */
stw %r0, 4(%sr2,%r20)
@ -658,11 +654,7 @@ cas_action:
3:
/* Error occurred on load or store */
/* Free lock */
#ifdef CONFIG_SMP
98: LDCW 0(%sr2,%r20), %r1 /* Barrier */
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
stw %r20, 0(%sr2,%r20)
stw,ma %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
stw %r0, 4(%sr2,%r20)
#endif
@ -863,11 +855,7 @@ cas2_action:
cas2_end:
/* Free lock */
#ifdef CONFIG_SMP
98: LDCW 0(%sr2,%r20), %r1 /* Barrier */
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
stw %r20, 0(%sr2,%r20)
stw,ma %r20, 0(%sr2,%r20)
/* Enable interrupts */
ssm PSW_SM_I, %r0
/* Return to userspace, set no error */
@ -877,11 +865,7 @@ cas2_end:
22:
/* Error occurred on load or store */
/* Free lock */
#ifdef CONFIG_SMP
98: LDCW 0(%sr2,%r20), %r1 /* Barrier */
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
stw %r20, 0(%sr2,%r20)
stw,ma %r20, 0(%sr2,%r20)
ssm PSW_SM_I, %r0
ldo 1(%r0),%r28
b lws_exit

View File

@ -119,7 +119,7 @@ src-wlib-y := string.S crt0.S stdio.c decompress.c main.c \
elf_util.c $(zlib-y) devtree.c stdlib.c \
oflib.c ofconsole.c cuboot.c
src-wlib-$(CONFIG_PPC_MPC52XX) += mpc52xx-psc.c
src-wlib-$(CONFIG_PPC_MPC52xx) += mpc52xx-psc.c
src-wlib-$(CONFIG_PPC64_BOOT_WRAPPER) += opal-calls.S opal.c
ifndef CONFIG_PPC64_BOOT_WRAPPER
src-wlib-y += crtsavres.S

View File

@ -128,7 +128,7 @@ int serial_console_init(void)
dt_is_compatible(devp, "fsl,cpm2-smc-uart"))
rc = cpm_console_init(devp, &serial_cd);
#endif
#ifdef CONFIG_PPC_MPC52XX
#ifdef CONFIG_PPC_MPC52xx
else if (dt_is_compatible(devp, "fsl,mpc5200-psc-uart"))
rc = mpc5200_psc_console_init(devp, &serial_cd);
#endif

View File

@ -12,6 +12,8 @@
#ifdef CONFIG_PPC_PERF_CTRS
#include <asm/perf_event_server.h>
#else
static inline bool is_sier_available(void) { return false; }
#endif
#ifdef CONFIG_FSL_EMB_PERF_EVENT

View File

@ -203,7 +203,7 @@ do { \
#endif /* __powerpc64__ */
#define arch_has_single_step() (1)
#ifndef CONFIG_BOOK3S_601
#ifndef CONFIG_PPC_BOOK3S_601
#define arch_has_block_step() (true)
#else
#define arch_has_block_step() (false)

View File

@ -368,8 +368,6 @@ extern int rtas_set_indicator_fast(int indicator, int index, int new_value);
extern void rtas_progress(char *s, unsigned short hex);
extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data);
extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data);
extern int rtas_online_cpus_mask(cpumask_var_t cpus);
extern int rtas_offline_cpus_mask(cpumask_var_t cpus);
extern int rtas_ibm_suspend_me(u64 handle);
struct rtc_time;

View File

@ -17,7 +17,7 @@ typedef unsigned long cycles_t;
static inline cycles_t get_cycles(void)
{
if (IS_ENABLED(CONFIG_BOOK3S_601))
if (IS_ENABLED(CONFIG_PPC_BOOK3S_601))
return 0;
return mftb();

View File

@ -842,96 +842,6 @@ static void rtas_percpu_suspend_me(void *info)
__rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1);
}
enum rtas_cpu_state {
DOWN,
UP,
};
#ifndef CONFIG_SMP
static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
cpumask_var_t cpus)
{
if (!cpumask_empty(cpus)) {
cpumask_clear(cpus);
return -EINVAL;
} else
return 0;
}
#else
/* On return cpumask will be altered to indicate CPUs changed.
* CPUs with states changed will be set in the mask,
* CPUs with status unchanged will be unset in the mask. */
static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
cpumask_var_t cpus)
{
int cpu;
int cpuret = 0;
int ret = 0;
if (cpumask_empty(cpus))
return 0;
for_each_cpu(cpu, cpus) {
struct device *dev = get_cpu_device(cpu);
switch (state) {
case DOWN:
cpuret = device_offline(dev);
break;
case UP:
cpuret = device_online(dev);
break;
}
if (cpuret < 0) {
pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
__func__,
((state == UP) ? "up" : "down"),
cpu, cpuret);
if (!ret)
ret = cpuret;
if (state == UP) {
/* clear bits for unchanged cpus, return */
cpumask_shift_right(cpus, cpus, cpu);
cpumask_shift_left(cpus, cpus, cpu);
break;
} else {
/* clear bit for unchanged cpu, continue */
cpumask_clear_cpu(cpu, cpus);
}
}
cond_resched();
}
return ret;
}
#endif
int rtas_online_cpus_mask(cpumask_var_t cpus)
{
int ret;
ret = rtas_cpu_state_change_mask(UP, cpus);
if (ret) {
cpumask_var_t tmp_mask;
if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL))
return ret;
/* Use tmp_mask to preserve cpus mask from first failure */
cpumask_copy(tmp_mask, cpus);
rtas_offline_cpus_mask(tmp_mask);
free_cpumask_var(tmp_mask);
}
return ret;
}
int rtas_offline_cpus_mask(cpumask_var_t cpus)
{
return rtas_cpu_state_change_mask(DOWN, cpus);
}
int rtas_ibm_suspend_me(u64 handle)
{
long state;
@ -939,8 +849,6 @@ int rtas_ibm_suspend_me(u64 handle)
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
struct rtas_suspend_me_data data;
DECLARE_COMPLETION_ONSTACK(done);
cpumask_var_t offline_mask;
int cpuret;
if (!rtas_service_present("ibm,suspend-me"))
return -ENOSYS;
@ -961,9 +869,6 @@ int rtas_ibm_suspend_me(u64 handle)
return -EIO;
}
if (!alloc_cpumask_var(&offline_mask, GFP_KERNEL))
return -ENOMEM;
atomic_set(&data.working, 0);
atomic_set(&data.done, 0);
atomic_set(&data.error, 0);
@ -972,24 +877,8 @@ int rtas_ibm_suspend_me(u64 handle)
lock_device_hotplug();
/* All present CPUs must be online */
cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
cpuret = rtas_online_cpus_mask(offline_mask);
if (cpuret) {
pr_err("%s: Could not bring present CPUs online.\n", __func__);
atomic_set(&data.error, cpuret);
goto out;
}
cpu_hotplug_disable();
/* Check if we raced with a CPU-Offline Operation */
if (!cpumask_equal(cpu_present_mask, cpu_online_mask)) {
pr_info("%s: Raced against a concurrent CPU-Offline\n", __func__);
atomic_set(&data.error, -EAGAIN);
goto out_hotplug_enable;
}
/* Call function on all CPUs. One of us will make the
* rtas call
*/
@ -1000,18 +889,11 @@ int rtas_ibm_suspend_me(u64 handle)
if (atomic_read(&data.error) != 0)
printk(KERN_ERR "Error doing global join\n");
out_hotplug_enable:
cpu_hotplug_enable();
/* Take down CPUs not online prior to suspend */
cpuret = rtas_offline_cpus_mask(offline_mask);
if (cpuret)
pr_warn("%s: Could not restore CPUs to offline state.\n",
__func__);
out:
unlock_device_hotplug();
free_cpumask_var(offline_mask);
return atomic_read(&data.error);
}
#else /* CONFIG_PPC_PSERIES */

View File

@ -682,7 +682,7 @@ int vdso_getcpu_init(void)
node = cpu_to_node(cpu);
WARN_ON_ONCE(node > 0xffff);
val = (cpu & 0xfff) | ((node & 0xffff) << 16);
val = (cpu & 0xffff) | ((node & 0xffff) << 16);
mtspr(SPRN_SPRG_VDSO_WRITE, val);
get_paca()->sprg_vdso = val;

View File

@ -83,13 +83,17 @@ static int pkey_initialize(void)
scan_pkey_feature();
/*
* Let's assume 32 pkeys on P8 bare metal, if its not defined by device
* tree. We make this exception since skiboot forgot to expose this
* property on power8.
* Let's assume 32 pkeys on P8/P9 bare metal, if its not defined by device
* tree. We make this exception since some version of skiboot forgot to
* expose this property on power8/9.
*/
if (!pkeys_devtree_defined && !firmware_has_feature(FW_FEATURE_LPAR) &&
cpu_has_feature(CPU_FTRS_POWER8))
pkeys_total = 32;
if (!pkeys_devtree_defined && !firmware_has_feature(FW_FEATURE_LPAR)) {
unsigned long pvr = mfspr(SPRN_PVR);
if (PVR_VER(pvr) == PVR_POWER8 || PVR_VER(pvr) == PVR_POWER8E ||
PVR_VER(pvr) == PVR_POWER8NVL || PVR_VER(pvr) == PVR_POWER9)
pkeys_total = 32;
}
/*
* Adjust the upper limit, based on the number of bits supported by

View File

@ -132,15 +132,11 @@ static ssize_t store_hibernate(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
cpumask_var_t offline_mask;
int rc;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (!alloc_cpumask_var(&offline_mask, GFP_KERNEL))
return -ENOMEM;
stream_id = simple_strtoul(buf, NULL, 16);
do {
@ -150,32 +146,16 @@ static ssize_t store_hibernate(struct device *dev,
} while (rc == -EAGAIN);
if (!rc) {
/* All present CPUs must be online */
cpumask_andnot(offline_mask, cpu_present_mask,
cpu_online_mask);
rc = rtas_online_cpus_mask(offline_mask);
if (rc) {
pr_err("%s: Could not bring present CPUs online.\n",
__func__);
goto out;
}
stop_topology_update();
rc = pm_suspend(PM_SUSPEND_MEM);
start_topology_update();
/* Take down CPUs not online prior to suspend */
if (!rtas_offline_cpus_mask(offline_mask))
pr_warn("%s: Could not restore CPUs to offline "
"state.\n", __func__);
}
stream_id = 0;
if (!rc)
rc = count;
out:
free_cpumask_var(offline_mask);
return rc;
}

View File

@ -2485,23 +2485,36 @@ void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
}
EXPORT_SYMBOL_GPL(gmap_sync_dirty_log_pmd);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int thp_split_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
unsigned long end, struct mm_walk *walk)
{
struct vm_area_struct *vma = walk->vma;
split_huge_pmd(vma, pmd, addr);
return 0;
}
static const struct mm_walk_ops thp_split_walk_ops = {
.pmd_entry = thp_split_walk_pmd_entry,
};
static inline void thp_split_mm(struct mm_struct *mm)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct vm_area_struct *vma;
unsigned long addr;
for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
for (addr = vma->vm_start;
addr < vma->vm_end;
addr += PAGE_SIZE)
follow_page(vma, addr, FOLL_SPLIT);
vma->vm_flags &= ~VM_HUGEPAGE;
vma->vm_flags |= VM_NOHUGEPAGE;
walk_page_vma(vma, &thp_split_walk_ops, NULL);
}
mm->def_flags |= VM_NOHUGEPAGE;
#endif
}
#else
static inline void thp_split_mm(struct mm_struct *mm)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
* Remove all empty zero pages from the mapping for lazy refaulting

View File

@ -127,10 +127,6 @@ ddq_add_8:
/* generate a unique variable for ddq_add_x */
.macro setddq n
var_ddq_add = ddq_add_\n
.endm
/* generate a unique variable for xmm register */
.macro setxdata n
var_xdata = %xmm\n
@ -140,9 +136,7 @@ ddq_add_8:
.macro club name, id
.altmacro
.if \name == DDQ_DATA
setddq %\id
.elseif \name == XDATA
.if \name == XDATA
setxdata %\id
.endif
.noaltmacro
@ -165,9 +159,8 @@ ddq_add_8:
.set i, 1
.rept (by - 1)
club DDQ_DATA, i
club XDATA, i
vpaddq var_ddq_add(%rip), xcounter, var_xdata
vpaddq (ddq_add_1 + 16 * (i - 1))(%rip), xcounter, var_xdata
vptest ddq_low_msk(%rip), var_xdata
jnz 1f
vpaddq ddq_high_add_1(%rip), var_xdata, var_xdata
@ -180,8 +173,7 @@ ddq_add_8:
vmovdqa 1*16(p_keys), xkeyA
vpxor xkey0, xdata0, xdata0
club DDQ_DATA, by
vpaddq var_ddq_add(%rip), xcounter, xcounter
vpaddq (ddq_add_1 + 16 * (by - 1))(%rip), xcounter, xcounter
vptest ddq_low_msk(%rip), xcounter
jnz 1f
vpaddq ddq_high_add_1(%rip), xcounter, xcounter

View File

@ -266,7 +266,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
PSHUFB_XMM %xmm2, %xmm0
movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv
PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7
movdqu HashKey(%arg2), %xmm13
CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \
@ -978,7 +978,7 @@ _initial_blocks_done\@:
* arg1, %arg3, %arg4 are used as pointers only, not modified
* %r11 is the data offset value
*/
.macro GHASH_4_ENCRYPT_4_PARALLEL_ENC TMP1 TMP2 TMP3 TMP4 TMP5 \
.macro GHASH_4_ENCRYPT_4_PARALLEL_enc TMP1 TMP2 TMP3 TMP4 TMP5 \
TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM1, \XMM5
@ -1186,7 +1186,7 @@ aes_loop_par_enc_done\@:
* arg1, %arg3, %arg4 are used as pointers only, not modified
* %r11 is the data offset value
*/
.macro GHASH_4_ENCRYPT_4_PARALLEL_DEC TMP1 TMP2 TMP3 TMP4 TMP5 \
.macro GHASH_4_ENCRYPT_4_PARALLEL_dec TMP1 TMP2 TMP3 TMP4 TMP5 \
TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM1, \XMM5

View File

@ -2348,8 +2348,13 @@ static int mp_irqdomain_create(int ioapic)
static void ioapic_destroy_irqdomain(int idx)
{
struct ioapic_domain_cfg *cfg = &ioapics[idx].irqdomain_cfg;
struct fwnode_handle *fn = ioapics[idx].irqdomain->fwnode;
if (ioapics[idx].irqdomain) {
irq_domain_remove(ioapics[idx].irqdomain);
if (!cfg->dev)
irq_domain_free_fwnode(fn);
ioapics[idx].irqdomain = NULL;
}
}

View File

@ -511,7 +511,7 @@ static void do_inject(void)
*/
if (inj_type == DFR_INT_INJ) {
i_mce.status |= MCI_STATUS_DEFERRED;
i_mce.status |= (i_mce.status & ~MCI_STATUS_UC);
i_mce.status &= ~MCI_STATUS_UC;
}
/*

View File

@ -316,7 +316,7 @@ static unsigned long x86_fsgsbase_read_task(struct task_struct *task,
*/
mutex_lock(&task->mm->context.lock);
ldt = task->mm->context.ldt;
if (unlikely(idx >= ldt->nr_entries))
if (unlikely(!ldt || idx >= ldt->nr_entries))
base = 0;
else
base = get_desc_base(ldt->entries + idx);

View File

@ -1377,7 +1377,7 @@ static void ioc_timer_fn(struct timer_list *timer)
* should have woken up in the last period and expire idle iocgs.
*/
list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
if (!waitqueue_active(&iocg->waitq) && iocg->abs_vdebt &&
if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
!iocg_is_idle(iocg))
continue;

View File

@ -473,10 +473,6 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
(u8)access_byte_width;
}
}
/* An additional reference for the container */
acpi_ut_add_reference(obj_desc->field.region_obj);
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
"RegionField: BitOff %X, Off %X, Gran %X, Region %p\n",
obj_desc->field.start_field_bit_offset,

View File

@ -568,11 +568,6 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
next_object = object->buffer_field.buffer_obj;
break;
case ACPI_TYPE_LOCAL_REGION_FIELD:
next_object = object->field.region_obj;
break;
case ACPI_TYPE_LOCAL_BANK_FIELD:
next_object = object->bank_field.bank_obj;
@ -613,6 +608,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
}
break;
case ACPI_TYPE_LOCAL_REGION_FIELD:
case ACPI_TYPE_REGION:
default:

View File

@ -2325,6 +2325,8 @@ static void __exit loop_exit(void)
range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;
mutex_lock(&loop_ctl_mutex);
idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
idr_destroy(&loop_index_idr);
@ -2332,6 +2334,8 @@ static void __exit loop_exit(void)
unregister_blkdev(LOOP_MAJOR, "loop");
misc_deregister(&loop_misc);
mutex_unlock(&loop_ctl_mutex);
}
module_init(loop_init);

View File

@ -328,7 +328,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8977 = {
.helper = NULL,
.firmware = "mrvl/sd8977_uapsta.bin",
.firmware = "mrvl/sdsd8977_combo_v2.bin",
.reg = &btmrvl_reg_8977,
.support_pscan_win_report = true,
.sd_blksz_fw_dl = 256,
@ -346,7 +346,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8987 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8997 = {
.helper = NULL,
.firmware = "mrvl/sd8997_uapsta.bin",
.firmware = "mrvl/sdsd8997_combo_v4.bin",
.reg = &btmrvl_reg_8997,
.support_pscan_win_report = true,
.sd_blksz_fw_dl = 256,
@ -1831,6 +1831,6 @@ MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8887_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8977_uapsta.bin");
MODULE_FIRMWARE("mrvl/sdsd8977_combo_v2.bin");
MODULE_FIRMWARE("mrvl/sd8987_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8997_uapsta.bin");
MODULE_FIRMWARE("mrvl/sdsd8997_combo_v4.bin");

View File

@ -684,7 +684,7 @@ static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
const u8 *fw_ptr;
size_t fw_size;
int err, dlen;
u8 flag;
u8 flag, param;
err = request_firmware(&fw, fwname, &hdev->dev);
if (err < 0) {
@ -692,6 +692,20 @@ static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
return err;
}
/* Power on data RAM the firmware relies on. */
param = 1;
wmt_params.op = MTK_WMT_FUNC_CTRL;
wmt_params.flag = 3;
wmt_params.dlen = sizeof(param);
wmt_params.data = &param;
wmt_params.status = NULL;
err = mtk_hci_wmt_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to power on data RAM (%d)", err);
return err;
}
fw_ptr = fw->data;
fw_size = fw->size;

View File

@ -2792,7 +2792,7 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
const u8 *fw_ptr;
size_t fw_size;
int err, dlen;
u8 flag;
u8 flag, param;
err = request_firmware(&fw, fwname, &hdev->dev);
if (err < 0) {
@ -2800,6 +2800,20 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
return err;
}
/* Power on data RAM the firmware relies on. */
param = 1;
wmt_params.op = BTMTK_WMT_FUNC_CTRL;
wmt_params.flag = 3;
wmt_params.dlen = sizeof(param);
wmt_params.data = &param;
wmt_params.status = NULL;
err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to power on data RAM (%d)", err);
return err;
}
fw_ptr = fw->data;
fw_size = fw->size;

View File

@ -790,7 +790,7 @@ static int h5_serdev_probe(struct serdev_device *serdev)
if (!h5)
return -ENOMEM;
set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.flags);
set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.hdev_flags);
h5->hu = &h5->serdev_hu;
h5->serdev_hu.serdev = serdev;

View File

@ -357,7 +357,8 @@ void hci_uart_unregister_device(struct hci_uart *hu)
struct hci_dev *hdev = hu->hdev;
clear_bit(HCI_UART_PROTO_READY, &hu->flags);
hci_unregister_dev(hdev);
if (test_bit(HCI_UART_REGISTERED, &hu->flags))
hci_unregister_dev(hdev);
hci_free_dev(hdev);
cancel_work_sync(&hu->write_work);

View File

@ -1371,6 +1371,10 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("tptc", 0, 0, -ENODEV, -ENODEV, 0x40007c00, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff,
@ -1440,8 +1444,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK("tpcc", 0, 0, -ENODEV, -ENODEV, 0x40014c00, 0xffffffff, 0),
SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000004, 0xffffffff, 0),
SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000008, 0xffffffff, 0),
SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff, 0),
SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff, 0),
SYSC_QUIRK("venc", 0x58003000, 0, -ENODEV, -ENODEV, 0x00000002, 0xffffffff, 0),
SYSC_QUIRK("vfpe", 0, 0, 0x104, -ENODEV, 0x4d001200, 0xffffffff, 0),
#endif

View File

@ -304,8 +304,10 @@ static int intel_gtt_setup_scratch_page(void)
if (intel_private.needs_dmar) {
dma_addr = pci_map_page(intel_private.pcidev, page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) {
__free_page(page);
return -EINVAL;
}
intel_private.scratch_page_dma = dma_addr;
} else

View File

@ -386,13 +386,8 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev,
chip->cdev.owner = THIS_MODULE;
chip->cdevs.owner = THIS_MODULE;
chip->work_space.context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!chip->work_space.context_buf) {
rc = -ENOMEM;
goto out;
}
chip->work_space.session_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!chip->work_space.session_buf) {
rc = tpm2_init_space(&chip->work_space, TPM2_SPACE_BUFFER_SIZE);
if (rc) {
rc = -ENOMEM;
goto out;
}

View File

@ -177,6 +177,9 @@ struct tpm_header {
#define TPM_TAG_RQU_COMMAND 193
/* TPM2 specific constants. */
#define TPM2_SPACE_BUFFER_SIZE 16384 /* 16 kB */
struct stclear_flags_t {
__be16 tag;
u8 deactivated;
@ -456,7 +459,7 @@ void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type);
unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
int tpm2_probe(struct tpm_chip *chip);
int tpm2_find_cc(struct tpm_chip *chip, u32 cc);
int tpm2_init_space(struct tpm_space *space);
int tpm2_init_space(struct tpm_space *space, unsigned int buf_size);
void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space);
void tpm2_flush_space(struct tpm_chip *chip);
int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd,

View File

@ -38,18 +38,21 @@ static void tpm2_flush_sessions(struct tpm_chip *chip, struct tpm_space *space)
}
}
int tpm2_init_space(struct tpm_space *space)
int tpm2_init_space(struct tpm_space *space, unsigned int buf_size)
{
space->context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
space->context_buf = kzalloc(buf_size, GFP_KERNEL);
if (!space->context_buf)
return -ENOMEM;
space->session_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
space->session_buf = kzalloc(buf_size, GFP_KERNEL);
if (space->session_buf == NULL) {
kfree(space->context_buf);
/* Prevent caller getting a dangling pointer. */
space->context_buf = NULL;
return -ENOMEM;
}
space->buf_size = buf_size;
return 0;
}
@ -311,8 +314,10 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd,
sizeof(space->context_tbl));
memcpy(&chip->work_space.session_tbl, &space->session_tbl,
sizeof(space->session_tbl));
memcpy(chip->work_space.context_buf, space->context_buf, PAGE_SIZE);
memcpy(chip->work_space.session_buf, space->session_buf, PAGE_SIZE);
memcpy(chip->work_space.context_buf, space->context_buf,
space->buf_size);
memcpy(chip->work_space.session_buf, space->session_buf,
space->buf_size);
rc = tpm2_load_space(chip);
if (rc) {
@ -492,7 +497,7 @@ static int tpm2_save_space(struct tpm_chip *chip)
continue;
rc = tpm2_save_context(chip, space->context_tbl[i],
space->context_buf, PAGE_SIZE,
space->context_buf, space->buf_size,
&offset);
if (rc == -ENOENT) {
space->context_tbl[i] = 0;
@ -509,9 +514,8 @@ static int tpm2_save_space(struct tpm_chip *chip)
continue;
rc = tpm2_save_context(chip, space->session_tbl[i],
space->session_buf, PAGE_SIZE,
space->session_buf, space->buf_size,
&offset);
if (rc == -ENOENT) {
/* handle error saving session, just forget it */
space->session_tbl[i] = 0;
@ -557,8 +561,10 @@ int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space,
sizeof(space->context_tbl));
memcpy(&space->session_tbl, &chip->work_space.session_tbl,
sizeof(space->session_tbl));
memcpy(space->context_buf, chip->work_space.context_buf, PAGE_SIZE);
memcpy(space->session_buf, chip->work_space.session_buf, PAGE_SIZE);
memcpy(space->context_buf, chip->work_space.context_buf,
space->buf_size);
memcpy(space->session_buf, chip->work_space.session_buf,
space->buf_size);
return 0;
out:

View File

@ -21,7 +21,7 @@ static int tpmrm_open(struct inode *inode, struct file *file)
if (priv == NULL)
return -ENOMEM;
rc = tpm2_init_space(&priv->space);
rc = tpm2_init_space(&priv->space, TPM2_SPACE_BUFFER_SIZE);
if (rc) {
kfree(priv);
return -ENOMEM;

View File

@ -155,6 +155,7 @@ static int clk_bcm63xx_probe(struct platform_device *pdev)
for (entry = table; entry->name; entry++)
maxbit = max_t(u8, maxbit, entry->bit);
maxbit++;
hw = devm_kzalloc(&pdev->dev, struct_size(hw, data.hws, maxbit),
GFP_KERNEL);

View File

@ -103,6 +103,8 @@ static const struct clk_ops scmi_clk_ops = {
static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk)
{
int ret;
unsigned long min_rate, max_rate;
struct clk_init_data init = {
.flags = CLK_GET_RATE_NOCACHE,
.num_parents = 0,
@ -112,9 +114,23 @@ static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk)
sclk->hw.init = &init;
ret = devm_clk_hw_register(dev, &sclk->hw);
if (!ret)
clk_hw_set_rate_range(&sclk->hw, sclk->info->range.min_rate,
sclk->info->range.max_rate);
if (ret)
return ret;
if (sclk->info->rate_discrete) {
int num_rates = sclk->info->list.num_rates;
if (num_rates <= 0)
return -EINVAL;
min_rate = sclk->info->list.rates[0];
max_rate = sclk->info->list.rates[num_rates - 1];
} else {
min_rate = sclk->info->range.min_rate;
max_rate = sclk->info->range.max_rate;
}
clk_hw_set_rate_range(&sclk->hw, min_rate, max_rate);
return ret;
}

View File

@ -143,12 +143,22 @@ static inline bool has_state_changed(struct clk_rpmh *c, u32 state)
!= (c->aggr_state & BIT(state));
}
static int clk_rpmh_send(struct clk_rpmh *c, enum rpmh_state state,
struct tcs_cmd *cmd, bool wait)
{
if (wait)
return rpmh_write(c->dev, state, cmd, 1);
return rpmh_write_async(c->dev, state, cmd, 1);
}
static int clk_rpmh_send_aggregate_command(struct clk_rpmh *c)
{
struct tcs_cmd cmd = { 0 };
u32 cmd_state, on_val;
enum rpmh_state state = RPMH_SLEEP_STATE;
int ret;
bool wait;
cmd.addr = c->res_addr;
cmd_state = c->aggr_state;
@ -159,7 +169,8 @@ static int clk_rpmh_send_aggregate_command(struct clk_rpmh *c)
if (cmd_state & BIT(state))
cmd.data = on_val;
ret = rpmh_write_async(c->dev, state, &cmd, 1);
wait = cmd_state && state == RPMH_ACTIVE_ONLY_STATE;
ret = clk_rpmh_send(c, state, &cmd, wait);
if (ret) {
dev_err(c->dev, "set %s state of %s failed: (%d)\n",
!state ? "sleep" :
@ -267,7 +278,7 @@ static int clk_rpmh_bcm_send_cmd(struct clk_rpmh *c, bool enable)
cmd.addr = c->res_addr;
cmd.data = BCM_TCS_CMD(1, enable, 0, cmd_state);
ret = rpmh_write_async(c->dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
ret = clk_rpmh_send(c, RPMH_ACTIVE_ONLY_STATE, &cmd, enable);
if (ret) {
dev_err(c->dev, "set active state of %s failed: (%d)\n",
c->res_name, ret);

View File

@ -41,6 +41,7 @@ config ARM_ARMADA_37XX_CPUFREQ
config ARM_ARMADA_8K_CPUFREQ
tristate "Armada 8K CPUFreq driver"
depends on ARCH_MVEBU && CPUFREQ_DT
select ARMADA_AP_CPU_CLK
help
This enables the CPUFreq driver support for Marvell
Armada8k SOCs.

View File

@ -456,6 +456,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
/* Now that everything is setup, enable the DVFS at hardware level */
armada37xx_cpufreq_enable_dvfs(nb_pm_base);
memset(&pdata, 0, sizeof(pdata));
pdata.suspend = armada37xx_cpufreq_suspend;
pdata.resume = armada37xx_cpufreq_resume;

View File

@ -616,6 +616,24 @@ static struct cpufreq_governor *find_governor(const char *str_governor)
return NULL;
}
static struct cpufreq_governor *get_governor(const char *str_governor)
{
struct cpufreq_governor *t;
mutex_lock(&cpufreq_governor_mutex);
t = find_governor(str_governor);
if (!t)
goto unlock;
if (!try_module_get(t->owner))
t = NULL;
unlock:
mutex_unlock(&cpufreq_governor_mutex);
return t;
}
static unsigned int cpufreq_parse_policy(char *str_governor)
{
if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
@ -635,28 +653,14 @@ static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
{
struct cpufreq_governor *t;
mutex_lock(&cpufreq_governor_mutex);
t = get_governor(str_governor);
if (t)
return t;
t = find_governor(str_governor);
if (!t) {
int ret;
if (request_module("cpufreq_%s", str_governor))
return NULL;
mutex_unlock(&cpufreq_governor_mutex);
ret = request_module("cpufreq_%s", str_governor);
if (ret)
return NULL;
mutex_lock(&cpufreq_governor_mutex);
t = find_governor(str_governor);
}
if (t && !try_module_get(t->owner))
t = NULL;
mutex_unlock(&cpufreq_governor_mutex);
return t;
return get_governor(str_governor);
}
/**
@ -810,12 +814,14 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
goto out;
}
mutex_lock(&cpufreq_governor_mutex);
for_each_governor(t) {
if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
- (CPUFREQ_NAME_LEN + 2)))
goto out;
break;
i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
}
mutex_unlock(&cpufreq_governor_mutex);
out:
i += sprintf(&buf[i], "\n");
return i;
@ -1053,15 +1059,17 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
struct cpufreq_governor *def_gov = cpufreq_default_governor();
struct cpufreq_governor *gov = NULL;
unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
int ret;
if (has_target()) {
/* Update policy governor to the one used before hotplug. */
gov = find_governor(policy->last_governor);
gov = get_governor(policy->last_governor);
if (gov) {
pr_debug("Restoring governor %s for cpu %d\n",
policy->governor->name, policy->cpu);
} else if (def_gov) {
gov = def_gov;
__module_get(gov->owner);
} else {
return -ENODATA;
}
@ -1084,7 +1092,11 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
return -ENODATA;
}
return cpufreq_set_policy(policy, gov, pol);
ret = cpufreq_set_policy(policy, gov, pol);
if (gov)
module_put(gov->owner);
return ret;
}
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
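
get_governor() now returns with a module reference held on success; a short sketch of the calling convention this imposes, mirroring the cpufreq_init_policy() change above (the wrapper name is an arbitrary example):

/* Sketch: every successful get_governor() must be balanced by a
 * module_put() once the governor pointer is no longer needed.
 */
static int apply_governor_sketch(struct cpufreq_policy *policy,
				 const char *name)
{
	struct cpufreq_governor *gov = get_governor(name);
	int ret;

	if (!gov)
		return -ENODATA;

	ret = cpufreq_set_policy(policy, gov, CPUFREQ_POLICY_UNKNOWN);
	module_put(gov->owner);		/* drop the try_module_get() ref */
	return ret;
}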

View File

@ -200,6 +200,7 @@ static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc)
int status;
memset(req_info, 0, sizeof(struct cpt_request_info));
req_info->may_sleep = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) != 0;
memset(fctx, 0, sizeof(struct fc_context));
create_input_list(req, enc, enc_iv_len);
create_output_list(req, enc_iv_len);

View File

@ -133,7 +133,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf,
/* Setup gather (input) components */
g_sz_bytes = ((req->incnt + 3) / 4) * sizeof(struct sglist_component);
info->gather_components = kzalloc(g_sz_bytes, GFP_KERNEL);
info->gather_components = kzalloc(g_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
if (!info->gather_components) {
ret = -ENOMEM;
goto scatter_gather_clean;
@ -150,7 +150,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf,
/* Setup scatter (output) components */
s_sz_bytes = ((req->outcnt + 3) / 4) * sizeof(struct sglist_component);
info->scatter_components = kzalloc(s_sz_bytes, GFP_KERNEL);
info->scatter_components = kzalloc(s_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
if (!info->scatter_components) {
ret = -ENOMEM;
goto scatter_gather_clean;
@ -167,7 +167,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf,
/* Create and initialize DPTR */
info->dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
info->in_buffer = kzalloc(info->dlen, GFP_KERNEL);
info->in_buffer = kzalloc(info->dlen, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
if (!info->in_buffer) {
ret = -ENOMEM;
goto scatter_gather_clean;
@ -195,7 +195,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf,
}
/* Create and initialize RPTR */
info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, GFP_KERNEL);
info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
if (!info->out_buffer) {
ret = -ENOMEM;
goto scatter_gather_clean;
@ -421,7 +421,7 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
struct cpt_vq_command vq_cmd;
union cpt_inst_s cptinst;
info = kzalloc(sizeof(*info), GFP_KERNEL);
info = kzalloc(sizeof(*info), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
if (unlikely(!info)) {
dev_err(&pdev->dev, "Unable to allocate memory for info_buffer\n");
return -ENOMEM;
@ -443,7 +443,7 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
* Get buffer for union cpt_res_s response
* structure and its physical address
*/
info->completion_addr = kzalloc(sizeof(union cpt_res_s), GFP_KERNEL);
info->completion_addr = kzalloc(sizeof(union cpt_res_s), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
if (unlikely(!info->completion_addr)) {
dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n");
ret = -ENOMEM;

View File

@ -62,6 +62,8 @@ struct cpt_request_info {
union ctrl_info ctrl; /* User control information */
struct cptvf_request req; /* Request Information (Core specific) */
bool may_sleep;
struct buf_ptr in[MAX_BUF_CNT];
struct buf_ptr out[MAX_BUF_CNT];

View File

@ -469,6 +469,7 @@ struct ccp_sg_workarea {
unsigned int sg_used;
struct scatterlist *dma_sg;
struct scatterlist *dma_sg_head;
struct device *dma_dev;
unsigned int dma_count;
enum dma_data_direction dma_dir;

View File

@ -63,7 +63,7 @@ static u32 ccp_gen_jobid(struct ccp_device *ccp)
static void ccp_sg_free(struct ccp_sg_workarea *wa)
{
if (wa->dma_count)
dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir);
dma_unmap_sg(wa->dma_dev, wa->dma_sg_head, wa->nents, wa->dma_dir);
wa->dma_count = 0;
}
@ -92,6 +92,7 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
return 0;
wa->dma_sg = sg;
wa->dma_sg_head = sg;
wa->dma_dev = dev;
wa->dma_dir = dma_dir;
wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
@ -104,14 +105,28 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
{
unsigned int nbytes = min_t(u64, len, wa->bytes_left);
unsigned int sg_combined_len = 0;
if (!wa->sg)
return;
wa->sg_used += nbytes;
wa->bytes_left -= nbytes;
if (wa->sg_used == wa->sg->length) {
wa->sg = sg_next(wa->sg);
if (wa->sg_used == sg_dma_len(wa->dma_sg)) {
/* Advance to the next DMA scatterlist entry */
wa->dma_sg = sg_next(wa->dma_sg);
/* In the case that the DMA mapped scatterlist has entries
* that have been merged, the non-DMA mapped scatterlist
* must be advanced multiple times for each merged entry.
* This ensures that the current non-DMA mapped entry
* corresponds to the current DMA mapped entry.
*/
do {
sg_combined_len += wa->sg->length;
wa->sg = sg_next(wa->sg);
} while (wa->sg_used > sg_combined_len);
wa->sg_used = 0;
}
}
@ -299,7 +314,7 @@ static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
/* Update the structures and generate the count */
buf_count = 0;
while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
nbytes = min(sg_wa->sg->length - sg_wa->sg_used,
nbytes = min(sg_dma_len(sg_wa->dma_sg) - sg_wa->sg_used,
dm_wa->length - buf_count);
nbytes = min_t(u64, sg_wa->bytes_left, nbytes);
@ -331,11 +346,11 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
* and destination. The resulting len values will always be <= UINT_MAX
* because the dma length is an unsigned int.
*/
sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
sg_src_len = sg_dma_len(src->sg_wa.dma_sg) - src->sg_wa.sg_used;
sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);
if (dst) {
sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
sg_dst_len = sg_dma_len(dst->sg_wa.dma_sg) - dst->sg_wa.sg_used;
sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
op_len = min(sg_src_len, sg_dst_len);
} else {
@ -365,7 +380,7 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
/* Enough data in the sg element, but we need to
* adjust for any previously copied data
*/
op->src.u.dma.address = sg_dma_address(src->sg_wa.sg);
op->src.u.dma.address = sg_dma_address(src->sg_wa.dma_sg);
op->src.u.dma.offset = src->sg_wa.sg_used;
op->src.u.dma.length = op_len & ~(block_size - 1);
@ -386,7 +401,7 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
/* Enough room in the sg element, but we need to
* adjust for any previously used area
*/
op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg);
op->dst.u.dma.address = sg_dma_address(dst->sg_wa.dma_sg);
op->dst.u.dma.offset = dst->sg_wa.sg_used;
op->dst.u.dma.length = op->src.u.dma.length;
}
@ -2028,7 +2043,7 @@ ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
dst.sg_wa.sg_used = 0;
for (i = 1; i <= src.sg_wa.dma_count; i++) {
if (!dst.sg_wa.sg ||
(dst.sg_wa.sg->length < src.sg_wa.sg->length)) {
(sg_dma_len(dst.sg_wa.sg) < sg_dma_len(src.sg_wa.sg))) {
ret = -EINVAL;
goto e_dst;
}
@ -2054,8 +2069,8 @@ ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
goto e_dst;
}
dst.sg_wa.sg_used += src.sg_wa.sg->length;
if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) {
dst.sg_wa.sg_used += sg_dma_len(src.sg_wa.sg);
if (dst.sg_wa.sg_used == sg_dma_len(dst.sg_wa.sg)) {
dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
dst.sg_wa.sg_used = 0;
}
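
For concreteness, a hypothetical case rather than one from the patch: if dma_map_sg() merges two physically adjacent 4 KiB entries into a single 8 KiB DMA entry, sg_used can legitimately reach 8192 while each original entry is only 4096 bytes long, so the original-list cursor must advance twice for that one DMA entry; the do/while loop above performs exactly that catch-up.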

View File

@ -163,7 +163,6 @@ static int cc_cipher_init(struct crypto_tfm *tfm)
skcipher_alg.base);
struct device *dev = drvdata_to_dev(cc_alg->drvdata);
unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;
int rc = 0;
dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p,
crypto_tfm_alg_name(tfm));
@ -175,10 +174,19 @@ static int cc_cipher_init(struct crypto_tfm *tfm)
ctx_p->flow_mode = cc_alg->flow_mode;
ctx_p->drvdata = cc_alg->drvdata;
if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
/* Alloc hash tfm for essiv */
ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0);
if (IS_ERR(ctx_p->shash_tfm)) {
dev_err(dev, "Error allocating hash tfm for ESSIV.\n");
return PTR_ERR(ctx_p->shash_tfm);
}
}
/* Allocate key buffer, cache line aligned */
ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL);
if (!ctx_p->user.key)
return -ENOMEM;
goto free_shash;
dev_dbg(dev, "Allocated key buffer in context. key=@%p\n",
ctx_p->user.key);
@ -190,21 +198,19 @@ static int cc_cipher_init(struct crypto_tfm *tfm)
if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n",
max_key_buf_size, ctx_p->user.key);
return -ENOMEM;
goto free_key;
}
dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n",
max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr);
if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
/* Alloc hash tfm for essiv */
ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0);
if (IS_ERR(ctx_p->shash_tfm)) {
dev_err(dev, "Error allocating hash tfm for ESSIV.\n");
return PTR_ERR(ctx_p->shash_tfm);
}
}
return 0;
return rc;
free_key:
kfree(ctx_p->user.key);
free_shash:
crypto_free_shash(ctx_p->shash_tfm);
return -ENOMEM;
}
static void cc_cipher_exit(struct crypto_tfm *tfm)

View File

@ -175,7 +175,8 @@ static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
dma_addr_t *psec_sgl,
struct scatterlist *sgl,
int count,
struct sec_dev_info *info)
struct sec_dev_info *info,
gfp_t gfp)
{
struct sec_hw_sgl *sgl_current = NULL;
struct sec_hw_sgl *sgl_next;
@ -190,7 +191,7 @@ static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
sge_index = i % SEC_MAX_SGE_NUM;
if (sge_index == 0) {
sgl_next = dma_pool_zalloc(info->hw_sgl_pool,
GFP_KERNEL, &sgl_next_dma);
gfp, &sgl_next_dma);
if (!sgl_next) {
ret = -ENOMEM;
goto err_free_hw_sgls;
@ -545,14 +546,14 @@ void sec_alg_callback(struct sec_bd_info *resp, void *shadow)
}
static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes,
int *steps)
int *steps, gfp_t gfp)
{
size_t *sizes;
int i;
/* Split into suitable sized blocks */
*steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT;
sizes = kcalloc(*steps, sizeof(*sizes), GFP_KERNEL);
sizes = kcalloc(*steps, sizeof(*sizes), gfp);
if (!sizes)
return -ENOMEM;
@ -568,7 +569,7 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
int steps, struct scatterlist ***splits,
int **splits_nents,
int sgl_len_in,
struct device *dev)
struct device *dev, gfp_t gfp)
{
int ret, count;
@ -576,12 +577,12 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
if (!count)
return -EINVAL;
*splits = kcalloc(steps, sizeof(struct scatterlist *), GFP_KERNEL);
*splits = kcalloc(steps, sizeof(struct scatterlist *), gfp);
if (!*splits) {
ret = -ENOMEM;
goto err_unmap_sg;
}
*splits_nents = kcalloc(steps, sizeof(int), GFP_KERNEL);
*splits_nents = kcalloc(steps, sizeof(int), gfp);
if (!*splits_nents) {
ret = -ENOMEM;
goto err_free_splits;
@ -589,7 +590,7 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
/* output the scatter list before and after this */
ret = sg_split(sgl, count, 0, steps, split_sizes,
*splits, *splits_nents, GFP_KERNEL);
*splits, *splits_nents, gfp);
if (ret) {
ret = -ENOMEM;
goto err_free_splits_nents;
@ -630,13 +631,13 @@ static struct sec_request_el
int el_size, bool different_dest,
struct scatterlist *sgl_in, int n_ents_in,
struct scatterlist *sgl_out, int n_ents_out,
struct sec_dev_info *info)
struct sec_dev_info *info, gfp_t gfp)
{
struct sec_request_el *el;
struct sec_bd_info *req;
int ret;
el = kzalloc(sizeof(*el), GFP_KERNEL);
el = kzalloc(sizeof(*el), gfp);
if (!el)
return ERR_PTR(-ENOMEM);
el->el_length = el_size;
@ -668,7 +669,7 @@ static struct sec_request_el
el->sgl_in = sgl_in;
ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in,
n_ents_in, info);
n_ents_in, info, gfp);
if (ret)
goto err_free_el;
@ -679,7 +680,7 @@ static struct sec_request_el
el->sgl_out = sgl_out;
ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out,
el->sgl_out,
n_ents_out, info);
n_ents_out, info, gfp);
if (ret)
goto err_free_hw_sgl_in;
@ -720,6 +721,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
int *splits_out_nents = NULL;
struct sec_request_el *el, *temp;
bool split = skreq->src != skreq->dst;
gfp_t gfp = skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
mutex_init(&sec_req->lock);
sec_req->req_base = &skreq->base;
@ -728,13 +730,13 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
sec_req->len_in = sg_nents(skreq->src);
ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
&steps);
&steps, gfp);
if (ret)
return ret;
sec_req->num_elements = steps;
ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
&splits_in_nents, sec_req->len_in,
info->dev);
info->dev, gfp);
if (ret)
goto err_free_split_sizes;
@ -742,7 +744,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
sec_req->len_out = sg_nents(skreq->dst);
ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
&splits_out, &splits_out_nents,
sec_req->len_out, info->dev);
sec_req->len_out, info->dev, gfp);
if (ret)
goto err_unmap_in_sg;
}
@ -775,7 +777,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
splits_in[i], splits_in_nents[i],
split ? splits_out[i] : NULL,
split ? splits_out_nents[i] : 0,
info);
info, gfp);
if (IS_ERR(el)) {
ret = PTR_ERR(el);
goto err_free_elements;
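The hisilicon sec hunks all thread one gfp_t parameter from the request down into every allocating helper, because a request issued from atomic context (CRYPTO_TFM_REQ_MAY_SLEEP clear) must use GFP_ATOMIC rather than GFP_KERNEL, which may sleep. A hedged userspace model of the plumbing, with made-up names:

#include <stdbool.h>
#include <stdlib.h>

/* Model of threading an allocation context (kernel gfp_t) through
 * helper layers: the top level decides once whether sleeping is
 * allowed, and every leaf helper honors that decision. */
typedef enum { ALLOC_MAY_SLEEP, ALLOC_ATOMIC } alloc_ctx;

static void *ctx_zalloc(size_t n, alloc_ctx ctx)
{
	/* In the kernel this would be kzalloc(n, gfp); userspace malloc
	 * never "sleeps" in the kernel sense, so both branches are the
	 * same here -- the point is the plumbing, not the allocator. */
	(void)ctx;
	return calloc(1, n);
}

static int fill_element(int **el, alloc_ctx ctx)
{
	*el = ctx_zalloc(sizeof(**el), ctx);
	return *el ? 0 : -1;
}

int main(void)
{
	int *el;
	bool may_sleep = false;	/* e.g. the request flags say "atomic" */

	if (fill_element(&el, may_sleep ? ALLOC_MAY_SLEEP : ALLOC_ATOMIC))
		return 1;
	free(el);
	return 0;
}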


@ -332,13 +332,18 @@ static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
}
return 0;
out_err:
/* Do not free the list head unless we allocated it. */
tail_old = tail_old->next;
if (flag) {
kfree(*init_tab_base);
*init_tab_base = NULL;
}
while (tail_old) {
mem_init = tail_old->next;
kfree(tail_old);
tail_old = mem_init;
}
if (flag)
kfree(*init_tab_base);
return -ENOMEM;
}
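The qat hunk fixes a double free: the old loop freed every node starting at the head, and then freed *init_tab_base again when the head had been allocated locally. The fix frees the head exactly once (clearing the caller's pointer) and only walks the remaining nodes. A loose userspace model of the corrected ownership rule (illustrative names, not the driver code):

#include <stdbool.h>
#include <stdlib.h>

struct node { struct node *next; };

/* Free the head exactly once, and only if we allocated it; then
 * walk and free the rest of the list. */
static void destroy_list(struct node **head, bool we_own_head)
{
	struct node *cur = *head ? (*head)->next : NULL;

	if (we_own_head) {
		free(*head);
		*head = NULL;	/* caller must not see a dangling pointer */
	}
	while (cur) {
		struct node *next = cur->next;

		free(cur);
		cur = next;
	}
}

int main(void)
{
	struct node *head = calloc(1, sizeof(*head));

	if (!head)
		return 1;
	head->next = calloc(1, sizeof(*head));
	destroy_list(&head, true);
	return 0;
}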


@ -275,6 +275,7 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
/* Error exit stack */
err_kobj_reg:
kobject_put(&edac_dev->kobj);
module_put(edac_dev->owner);
err_out:


@ -386,7 +386,7 @@ static int edac_pci_main_kobj_setup(void)
/* Error unwind stack */
kobject_init_and_add_fail:
kfree(edac_pci_top_main_kobj);
kobject_put(edac_pci_top_main_kobj);
kzalloc_fail:
module_put(THIS_MODULE);
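Both edac hunks correct the unwind after kobject_init_and_add() fails: at that point the object is already refcounted, so it must be dropped with kobject_put(), which runs the ktype's release function, rather than kfree(), and the module reference taken for the sysfs entry has to be put as well. A tiny userspace model of the put-not-free rule:

#include <stdio.h>
#include <stdlib.h>

/* Model of kobject-style lifetime: once an object is refcounted,
 * it must die through put() -> release(), never a bare free(). */
struct obj {
	int refs;
	void (*release)(struct obj *);
};

static void obj_release(struct obj *o)
{
	printf("release runs: cleanup happens exactly once\n");
	free(o);
}

static void obj_put(struct obj *o)
{
	if (--o->refs == 0)
		o->release(o);
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return 1;
	o->refs = 1;
	o->release = obj_release;
	/* ... registration fails here ... */
	obj_put(o);	/* a bare free(o) would skip obj_release() */
	return 0;
}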


@ -85,7 +85,10 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
for (i = 0; i < num_domains; i++, scmi_pd++) {
u32 state;
domains[i] = &scmi_pd->genpd;
if (handle->power_ops->state_get(handle, i, &state)) {
dev_warn(dev, "failed to get state for domain %d\n", i);
continue;
}
scmi_pd->domain = i;
scmi_pd->handle = handle;
@ -94,13 +97,10 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
scmi_pd->genpd.power_off = scmi_pd_power_off;
scmi_pd->genpd.power_on = scmi_pd_power_on;
if (handle->power_ops->state_get(handle, i, &state)) {
dev_warn(dev, "failed to get state for domain %d\n", i);
continue;
}
pm_genpd_init(&scmi_pd->genpd, NULL,
state == SCMI_POWER_STATE_GENERIC_OFF);
domains[i] = &scmi_pd->genpd;
}
scmi_pd_data->domains = domains;


@ -404,7 +404,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
}
amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
amdgpu_irq_get(adev, irq_src, irq_type);
if (irq_src)
amdgpu_irq_get(adev, irq_src, irq_type);
ring->fence_drv.irq_src = irq_src;
ring->fence_drv.irq_type = irq_type;
@ -539,8 +541,9 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
/* no need to trigger GPU reset as we are unloading */
amdgpu_fence_driver_force_completion(ring);
}
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
if (ring->fence_drv.irq_src)
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
drm_sched_fini(&ring->sched);
del_timer_sync(&ring->fence_drv.fallback_timer);
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
@ -576,8 +579,9 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
}
/* disable the interrupt */
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
if (ring->fence_drv.irq_src)
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
}
}
@ -603,8 +607,9 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
continue;
/* enable the interrupt */
amdgpu_irq_get(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
if (ring->fence_drv.irq_src)
amdgpu_irq_get(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
}
}


@ -530,6 +530,8 @@ bool dm_pp_get_static_clocks(
&pp_clk_info);
else if (adev->smu.funcs)
ret = smu_get_current_clocks(&adev->smu, &pp_clk_info);
else
return false;
if (ret)
return false;


@ -171,7 +171,8 @@ static int smu_v11_0_init_microcode(struct smu_context *smu)
chip_name = "navi12";
break;
default:
BUG();
dev_err(adev->dev, "Unsupported ASIC type %d\n", adev->asic_type);
return -EINVAL;
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);


@ -928,7 +928,7 @@ int malidp_de_planes_init(struct drm_device *drm)
const struct malidp_hw_regmap *map = &malidp->dev->hw->map;
struct malidp_plane *plane = NULL;
enum drm_plane_type plane_type;
unsigned long crtcs = 1 << drm->mode_config.num_crtc;
unsigned long crtcs = BIT(drm->mode_config.num_crtc);
unsigned long flags = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_180 |
DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
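The malidp change swaps an int-typed shift for BIT(), which the kernel defines as 1UL << n. Computing 1 << n is done in signed int, so it overflows (undefined behavior) once n reaches 31, while the unsigned-long form stays well-defined up to the word size. A small, well-defined demonstration of the type difference:

#include <stdio.h>

#define BIT(n) (1UL << (n))	/* same definition as the kernel macro */

int main(void)
{
	/* 1 << n is evaluated in (signed) int: at n == 31 it overflows,
	 * and the negative result sign-extends when widened to a wider
	 * unsigned type. BIT(n) shifts an unsigned long instead. */
	printf("sizeof(1 << 0) = %zu\n", sizeof(1 << 0));	/* 4: int */
	printf("sizeof(BIT(0)) = %zu\n", sizeof(BIT(0)));	/* 8 on LP64 */
	printf("BIT(31)        = %#lx\n", BIT(31));		/* 0x80000000 */
	return 0;
}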


@ -177,7 +177,7 @@ static void sii8620_read_buf(struct sii8620 *ctx, u16 addr, u8 *buf, int len)
static u8 sii8620_readb(struct sii8620 *ctx, u16 addr)
{
u8 ret;
u8 ret = 0;
sii8620_read_buf(ctx, addr, &ret, 1);
return ret;


@ -647,6 +647,12 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux,
buf[i]);
}
/* Clear old status bits before start so we don't get confused */
regmap_write(pdata->regmap, SN_AUX_CMD_STATUS_REG,
AUX_IRQ_STATUS_NAT_I2C_FAIL |
AUX_IRQ_STATUS_AUX_RPLY_TOUT |
AUX_IRQ_STATUS_AUX_SHORT);
regmap_write(pdata->regmap, SN_AUX_CMD_REG, request_val | AUX_CMD_SEND);
ret = regmap_read_poll_timeout(pdata->regmap, SN_AUX_CMD_REG, val,


@ -337,13 +337,13 @@ static ssize_t connector_write(struct file *file, const char __user *ubuf,
buf[len] = '\0';
if (!strcmp(buf, "on"))
if (sysfs_streq(buf, "on"))
connector->force = DRM_FORCE_ON;
else if (!strcmp(buf, "digital"))
else if (sysfs_streq(buf, "digital"))
connector->force = DRM_FORCE_ON_DIGITAL;
else if (!strcmp(buf, "off"))
else if (sysfs_streq(buf, "off"))
connector->force = DRM_FORCE_OFF;
else if (!strcmp(buf, "unspecified"))
else if (sysfs_streq(buf, "unspecified"))
connector->force = DRM_FORCE_UNSPECIFIED;
else
return -EINVAL;
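strcmp() never matched here because data written through debugfs usually carries the newline from the shell ("echo on > ..."); sysfs_streq() compares while tolerating a single trailing newline on either side. A userspace reimplementation mirroring the kernel helper's documented behavior:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Equal if the strings match exactly, or match up to one trailing
 * '\n' on either argument -- the sysfs_streq() contract. */
static bool my_sysfs_streq(const char *s1, const char *s2)
{
	while (*s1 && *s1 == *s2) {
		s1++;
		s2++;
	}
	if (*s1 == *s2)
		return true;
	if (!*s1 && *s2 == '\n' && !s2[1])
		return true;
	if (*s1 == '\n' && !s1[1] && !*s2)
		return true;
	return false;
}

int main(void)
{
	printf("%d\n", my_sysfs_streq("on\n", "on"));	/* 1: newline ignored */
	printf("%d\n", strcmp("on\n", "on") == 0);	/* 0: old code failed */
	return 0;
}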


@ -710,6 +710,8 @@ int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
if (!objs)
return -ENOMEM;
*objs_out = objs;
handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
if (!handles) {
ret = -ENOMEM;
@ -723,8 +725,6 @@ int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
}
ret = objects_lookup(filp, handles, count, objs);
*objs_out = objs;
out:
kvfree(handles);
return ret;


@ -1029,11 +1029,11 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_pixel_format);
*/
int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline)
{
u8 payload[3] = { MIPI_DCS_SET_TEAR_SCANLINE, scanline >> 8,
scanline & 0xff };
u8 payload[2] = { scanline >> 8, scanline & 0xff };
ssize_t err;
err = mipi_dsi_generic_write(dsi, payload, sizeof(payload));
err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_TEAR_SCANLINE, payload,
sizeof(payload));
if (err < 0)
return err;
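The tear-scanline fix sends MIPI_DCS_SET_TEAR_SCANLINE through mipi_dsi_dcs_write(), which transmits the command byte itself, instead of smuggling the opcode into the payload of a generic write; the remaining parameter is the 16-bit scanline, high byte first. A trivial sketch of just the payload packing:

#include <stdint.h>
#include <stdio.h>

/* The DCS "set tear scanline" parameter is a 16-bit line number sent
 * most-significant byte first; the opcode travels in the packet
 * header, not in this payload. */
static void pack_scanline(uint16_t scanline, uint8_t payload[2])
{
	payload[0] = scanline >> 8;	/* high byte first */
	payload[1] = scanline & 0xff;
}

int main(void)
{
	uint8_t p[2];

	pack_scanline(0x0234, p);
	printf("payload = %02x %02x\n", p[0], p[1]);	/* 02 34 */
	return 0;
}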


@ -713,7 +713,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
ret = pm_runtime_get_sync(gpu->dev);
if (ret < 0) {
dev_err(gpu->dev, "Failed to enable GPU power domain\n");
return ret;
goto pm_put;
}
etnaviv_hw_identify(gpu);
@ -802,6 +802,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
fail:
pm_runtime_mark_last_busy(gpu->dev);
pm_put:
pm_runtime_put_autosuspend(gpu->dev);
return ret;
@ -842,7 +843,7 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
ret = pm_runtime_get_sync(gpu->dev);
if (ret < 0)
return ret;
goto pm_put;
dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
@ -965,6 +966,7 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
ret = 0;
pm_runtime_mark_last_busy(gpu->dev);
pm_put:
pm_runtime_put_autosuspend(gpu->dev);
return ret;
@ -978,7 +980,7 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
dev_err(gpu->dev, "recover hung GPU!\n");
if (pm_runtime_get_sync(gpu->dev) < 0)
return;
goto pm_put;
mutex_lock(&gpu->lock);
@ -997,6 +999,7 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
mutex_unlock(&gpu->lock);
pm_runtime_mark_last_busy(gpu->dev);
pm_put:
pm_runtime_put_autosuspend(gpu->dev);
}
@ -1269,8 +1272,10 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
if (!submit->runtime_resumed) {
ret = pm_runtime_get_sync(gpu->dev);
if (ret < 0)
if (ret < 0) {
pm_runtime_put_noidle(gpu->dev);
return NULL;
}
submit->runtime_resumed = true;
}
@ -1287,6 +1292,7 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
ret = event_alloc(gpu, nr_events, event);
if (ret) {
DRM_ERROR("no free events\n");
pm_runtime_put_noidle(gpu->dev);
return NULL;
}
@ -1457,7 +1463,7 @@ static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
if (gpu->clk_bus) {
ret = clk_prepare_enable(gpu->clk_bus);
if (ret)
return ret;
goto disable_clk_reg;
}
if (gpu->clk_core) {
@ -1480,6 +1486,9 @@ disable_clk_core:
disable_clk_bus:
if (gpu->clk_bus)
clk_disable_unprepare(gpu->clk_bus);
disable_clk_reg:
if (gpu->clk_reg)
clk_disable_unprepare(gpu->clk_reg);
return ret;
}
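The etnaviv hunks (and the nouveau and radeon hunks further down) all follow one rule: pm_runtime_get_sync() increments the device usage counter even when it fails, so every error path after the call must still drop the reference with a put variant. A userspace model of balancing get/put under failure:

#include <stdio.h>

/* Model of pm_runtime_get_sync() semantics: the usage count is
 * incremented even when the resume step fails, so the caller must
 * drop the reference on every path. */
static int usage_count;

static int fake_get_sync(int resume_fails)
{
	usage_count++;			/* counted before the attempt */
	return resume_fails ? -1 : 0;
}

static void fake_put(void)
{
	usage_count--;
}

int main(void)
{
	if (fake_get_sync(1) < 0)
		fake_put();		/* without this, the count leaks */

	printf("usage_count=%d (must be 0)\n", usage_count);
	return 0;
}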


@ -212,9 +212,8 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
if (!pdev->dev.of_node)
return -ENODEV;
hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
if (!hdmi)
return -ENOMEM;
hdmi = dev_get_drvdata(dev);
memset(hdmi, 0, sizeof(*hdmi));
match = of_match_node(dw_hdmi_imx_dt_ids, pdev->dev.of_node);
plat_data = match->data;
@ -239,8 +238,6 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
platform_set_drvdata(pdev, hdmi);
hdmi->hdmi = dw_hdmi_bind(pdev, encoder, plat_data);
/*
@ -270,6 +267,14 @@ static const struct component_ops dw_hdmi_imx_ops = {
static int dw_hdmi_imx_probe(struct platform_device *pdev)
{
struct imx_hdmi *hdmi;
hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
if (!hdmi)
return -ENOMEM;
platform_set_drvdata(pdev, hdmi);
return component_add(&pdev->dev, &dw_hdmi_imx_ops);
}


@ -281,9 +281,10 @@ static void imx_drm_unbind(struct device *dev)
drm_kms_helper_poll_fini(drm);
component_unbind_all(drm->dev, drm);
drm_mode_config_cleanup(drm);
component_unbind_all(drm->dev, drm);
dev_set_drvdata(dev, NULL);
drm_dev_put(drm);


@ -593,9 +593,8 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
int ret;
int i;
imx_ldb = devm_kzalloc(dev, sizeof(*imx_ldb), GFP_KERNEL);
if (!imx_ldb)
return -ENOMEM;
imx_ldb = dev_get_drvdata(dev);
memset(imx_ldb, 0, sizeof(*imx_ldb));
imx_ldb->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
if (IS_ERR(imx_ldb->regmap)) {
@ -703,8 +702,6 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
}
}
dev_set_drvdata(dev, imx_ldb);
return 0;
free_child:
@ -736,6 +733,14 @@ static const struct component_ops imx_ldb_ops = {
static int imx_ldb_probe(struct platform_device *pdev)
{
struct imx_ldb *imx_ldb;
imx_ldb = devm_kzalloc(&pdev->dev, sizeof(*imx_ldb), GFP_KERNEL);
if (!imx_ldb)
return -ENOMEM;
platform_set_drvdata(pdev, imx_ldb);
return component_add(&pdev->dev, &imx_ldb_ops);
}


@ -494,6 +494,13 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve)
return 0;
}
static void imx_tve_disable_regulator(void *data)
{
struct imx_tve *tve = data;
regulator_disable(tve->dac_reg);
}
static bool imx_tve_readable_reg(struct device *dev, unsigned int reg)
{
return (reg % 4 == 0) && (reg <= 0xdc);
@ -546,9 +553,8 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
int irq;
int ret;
tve = devm_kzalloc(dev, sizeof(*tve), GFP_KERNEL);
if (!tve)
return -ENOMEM;
tve = dev_get_drvdata(dev);
memset(tve, 0, sizeof(*tve));
tve->dev = dev;
spin_lock_init(&tve->lock);
@ -618,6 +624,9 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
ret = regulator_enable(tve->dac_reg);
if (ret)
return ret;
ret = devm_add_action_or_reset(dev, imx_tve_disable_regulator, tve);
if (ret)
return ret;
}
tve->clk = devm_clk_get(dev, "tve");
@ -659,27 +668,23 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
dev_set_drvdata(dev, tve);
return 0;
}
static void imx_tve_unbind(struct device *dev, struct device *master,
void *data)
{
struct imx_tve *tve = dev_get_drvdata(dev);
if (!IS_ERR(tve->dac_reg))
regulator_disable(tve->dac_reg);
}
static const struct component_ops imx_tve_ops = {
.bind = imx_tve_bind,
.unbind = imx_tve_unbind,
};
static int imx_tve_probe(struct platform_device *pdev)
{
struct imx_tve *tve;
tve = devm_kzalloc(&pdev->dev, sizeof(*tve), GFP_KERNEL);
if (!tve)
return -ENOMEM;
platform_set_drvdata(pdev, tve);
return component_add(&pdev->dev, &imx_tve_ops);
}


@ -438,21 +438,13 @@ static int ipu_drm_bind(struct device *dev, struct device *master, void *data)
struct ipu_client_platformdata *pdata = dev->platform_data;
struct drm_device *drm = data;
struct ipu_crtc *ipu_crtc;
int ret;
ipu_crtc = devm_kzalloc(dev, sizeof(*ipu_crtc), GFP_KERNEL);
if (!ipu_crtc)
return -ENOMEM;
ipu_crtc = dev_get_drvdata(dev);
memset(ipu_crtc, 0, sizeof(*ipu_crtc));
ipu_crtc->dev = dev;
ret = ipu_crtc_init(ipu_crtc, pdata, drm);
if (ret)
return ret;
dev_set_drvdata(dev, ipu_crtc);
return 0;
return ipu_crtc_init(ipu_crtc, pdata, drm);
}
static void ipu_drm_unbind(struct device *dev, struct device *master,
@ -474,6 +466,7 @@ static const struct component_ops ipu_crtc_ops = {
static int ipu_drm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ipu_crtc *ipu_crtc;
int ret;
if (!dev->platform_data)
@ -483,6 +476,12 @@ static int ipu_drm_probe(struct platform_device *pdev)
if (ret)
return ret;
ipu_crtc = devm_kzalloc(dev, sizeof(*ipu_crtc), GFP_KERNEL);
if (!ipu_crtc)
return -ENOMEM;
dev_set_drvdata(dev, ipu_crtc);
return component_add(dev, &ipu_crtc_ops);
}


@ -204,9 +204,8 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
u32 bus_format = 0;
const char *fmt;
imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL);
if (!imxpd)
return -ENOMEM;
imxpd = dev_get_drvdata(dev);
memset(imxpd, 0, sizeof(*imxpd));
edidp = of_get_property(np, "edid", &imxpd->edid_len);
if (edidp)
@ -236,8 +235,6 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
dev_set_drvdata(dev, imxpd);
return 0;
}
@ -259,6 +256,14 @@ static const struct component_ops imx_pd_ops = {
static int imx_pd_probe(struct platform_device *pdev)
{
struct imx_parallel_display *imxpd;
imxpd = devm_kzalloc(&pdev->dev, sizeof(*imxpd), GFP_KERNEL);
if (!imxpd)
return -ENOMEM;
platform_set_drvdata(pdev, imxpd);
return component_add(&pdev->dev, &imx_pd_ops);
}


@ -713,10 +713,19 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
/* Turn on the resources */
pm_runtime_get_sync(gmu->dev);
/*
* "enable" the GX power domain which won't actually do anything but it
* will make sure that the refcounting is correct in case we need to
* bring down the GX after a GMU failure
*/
if (!IS_ERR_OR_NULL(gmu->gxpd))
pm_runtime_get_sync(gmu->gxpd);
/* Use a known rate to bring up the GMU */
clk_set_rate(gmu->core_clk, 200000000);
ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
if (ret) {
pm_runtime_put(gmu->gxpd);
pm_runtime_put(gmu->dev);
return ret;
}
@ -752,19 +761,12 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
/* Set the GPU to the highest power frequency */
__a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
/*
* "enable" the GX power domain which won't actually do anything but it
* will make sure that the refcounting is correct in case we need to
* bring down the GX after a GMU failure
*/
if (!IS_ERR_OR_NULL(gmu->gxpd))
pm_runtime_get(gmu->gxpd);
out:
/* On failure, shut down the GMU to leave it in a good state */
if (ret) {
disable_irq(gmu->gmu_irq);
a6xx_rpmh_stop(gmu);
pm_runtime_put(gmu->gxpd);
pm_runtime_put(gmu->dev);
}


@ -381,7 +381,7 @@ static void dpu_crtc_frame_event_cb(void *data, u32 event)
spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
if (!fevent) {
DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event);
DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event);
return;
}
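A crtc frame-event overflow can fire at vblank rate, so an unthrottled DRM_ERROR() can flood the log; the ratelimited variant drops repeats once a burst is exhausted within a time window. A minimal userspace sketch of that throttling idea (the interval and burst values are illustrative, not the kernel defaults):

#include <stdio.h>
#include <time.h>

/* At most 'burst' messages per 'interval' seconds; everything else
 * in the window is silently dropped. */
static int log_ratelimited(const char *msg)
{
	static time_t window_start;
	static int emitted;
	const int interval = 5, burst = 3;
	time_t now = time(NULL);

	if (now - window_start >= interval) {
		window_start = now;
		emitted = 0;
	}
	if (emitted >= burst)
		return 0;	/* suppressed */
	emitted++;
	fprintf(stderr, "%s\n", msg);
	return 1;
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		log_ratelimited("crtc event overflow");
	return 0;
}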


@ -977,10 +977,8 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
static int msm_gem_new_impl(struct drm_device *dev,
uint32_t size, uint32_t flags,
struct drm_gem_object **obj,
bool struct_mutex_locked)
struct drm_gem_object **obj)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
switch (flags & MSM_BO_CACHE_MASK) {
@ -1006,15 +1004,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
INIT_LIST_HEAD(&msm_obj->submit_entry);
INIT_LIST_HEAD(&msm_obj->vmas);
if (struct_mutex_locked) {
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
} else {
mutex_lock(&dev->struct_mutex);
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
mutex_unlock(&dev->struct_mutex);
}
*obj = &msm_obj->base;
return 0;
@ -1024,6 +1013,7 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
struct drm_gem_object *obj = NULL;
bool use_vram = false;
int ret;
@ -1044,14 +1034,15 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
if (size == 0)
return ERR_PTR(-EINVAL);
ret = msm_gem_new_impl(dev, size, flags, &obj, struct_mutex_locked);
ret = msm_gem_new_impl(dev, size, flags, &obj);
if (ret)
goto fail;
msm_obj = to_msm_bo(obj);
if (use_vram) {
struct msm_gem_vma *vma;
struct page **pages;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
mutex_lock(&msm_obj->lock);
@ -1086,6 +1077,15 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
}
if (struct_mutex_locked) {
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
} else {
mutex_lock(&dev->struct_mutex);
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
mutex_unlock(&dev->struct_mutex);
}
return obj;
fail:
@ -1108,6 +1108,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct dma_buf *dmabuf, struct sg_table *sgt)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
struct drm_gem_object *obj;
uint32_t size;
@ -1121,7 +1122,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
size = PAGE_ALIGN(dmabuf->size);
ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj, false);
ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
if (ret)
goto fail;
@ -1146,6 +1147,11 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
}
mutex_unlock(&msm_obj->lock);
mutex_lock(&dev->struct_mutex);
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
mutex_unlock(&dev->struct_mutex);
return obj;
fail:


@ -83,18 +83,20 @@ nv50_head_atomic_check_dither(struct nv50_head_atom *armh,
{
u32 mode = 0x00;
if (asyc->dither.mode == DITHERING_MODE_AUTO) {
if (asyh->base.depth > asyh->or.bpc * 3)
mode = DITHERING_MODE_DYNAMIC2X2;
} else {
mode = asyc->dither.mode;
}
if (asyc->dither.mode) {
if (asyc->dither.mode == DITHERING_MODE_AUTO) {
if (asyh->base.depth > asyh->or.bpc * 3)
mode = DITHERING_MODE_DYNAMIC2X2;
} else {
mode = asyc->dither.mode;
}
if (asyc->dither.depth == DITHERING_DEPTH_AUTO) {
if (asyh->or.bpc >= 8)
mode |= DITHERING_DEPTH_8BPC;
} else {
mode |= asyc->dither.depth;
if (asyc->dither.depth == DITHERING_DEPTH_AUTO) {
if (asyh->or.bpc >= 8)
mode |= DITHERING_DEPTH_8BPC;
} else {
mode |= asyc->dither.depth;
}
}
asyh->dither.enable = mode;


@ -54,8 +54,10 @@ nouveau_debugfs_strap_peek(struct seq_file *m, void *data)
int ret;
ret = pm_runtime_get_sync(drm->dev->dev);
if (ret < 0 && ret != -EACCES)
if (ret < 0 && ret != -EACCES) {
pm_runtime_put_autosuspend(drm->dev->dev);
return ret;
}
seq_printf(m, "0x%08x\n",
nvif_rd32(&drm->client.device.object, 0x101000));


@ -1052,8 +1052,10 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
/* need to bring up power immediately if opening device */
ret = pm_runtime_get_sync(dev->dev);
if (ret < 0 && ret != -EACCES)
if (ret < 0 && ret != -EACCES) {
pm_runtime_put_autosuspend(dev->dev);
return ret;
}
get_task_comm(tmpname, current);
snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
@ -1135,8 +1137,10 @@ nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
long ret;
ret = pm_runtime_get_sync(dev->dev);
if (ret < 0 && ret != -EACCES)
if (ret < 0 && ret != -EACCES) {
pm_runtime_put_autosuspend(dev->dev);
return ret;
}
switch (_IOC_NR(cmd) - DRM_COMMAND_BASE) {
case DRM_NOUVEAU_NVIF:


@ -45,8 +45,10 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
int ret;
ret = pm_runtime_get_sync(dev);
if (WARN_ON(ret < 0 && ret != -EACCES))
if (WARN_ON(ret < 0 && ret != -EACCES)) {
pm_runtime_put_autosuspend(dev);
return;
}
if (gem->import_attach)
drm_prime_gem_destroy(gem, nvbo->bo.sg);


@ -96,12 +96,9 @@ nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags)
else
nvbe->ttm.ttm.func = &nv50_sgdma_backend;
if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags))
/*
* A failing ttm_dma_tt_init() will call ttm_tt_destroy()
* and thus our nouveau_sgdma_destroy() hook, so we don't need
* to free nvbe here.
*/
if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags)) {
kfree(nvbe);
return NULL;
}
return &nvbe->ttm.ttm;
}


@ -1935,7 +1935,7 @@ static const struct drm_display_mode lg_lb070wv8_mode = {
static const struct panel_desc lg_lb070wv8 = {
.modes = &lg_lb070wv8_mode,
.num_modes = 1,
.bpc = 16,
.bpc = 8,
.size = {
.width = 151,
.height = 91,


@ -4366,7 +4366,7 @@ static int ci_set_mc_special_registers(struct radeon_device *rdev,
table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
}
j++;
if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
return -EINVAL;
if (!pi->mem_gddr5) {
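Classic off-by-one: after the j++ above, j is used to index arrays sized SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE, so j == SIZE is already the first out-of-range index and the check must be >=, not >. A compact demonstration:

#include <stdio.h>

#define MC_REG_ARRAY_SIZE 16

int main(void)
{
	int data[MC_REG_ARRAY_SIZE];
	int j = MC_REG_ARRAY_SIZE;	/* first invalid index */

	/* Old check: 'j > SIZE' lets j == SIZE through, and data[SIZE]
	 * is one element past the end of the array. */
	if (j > MC_REG_ARRAY_SIZE)
		return 1;		/* not taken: overflow would follow */

	/* Fixed check: valid indices are 0 .. SIZE - 1. */
	if (j >= MC_REG_ARRAY_SIZE) {
		printf("rejected out-of-range index %d\n", j);
		return 1;
	}
	data[j] = 0;
	return 0;
}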


@ -631,8 +631,10 @@ radeon_crtc_set_config(struct drm_mode_set *set,
dev = set->crtc->dev;
ret = pm_runtime_get_sync(dev->dev);
if (ret < 0)
if (ret < 0) {
pm_runtime_put_autosuspend(dev->dev);
return ret;
}
ret = drm_crtc_helper_set_config(set, ctx);


@ -174,12 +174,7 @@ int radeon_no_wb;
int radeon_modeset = -1;
int radeon_dynclks = -1;
int radeon_r4xx_atom = 0;
#ifdef __powerpc__
/* Default to PCI on PowerPC (fdo #95017) */
int radeon_agpmode = -1;
#else
int radeon_agpmode = 0;
#endif
int radeon_vram_limit = 0;
int radeon_gart_size = -1; /* auto */
int radeon_benchmarking = 0;
@ -555,8 +550,10 @@ long radeon_drm_ioctl(struct file *filp,
long ret;
dev = file_priv->minor->dev;
ret = pm_runtime_get_sync(dev->dev);
if (ret < 0)
if (ret < 0) {
pm_runtime_put_autosuspend(dev->dev);
return ret;
}
ret = drm_ioctl(filp, cmd, arg);


@ -638,8 +638,10 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
file_priv->driver_priv = NULL;
r = pm_runtime_get_sync(dev->dev);
if (r < 0)
if (r < 0) {
pm_runtime_put_autosuspend(dev->dev);
return r;
}
/* new gpu have virtual address space support */
if (rdev->family >= CHIP_CAYMAN) {


@ -424,9 +424,12 @@ static void ltdc_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
struct drm_device *ddev = crtc->dev;
DRM_DEBUG_DRIVER("\n");
pm_runtime_get_sync(ddev->dev);
/* Sets the background color value */
reg_write(ldev->regs, LTDC_BCCR, BCCR_BCBLACK);


@ -143,12 +143,16 @@ static int panel_connector_get_modes(struct drm_connector *connector)
int i;
for (i = 0; i < timings->num_timings; i++) {
struct drm_display_mode *mode = drm_mode_create(dev);
struct drm_display_mode *mode;
struct videomode vm;
if (videomode_from_timings(timings, &vm, i))
break;
mode = drm_mode_create(dev);
if (!mode)
break;
drm_display_mode_from_videomode(&vm, mode);
mode->type = DRM_MODE_TYPE_DRIVER;
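The tilcdc loop previously called drm_mode_create() before videomode_from_timings(), so a failed conversion broke out of the loop and leaked the freshly created mode; the fix validates first and allocates second. The same shape in plain C (illustrative names):

#include <stdlib.h>

/* Validate first, allocate second, so a validation failure cannot
 * leak the allocation. */
static int validate(int i)
{
	return i < 3 ? 0 : -1;	/* pretend entries >= 3 are invalid */
}

int main(void)
{
	for (int i = 0; i < 5; i++) {
		int *mode;

		if (validate(i))
			break;		/* nothing allocated yet: no leak */

		mode = malloc(sizeof(*mode));
		if (!mode)
			break;
		*mode = i;
		free(mode);		/* stand-in for handing the mode off */
	}
	return 0;
}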

Some files were not shown because too many files have changed in this diff.