This is the 4.9.127 stable release

This is the 4.9.127 stable release

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlucuBEACgkQONu9yGCS
aT4HlBAAnXzLh/c8dKPdWzU3Bx6L9If7syeW0Gl06AtGsUrVqrggOnumdXRgPV3y
/4Br6piwrpjHP9P7m/pS03Ol3+DvD4JKhMRFr0VzGwk4e32wZ8cNd7qx5RF/4z9s
tpebYJVJoRRmefi1EJyj+NAGgDCk+7A1pzW3W2NK/mirQdkHywfLJHhQDyHgmWQy
jyDpz2ihNdorKHN8ogK7FYQ7JZC9tnooB4GZFObaB8nkaeMaRGD2V2jrHRi8a4tv
5Bm9HmlGJOqtsstsr1Em6W+UNRKfztfDmYuk+H9xCo8odCSHeyx30nUDxjp6Bo2f
OvC/x1x6JIVNsoAgzlRZKMJR1Lv0IK3SEVPrecOOSWeardtt92JUQjoLaLmW03xR
dlnXZyDeO75Qbdgo/5b/d6RiVNGSv0CPDOQSREDMX6Rvf9P8ezceF/iDz0SrH9Zm
GkjQCQ2yyqrrSID5baB562AHszBa+rdqgp4RBheE+M5s7UALL09tFJquaijZ8jvO
dMs6f847MeMARFIGLNKff9cvWI2zanTwOPX+E3rcj88rN79jvIsyD/hDzkqHoR7J
Rhb2K7478gC1N2XTOu97hNmB9qTNIn9U1hvS/f2d7SGyjOU9WR+XgCgDUWZsZnPP
H4eeblr9iaCUHI/4Q+nWUuQPKnCdGb9Ay9lyq9uqHIQBz+uVZ4I=
=hV8i
-----END PGP SIGNATURE-----

Merge tag 'v4.9.127' into imx_4.9.x_2.3.0_ga

This is the 4.9.127 stable release

Conflicts:
	drivers/gpu/drm/imx/imx-ldb.c
	drivers/staging/android/ion/ion_priv.h
commit ded0ac8f40
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 123
+SUBLEVEL = 127
 EXTRAVERSION =
 NAME = Roaring Lionus
 
@@ -526,24 +526,19 @@ SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, const char __user *, path,
 SYSCALL_DEFINE1(osf_utsname, char __user *, name)
 {
 	int error;
+	char tmp[5 * 32];
 
 	down_read(&uts_sem);
-	error = -EFAULT;
-	if (copy_to_user(name + 0, utsname()->sysname, 32))
-		goto out;
-	if (copy_to_user(name + 32, utsname()->nodename, 32))
-		goto out;
-	if (copy_to_user(name + 64, utsname()->release, 32))
-		goto out;
-	if (copy_to_user(name + 96, utsname()->version, 32))
-		goto out;
-	if (copy_to_user(name + 128, utsname()->machine, 32))
-		goto out;
+	memcpy(tmp + 0 * 32, utsname()->sysname, 32);
+	memcpy(tmp + 1 * 32, utsname()->nodename, 32);
+	memcpy(tmp + 2 * 32, utsname()->release, 32);
+	memcpy(tmp + 3 * 32, utsname()->version, 32);
+	memcpy(tmp + 4 * 32, utsname()->machine, 32);
+	up_read(&uts_sem);
 
-	error = 0;
- out:
-	up_read(&uts_sem);
-	return error;
+	if (copy_to_user(name, tmp, sizeof(tmp)))
+		return -EFAULT;
+	return 0;
 }
 
 SYSCALL_DEFINE0(getpagesize)
@@ -561,24 +556,22 @@ SYSCALL_DEFINE0(getdtablesize)
  */
 SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen)
 {
-	unsigned len;
-	int i;
+	int len, err = 0;
+	char *kname;
+	char tmp[32];
 
-	if (!access_ok(VERIFY_WRITE, name, namelen))
-		return -EFAULT;
-
-	len = namelen;
-	if (len > 32)
-		len = 32;
+	if (namelen < 0 || namelen > 32)
+		namelen = 32;
 
 	down_read(&uts_sem);
-	for (i = 0; i < len; ++i) {
-		__put_user(utsname()->domainname[i], name + i);
-		if (utsname()->domainname[i] == '\0')
-			break;
-	}
+	kname = utsname()->domainname;
+	len = strnlen(kname, namelen);
+	len = min(len + 1, namelen);
+	memcpy(tmp, kname, len);
 	up_read(&uts_sem);
 
+	if (copy_to_user(name, tmp, len))
+		return -EFAULT;
 	return 0;
 }
 
@@ -741,13 +734,14 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
 	};
 	unsigned long offset;
 	const char *res;
-	long len, err = -EINVAL;
+	long len;
+	char tmp[__NEW_UTS_LEN + 1];
 
 	offset = command-1;
 	if (offset >= ARRAY_SIZE(sysinfo_table)) {
 		/* Digital UNIX has a few unpublished interfaces here */
 		printk("sysinfo(%d)", command);
-		goto out;
+		return -EINVAL;
 	}
 
 	down_read(&uts_sem);
@@ -755,13 +749,11 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
 	len = strlen(res)+1;
 	if ((unsigned long)len > (unsigned long)count)
 		len = count;
-	if (copy_to_user(buf, res, len))
-		err = -EFAULT;
-	else
-		err = 0;
+	memcpy(tmp, res, len);
 	up_read(&uts_sem);
-out:
-	return err;
+	if (copy_to_user(buf, tmp, len))
+		return -EFAULT;
+	return 0;
 }
 
 SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer,
 
@@ -18,7 +18,7 @@ endif
 
 KBUILD_DEFCONFIG := nsim_700_defconfig
 
-cflags-y	+= -fno-common -pipe -fno-builtin -D__linux__
+cflags-y	+= -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
 cflags-$(CONFIG_ISA_ARCOMPACT)	+= -mA7
 cflags-$(CONFIG_ISA_ARCV2)	+= -mcpu=archs
 
@@ -141,16 +141,3 @@ dtbs: scripts
 
 archclean:
 	$(Q)$(MAKE) $(clean)=$(boot)
-
-# Hacks to enable final link due to absence of link-time branch relexation
-# and gcc choosing optimal(shorter) branches at -O3
-#
-# vineetg Feb 2010: -mlong-calls switched off for overall kernel build
-# However lib/decompress_inflate.o (.init.text) calls
-# zlib_inflate_workspacesize (.text) causing relocation errors.
-# Thus forcing all exten calls in this file to be long calls
-export CFLAGS_decompress_inflate.o = -mmedium-calls
-export CFLAGS_initramfs.o = -mmedium-calls
-ifdef CONFIG_SMP
-export CFLAGS_core.o = -mmedium-calls
-endif
 
@@ -17,8 +17,11 @@
 #ifndef __ASM_ARC_UDELAY_H
 #define __ASM_ARC_UDELAY_H
 
+#include <asm-generic/types.h>
 #include <asm/param.h>		/* HZ */
 
+extern unsigned long loops_per_jiffy;
+
 static inline void __delay(unsigned long loops)
 {
 	__asm__ __volatile__(
 
@@ -34,9 +34,7 @@ struct machine_desc {
 	const char		*name;
 	const char		**dt_compat;
 	void			(*init_early)(void);
-#ifdef CONFIG_SMP
 	void			(*init_per_cpu)(unsigned int);
-#endif
 	void			(*init_machine)(void);
 	void			(*init_late)(void);
 
@@ -31,10 +31,10 @@ void __init init_IRQ(void)
 	/* a SMP H/w block could do IPI IRQ request here */
 	if (plat_smp_ops.init_per_cpu)
 		plat_smp_ops.init_per_cpu(smp_processor_id());
+#endif
 
 	if (machine_desc->init_per_cpu)
 		machine_desc->init_per_cpu(smp_processor_id());
-#endif
 }
 
 /*
 
@@ -44,7 +44,8 @@ SYSCALL_DEFINE0(arc_gettls)
 SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
 {
 	struct pt_regs *regs = current_pt_regs();
-	int uval = -EFAULT;
+	u32 uval;
+	int ret;
 
 	/*
 	 * This is only for old cores lacking LLOCK/SCOND, which by defintion
@@ -57,23 +58,47 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
 	/* Z indicates to userspace if operation succeded */
 	regs->status32 &= ~STATUS_Z_MASK;
 
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
-		return -EFAULT;
+	ret = access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr));
+	if (!ret)
+		goto fail;
 
+again:
 	preempt_disable();
 
-	if (__get_user(uval, uaddr))
-		goto done;
+	ret = __get_user(uval, uaddr);
+	if (ret)
+		goto fault;
 
-	if (uval == expected) {
-		if (!__put_user(new, uaddr))
-			regs->status32 |= STATUS_Z_MASK;
-	}
+	if (uval != expected)
+		goto out;
 
-done:
+	ret = __put_user(new, uaddr);
+	if (ret)
+		goto fault;
+
+	regs->status32 |= STATUS_Z_MASK;
+
+out:
 	preempt_enable();
+	return uval;
 
-	return uval;
+fault:
+	preempt_enable();
+
+	if (unlikely(ret != -EFAULT))
+		goto fail;
+
+	down_read(&current->mm->mmap_sem);
+	ret = fixup_user_fault(current, current->mm, (unsigned long) uaddr,
+			       FAULT_FLAG_WRITE, NULL);
+	up_read(&current->mm->mmap_sem);
+
+	if (likely(!ret))
+		goto again;
+
+fail:
+	force_sig(SIGSEGV, current);
+	return ret;
 }
 
 void arch_cpu_idle(void)
 
@@ -840,7 +840,7 @@ void flush_cache_mm(struct mm_struct *mm)
 void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
 		      unsigned long pfn)
 {
-	unsigned int paddr = pfn << PAGE_SHIFT;
+	phys_addr_t paddr = pfn << PAGE_SHIFT;
 
 	u_vaddr &= PAGE_MASK;
 
@@ -860,8 +860,9 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page,
 		     unsigned long u_vaddr)
 {
 	/* TBD: do we really need to clear the kernel mapping */
-	__flush_dcache_page(page_address(page), u_vaddr);
-	__flush_dcache_page(page_address(page), page_address(page));
+	__flush_dcache_page((phys_addr_t)page_address(page), u_vaddr);
+	__flush_dcache_page((phys_addr_t)page_address(page),
+			    (phys_addr_t)page_address(page));
 
 }
 
@@ -21,6 +21,7 @@
 #error "Incorrect ctop.h include"
 #endif
 
+#include <linux/types.h>
 #include <soc/nps/common.h>
 
 /* core auxiliary registers */
 
@@ -74,6 +74,11 @@
 	};
 };
 
+/* Table Table 5-79 of the TRM shows 480ab000 is reserved */
+&usb_otg_hs {
+	status = "disabled";
+};
+
 &iva {
 	status = "disabled";
 };
 
@@ -533,6 +533,8 @@
 
 		touchscreen-size-x = <480>;
 		touchscreen-size-y = <272>;
+
+		wakeup-source;
 	};
 
 	tlv320aic3106: tlv320aic3106@1b {
 
@@ -128,7 +128,7 @@
 			reg = <0x18008000 0x100>;
 			#address-cells = <1>;
 			#size-cells = <0>;
-			interrupts = <GIC_SPI 85 IRQ_TYPE_NONE>;
+			interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
 			clock-frequency = <100000>;
 			status = "disabled";
 		};
@@ -157,7 +157,7 @@
 			reg = <0x1800b000 0x100>;
 			#address-cells = <1>;
 			#size-cells = <0>;
-			interrupts = <GIC_SPI 86 IRQ_TYPE_NONE>;
+			interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
 			clock-frequency = <100000>;
 			status = "disabled";
 		};
@@ -168,7 +168,7 @@
 
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 0>;
-			interrupt-map = <0 0 0 0 &gic GIC_SPI 100 IRQ_TYPE_NONE>;
+			interrupt-map = <0 0 0 0 &gic GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
 
 			linux,pci-domain = <0>;
 
@@ -190,10 +190,10 @@
 			compatible = "brcm,iproc-msi";
 			msi-controller;
 			interrupt-parent = <&gic>;
-			interrupts = <GIC_SPI 96 IRQ_TYPE_NONE>,
-				     <GIC_SPI 97 IRQ_TYPE_NONE>,
-				     <GIC_SPI 98 IRQ_TYPE_NONE>,
-				     <GIC_SPI 99 IRQ_TYPE_NONE>;
+			interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
 		};
 	};
 
@@ -203,7 +203,7 @@
 
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 0>;
-			interrupt-map = <0 0 0 0 &gic GIC_SPI 106 IRQ_TYPE_NONE>;
+			interrupt-map = <0 0 0 0 &gic GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
 
 			linux,pci-domain = <1>;
 
@@ -225,10 +225,10 @@
 			compatible = "brcm,iproc-msi";
 			msi-controller;
 			interrupt-parent = <&gic>;
-			interrupts = <GIC_SPI 102 IRQ_TYPE_NONE>,
-				     <GIC_SPI 103 IRQ_TYPE_NONE>,
-				     <GIC_SPI 104 IRQ_TYPE_NONE>,
-				     <GIC_SPI 105 IRQ_TYPE_NONE>;
+			interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
 		};
 	};
 
@@ -288,7 +288,7 @@
 			reg = <0x38000 0x50>;
 			#address-cells = <1>;
 			#size-cells = <0>;
-			interrupts = <GIC_SPI 89 IRQ_TYPE_NONE>;
+			interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
 			clock-frequency = <100000>;
 		};
 
@@ -375,7 +375,7 @@
 
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 0>;
-			interrupt-map = <0 0 0 0 &gic GIC_SPI 131 IRQ_TYPE_NONE>;
+			interrupt-map = <0 0 0 0 &gic GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
 
 			linux,pci-domain = <0>;
 
@@ -397,10 +397,10 @@
 			compatible = "brcm,iproc-msi";
 			msi-controller;
 			interrupt-parent = <&gic>;
-			interrupts = <GIC_SPI 127 IRQ_TYPE_NONE>,
-				     <GIC_SPI 128 IRQ_TYPE_NONE>,
-				     <GIC_SPI 129 IRQ_TYPE_NONE>,
-				     <GIC_SPI 130 IRQ_TYPE_NONE>;
+			interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>;
 			brcm,pcie-msi-inten;
 		};
 	};
@@ -411,7 +411,7 @@
 
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 0>;
-			interrupt-map = <0 0 0 0 &gic GIC_SPI 137 IRQ_TYPE_NONE>;
+			interrupt-map = <0 0 0 0 &gic GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
 
 			linux,pci-domain = <1>;
 
@@ -433,10 +433,10 @@
 			compatible = "brcm,iproc-msi";
 			msi-controller;
 			interrupt-parent = <&gic>;
-			interrupts = <GIC_SPI 133 IRQ_TYPE_NONE>,
-				     <GIC_SPI 134 IRQ_TYPE_NONE>,
-				     <GIC_SPI 135 IRQ_TYPE_NONE>,
-				     <GIC_SPI 136 IRQ_TYPE_NONE>;
+			interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
 			brcm,pcie-msi-inten;
 		};
 	};
@@ -447,7 +447,7 @@
 
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 0>;
-			interrupt-map = <0 0 0 0 &gic GIC_SPI 143 IRQ_TYPE_NONE>;
+			interrupt-map = <0 0 0 0 &gic GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
 
 			linux,pci-domain = <2>;
 
@@ -469,10 +469,10 @@
 			compatible = "brcm,iproc-msi";
 			msi-controller;
 			interrupt-parent = <&gic>;
-			interrupts = <GIC_SPI 139 IRQ_TYPE_NONE>,
-				     <GIC_SPI 140 IRQ_TYPE_NONE>,
-				     <GIC_SPI 141 IRQ_TYPE_NONE>,
-				     <GIC_SPI 142 IRQ_TYPE_NONE>;
+			interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
 			brcm,pcie-msi-inten;
 		};
 	};
 
@@ -377,11 +377,7 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 			reg = <0x226000 0x1000>;
-			interrupts = <42 IRQ_TYPE_EDGE_BOTH
-				43 IRQ_TYPE_EDGE_BOTH 44 IRQ_TYPE_EDGE_BOTH
-				45 IRQ_TYPE_EDGE_BOTH 46 IRQ_TYPE_EDGE_BOTH
-				47 IRQ_TYPE_EDGE_BOTH 48 IRQ_TYPE_EDGE_BOTH
-				49 IRQ_TYPE_EDGE_BOTH 50 IRQ_TYPE_EDGE_BOTH>;
+			interrupts = <42 43 44 45 46 47 48 49 50>;
 			ti,ngpio = <144>;
 			ti,davinci-gpio-unbanked = <0>;
 			status = "disabled";
 
@@ -205,6 +205,7 @@
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <0x70>;
+			reset-gpio = <&gpio TEGRA_GPIO(BB, 0) GPIO_ACTIVE_LOW>;
 		};
 	};
 
@@ -145,9 +145,11 @@ CONFIG_USB_STORAGE=y
 CONFIG_USB_CHIPIDEA=y
 CONFIG_USB_CHIPIDEA_UDC=y
 CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_CHIPIDEA_ULPI=y
 CONFIG_NOP_USB_XCEIV=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_ETH=m
+CONFIG_USB_ULPI_BUS=y
 CONFIG_MMC=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 
@@ -894,19 +894,35 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
 	pmd = stage2_get_pmd(kvm, cache, addr);
 	VM_BUG_ON(!pmd);
 
-	/*
-	 * Mapping in huge pages should only happen through a fault.  If a
-	 * page is merged into a transparent huge page, the individual
-	 * subpages of that huge page should be unmapped through MMU
-	 * notifiers before we get here.
-	 *
-	 * Merging of CompoundPages is not supported; they should become
-	 * splitting first, unmapped, merged, and mapped back in on-demand.
-	 */
-	VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
-
 	old_pmd = *pmd;
 	if (pmd_present(old_pmd)) {
+		/*
+		 * Multiple vcpus faulting on the same PMD entry, can
+		 * lead to them sequentially updating the PMD with the
+		 * same value. Following the break-before-make
+		 * (pmd_clear() followed by tlb_flush()) process can
+		 * hinder forward progress due to refaults generated
+		 * on missing translations.
+		 *
+		 * Skip updating the page table if the entry is
+		 * unchanged.
+		 */
+		if (pmd_val(old_pmd) == pmd_val(*new_pmd))
+			return 0;
+
+		/*
+		 * Mapping in huge pages should only happen through a
+		 * fault. If a page is merged into a transparent huge
+		 * page, the individual subpages of that huge page
+		 * should be unmapped through MMU notifiers before we
+		 * get here.
+		 *
+		 * Merging of CompoundPages is not supported; they
+		 * should become splitting first, unmapped, merged,
+		 * and mapped back in on-demand.
+		 */
+		VM_BUG_ON(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
+
 		pmd_clear(pmd);
 		kvm_tlb_flush_vmid_ipa(kvm, addr);
 	} else {
@@ -962,6 +978,10 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
 	/* Create 2nd stage page table mapping - Level 3 */
 	old_pte = *pte;
 	if (pte_present(old_pte)) {
+		/* Skip page table update if there is no change */
+		if (pte_val(old_pte) == pte_val(*new_pte))
+			return 0;
+
 		kvm_set_pte(pte, __pte(0));
 		kvm_tlb_flush_vmid_ipa(kvm, addr);
 	} else {
 
@@ -104,6 +104,45 @@ void omap5_erratum_workaround_801819(void)
 static inline void omap5_erratum_workaround_801819(void) { }
 #endif
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+/*
+ * Configure ACR and enable ACTLR[0] (Enable invalidates of BTB with
+ * ICIALLU) to activate the workaround for secondary Core.
+ * NOTE: it is assumed that the primary core's configuration is done
+ * by the boot loader (kernel will detect a misconfiguration and complain
+ * if this is not done).
+ *
+ * In General Purpose(GP) devices, ACR bit settings can only be done
+ * by ROM code in "secure world" using the smc call and there is no
+ * option to update the "firmware" on such devices. This also works for
+ * High security(HS) devices, as a backup option in case the
+ * "update" is not done in the "security firmware".
+ */
+static void omap5_secondary_harden_predictor(void)
+{
+	u32 acr, acr_mask;
+
+	asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr));
+
+	/*
+	 * ACTLR[0] (Enable invalidates of BTB with ICIALLU)
+	 */
+	acr_mask = BIT(0);
+
+	/* Do we already have it done.. if yes, skip expensive smc */
+	if ((acr & acr_mask) == acr_mask)
+		return;
+
+	acr |= acr_mask;
+	omap_smc1(OMAP5_DRA7_MON_SET_ACR_INDEX, acr);
+
+	pr_debug("%s: ARM ACR setup for CVE_2017_5715 applied on CPU%d\n",
+		 __func__, smp_processor_id());
+}
+#else
+static inline void omap5_secondary_harden_predictor(void) { }
+#endif
+
 static void omap4_secondary_init(unsigned int cpu)
 {
 	/*
@@ -126,6 +165,8 @@ static void omap4_secondary_init(unsigned int cpu)
 		set_cntfreq();
 		/* Configure ACR to disable streaming WA for 801819 */
 		omap5_erratum_workaround_801819();
+		/* Enable ACR to allow for ICUALLU workaround */
+		omap5_secondary_harden_predictor();
 	}
 
 	/*
 
@@ -185,7 +185,7 @@ static int pxa_irq_suspend(void)
 {
 	int i;
 
-	for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
+	for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
 		void __iomem *base = irq_base(i);
 
 		saved_icmr[i] = __raw_readl(base + ICMR);
@@ -204,7 +204,7 @@ static void pxa_irq_resume(void)
 {
 	int i;
 
-	for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
+	for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
 		void __iomem *base = irq_base(i);
 
 		__raw_writel(saved_icmr[i], base + ICMR);
 
@@ -16,6 +16,7 @@ config ARCH_ROCKCHIP
 	select ROCKCHIP_TIMER
 	select ARM_GLOBAL_TIMER
 	select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
+	select PM
 	help
 	  Support for Rockchip's Cortex-A9 Single-to-Quad-Core-SoCs
 	  containing the RK2928, RK30xx and RK31xx series.
 
@@ -722,19 +722,28 @@ int __mark_rodata_ro(void *unused)
 	return 0;
 }
 
+static int kernel_set_to_readonly __read_mostly;
+
 void mark_rodata_ro(void)
 {
+	kernel_set_to_readonly = 1;
 	stop_machine(__mark_rodata_ro, NULL, NULL);
 }
 
 void set_kernel_text_rw(void)
 {
+	if (!kernel_set_to_readonly)
+		return;
+
 	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
 				current->active_mm);
 }
 
 void set_kernel_text_ro(void)
 {
+	if (!kernel_set_to_readonly)
+		return;
+
 	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
 				current->active_mm);
 }
 
@@ -125,6 +125,7 @@ config ARCH_ROCKCHIP
 	select GPIOLIB
 	select PINCTRL
 	select PINCTRL_ROCKCHIP
+	select PM
 	select ROCKCHIP_TIMER
 	help
 	  This enables support for the ARMv8 based Rockchip chipsets,
 
@@ -393,7 +393,7 @@
 			reg = <0x66080000 0x100>;
 			#address-cells = <1>;
 			#size-cells = <0>;
-			interrupts = <GIC_SPI 394 IRQ_TYPE_NONE>;
+			interrupts = <GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH>;
 			clock-frequency = <100000>;
 			status = "disabled";
 		};
@@ -421,7 +421,7 @@
 			reg = <0x660b0000 0x100>;
 			#address-cells = <1>;
 			#size-cells = <0>;
-			interrupts = <GIC_SPI 395 IRQ_TYPE_NONE>;
+			interrupts = <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>;
 			clock-frequency = <100000>;
 			status = "disabled";
 		};
 
@@ -22,6 +22,11 @@
 #define CTR_L1IP_MASK		3
 #define CTR_CWG_SHIFT		24
 #define CTR_CWG_MASK		15
+#define CTR_DMINLINE_SHIFT	16
+#define CTR_IMINLINE_SHIFT	0
+
+#define CTR_CACHE_MINLINE_MASK	\
+	((0xf << CTR_DMINLINE_SHIFT) | (0xf << CTR_IMINLINE_SHIFT))
 
 #define ICACHE_POLICY_RESERVED	0
 #define ICACHE_POLICY_AIVIVT	1
 
@@ -37,7 +37,8 @@
 #define ARM64_UNMAP_KERNEL_AT_EL0		16
 #define ARM64_HARDEN_BRANCH_PREDICTOR		17
 #define ARM64_SSBD				18
+#define ARM64_MISMATCHED_CACHE_TYPE		19
 
-#define ARM64_NCAPS				19
+#define ARM64_NCAPS				20
 
 #endif /* __ASM_CPUCAPS_H */
 
@@ -17,6 +17,7 @@
  */
 
 #include <linux/types.h>
+#include <asm/cachetype.h>
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/cpufeature.h>
@@ -31,12 +32,18 @@ is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
 }
 
 static bool
-has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
-				int scope)
+has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
+			  int scope)
 {
+	u64 mask = CTR_CACHE_MINLINE_MASK;
+
+	/* Skip matching the min line sizes for cache type check */
+	if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE)
+		mask ^= arm64_ftr_reg_ctrel0.strict_mask;
+
 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-	return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
-		(arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
+	return (read_cpuid_cachetype() & mask) !=
+	       (arm64_ftr_reg_ctrel0.sys_val & mask);
 }
 
 static int cpu_enable_trap_ctr_access(void *__unused)
@@ -446,7 +453,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 	{
 		.desc = "Mismatched cache line size",
 		.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
-		.matches = has_mismatched_cache_line_size,
+		.matches = has_mismatched_cache_type,
+		.def_scope = SCOPE_LOCAL_CPU,
+		.enable = cpu_enable_trap_ctr_access,
+	},
+	{
+		.desc = "Mismatched cache type",
+		.capability = ARM64_MISMATCHED_CACHE_TYPE,
+		.matches = has_mismatched_cache_type,
 		.def_scope = SCOPE_LOCAL_CPU,
 		.enable = cpu_enable_trap_ctr_access,
 	},
 
@@ -152,7 +152,7 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0),
 	ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0),	/* CWG */
 	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* ERG */
-	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1),	/* DminLine */
+	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
 	/*
 	 * Linux can handle differing I-cache policies. Userspace JITs will
 	 * make use of *minLine.
@@ -160,7 +160,7 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
 	 */
 	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_AIVIVT),	/* L1Ip */
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 10, 0),	/* RAZ */
-	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* IminLine */
+	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };
 
@@ -274,7 +274,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p,
 		break;
 	case KPROBE_HIT_SS:
 	case KPROBE_REENTER:
-		pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr);
+		pr_warn("Unrecoverable kprobe detected.\n");
 		dump_kprobe(p);
 		BUG();
 		break;
 
@@ -205,7 +205,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
  * This is the secondary CPU boot entry. We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
  */
-asmlinkage void secondary_start_kernel(void)
+asmlinkage notrace void secondary_start_kernel(void)
 {
 	struct mm_struct *mm = &init_mm;
 	unsigned int cpu = smp_processor_id();
 
@@ -147,7 +147,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 #ifdef CONFIG_HAVE_ARCH_PFN_VALID
 int pfn_valid(unsigned long pfn)
 {
-	return memblock_is_map_memory(pfn << PAGE_SHIFT);
+	phys_addr_t addr = pfn << PAGE_SHIFT;
+
+	if ((addr >> PAGE_SHIFT) != pfn)
+		return 0;
+	return memblock_is_map_memory(addr);
 }
 EXPORT_SYMBOL(pfn_valid);
 #endif
 
@@ -43,6 +43,7 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
 				  unsigned long address)
 {
+	pgtable_page_dtor(page);
 	__free_page(page);
 }
 
@@ -73,8 +74,9 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
 	return page;
 }
 
-extern inline void pte_free(struct mm_struct *mm, struct page *page)
+static inline void pte_free(struct mm_struct *mm, struct page *page)
 {
+	pgtable_page_dtor(page);
 	__free_page(page);
 }
 
@@ -212,12 +212,6 @@ static int __init bcm47xx_cpu_fixes(void)
 		 */
 		if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706)
 			cpu_wait = NULL;
-
-		/*
-		 * BCM47XX Erratum "R10: PCIe Transactions Periodically Fail"
-		 * Enable ExternalSync for sync instruction to take effect
-		 */
-		set_c0_config7(MIPS_CONF7_ES);
 		break;
 #endif
 	}
 
@@ -663,8 +663,6 @@
 #define MIPS_CONF7_WII		(_ULCAST_(1) << 31)
 
 #define MIPS_CONF7_RPS		(_ULCAST_(1) << 2)
-/* ExternalSync */
-#define MIPS_CONF7_ES		(_ULCAST_(1) << 8)
 
 #define MIPS_CONF7_IAR		(_ULCAST_(1) << 10)
 #define MIPS_CONF7_AR		(_ULCAST_(1) << 16)
@@ -2643,7 +2641,6 @@ __BUILD_SET_C0(status)
 __BUILD_SET_C0(cause)
 __BUILD_SET_C0(config)
 __BUILD_SET_C0(config5)
-__BUILD_SET_C0(config7)
 __BUILD_SET_C0(intcontrol)
 __BUILD_SET_C0(intctl)
 __BUILD_SET_C0(srsmap)
 
@@ -141,7 +141,7 @@ struct mips_fpu_struct {
 
 #define NUM_DSP_REGS   6
 
-typedef __u32 dspreg_t;
+typedef unsigned long dspreg_t;
 
 struct mips_dsp_state {
 	dspreg_t	dspr[NUM_DSP_REGS];
 
@@ -876,7 +876,7 @@ long arch_ptrace(struct task_struct *child, long request,
 				goto out;
 			}
 			dregs = __get_dsp_regs(child);
-			tmp = (unsigned long) (dregs[addr - DSP_BASE]);
+			tmp = dregs[addr - DSP_BASE];
 			break;
 		}
 		case DSP_CONTROL:
 
@@ -140,7 +140,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 				goto out;
 			}
 			dregs = __get_dsp_regs(child);
-			tmp = (unsigned long) (dregs[addr - DSP_BASE]);
+			tmp = dregs[addr - DSP_BASE];
 			break;
 		}
 		case DSP_CONTROL:
 
@@ -4,12 +4,12 @@
 #include "libgcc.h"
 
 /*
- * GCC 7 suboptimally generates __multi3 calls for mips64r6, so for that
- * specific case only we'll implement it here.
+ * GCC 7 & older can suboptimally generate __multi3 calls for mips64r6, so for
+ * that specific case only we implement that intrinsic here.
  *
  * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981
  */
-#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ == 7)
+#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ < 8)
 
 /* multiply 64-bit values, low 64-bits returned */
 static inline long long notrace dmulu(long long a, long long b)
 
@@ -26,7 +26,6 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
 {
 	volatile unsigned int *a;
 
-	mb();
 	a = __ldcw_align(x);
 	while (__ldcw(a) == 0)
 		while (*a == 0)
@@ -36,16 +35,15 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
 				local_irq_disable();
 		} else
 			cpu_relax();
-	mb();
 }
 
 static inline void arch_spin_unlock(arch_spinlock_t *x)
 {
 	volatile unsigned int *a;
-	mb();
+
 	a = __ldcw_align(x);
-	*a = 1;
 	mb();
+	*a = 1;
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *x)
@@ -53,10 +51,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *x)
 	volatile unsigned int *a;
 	int ret;
 
-	mb();
 	a = __ldcw_align(x);
 	ret = __ldcw(a) != 0;
-	mb();
 
 	return ret;
 }
 
@@ -629,12 +629,12 @@ cas_action:
 	stw	%r1, 4(%sr2,%r20)
 #endif
 	/* The load and store could fail */
-1:	ldw,ma	0(%r26), %r28
+1:	ldw	0(%r26), %r28
 	sub,<>	%r28, %r25, %r0
-2:	stw,ma	%r24, 0(%r26)
+2:	stw	%r24, 0(%r26)
 	/* Free lock */
 	sync
-	stw,ma	%r20, 0(%sr2,%r20)
+	stw	%r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
 	/* Clear thread register indicator */
 	stw	%r0, 4(%sr2,%r20)
@@ -798,30 +798,30 @@ cas2_action:
 	ldo	1(%r0),%r28
 
 	/* 8bit CAS */
-13:	ldb,ma	0(%r26), %r29
+13:	ldb	0(%r26), %r29
 	sub,=	%r29, %r25, %r0
 	b,n	cas2_end
-14:	stb,ma	%r24, 0(%r26)
+14:	stb	%r24, 0(%r26)
 	b	cas2_end
 	copy	%r0, %r28
 	nop
 	nop
 
 	/* 16bit CAS */
-15:	ldh,ma	0(%r26), %r29
+15:	ldh	0(%r26), %r29
 	sub,=	%r29, %r25, %r0
 	b,n	cas2_end
-16:	sth,ma	%r24, 0(%r26)
+16:	sth	%r24, 0(%r26)
 	b	cas2_end
 	copy	%r0, %r28
 	nop
 	nop
 
 	/* 32bit CAS */
-17:	ldw,ma	0(%r26), %r29
+17:	ldw	0(%r26), %r29
 	sub,=	%r29, %r25, %r0
 	b,n	cas2_end
-18:	stw,ma	%r24, 0(%r26)
+18:	stw	%r24, 0(%r26)
 	b	cas2_end
 	copy	%r0, %r28
 	nop
@@ -829,10 +829,10 @@ cas2_action:
 
 	/* 64bit CAS */
 #ifdef CONFIG_64BIT
-19:	ldd,ma	0(%r26), %r29
+19:	ldd	0(%r26), %r29
 	sub,*=	%r29, %r25, %r0
 	b,n	cas2_end
-20:	std,ma	%r24, 0(%r26)
+20:	std	%r24, 0(%r26)
 	copy	%r0, %r28
 #else
 	/* Compare first word */
@@ -851,7 +851,7 @@ cas2_action:
 cas2_end:
 	/* Free lock */
 	sync
-	stw,ma	%r20, 0(%sr2,%r20)
+	stw	%r20, 0(%sr2,%r20)
 	/* Enable interrupts */
 	ssm	PSW_SM_I, %r0
 	/* Return to userspace, set no error */
 
@@ -190,9 +190,6 @@ struct fadump_crash_info_header {
 	struct cpumask	online_mask;
 };
 
-/* Crash memory ranges */
-#define INIT_CRASHMEM_RANGES	(INIT_MEMBLOCK_REGIONS + 2)
-
 struct fad_crash_memory_ranges {
 	unsigned long long	base;
 	unsigned long long	size;
 
@@ -35,6 +35,7 @@
 #include <linux/crash_dump.h>
 #include <linux/kobject.h>
 #include <linux/sysfs.h>
+#include <linux/slab.h>
 
 #include <asm/page.h>
 #include <asm/prom.h>
@@ -48,8 +49,10 @@ static struct fadump_mem_struct fdm;
 static const struct fadump_mem_struct *fdm_active;
 
 static DEFINE_MUTEX(fadump_mutex);
-struct fad_crash_memory_ranges crash_memory_ranges[INIT_CRASHMEM_RANGES];
+struct fad_crash_memory_ranges *crash_memory_ranges;
+int crash_memory_ranges_size;
 int crash_mem_ranges;
+int max_crash_mem_ranges;
 
 /* Scan the Firmware Assisted dump configuration details. */
 int __init early_init_dt_scan_fw_dump(unsigned long node,
@@ -731,38 +734,88 @@ static int __init process_fadump(const struct fadump_mem_struct *fdm_active)
 	return 0;
 }
 
-static inline void fadump_add_crash_memory(unsigned long long base,
-					unsigned long long end)
+static void free_crash_memory_ranges(void)
+{
+	kfree(crash_memory_ranges);
+	crash_memory_ranges = NULL;
+	crash_memory_ranges_size = 0;
+	max_crash_mem_ranges = 0;
+}
+
+/*
+ * Allocate or reallocate crash memory ranges array in incremental units
+ * of PAGE_SIZE.
+ */
+static int allocate_crash_memory_ranges(void)
+{
+	struct fad_crash_memory_ranges *new_array;
+	u64 new_size;
+
+	new_size = crash_memory_ranges_size + PAGE_SIZE;
+	pr_debug("Allocating %llu bytes of memory for crash memory ranges\n",
+		 new_size);
+
+	new_array = krealloc(crash_memory_ranges, new_size, GFP_KERNEL);
+	if (new_array == NULL) {
+		pr_err("Insufficient memory for setting up crash memory ranges\n");
+		free_crash_memory_ranges();
+		return -ENOMEM;
+	}
+
+	crash_memory_ranges = new_array;
+	crash_memory_ranges_size = new_size;
+	max_crash_mem_ranges = (new_size /
+				sizeof(struct fad_crash_memory_ranges));
+	return 0;
+}
+
+static inline int fadump_add_crash_memory(unsigned long long base,
+					  unsigned long long end)
 {
 	if (base == end)
-		return;
+		return 0;
+
+	if (crash_mem_ranges == max_crash_mem_ranges) {
+		int ret;
+
+		ret = allocate_crash_memory_ranges();
+		if (ret)
+			return ret;
+	}
 
 	pr_debug("crash_memory_range[%d] [%#016llx-%#016llx], %#llx bytes\n",
 		crash_mem_ranges, base, end - 1, (end - base));
 	crash_memory_ranges[crash_mem_ranges].base = base;
 	crash_memory_ranges[crash_mem_ranges].size = end - base;
 	crash_mem_ranges++;
+	return 0;
 }
 
-static void fadump_exclude_reserved_area(unsigned long long start,
+static int fadump_exclude_reserved_area(unsigned long long start,
 					unsigned long long end)
 {
 	unsigned long long ra_start, ra_end;
+	int ret = 0;
 
 	ra_start = fw_dump.reserve_dump_area_start;
 	ra_end = ra_start + fw_dump.reserve_dump_area_size;
 
 	if ((ra_start < end) && (ra_end > start)) {
 		if ((start < ra_start) && (end > ra_end)) {
-			fadump_add_crash_memory(start, ra_start);
-			fadump_add_crash_memory(ra_end, end);
+			ret = fadump_add_crash_memory(start, ra_start);
+			if (ret)
+				return ret;
+
+			ret = fadump_add_crash_memory(ra_end, end);
 		} else if (start < ra_start) {
-			fadump_add_crash_memory(start, ra_start);
+			ret = fadump_add_crash_memory(start, ra_start);
 		} else if (ra_end < end) {
-			fadump_add_crash_memory(ra_end, end);
+			ret = fadump_add_crash_memory(ra_end, end);
 		}
 	} else
-		fadump_add_crash_memory(start, end);
+		ret = fadump_add_crash_memory(start, end);
+
+	return ret;
 }
 
 static int fadump_init_elfcore_header(char *bufp)
@@ -802,10 +855,11 @@ static int fadump_init_elfcore_header(char *bufp)
 * Traverse through memblock structure and setup crash memory ranges. These
 * ranges will be used create PT_LOAD program headers in elfcore header.
 */
-static void fadump_setup_crash_memory_ranges(void)
+static int fadump_setup_crash_memory_ranges(void)
 {
 	struct memblock_region *reg;
 	unsigned long long start, end;
+	int ret;
 
 	pr_debug("Setup crash memory ranges.\n");
 	crash_mem_ranges = 0;
@@ -816,7 +870,9 @@ static void fadump_setup_crash_memory_ranges(void)
 	 * specified during fadump registration. We need to create a separate
 	 * program header for this chunk with the correct offset.
 	 */
-	fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
+	ret = fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
+	if (ret)
+		return ret;
 
 	for_each_memblock(memory, reg) {
 		start = (unsigned long long)reg->base;
@@ -825,8 +881,12 @@ static void fadump_setup_crash_memory_ranges(void)
 			start = fw_dump.boot_memory_size;
 
 		/* add this range excluding the reserved dump area. */
-		fadump_exclude_reserved_area(start, end);
+		ret = fadump_exclude_reserved_area(start, end);
+		if (ret)
+			return ret;
 	}
+
+	return 0;
 }
 
 /*
@@ -950,6 +1010,7 @@ static void register_fadump(void)
 {
 	unsigned long addr;
 	void *vaddr;
+	int ret;
 
 	/*
 	 * If no memory is reserved then we can not register for firmware-
@@ -958,7 +1019,9 @@ static void register_fadump(void)
 	if (!fw_dump.reserve_dump_area_size)
 		return;
 
-	fadump_setup_crash_memory_ranges();
+	ret = fadump_setup_crash_memory_ranges();
+	if (ret)
+		return ret;
 
 	addr = be64_to_cpu(fdm.rmr_region.destination_address) + be64_to_cpu(fdm.rmr_region.source_len);
 	/* Initialize fadump crash info header. */
@@ -1036,6 +1099,7 @@ void fadump_cleanup(void)
 	} else if (fw_dump.dump_registered) {
 		/* Un-register Firmware-assisted dump if it was registered. */
 		fadump_unregister_dump(&fdm);
+		free_crash_memory_ranges();
 	}
 }
 
@@ -326,6 +326,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 		u64 imm64;
 		u8 *func;
 		u32 true_cond;
+		u32 tmp_idx;
 
 		/*
 		 * addrs[] maps a BPF bytecode address into a real offset from
@@ -685,11 +686,7 @@ emit_clear:
 		case BPF_STX | BPF_XADD | BPF_W:
 			/* Get EA into TMP_REG_1 */
 			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
-			/* error if EA is not word-aligned */
-			PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03);
-			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12);
-			PPC_LI(b2p[BPF_REG_0], 0);
-			PPC_JMP(exit_addr);
+			tmp_idx = ctx->idx * 4;
 			/* load value from memory into TMP_REG_2 */
 			PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
 			/* add value from src_reg into this */
@@ -697,32 +694,16 @@ emit_clear:
 			/* store result back */
 			PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
 			/* we're done if this succeeded */
-			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
-			/* otherwise, let's try once more */
-			PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
-			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
-			PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
-			/* exit if the store was not successful */
-			PPC_LI(b2p[BPF_REG_0], 0);
-			PPC_BCC(COND_NE, exit_addr);
+			PPC_BCC_SHORT(COND_NE, tmp_idx);
 			break;
 		/* *(u64 *)(dst + off) += src */
 		case BPF_STX | BPF_XADD | BPF_DW:
 			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
-			/* error if EA is not doubleword-aligned */
-			PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07);
-			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4));
-			PPC_LI(b2p[BPF_REG_0], 0);
-			PPC_JMP(exit_addr);
+			tmp_idx = ctx->idx * 4;
 			PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
 			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
 			PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
-			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
-			PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
-			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
-			PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
-			PPC_LI(b2p[BPF_REG_0], 0);
-			PPC_BCC(COND_NE, exit_addr);
+			PPC_BCC_SHORT(COND_NE, tmp_idx);
 			break;
 
 		/*
 
@@ -3124,12 +3124,49 @@ static void pnv_pci_ioda_create_dbgfs(void)
 #endif /* CONFIG_DEBUG_FS */
 }
 
+static void pnv_pci_enable_bridge(struct pci_bus *bus)
+{
+	struct pci_dev *dev = bus->self;
+	struct pci_bus *child;
+
+	/* Empty bus ? bail */
+	if (list_empty(&bus->devices))
+		return;
+
+	/*
+	 * If there's a bridge associated with that bus enable it. This works
+	 * around races in the generic code if the enabling is done during
+	 * parallel probing. This can be removed once those races have been
+	 * fixed.
+	 */
+	if (dev) {
+		int rc = pci_enable_device(dev);
+		if (rc)
+			pci_err(dev, "Error enabling bridge (%d)\n", rc);
+		pci_set_master(dev);
+	}
+
+	/* Perform the same to child busses */
+	list_for_each_entry(child, &bus->children, node)
+		pnv_pci_enable_bridge(child);
+}
+
+static void pnv_pci_enable_bridges(void)
+{
+	struct pci_controller *hose;
+
+	list_for_each_entry(hose, &hose_list, list_node)
+		pnv_pci_enable_bridge(hose->bus);
+}
+
 static void pnv_pci_ioda_fixup(void)
 {
 	pnv_pci_ioda_setup_PEs();
 	pnv_pci_ioda_setup_iommu_api();
 	pnv_pci_ioda_create_dbgfs();
 
+	pnv_pci_enable_bridges();
+
 #ifdef CONFIG_EEH
 	eeh_init();
 	eeh_addr_cache_build();
 
@@ -346,7 +346,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
 	}
 
 	savep = __va(regs->gpr[3]);
-	regs->gpr[3] = savep[0];	/* restore original r3 */
+	regs->gpr[3] = be64_to_cpu(savep[0]);	/* restore original r3 */
 
 	/* If it isn't an extended log we can use the per cpu 64bit buffer */
 	h = (struct rtas_error_log *)&savep[1];
@@ -357,7 +357,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
 		int len, error_log_length;
 
 		error_log_length = 8 + rtas_error_extended_log_length(h);
-		len = max_t(int, error_log_length, RTAS_ERROR_LOG_MAX);
+		len = min_t(int, error_log_length, RTAS_ERROR_LOG_MAX);
 		memset(global_mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
 		memcpy(global_mce_data_buf, h, len);
 		errhdr = (struct rtas_error_log *)global_mce_data_buf;
 
@@ -196,7 +196,7 @@ static int mpic_msgr_probe(struct platform_device *dev)
 
 	/* IO map the message register block. */
 	of_address_to_resource(np, 0, &rsrc);
-	msgr_block_addr = ioremap(rsrc.start, rsrc.end - rsrc.start);
+	msgr_block_addr = ioremap(rsrc.start, resource_size(&rsrc));
 	if (!msgr_block_addr) {
 		dev_err(&dev->dev, "Failed to iomap MPIC message registers");
 		return -EFAULT;
 
@@ -261,7 +261,6 @@ struct qdio_outbuf_state {
 	void *user;
 };
 
-#define QDIO_OUTBUF_STATE_FLAG_NONE	0x00
 #define QDIO_OUTBUF_STATE_FLAG_PENDING	0x01
 
 #define CHSC_AC1_INITIATE_INPUTQ	0x80
 
@@ -401,11 +401,13 @@ static void *get_vmcoreinfo_old(unsigned long *size)
 	if (copy_oldmem_kernel(nt_name, addr + sizeof(note),
 			       sizeof(nt_name) - 1))
 		return NULL;
-	if (strcmp(nt_name, "VMCOREINFO") != 0)
+	if (strcmp(nt_name, VMCOREINFO_NOTE_NAME) != 0)
 		return NULL;
 	vmcoreinfo = kzalloc_panic(note.n_descsz);
-	if (copy_oldmem_kernel(vmcoreinfo, addr + 24, note.n_descsz))
+	if (copy_oldmem_kernel(vmcoreinfo, addr + 24, note.n_descsz)) {
+		kfree(vmcoreinfo);
 		return NULL;
+	}
 	*size = note.n_descsz;
 	return vmcoreinfo;
 }
@@ -415,15 +417,20 @@ static void *get_vmcoreinfo_old(unsigned long *size)
 */
 static void *nt_vmcoreinfo(void *ptr)
 {
+	const char *name = VMCOREINFO_NOTE_NAME;
 	unsigned long size;
 	void *vmcoreinfo;
 
 	vmcoreinfo = os_info_old_entry(OS_INFO_VMCOREINFO, &size);
-	if (!vmcoreinfo)
-		vmcoreinfo = get_vmcoreinfo_old(&size);
+	if (vmcoreinfo)
+		return nt_init_name(ptr, 0, vmcoreinfo, size, name);
+
+	vmcoreinfo = get_vmcoreinfo_old(&size);
 	if (!vmcoreinfo)
 		return ptr;
-	return nt_init_name(ptr, 0, vmcoreinfo, size, "VMCOREINFO");
+	ptr = nt_init_name(ptr, 0, vmcoreinfo, size, name);
+	kfree(vmcoreinfo);
+	return ptr;
 }
 
 /*
 
@@ -27,7 +27,7 @@
 */
 ENTRY(memset)
 	ltgr	%r4,%r4
-	bzr	%r14
+	jz	.Lmemset_exit
 	ltgr	%r3,%r3
 	jnz	.Lmemset_fill
 	aghi	%r4,-1
@@ -42,12 +42,13 @@ ENTRY(memset)
 .Lmemset_clear_rest:
 	larl	%r3,.Lmemset_xc
 	ex	%r4,0(%r3)
+.Lmemset_exit:
 	BR_EX	%r14
 .Lmemset_fill:
 	stc	%r3,0(%r2)
 	cghi	%r4,1
 	lgr	%r1,%r2
-	ber	%r14
+	je	.Lmemset_fill_exit
 	aghi	%r4,-2
 	srlg	%r3,%r4,8
 	ltgr	%r3,%r3
@@ -59,6 +60,7 @@ ENTRY(memset)
 .Lmemset_fill_rest:
 	larl	%r3,.Lmemset_mvc
 	ex	%r4,0(%r3)
+.Lmemset_fill_exit:
 	BR_EX	%r14
 .Lmemset_xc:
 	xc	0(1,%r1),0(%r1)
@@ -73,7 +75,7 @@ EXPORT_SYMBOL(memset)
 */
 ENTRY(memcpy)
 	ltgr	%r4,%r4
-	bzr	%r14
+	jz	.Lmemcpy_exit
 	aghi	%r4,-1
 	srlg	%r5,%r4,8
 	ltgr	%r5,%r5
@@ -82,6 +84,7 @@ ENTRY(memcpy)
 .Lmemcpy_rest:
 	larl	%r5,.Lmemcpy_mvc
 	ex	%r4,0(%r5)
+.Lmemcpy_exit:
 	BR_EX	%r14
 .Lmemcpy_loop:
 	mvc	0(256,%r1),0(%r3)
 
@@ -462,6 +462,8 @@ retry:
 	/* No reason to continue if interrupted by SIGKILL. */
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
 		fault = VM_FAULT_SIGNAL;
+		if (flags & FAULT_FLAG_RETRY_NOWAIT)
+			goto out_up;
 		goto out;
 	}
 	if (unlikely(fault & VM_FAULT_ERROR))
 
@@ -517,8 +517,6 @@ static void bpf_jit_epilogue(struct bpf_jit *jit)
 			/* br %r1 */
 			_EMIT2(0x07f1);
 		} else {
-			/* larl %r1,.+14 */
-			EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
 			/* ex 0,S390_lowcore.br_r1_tampoline */
 			EMIT4_DISP(0x44000000, REG_0, REG_0,
 				   offsetof(struct lowcore, br_r1_trampoline));
@@ -1386,6 +1384,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 		goto free_addrs;
 	}
 	if (bpf_jit_prog(&jit, fp)) {
+		bpf_jit_binary_free(header);
 		fp = orig_fp;
 		goto free_addrs;
 	}
 
@@ -133,26 +133,14 @@ void __init numa_setup(void)
 {
 	pr_info("NUMA mode: %s\n", mode->name);
 	nodes_clear(node_possible_map);
+	/* Initially attach all possible CPUs to node 0. */
+	cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
 	if (mode->setup)
 		mode->setup();
 	numa_setup_memory();
 	memblock_dump_all();
 }
 
-/*
- * numa_init_early() - Initialization initcall
- *
- * This runs when only one CPU is online and before the first
- * topology update is called for by the scheduler.
- */
-static int __init numa_init_early(void)
-{
-	/* Attach all possible CPUs to node 0 for now. */
-	cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
-	return 0;
-}
-early_initcall(numa_init_early);
-
 /*
 * numa_init_late() - Initialization initcall
 *
 
@@ -407,6 +407,8 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
 	hwirq = 0;
 	for_each_pci_msi_entry(msi, pdev) {
 		rc = -EIO;
+		if (hwirq >= msi_vecs)
+			break;
 		irq = irq_alloc_desc(0);	/* Alloc irq on node 0 */
 		if (irq < 0)
 			goto out_msi;
 
@@ -602,7 +602,7 @@ void pcibios_fixup_bus(struct pci_bus *bus)
 {
 	struct pci_dev *dev;
 	int i, has_io, has_mem;
-	unsigned int cmd;
+	unsigned int cmd = 0;
 	struct linux_pcic *pcic;
 	/* struct linux_pbm_info* pbm = &pcic->pbm; */
 	int node;
 
@@ -201,23 +201,27 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig,
 
 asmlinkage long sys_getdomainname(char __user *name, int len)
 {
-	int nlen, err;
-
+	int nlen, err;
+	char tmp[__NEW_UTS_LEN + 1];
+
 	if (len < 0)
 		return -EINVAL;
 
-	down_read(&uts_sem);
-
+	down_read(&uts_sem);
+
 	nlen = strlen(utsname()->domainname) + 1;
 	err = -EINVAL;
 	if (nlen > len)
-		goto out;
+		goto out_unlock;
+	memcpy(tmp, utsname()->domainname, nlen);
 
-	err = -EFAULT;
-	if (!copy_to_user(name, utsname()->domainname, nlen))
-		err = 0;
+	up_read(&uts_sem);
 
-out:
+	if (copy_to_user(name, tmp, nlen))
+		return -EFAULT;
+	return 0;
+
+out_unlock:
 	up_read(&uts_sem);
 	return err;
 }
 
@@ -524,23 +524,27 @@ extern void check_pending(int signum);
 
 SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
 {
-	int nlen, err;
+	int nlen, err;
+	char tmp[__NEW_UTS_LEN + 1];
 
 	if (len < 0)
 		return -EINVAL;
 
-	down_read(&uts_sem);
-
+	down_read(&uts_sem);
+
 	nlen = strlen(utsname()->domainname) + 1;
 	err = -EINVAL;
 	if (nlen > len)
-		goto out;
+		goto out_unlock;
+	memcpy(tmp, utsname()->domainname, nlen);
 
-	err = -EFAULT;
-	if (!copy_to_user(name, utsname()->domainname, nlen))
-		err = 0;
+	up_read(&uts_sem);
 
-out:
+	if (copy_to_user(name, tmp, nlen))
+		return -EFAULT;
+	return 0;
+
+out_unlock:
 	up_read(&uts_sem);
 	return err;
 }
 
@@ -101,9 +101,13 @@ define cmd_check_data_rel
 	done
 endef
 
+# We need to run two commands under "if_changed", so merge them into a
+# single invocation.
+quiet_cmd_check-and-link-vmlinux = LD      $@
+      cmd_check-and-link-vmlinux = $(cmd_check_data_rel); $(cmd_ld)
+
 $(obj)/vmlinux: $(vmlinux-objs-y) FORCE
-	$(call if_changed,check_data_rel)
-	$(call if_changed,ld)
+	$(call if_changed,check-and-link-vmlinux)
 
 OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
 $(obj)/vmlinux.bin: vmlinux FORCE
 
@@ -774,7 +774,7 @@ ENTRY(\sym)
 
 	call	\do_sym
 
-	jmp	error_exit			/* %ebx: no swapgs flag */
+	jmp	error_exit
 	.endif
 END(\sym)
 .endm
@@ -1043,7 +1043,6 @@ END(paranoid_exit)
 
 /*
 * Save all registers in pt_regs, and switch gs if needed.
- * Return: EBX=0: came from user mode; EBX=1: otherwise
 */
 ENTRY(error_entry)
 	cld
@@ -1056,7 +1055,6 @@ ENTRY(error_entry)
 	 * the kernel CR3 here.
 	 */
 	SWITCH_KERNEL_CR3
-	xorl	%ebx, %ebx
 	testb	$3, CS+8(%rsp)
 	jz	.Lerror_kernelspace
 
@@ -1087,7 +1085,6 @@ ENTRY(error_entry)
 	 * for these here too.
 	 */
 .Lerror_kernelspace:
-	incl	%ebx
 	leaq	native_irq_return_iret(%rip), %rcx
 	cmpq	%rcx, RIP+8(%rsp)
 	je	.Lerror_bad_iret
@@ -1119,28 +1116,19 @@ ENTRY(error_entry)
 
 	/*
 	 * Pretend that the exception came from user mode: set up pt_regs
-	 * as if we faulted immediately after IRET and clear EBX so that
-	 * error_exit knows that we will be returning to user mode.
+	 * as if we faulted immediately after IRET.
 	 */
 	mov	%rsp, %rdi
 	call	fixup_bad_iret
 	mov	%rax, %rsp
-	decl	%ebx
 	jmp	.Lerror_entry_from_usermode_after_swapgs
 END(error_entry)
 
 
-/*
- * On entry, EBX is a "return to kernel mode" flag:
- *	1: already in kernel mode, don't need SWAPGS
- *	0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
- */
 ENTRY(error_exit)
-	movl	%ebx, %eax
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
-	testl	%eax, %eax
-	jnz	retint_kernel
+	testb	$3, CS(%rsp)
+	jz	retint_kernel
 	jmp	retint_user
 END(error_exit)
 
@@ -578,7 +578,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
 {
 	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
 	struct perf_event *event = pcpu->event;
-	struct hw_perf_event *hwc = &event->hw;
+	struct hw_perf_event *hwc;
 	struct perf_sample_data data;
 	struct perf_raw_record raw;
 	struct pt_regs regs;
@@ -601,6 +601,10 @@ fail:
 		return 0;
 	}
 
+	if (WARN_ON_ONCE(!event))
+		goto fail;
+
+	hwc = &event->hw;
 	msr = hwc->config_base;
 	buf = ibs_data.regs;
 	rdmsrl(msr, *buf);
 
@@ -32,7 +32,8 @@ extern inline unsigned long native_save_fl(void)
 	return flags;
 }
 
-static inline void native_restore_fl(unsigned long flags)
+extern inline void native_restore_fl(unsigned long flags);
+extern inline void native_restore_fl(unsigned long flags)
 {
 	asm volatile("push %0 ; popf"
 		     : /* no output */
 
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_PGTABLE_3LEVEL_H
 #define _ASM_X86_PGTABLE_3LEVEL_H
 
+#include <asm/atomic64_32.h>
+
 /*
 * Intel Physical Address Extension (PAE) Mode - three-level page
 * tables on PPro+ CPUs.
@@ -142,10 +144,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
 {
 	pte_t res;
 
-	/* xchg acts as a barrier before the setting of the high bits */
-	res.pte_low = xchg(&ptep->pte_low, 0);
-	res.pte_high = ptep->pte_high;
-	ptep->pte_high = 0;
+	res.pte = (pteval_t)atomic64_xchg((atomic64_t *)ptep, 0);
 
 	return res;
 }
 
@@ -420,7 +420,7 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
 
 static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
 {
-	phys_addr_t pfn = page_nr << PAGE_SHIFT;
+	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
 	pfn ^= protnone_mask(pgprot_val(pgprot));
 	pfn &= PHYSICAL_PUD_PAGE_MASK;
 	return __pud(pfn | massage_pgprot(pgprot));
 
@@ -136,6 +136,8 @@ struct cpuinfo_x86 {
 	/* Index into per_cpu list: */
 	u16			cpu_index;
 	u32			microcode;
+	/* Address space bits used by the cache internally */
+	u8			x86_cache_bits;
 };
 
 #define X86_VENDOR_INTEL	0
@ -173,9 +175,9 @@ extern const struct seq_operations cpuinfo_op;
|
|||
|
||||
extern void cpu_detect(struct cpuinfo_x86 *c);
|
||||
|
||||
static inline unsigned long l1tf_pfn_limit(void)
|
||||
static inline unsigned long long l1tf_pfn_limit(void)
|
||||
{
|
||||
return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1;
|
||||
return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
|
||||
}
|
||||
|
||||
extern void early_cpu_init(void);
|
||||
|
|
|
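Two things change in l1tf_pfn_limit() above: the arithmetic moves to 64 bits (`BIT_ULL` and a `long long` return) so it cannot overflow on 32-bit builds, and the value becomes an exclusive limit (the trailing `- 1` is dropped), which is why the comparison in pfn_modify_allowed() further down flips from `>` to `>=`. A worked example, assuming 46 relevant bits and PAGE_SHIFT == 12:

        unsigned long long old_limit = (1ULL << (46 - 1 - 12)) - 1; /* 0x1ffffffff, inclusive */
        unsigned long long new_limit =  1ULL << (46 - 1 - 12);      /* 0x200000000, exclusive */
        /* old callers tested pfn > old_limit; new callers test pfn >= new_limit:
         * the same pfns are rejected, but BIT_ULL keeps the math 64-bit even
         * where unsigned long is only 32 bits wide */
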
@@ -651,6 +651,45 @@ EXPORT_SYMBOL_GPL(l1tf_mitigation);
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);

/*
 * These CPUs all support 44bits physical address space internally in the
 * cache but CPUID can report a smaller number of physical address bits.
 *
 * The L1TF mitigation uses the top most address bit for the inversion of
 * non present PTEs. When the installed memory reaches into the top most
 * address bit due to memory holes, which has been observed on machines
 * which report 36bits physical address bits and have 32G RAM installed,
 * then the mitigation range check in l1tf_select_mitigation() triggers.
 * This is a false positive because the mitigation is still possible due to
 * the fact that the cache uses 44bit internally. Use the cache bits
 * instead of the reported physical bits and adjust them on the affected
 * machines to 44bit if the reported bits are less than 44.
 */
static void override_cache_bits(struct cpuinfo_x86 *c)
{
        if (c->x86 != 6)
                return;

        switch (c->x86_model) {
        case INTEL_FAM6_NEHALEM:
        case INTEL_FAM6_WESTMERE:
        case INTEL_FAM6_SANDYBRIDGE:
        case INTEL_FAM6_IVYBRIDGE:
        case INTEL_FAM6_HASWELL_CORE:
        case INTEL_FAM6_HASWELL_ULT:
        case INTEL_FAM6_HASWELL_GT3E:
        case INTEL_FAM6_BROADWELL_CORE:
        case INTEL_FAM6_BROADWELL_GT3E:
        case INTEL_FAM6_SKYLAKE_MOBILE:
        case INTEL_FAM6_SKYLAKE_DESKTOP:
        case INTEL_FAM6_KABYLAKE_MOBILE:
        case INTEL_FAM6_KABYLAKE_DESKTOP:
                if (c->x86_cache_bits < 44)
                        c->x86_cache_bits = 44;
                break;
        }
}

static void __init l1tf_select_mitigation(void)
{
        u64 half_pa;
@@ -658,6 +697,8 @@ static void __init l1tf_select_mitigation(void)
        if (!boot_cpu_has_bug(X86_BUG_L1TF))
                return;

        override_cache_bits(&boot_cpu_data);

        switch (l1tf_mitigation) {
        case L1TF_MITIGATION_OFF:
        case L1TF_MITIGATION_FLUSH_NOWARN:
@@ -677,14 +718,13 @@ static void __init l1tf_select_mitigation(void)
                return;
#endif

        /*
         * This is extremely unlikely to happen because almost all
         * systems have far more MAX_PA/2 than RAM can be fit into
         * DIMM slots.
         */
        half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
        if (e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) {
                pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
                pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
                        half_pa);
                pr_info("However, doing so will make a part of your RAM unusable.\n");
                pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n");
                return;
        }

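The arithmetic behind the false positive described in the comment above: MAX_PA/2 for a CPU reporting 36 physical bits is 2^35 bytes, exactly 32 GiB, so a 32 GiB machine whose e820 map has holes necessarily has RAM mapped at or above that boundary. With the internal 44 cache bits the same cutoff is 8 TiB and the check passes. A one-line helper makes the numbers concrete:

        static unsigned long long half_pa(unsigned int cache_bits)
        {
                return 1ULL << (cache_bits - 1);        /* MAX_PA/2 in bytes */
        }
        /* half_pa(36) == 0x0008_0000_0000 (32 GiB) -> spurious warning  */
        /* half_pa(44) == 0x0800_0000_0000 ( 8 TiB) -> mitigation stands */
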
@@ -882,6 +882,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
                }
        }
#endif
        c->x86_cache_bits = c->x86_phys_bits;
}

static const __initconst struct x86_cpu_id cpu_no_speculation[] = {

@@ -109,6 +109,9 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
        if (cpu_has(c, X86_FEATURE_HYPERVISOR))
                return false;

        if (c->x86 != 6)
                return false;

        for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
                if (c->x86_model == spectre_bad_microcodes[i].model &&
                    c->x86_stepping == spectre_bad_microcodes[i].stepping)

@@ -15,6 +15,7 @@
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/sysfs.h>
#include <linux/kasan.h>

#include <asm/stacktrace.h>
#include <asm/unwind.h>
@@ -229,7 +230,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
         * We're not going to return, but we might be on an IST stack or
         * have very little stack space left.  Rewind the stack and kill
         * the task.
         * Before we rewind the stack, we have to tell KASAN that we're going to
         * reuse the task stack and that existing poisons are invalid.
         */
        kasan_unpoison_task_stack(current);
        rewind_stack_do_exit(signr);
}
NOKPROBE_SYMBOL(oops_end);

@@ -529,7 +529,7 @@ static int bzImage64_cleanup(void *loader_data)
static int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len)
{
        return verify_pefile_signature(kernel, kernel_len,
                                       NULL,
                                       VERIFY_USE_SECONDARY_KEYRING,
                                       VERIFYING_KEXEC_PE_SIGNATURE);
}
#endif

@@ -360,6 +360,7 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
        start_thread_common(regs, new_ip, new_sp,
                            __USER_CS, __USER_DS, 0);
}
EXPORT_SYMBOL_GPL(start_thread);

#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)

@@ -4973,8 +4973,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)

        clgi();

        local_irq_enable();

        /*
         * If this vCPU has touched SPEC_CTRL, restore the guest's value if
         * it's non-zero. Since vmentry is serialising on affected CPUs, there
@@ -4983,6 +4981,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
         */
        x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);

        local_irq_enable();

        asm volatile (
                "push %%" _ASM_BP "; \n\t"
                "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
@@ -5105,12 +5105,12 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
        if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
                svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);

        x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);

        reload_tss(vcpu);

        local_irq_disable();

        x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);

        vcpu->arch.cr2 = svm->vmcb->save.cr2;
        vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
        vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;

@@ -198,12 +198,14 @@ static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_

static const struct {
        const char *option;
        enum vmx_l1d_flush_state cmd;
        bool for_parse;
} vmentry_l1d_param[] = {
        {"auto",        VMENTER_L1D_FLUSH_AUTO},
        {"never",       VMENTER_L1D_FLUSH_NEVER},
        {"cond",        VMENTER_L1D_FLUSH_COND},
        {"always",      VMENTER_L1D_FLUSH_ALWAYS},
        [VMENTER_L1D_FLUSH_AUTO]         = {"auto", true},
        [VMENTER_L1D_FLUSH_NEVER]        = {"never", true},
        [VMENTER_L1D_FLUSH_COND]         = {"cond", true},
        [VMENTER_L1D_FLUSH_ALWAYS]       = {"always", true},
        [VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false},
        [VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
};

#define L1D_CACHE_ORDER 4
@@ -287,8 +289,9 @@ static int vmentry_l1d_flush_parse(const char *s)

        if (s) {
                for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
                        if (sysfs_streq(s, vmentry_l1d_param[i].option))
                                return vmentry_l1d_param[i].cmd;
                        if (vmentry_l1d_param[i].for_parse &&
                            sysfs_streq(s, vmentry_l1d_param[i].option))
                                return i;
                }
        }
        return -EINVAL;
@@ -298,13 +301,13 @@ static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
{
        int l1tf, ret;

        if (!boot_cpu_has(X86_BUG_L1TF))
                return 0;

        l1tf = vmentry_l1d_flush_parse(s);
        if (l1tf < 0)
                return l1tf;

        if (!boot_cpu_has(X86_BUG_L1TF))
                return 0;

        /*
         * Has vmx_init() run already? If not then this is the pre init
         * parameter parsing. In that case just store the value and let
@@ -324,6 +327,9 @@ static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)

static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
{
        if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param)))
                return sprintf(s, "???\n");

        return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
}

@@ -8670,9 +8676,6 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
 * information but as all relevant affected CPUs have 32KiB L1D cache size
 * there is no point in doing so.
 */
#define L1D_CACHE_ORDER 4
static void *vmx_l1d_flush_pages;

static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
{
        int size = PAGE_SIZE << L1D_CACHE_ORDER;

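The reshaped table above is doing two jobs at once: designated initializers keyed by the enum make `vmentry_l1d_param[l1tf_vmx_mitigation].option` a direct, always-in-bounds lookup for the sysfs getter, and the new `for_parse` flag keeps the purely informational states out of the parser. A self-contained sketch of the shape, with hypothetical names:

        #include <stdbool.h>

        enum flush_state { FLUSH_AUTO, FLUSH_NEVER, FLUSH_EPT_DISABLED, FLUSH_NR_STATES };

        static const struct {
                const char *option;
                bool for_parse;         /* accepted as parameter input? */
        } flush_param[FLUSH_NR_STATES] = {
                [FLUSH_AUTO]         = {"auto",         true},
                [FLUSH_NEVER]        = {"never",        true},
                [FLUSH_EPT_DISABLED] = {"EPT disabled", false}, /* display only */
        };
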
@@ -792,7 +792,7 @@ unsigned long max_swapfile_size(void)

        if (boot_cpu_has_bug(X86_BUG_L1TF)) {
                /* Limit the swap file size to MAX_PA/2 for L1TF workaround */
                unsigned long l1tf_limit = l1tf_pfn_limit() + 1;
                unsigned long long l1tf_limit = l1tf_pfn_limit();
                /*
                 * We encode swap offsets also with 3 bits below those for pfn
                 * which makes the usable limit higher.
@@ -800,7 +800,7 @@ unsigned long max_swapfile_size(void)
#if CONFIG_PGTABLE_LEVELS > 2
                l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
#endif
                pages = min_t(unsigned long, l1tf_limit, pages);
                pages = min_t(unsigned long long, l1tf_limit, pages);
        }
        return pages;
}

@@ -138,7 +138,7 @@ bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
        /* If it's real memory always allow */
        if (pfn_valid(pfn))
                return true;
        if (pfn > l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
        if (pfn >= l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
                return false;
        return true;
}

@@ -31,16 +31,32 @@
 *
 */

.macro  __loop_cache_all ar at insn size line_width

.macro  __loop_cache_unroll ar at insn size line_width max_immed

        .if     (1 << (\line_width)) > (\max_immed)
        .set    _reps, 1
        .elseif (2 << (\line_width)) > (\max_immed)
        .set    _reps, 2
        .else
        .set    _reps, 4
        .endif

        __loopi \ar, \at, \size, (_reps << (\line_width))
        .set    _index, 0
        .rep    _reps
        \insn   \ar, _index << (\line_width)
        .set    _index, _index + 1
        .endr
        __endla \ar, \at, _reps << (\line_width)

.endm


.macro  __loop_cache_all ar at insn size line_width max_immed

        movi    \ar, 0

        __loopi \ar, \at, \size, (4 << (\line_width))
        \insn   \ar, 0 << (\line_width)
        \insn   \ar, 1 << (\line_width)
        \insn   \ar, 2 << (\line_width)
        \insn   \ar, 3 << (\line_width)
        __endla \ar, \at, 4 << (\line_width)
        __loop_cache_unroll \ar, \at, \insn, \size, \line_width, \max_immed

.endm

@@ -57,14 +73,9 @@
.endm


.macro  __loop_cache_page ar at insn line_width
.macro  __loop_cache_page ar at insn line_width max_immed

        __loopi \ar, \at, PAGE_SIZE, 4 << (\line_width)
        \insn   \ar, 0 << (\line_width)
        \insn   \ar, 1 << (\line_width)
        \insn   \ar, 2 << (\line_width)
        \insn   \ar, 3 << (\line_width)
        __endla \ar, \at, 4 << (\line_width)
        __loop_cache_unroll \ar, \at, \insn, PAGE_SIZE, \line_width, \max_immed

.endm

@@ -72,7 +83,8 @@
.macro  ___unlock_dcache_all ar at

#if XCHAL_DCACHE_LINE_LOCKABLE && XCHAL_DCACHE_SIZE
        __loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
        __loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE \
                XCHAL_DCACHE_LINEWIDTH 240
#endif

.endm

@@ -81,7 +93,8 @@
.macro  ___unlock_icache_all ar at

#if XCHAL_ICACHE_LINE_LOCKABLE && XCHAL_ICACHE_SIZE
        __loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE XCHAL_ICACHE_LINEWIDTH
        __loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE \
                XCHAL_ICACHE_LINEWIDTH 240
#endif

.endm

@@ -90,7 +103,8 @@
.macro  ___flush_invalidate_dcache_all ar at

#if XCHAL_DCACHE_SIZE
        __loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
        __loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE \
                XCHAL_DCACHE_LINEWIDTH 240
#endif

.endm

@@ -99,7 +113,8 @@
.macro  ___flush_dcache_all ar at

#if XCHAL_DCACHE_SIZE
        __loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
        __loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE \
                XCHAL_DCACHE_LINEWIDTH 240
#endif

.endm

@@ -108,8 +123,8 @@
.macro  ___invalidate_dcache_all ar at

#if XCHAL_DCACHE_SIZE
        __loop_cache_all \ar \at dii __stringify(DCACHE_WAY_SIZE) \
                XCHAL_DCACHE_LINEWIDTH
        __loop_cache_all \ar \at dii XCHAL_DCACHE_SIZE \
                XCHAL_DCACHE_LINEWIDTH 1020
#endif

.endm

@@ -118,8 +133,8 @@
.macro  ___invalidate_icache_all ar at

#if XCHAL_ICACHE_SIZE
        __loop_cache_all \ar \at iii __stringify(ICACHE_WAY_SIZE) \
                XCHAL_ICACHE_LINEWIDTH
        __loop_cache_all \ar \at iii XCHAL_ICACHE_SIZE \
                XCHAL_ICACHE_LINEWIDTH 1020
#endif

.endm

@@ -166,7 +181,7 @@
.macro  ___flush_invalidate_dcache_page ar as

#if XCHAL_DCACHE_SIZE
        __loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH
        __loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH 1020
#endif

.endm

@@ -175,7 +190,7 @@
.macro  ___flush_dcache_page ar as

#if XCHAL_DCACHE_SIZE
        __loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH
        __loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH 1020
#endif

.endm

@@ -184,7 +199,7 @@
.macro  ___invalidate_dcache_page ar as

#if XCHAL_DCACHE_SIZE
        __loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH
        __loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH 1020
#endif

.endm

@@ -193,7 +208,7 @@
.macro  ___invalidate_icache_page ar as

#if XCHAL_ICACHE_SIZE
        __loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH
        __loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH 1020
#endif

.endm

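The new `__loop_cache_unroll` picks its unroll factor from the instruction's immediate-field budget: each unrolled op addresses `index << line_width` bytes, so the factor is the largest of 1, 2, or 4 whose offsets still encode (240 for the ordinary cache ops above, 1020 for the `dii`/`iii` and page variants). The selection logic, restated in C:

        static int unroll_reps(int line_width, int max_immed)
        {
                if ((1 << line_width) > max_immed)
                        return 1;       /* even offset 1<<lw won't encode */
                if ((2 << line_width) > max_immed)
                        return 2;
                return 4;
        }
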
@@ -155,7 +155,7 @@ out:

unsigned int bvec_nr_vecs(unsigned short idx)
{
        return bvec_slabs[idx].nr_vecs;
        return bvec_slabs[--idx].nr_vecs;
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)

@@ -14,6 +14,7 @@
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/err.h>
#include <linux/verification.h>
#include <keys/asymmetric-type.h>
#include <keys/system_keyring.h>
#include <crypto/pkcs7.h>
@@ -207,7 +208,7 @@ int verify_pkcs7_signature(const void *data, size_t len,

        if (!trusted_keys) {
                trusted_keys = builtin_trusted_keys;
        } else if (trusted_keys == (void *)1UL) {
        } else if (trusted_keys == VERIFY_USE_SECONDARY_KEYRING) {
#ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
                trusted_keys = secondary_trusted_keys;
#else

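This hunk, the kexec one earlier, and the pkcs7_preparse one below all replace the bare cookie `(void *)1UL` with the named constant that include/linux/verification.h defines for it, which is why the new `#include <linux/verification.h>` appears above:

        /* from include/linux/verification.h */
        #define VERIFY_USE_SECONDARY_KEYRING ((struct key *)1UL)
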
@@ -62,7 +62,7 @@ static int pkcs7_preparse(struct key_preparsed_payload *prep)

        return verify_pkcs7_signature(NULL, 0,
                                      prep->data, prep->datalen,
                                      (void *)1UL, usage,
                                      VERIFY_USE_SECONDARY_KEYRING, usage,
                                      pkcs7_view_content, prep);
}

@@ -201,6 +201,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
        const u8 *uuid;
        int rc, i;

        if (cmd_rc)
                *cmd_rc = -EINVAL;
        func = cmd;
        if (cmd == ND_CMD_CALL) {
                call_pkg = buf;
@@ -288,6 +290,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
                 * If we return an error (like elsewhere) then caller wouldn't
                 * be able to rely upon data returned to make calculation.
                 */
                if (cmd_rc)
                        *cmd_rc = 0;
                return 0;
        }

@@ -1453,7 +1453,8 @@ static int acpi_add_single_object(struct acpi_device **child,
         * Note this must be done before the get power-/wakeup_dev-flags calls.
         */
        if (type == ACPI_BUS_TYPE_DEVICE)
                acpi_bus_get_status(device);
                if (acpi_bus_get_status(device) < 0)
                        acpi_set_device_status(device, 0);

        acpi_bus_get_power_flags(device);
        acpi_bus_get_wakeup_device_flags(device);
@@ -1531,7 +1532,7 @@ static int acpi_bus_type_and_status(acpi_handle handle, int *type,
                 * acpi_add_single_object updates this once we've an acpi_device
                 * so that acpi_bus_get_status' quirk handling can be used.
                 */
                *sta = 0;
                *sta = ACPI_STA_DEFAULT;
                break;
        case ACPI_TYPE_PROCESSOR:
                *type = ACPI_BUS_TYPE_PROCESSOR;

@@ -35,6 +35,7 @@
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -1124,10 +1125,12 @@ static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,

        /* get the slot number from the message */
        pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
        if (pmp < EM_MAX_SLOTS)
        if (pmp < EM_MAX_SLOTS) {
                pmp = array_index_nospec(pmp, EM_MAX_SLOTS);
                emp = &pp->em_priv[pmp];
        else
        } else {
                return -EINVAL;
        }

        /* mask off the activity bits if we are in sw_activity
         * mode, user should turn off sw_activity before setting

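The ahci change is the standard Spectre-v1 hardening pattern: an architectural bounds check alone does not stop a mispredicted branch from indexing `em_priv[]` speculatively with the attacker-chosen `pmp`, so the index is additionally clamped with array_index_nospec() after the check. The pattern in isolation, with hypothetical names:

        if (idx >= TABLE_SIZE)                          /* architectural check */
                return -EINVAL;
        idx = array_index_nospec(idx, TABLE_SIZE);      /* clamps idx even under
                                                         * branch misprediction */
        entry = &table[idx];
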
@@ -185,7 +185,7 @@ EXPORT_SYMBOL_GPL(of_pm_clk_add_clk);
int of_pm_clk_add_clks(struct device *dev)
{
        struct clk **clks;
        unsigned int i, count;
        int i, count;
        int ret;

        if (!dev || !dev->of_node)

@@ -2536,7 +2536,7 @@ static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi,
        if (!CDROM_CAN(CDC_SELECT_DISC) ||
            (arg == CDSL_CURRENT || arg == CDSL_NONE))
                return cdi->ops->drive_status(cdi, CDSL_CURRENT);
        if (((int)arg >= cdi->capacity))
        if (arg >= cdi->capacity)
                return -EINVAL;
        return cdrom_slot_status(cdi, arg);
}

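Dropping the `(int)` cast closes a signedness hole: `arg` arrives as an unsigned long from the ioctl, and casting a huge value to int yields (on the usual two's-complement conversion) a negative number that sails past a signed `>=` comparison. For example, with a 4-slot changer:

        unsigned long arg = 0xfffffffffffffffeUL;
        int capacity = 4;

        /* old: (int)arg == -2, so "(int)arg >= capacity" is false and the
         *      bogus slot number is accepted */
        /* new: "arg >= capacity" compares unsigned and is true -> -EINVAL */
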
@@ -629,7 +629,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
        MUX(0, "clk_i2sout_src", mux_i2sch_p, CLK_SET_RATE_PARENT,
                        RK3399_CLKSEL_CON(31), 0, 2, MFLAGS),
        COMPOSITE_NODIV(SCLK_I2S_8CH_OUT, "clk_i2sout", mux_i2sout_p, CLK_SET_RATE_PARENT,
                        RK3399_CLKSEL_CON(30), 8, 2, MFLAGS,
                        RK3399_CLKSEL_CON(31), 2, 1, MFLAGS,
                        RK3399_CLKGATE_CON(8), 12, GFLAGS),

        /* uart */
@@ -1521,6 +1521,7 @@ static const char *const rk3399_pmucru_critical_clocks[] __initconst = {
        "pclk_pmu_src",
        "fclk_cm0s_src_pmu",
        "clk_timer_src_pmu",
        "pclk_rkpwm_pmu",
};

static void __init rk3399_clk_init(struct device_node *np)

@@ -201,7 +201,8 @@ static void caam_jr_dequeue(unsigned long devarg)
                BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);

                /* Unmap just-run descriptor so we can post-process */
                dma_unmap_single(dev, jrp->outring[hw_idx].desc,
                dma_unmap_single(dev,
                                 caam_dma_to_cpu(jrp->outring[hw_idx].desc),
                                 jrp->entinfo[sw_idx].desc_size,
                                 DMA_TO_DEVICE);

@@ -111,24 +111,23 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
                ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src,
                                               nbytes);
        } else {
                preempt_disable();
                pagefault_disable();
                enable_kernel_vsx();

                blkcipher_walk_init(&walk, dst, src, nbytes);
                ret = blkcipher_walk_virt(desc, &walk);
                while ((nbytes = walk.nbytes)) {
                        preempt_disable();
                        pagefault_disable();
                        enable_kernel_vsx();
                        aes_p8_cbc_encrypt(walk.src.virt.addr,
                                           walk.dst.virt.addr,
                                           nbytes & AES_BLOCK_MASK,
                                           &ctx->enc_key, walk.iv, 1);
                        disable_kernel_vsx();
                        pagefault_enable();
                        preempt_enable();

                        nbytes &= AES_BLOCK_SIZE - 1;
                        ret = blkcipher_walk_done(desc, &walk, nbytes);
                }

                disable_kernel_vsx();
                pagefault_enable();
                preempt_enable();
        }

        return ret;
@@ -152,24 +151,23 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
                ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src,
                                               nbytes);
        } else {
                preempt_disable();
                pagefault_disable();
                enable_kernel_vsx();

                blkcipher_walk_init(&walk, dst, src, nbytes);
                ret = blkcipher_walk_virt(desc, &walk);
                while ((nbytes = walk.nbytes)) {
                        preempt_disable();
                        pagefault_disable();
                        enable_kernel_vsx();
                        aes_p8_cbc_encrypt(walk.src.virt.addr,
                                           walk.dst.virt.addr,
                                           nbytes & AES_BLOCK_MASK,
                                           &ctx->dec_key, walk.iv, 0);
                        disable_kernel_vsx();
                        pagefault_enable();
                        preempt_enable();

                        nbytes &= AES_BLOCK_SIZE - 1;
                        ret = blkcipher_walk_done(desc, &walk, nbytes);
                }

                disable_kernel_vsx();
                pagefault_enable();
                preempt_enable();
        }

        return ret;

@@ -123,32 +123,39 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
                ret = enc ? crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes) :
                            crypto_blkcipher_decrypt(&fallback_desc, dst, src, nbytes);
        } else {
                blkcipher_walk_init(&walk, dst, src, nbytes);

                ret = blkcipher_walk_virt(desc, &walk);

                preempt_disable();
                pagefault_disable();
                enable_kernel_vsx();

                blkcipher_walk_init(&walk, dst, src, nbytes);

                ret = blkcipher_walk_virt(desc, &walk);
                iv = walk.iv;
                memset(tweak, 0, AES_BLOCK_SIZE);
                aes_p8_encrypt(iv, tweak, &ctx->tweak_key);

                disable_kernel_vsx();
                pagefault_enable();
                preempt_enable();

                while ((nbytes = walk.nbytes)) {
                        preempt_disable();
                        pagefault_disable();
                        enable_kernel_vsx();
                        if (enc)
                                aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
                                                   nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak);
                        else
                                aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
                                                   nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak);
                        disable_kernel_vsx();
                        pagefault_enable();
                        preempt_enable();

                        nbytes &= AES_BLOCK_SIZE - 1;
                        ret = blkcipher_walk_done(desc, &walk, nbytes);
                }

                disable_kernel_vsx();
                pagefault_enable();
                preempt_enable();
        }
        return ret;
}

@@ -792,7 +792,7 @@ static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
        struct k3_dma_dev *d = ofdma->of_dma_data;
        unsigned int request = dma_spec->args[0];

        if (request > d->dma_requests)
        if (request >= d->dma_requests)
                return NULL;

        return dma_get_slave_channel(&(d->chans[request].vc.chan));

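The k3-dma fix (and the identical nouveau relocation fixes further down) is the canonical off-by-one in bounds checks: with `dma_requests == N`, valid indices into `chans[]` are 0..N-1, so `request == N` must be rejected as well, which only `>=` does:

        if (request >= d->dma_requests) /* `>` wrongly admitted request == N */
                return NULL;
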
@@ -2951,7 +2951,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
        pd->src_addr_widths = PL330_DMA_BUSWIDTHS;
        pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
        pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
        pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
        pd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
        pd->max_burst = ((pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) ?
                        1 : PL330_MAX_BURST);

@@ -432,7 +432,8 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
                        .vsync_irq = MALIDP500_DE_IRQ_VSYNC,
                },
                .se_irq_map = {
                        .irq_mask = MALIDP500_SE_IRQ_CONF_MODE,
                        .irq_mask = MALIDP500_SE_IRQ_CONF_MODE |
                                    MALIDP500_SE_IRQ_GLOBAL,
                        .vsync_irq = 0,
                },
                .dc_irq_map = {

@@ -160,6 +160,7 @@ enum {
        CFG_ALPHAM_GRA          = 0x1 << 16,
        CFG_ALPHAM_CFG          = 0x2 << 16,
        CFG_ALPHA_MASK          = 0xff << 8,
#define CFG_ALPHA(x)            ((x) << 8)
        CFG_PIXCMD_MASK         = 0xff,
};

@@ -27,6 +27,7 @@ struct armada_ovl_plane_properties {
        uint16_t contrast;
        uint16_t saturation;
        uint32_t colorkey_mode;
        uint32_t colorkey_enable;
};

struct armada_ovl_plane {
@@ -62,11 +63,13 @@ armada_ovl_update_attr(struct armada_ovl_plane_properties *prop,
        writel_relaxed(0x00002000, dcrtc->base + LCD_SPU_CBSH_HUE);

        spin_lock_irq(&dcrtc->irq_lock);
        armada_updatel(prop->colorkey_mode | CFG_ALPHAM_GRA,
                       CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
                       dcrtc->base + LCD_SPU_DMA_CTRL1);

        armada_updatel(ADV_GRACOLORKEY, 0, dcrtc->base + LCD_SPU_ADV_REG);
        armada_updatel(prop->colorkey_mode,
                       CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
                       dcrtc->base + LCD_SPU_DMA_CTRL1);
        if (dcrtc->variant->has_spu_adv_reg)
                armada_updatel(prop->colorkey_enable,
                               ADV_GRACOLORKEY | ADV_VIDCOLORKEY,
                               dcrtc->base + LCD_SPU_ADV_REG);
        spin_unlock_irq(&dcrtc->irq_lock);
}

@@ -340,8 +343,17 @@ static int armada_ovl_plane_set_property(struct drm_plane *plane,
                dplane->prop.colorkey_vb |= K2B(val);
                update_attr = true;
        } else if (property == priv->colorkey_mode_prop) {
                dplane->prop.colorkey_mode &= ~CFG_CKMODE_MASK;
                dplane->prop.colorkey_mode |= CFG_CKMODE(val);
                if (val == CKMODE_DISABLE) {
                        dplane->prop.colorkey_mode =
                                CFG_CKMODE(CKMODE_DISABLE) |
                                CFG_ALPHAM_CFG | CFG_ALPHA(255);
                        dplane->prop.colorkey_enable = 0;
                } else {
                        dplane->prop.colorkey_mode =
                                CFG_CKMODE(val) |
                                CFG_ALPHAM_GRA | CFG_ALPHA(0);
                        dplane->prop.colorkey_enable = ADV_GRACOLORKEY;
                }
                update_attr = true;
        } else if (property == priv->brightness_prop) {
                dplane->prop.brightness = val - 256;
@@ -471,7 +483,9 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
        dplane->prop.colorkey_yr = 0xfefefe00;
        dplane->prop.colorkey_ug = 0x01010100;
        dplane->prop.colorkey_vb = 0x01010100;
        dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB);
        dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB) |
                                     CFG_ALPHAM_GRA | CFG_ALPHA(0);
        dplane->prop.colorkey_enable = ADV_GRACOLORKEY;
        dplane->prop.brightness = 0;
        dplane->prop.contrast = 0x4000;
        dplane->prop.saturation = 0x4000;

@@ -426,6 +426,18 @@ static void adv7511_hpd_work(struct work_struct *work)
        else
                status = connector_status_disconnected;

        /*
         * The bridge resets its registers on unplug. So when we get a plug
         * event and we're already supposed to be powered, cycle the bridge to
         * restore its state.
         */
        if (status == connector_status_connected &&
            adv7511->connector.status == connector_status_disconnected &&
            adv7511->powered) {
                regcache_mark_dirty(adv7511->regmap);
                adv7511_power_on(adv7511);
        }

        if (adv7511->connector.status != status) {
                adv7511->connector.status = status;
                drm_kms_helper_hotplug_event(adv7511->connector.dev);

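The replug handling above leans on regmap's register cache: after the bridge loses its register file on unplug, regcache_mark_dirty() flags every cached register as out of sync so the subsequent power-on can write the previous configuration back. In regmap terms the sequence is (sketch; in this driver the sync is expected to happen inside adv7511_power_on()):

        regcache_mark_dirty(map);       /* hw lost state; cache is the truth */
        /* ... re-power the device ... */
        regcache_sync(map);             /* replay cached registers to hw */
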
@@ -113,6 +113,9 @@ static const struct edid_quirk {
        /* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
        { "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC },

        /* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */
        { "SDC", 0x3652, EDID_QUIRK_FORCE_6BPC },

        /* Belinea 10 15 55 */
        { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
        { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },

@@ -199,7 +199,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
        unsigned long val;

        val = readl(ctx->addr + DECON_WINCONx(win));
        val &= ~WINCONx_BPPMODE_MASK;
        val &= WINCONx_ENWIN_F;

        switch (fb->pixel_format) {
        case DRM_FORMAT_XRGB1555:
@@ -291,8 +291,8 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
              COORDINATE_Y(state->crtc.y + state->crtc.h - 1);
        writel(val, ctx->addr + DECON_VIDOSDxB(win));

        val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
                VIDOSD_Wx_ALPHA_B_F(0x0);
        val = VIDOSD_Wx_ALPHA_R_F(0xff) | VIDOSD_Wx_ALPHA_G_F(0xff) |
                VIDOSD_Wx_ALPHA_B_F(0xff);
        writel(val, ctx->addr + DECON_VIDOSDxC(win));

        val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |

@@ -532,21 +532,25 @@ static int gsc_src_set_fmt(struct device *dev, u32 fmt)
                        GSC_IN_CHROMA_ORDER_CRCB);
                break;
        case DRM_FORMAT_NV21:
                cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_2P);
                break;
        case DRM_FORMAT_NV61:
                cfg |= (GSC_IN_CHROMA_ORDER_CRCB |
                        GSC_IN_YUV420_2P);
                cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV422_2P);
                break;
        case DRM_FORMAT_YUV422:
                cfg |= GSC_IN_YUV422_3P;
                break;
        case DRM_FORMAT_YUV420:
                cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_3P);
                break;
        case DRM_FORMAT_YVU420:
                cfg |= GSC_IN_YUV420_3P;
                cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_3P);
                break;
        case DRM_FORMAT_NV12:
                cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_2P);
                break;
        case DRM_FORMAT_NV16:
                cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
                        GSC_IN_YUV420_2P);
                cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV422_2P);
                break;
        default:
                dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
@@ -806,18 +810,25 @@ static int gsc_dst_set_fmt(struct device *dev, u32 fmt)
                        GSC_OUT_CHROMA_ORDER_CRCB);
                break;
        case DRM_FORMAT_NV21:
        case DRM_FORMAT_NV61:
                cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P);
                break;
        case DRM_FORMAT_NV61:
                cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV422_2P);
                break;
        case DRM_FORMAT_YUV422:
                cfg |= GSC_OUT_YUV422_3P;
                break;
        case DRM_FORMAT_YUV420:
                cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_3P);
                break;
        case DRM_FORMAT_YVU420:
                cfg |= GSC_OUT_YUV420_3P;
                cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_3P);
                break;
        case DRM_FORMAT_NV12:
                cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_2P);
                break;
        case DRM_FORMAT_NV16:
                cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
                        GSC_OUT_YUV420_2P);
                cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV422_2P);
                break;
        default:
                dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
@@ -138,6 +138,7 @@
#define GSC_OUT_YUV420_3P               (3 << 4)
#define GSC_OUT_YUV422_1P               (4 << 4)
#define GSC_OUT_YUV422_2P               (5 << 4)
#define GSC_OUT_YUV422_3P               (6 << 4)
#define GSC_OUT_YUV444                  (7 << 4)
#define GSC_OUT_TILE_TYPE_MASK          (1 << 2)
#define GSC_OUT_TILE_C_16x8             (0 << 2)

@@ -778,6 +778,9 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
                            I915_USERPTR_UNSYNCHRONIZED))
                return -EINVAL;

        if (!args->user_size)
                return -EINVAL;

        if (offset_in_page(args->user_ptr | args->user_size))
                return -EINVAL;

@@ -1237,6 +1237,9 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
                return PTR_ERR(imx_ldb->regmap);
        }

        /* disable LDB by resetting the control register to POR default */
        regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0);

        imx_ldb->dev = dev;
        imx_ldb->ldb_ctrl_reg = devtype->ctrl_reg;
        imx_ldb->lvds_mux = devtype->bus_mux;
@@ -1335,14 +1338,14 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
                        goto get_phy;
                }

                if (!of_device_is_available(child))
                        continue;

                if (dual && i > 0) {
                        dev_warn(dev, "dual-channel mode, ignoring second output\n");
                        continue;
                }

                if (!of_device_is_available(child))
                        continue;

                channel = &imx_ldb->channel[i];
                channel->ldb = imx_ldb;
                channel->chno = i;

@@ -601,7 +601,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
                struct nouveau_bo *nvbo;
                uint32_t data;

                if (unlikely(r->bo_index > req->nr_buffers)) {
                if (unlikely(r->bo_index >= req->nr_buffers)) {
                        NV_PRINTK(err, cli, "reloc bo index invalid\n");
                        ret = -EINVAL;
                        break;
@@ -611,7 +611,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
                if (b->presumed.valid)
                        continue;

                if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
                if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
                        NV_PRINTK(err, cli, "reloc container bo index invalid\n");
                        ret = -EINVAL;
                        break;

@@ -217,7 +217,7 @@ static int udl_fb_open(struct fb_info *info, int user)

                struct fb_deferred_io *fbdefio;

                fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
                fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);

                if (fbdefio) {
                        fbdefio->delay = DL_DEFIO_WRITE_DELAY;

@@ -169,18 +169,13 @@ static void udl_free_urb_list(struct drm_device *dev)
        struct list_head *node;
        struct urb_node *unode;
        struct urb *urb;
        int ret;
        unsigned long flags;

        DRM_DEBUG("Waiting for completes and freeing all render urbs\n");

        /* keep waiting and freeing, until we've got 'em all */
        while (count--) {

                /* Getting interrupted means a leak, but ok at shutdown*/
                ret = down_interruptible(&udl->urbs.limit_sem);
                if (ret)
                        break;
                down(&udl->urbs.limit_sem);

                spin_lock_irqsave(&udl->urbs.lock, flags);

@@ -204,17 +199,22 @@ static void udl_free_urb_list(struct drm_device *dev)
static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
{
        struct udl_device *udl = dev->dev_private;
        int i = 0;
        struct urb *urb;
        struct urb_node *unode;
        char *buf;
        size_t wanted_size = count * size;

        spin_lock_init(&udl->urbs.lock);

retry:
        udl->urbs.size = size;
        INIT_LIST_HEAD(&udl->urbs.list);

        while (i < count) {
        sema_init(&udl->urbs.limit_sem, 0);
        udl->urbs.count = 0;
        udl->urbs.available = 0;

        while (udl->urbs.count * size < wanted_size) {
                unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
                if (!unode)
                        break;
@@ -230,11 +230,16 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
                }
                unode->urb = urb;

                buf = usb_alloc_coherent(udl->udev, MAX_TRANSFER, GFP_KERNEL,
                buf = usb_alloc_coherent(udl->udev, size, GFP_KERNEL,
                                         &urb->transfer_dma);
                if (!buf) {
                        kfree(unode);
                        usb_free_urb(urb);
                        if (size > PAGE_SIZE) {
                                size /= 2;
                                udl_free_urb_list(dev);
                                goto retry;
                        }
                        break;
                }

@@ -245,16 +250,14 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)

                list_add_tail(&unode->entry, &udl->urbs.list);

                i++;
                up(&udl->urbs.limit_sem);
                udl->urbs.count++;
                udl->urbs.available++;
        }

        sema_init(&udl->urbs.limit_sem, i);
        udl->urbs.count = i;
        udl->urbs.available = i;
        DRM_DEBUG("allocated %d %d byte urbs\n", udl->urbs.count, (int) size);

        DRM_DEBUG("allocated %d %d byte urbs\n", i, (int) size);

        return i;
        return udl->urbs.count;
}

struct urb *udl_get_urb(struct drm_device *dev)

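The reworked udl allocator above retries with progressively smaller coherent buffers instead of failing outright, and tracks progress by bytes (`udl->urbs.count * size`) rather than by a fixed urb count. The fallback in miniature (fragment; `udev`, `dma`, and the PAGE_SIZE floor as in the hunk):

        size_t size = MAX_TRANSFER;
        void *buf;

        while (!(buf = usb_alloc_coherent(udev, size, GFP_KERNEL, &dma))) {
                if (size <= PAGE_SIZE)
                        break;          /* give up below one page */
                size /= 2;              /* halve and retry */
        }
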