Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

David S. Miller 2016-09-23 06:46:57 -04:00
commit d6989d4bbe
289 changed files with 2982 additions and 1761 deletions

View File

@ -10,7 +10,7 @@ Required properties:
subsystem (mmcss) inside the FlashSS (available in STiH407 SoC
family).
- clock-names: Should be "mmc".
- clock-names: Should be "mmc" and "icn". (NB: The latter is not compulsory)
See: Documentation/devicetree/bindings/resource-names.txt
- clocks: Phandle to the clock.
See: Documentation/devicetree/bindings/clock/clock-bindings.txt

View File

@ -144,7 +144,7 @@ logical address types are already defined will return with error ``EBUSY``.
- ``flags``
- Flags. No flags are defined yet, so set this to 0.
- Flags. See :ref:`cec-log-addrs-flags` for a list of available flags.
- .. row 7
@ -201,6 +201,25 @@ logical address types are already defined will return with error ``EBUSY``.
give the CEC framework more information about the device type, even
though the framework won't use it directly in the CEC message.
.. _cec-log-addrs-flags:
.. flat-table:: Flags for struct cec_log_addrs
:header-rows: 0
:stub-columns: 0
:widths: 3 1 4
- .. _`CEC-LOG-ADDRS-FL-ALLOW-UNREG-FALLBACK`:
- ``CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK``
- 1
- By default, if no logical address of the requested type can be claimed, then
it will go back to the unconfigured state. If this flag is set, then it will
fall back to the Unregistered logical address. Note that if the Unregistered
logical address was explicitly requested, then this flag has no effect.
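A minimal userspace sketch of claiming a playback logical address with this fallback enabled; the device setup and error handling are assumed rather than shown, and the field choices are illustrative:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/cec.h>

/* Sketch: request one playback logical address, falling back to
 * Unregistered if the claim fails (fd is an already-open CEC device). */
int claim_playback_with_fallback(int fd)
{
	struct cec_log_addrs laddrs;

	memset(&laddrs, 0, sizeof(laddrs));
	laddrs.cec_version = CEC_OP_CEC_VERSION_2_0;
	laddrs.num_log_addrs = 1;
	laddrs.vendor_id = CEC_VENDOR_ID_NONE;
	laddrs.flags = CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK;
	laddrs.log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
	laddrs.primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_PLAYBACK;
	laddrs.all_device_types[0] = CEC_OP_ALL_DEVTYPE_PLAYBACK;
	strcpy(laddrs.osd_name, "demo");

	return ioctl(fd, CEC_ADAP_S_LOG_ADDRS, &laddrs);
}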
.. _cec-versions:
.. flat-table:: CEC Versions

View File

@ -64,7 +64,8 @@ it is guaranteed that the state did change in between the two events.
- ``phys_addr``
- The current physical address.
- The current physical address. This is ``CEC_PHYS_ADDR_INVALID`` if no
valid physical address is set.
- .. row 2
@ -72,7 +73,10 @@ it is guaranteed that the state did change in between the two events.
- ``log_addr_mask``
- The current set of claimed logical addresses.
- The current set of claimed logical addresses. This is 0 if no logical
addresses are claimed or if ``phys_addr`` is ``CEC_PHYS_ADDR_INVALID``.
If bit 15 is set (``1 << CEC_LOG_ADDR_UNREGISTERED``) then this device
has the unregistered logical address. In that case all other bits are 0.
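A short sketch of how a client might decode these two fields from a dequeued event; the dequeue call is the real ``CEC_DQEVENT`` ioctl, while the reporting around it is illustrative:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/cec.h>

/* Sketch: dequeue one event and apply the semantics described above. */
static void report_state_change(int fd)
{
	struct cec_event ev;

	if (ioctl(fd, CEC_DQEVENT, &ev) < 0 || ev.event != CEC_EVENT_STATE_CHANGE)
		return;

	if (ev.state_change.phys_addr == CEC_PHYS_ADDR_INVALID)
		printf("no valid physical address\n");
	else if (ev.state_change.log_addr_mask & (1 << CEC_LOG_ADDR_UNREGISTERED))
		printf("unregistered logical address only\n"); /* other bits are 0 */
	else
		printf("claimed logical addresses: 0x%04x\n",
		       ev.state_change.log_addr_mask);
}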

View File

@ -1634,6 +1634,7 @@ N: rockchip
ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
M: Kukjin Kim <kgene@kernel.org>
M: Krzysztof Kozlowski <krzk@kernel.org>
R: Javier Martinez Canillas <javier@osg.samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
S: Maintained
@ -2509,7 +2510,7 @@ S: Supported
F: kernel/bpf/
BROADCOM B44 10/100 ETHERNET DRIVER
M: Gary Zambrano <zambrano@broadcom.com>
M: Michael Chan <michael.chan@broadcom.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/b44.*
@ -6111,7 +6112,7 @@ S: Supported
F: drivers/cpufreq/intel_pstate.c
INTEL FRAMEBUFFER DRIVER (excluding 810 and 815)
M: Maik Broemme <mbroemme@plusserver.de>
M: Maik Broemme <mbroemme@libmpq.org>
L: linux-fbdev@vger.kernel.org
S: Maintained
F: Documentation/fb/intelfb.txt
@ -8169,6 +8170,15 @@ S: Maintained
W: https://fedorahosted.org/dropwatch/
F: net/core/drop_monitor.c
NETWORKING [DSA]
M: Andrew Lunn <andrew@lunn.ch>
M: Vivien Didelot <vivien.didelot@savoirfairelinux.com>
M: Florian Fainelli <f.fainelli@gmail.com>
S: Maintained
F: net/dsa/
F: include/net/dsa.h
F: drivers/net/dsa/
NETWORKING [GENERAL]
M: "David S. Miller" <davem@davemloft.net>
L: netdev@vger.kernel.org
@ -12585,7 +12595,7 @@ F: include/linux/if_*vlan.h
F: net/8021q/
VLYNQ BUS
M: Florian Fainelli <florian@openwrt.org>
M: Florian Fainelli <f.fainelli@gmail.com>
L: openwrt-devel@lists.openwrt.org (subscribers-only)
S: Maintained
F: drivers/vlynq/vlynq.c

View File

@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 8
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Psychotic Stoned Sheep
# *DOCUMENTATION*

View File

@ -371,14 +371,6 @@ __copy_tofrom_user_nocheck(void *to, const void *from, long len)
return __cu_len;
}
extern inline long
__copy_tofrom_user(void *to, const void *from, long len, const void __user *validate)
{
if (__access_ok((unsigned long)validate, len, get_fs()))
len = __copy_tofrom_user_nocheck(to, from, len);
return len;
}
#define __copy_to_user(to, from, n) \
({ \
__chk_user_ptr(to); \
@ -393,17 +385,22 @@ __copy_tofrom_user(void *to, const void *from, long len, const void __user *vali
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
extern inline long
copy_to_user(void __user *to, const void *from, long n)
{
return __copy_tofrom_user((__force void *)to, from, n, to);
if (likely(__access_ok((unsigned long)to, n, get_fs())))
n = __copy_tofrom_user_nocheck((__force void *)to, from, n);
return n;
}
extern inline long
copy_from_user(void *to, const void __user *from, long n)
{
return __copy_tofrom_user(to, (__force void *)from, n, from);
if (likely(__access_ok((unsigned long)from, n, get_fs())))
n = __copy_tofrom_user_nocheck(to, (__force void *)from, n);
else
memset(to, 0, n);
return n;
}
extern void __do_clear_user(void);
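This alpha hunk is one instance of a hardening pattern repeated across the architecture hunks below (avr32, blackfin, cris, ia64, microblaze, mips, mn10300, nios2, openrisc, parisc, powerpc, s390, score, sh, sparc): when the access check fails or the copy falls short, the uncopied tail of the kernel destination buffer must be zeroed so stale kernel memory cannot leak back to userspace. A hedged sketch of the common shape, with raw_copy() standing in for each architecture's low-level helper:

/* Sketch only, not a real kernel interface: raw_copy() is assumed to
 * return the number of bytes it could NOT copy, like the arch helpers. */
static inline unsigned long
sketch_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;

	if (likely(access_ok(VERIFY_READ, from, n)))
		res = raw_copy(to, from, n);
	if (unlikely(res))
		memset(to + (n - res), 0, res);	/* zero the uncopied tail */
	return res;
}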

View File

@ -83,7 +83,10 @@
"2: ;nop\n" \
" .section .fixup, \"ax\"\n" \
" .align 4\n" \
"3: mov %0, %3\n" \
"3: # return -EFAULT\n" \
" mov %0, %3\n" \
" # zero out dst ptr\n" \
" mov %1, 0\n" \
" j 2b\n" \
" .previous\n" \
" .section __ex_table, \"a\"\n" \
@ -101,7 +104,11 @@
"2: ;nop\n" \
" .section .fixup, \"ax\"\n" \
" .align 4\n" \
"3: mov %0, %3\n" \
"3: # return -EFAULT\n" \
" mov %0, %3\n" \
" # zero out dst ptr\n" \
" mov %1, 0\n" \
" mov %R1, 0\n" \
" j 2b\n" \
" .previous\n" \
" .section __ex_table, \"a\"\n" \

View File

@ -2,6 +2,7 @@
/ {
memory {
device_type = "memory";
reg = <0 0x10000000>;
};

View File

@ -2,7 +2,6 @@
#include <dt-bindings/clock/bcm2835.h>
#include <dt-bindings/clock/bcm2835-aux.h>
#include <dt-bindings/gpio/gpio.h>
#include "skeleton.dtsi"
/* This include file covers the common peripherals and configuration between
* bcm2835 and bcm2836 implementations, leaving the CPU configuration to
@ -13,6 +12,8 @@
compatible = "brcm,bcm2835";
model = "BCM2835";
interrupt-parent = <&intc>;
#address-cells = <1>;
#size-cells = <1>;
chosen {
bootargs = "earlyprintk console=ttyAMA0";

View File

@ -550,8 +550,9 @@
interrupt-names = "mmcirq";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_mmc0>;
clock-names = "mmc";
clocks = <&clk_s_c0_flexgen CLK_MMC_0>;
clock-names = "mmc", "icn";
clocks = <&clk_s_c0_flexgen CLK_MMC_0>,
<&clk_s_c0_flexgen CLK_RX_ICN_HVA>;
bus-width = <8>;
non-removable;
};
@ -565,8 +566,9 @@
interrupt-names = "mmcirq";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sd1>;
clock-names = "mmc";
clocks = <&clk_s_c0_flexgen CLK_MMC_1>;
clock-names = "mmc", "icn";
clocks = <&clk_s_c0_flexgen CLK_MMC_1>,
<&clk_s_c0_flexgen CLK_RX_ICN_HVA>;
resets = <&softreset STIH407_MMC1_SOFTRESET>;
bus-width = <4>;
};

View File

@ -41,7 +41,8 @@
compatible = "st,st-ohci-300x";
reg = <0x9a03c00 0x100>;
interrupts = <GIC_SPI 180 IRQ_TYPE_NONE>;
clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
<&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>,
<&softreset STIH407_USB2_PORT0_SOFTRESET>;
reset-names = "power", "softreset";
@ -57,7 +58,8 @@
interrupts = <GIC_SPI 151 IRQ_TYPE_NONE>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb0>;
clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
<&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>,
<&softreset STIH407_USB2_PORT0_SOFTRESET>;
reset-names = "power", "softreset";
@ -71,7 +73,8 @@
compatible = "st,st-ohci-300x";
reg = <0x9a83c00 0x100>;
interrupts = <GIC_SPI 181 IRQ_TYPE_NONE>;
clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
<&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>,
<&softreset STIH407_USB2_PORT1_SOFTRESET>;
reset-names = "power", "softreset";
@ -87,7 +90,8 @@
interrupts = <GIC_SPI 153 IRQ_TYPE_NONE>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb1>;
clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
<&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>,
<&softreset STIH407_USB2_PORT1_SOFTRESET>;
reset-names = "power", "softreset";

View File

@ -140,7 +140,7 @@ static struct locomo_dev_info locomo_devices[] = {
static void locomo_handler(struct irq_desc *desc)
{
struct locomo *lchip = irq_desc_get_chip_data(desc);
struct locomo *lchip = irq_desc_get_handler_data(desc);
int req, i;
/* Acknowledge the parent IRQ */
@ -200,8 +200,7 @@ static void locomo_setup_irq(struct locomo *lchip)
* Install handler for IRQ_LOCOMO_HW.
*/
irq_set_irq_type(lchip->irq, IRQ_TYPE_EDGE_FALLING);
irq_set_chip_data(lchip->irq, lchip);
irq_set_chained_handler(lchip->irq, locomo_handler);
irq_set_chained_handler_and_data(lchip->irq, locomo_handler, lchip);
/* Install handlers for IRQ_LOCOMO_* */
for ( ; irq <= lchip->irq_base + 3; irq++) {

View File

@ -472,8 +472,8 @@ static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base)
* specifies that S0ReadyInt and S1ReadyInt should be '1'.
*/
sa1111_writel(0, irqbase + SA1111_INTPOL0);
sa1111_writel(SA1111_IRQMASK_HI(IRQ_S0_READY_NINT) |
SA1111_IRQMASK_HI(IRQ_S1_READY_NINT),
sa1111_writel(BIT(IRQ_S0_READY_NINT & 31) |
BIT(IRQ_S1_READY_NINT & 31),
irqbase + SA1111_INTPOL1);
/* clear all IRQs */
@ -754,7 +754,7 @@ static int __sa1111_probe(struct device *me, struct resource *mem, int irq)
if (sachip->irq != NO_IRQ) {
ret = sa1111_setup_irq(sachip, pd->irq_base);
if (ret)
goto err_unmap;
goto err_clk;
}
#ifdef CONFIG_ARCH_SA1100
@ -799,6 +799,8 @@ static int __sa1111_probe(struct device *me, struct resource *mem, int irq)
return 0;
err_clk:
clk_disable(sachip->clk);
err_unmap:
iounmap(sachip->base);
err_clk_unprep:
@ -869,9 +871,9 @@ struct sa1111_save_data {
#ifdef CONFIG_PM
static int sa1111_suspend(struct platform_device *dev, pm_message_t state)
static int sa1111_suspend_noirq(struct device *dev)
{
struct sa1111 *sachip = platform_get_drvdata(dev);
struct sa1111 *sachip = dev_get_drvdata(dev);
struct sa1111_save_data *save;
unsigned long flags;
unsigned int val;
@ -934,9 +936,9 @@ static int sa1111_suspend(struct platform_device *dev, pm_message_t state)
* restored by their respective drivers, and must be called
* via LDM after this function.
*/
static int sa1111_resume(struct platform_device *dev)
static int sa1111_resume_noirq(struct device *dev)
{
struct sa1111 *sachip = platform_get_drvdata(dev);
struct sa1111 *sachip = dev_get_drvdata(dev);
struct sa1111_save_data *save;
unsigned long flags, id;
void __iomem *base;
@ -952,7 +954,7 @@ static int sa1111_resume(struct platform_device *dev)
id = sa1111_readl(sachip->base + SA1111_SKID);
if ((id & SKID_ID_MASK) != SKID_SA1111_ID) {
__sa1111_remove(sachip);
platform_set_drvdata(dev, NULL);
dev_set_drvdata(dev, NULL);
kfree(save);
return 0;
}
@ -1003,8 +1005,8 @@ static int sa1111_resume(struct platform_device *dev)
}
#else
#define sa1111_suspend NULL
#define sa1111_resume NULL
#define sa1111_suspend_noirq NULL
#define sa1111_resume_noirq NULL
#endif
static int sa1111_probe(struct platform_device *pdev)
@ -1017,7 +1019,7 @@ static int sa1111_probe(struct platform_device *pdev)
return -EINVAL;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return -ENXIO;
return irq;
return __sa1111_probe(&pdev->dev, mem, irq);
}
@ -1038,6 +1040,11 @@ static int sa1111_remove(struct platform_device *pdev)
return 0;
}
static struct dev_pm_ops sa1111_pm_ops = {
.suspend_noirq = sa1111_suspend_noirq,
.resume_noirq = sa1111_resume_noirq,
};
/*
* Not sure if this should be on the system bus or not yet.
* We really want some way to register a system device at
@ -1050,10 +1057,9 @@ static int sa1111_remove(struct platform_device *pdev)
static struct platform_driver sa1111_device_driver = {
.probe = sa1111_probe,
.remove = sa1111_remove,
.suspend = sa1111_suspend,
.resume = sa1111_resume,
.driver = {
.name = "sa1111",
.pm = &sa1111_pm_ops,
},
};

View File

@ -161,6 +161,7 @@ CONFIG_USB_MON=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_STORAGE=y
CONFIG_USB_DWC3=y
CONFIG_NOP_USB_XCEIV=y
CONFIG_KEYSTONE_USB_PHY=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y

View File

@ -781,7 +781,7 @@ CONFIG_MXS_DMA=y
CONFIG_DMA_BCM2835=y
CONFIG_DMA_OMAP=y
CONFIG_QCOM_BAM_DMA=y
CONFIG_XILINX_VDMA=y
CONFIG_XILINX_DMA=y
CONFIG_DMA_SUN6I=y
CONFIG_STAGING=y
CONFIG_SENSORS_ISL29018=y

View File

@ -284,7 +284,7 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
err = blkcipher_walk_done(desc, &walk,
walk.nbytes % AES_BLOCK_SIZE);
}
if (nbytes) {
if (walk.nbytes % AES_BLOCK_SIZE) {
u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
u8 __aligned(8) tail[AES_BLOCK_SIZE];

View File

@ -47,6 +47,7 @@
#define PMD_SECT_WB (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
#define PMD_SECT_MINICACHE (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
#define PMD_SECT_WBWA (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
#define PMD_SECT_CACHE_MASK (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
#define PMD_SECT_NONSHARED_DEV (PMD_SECT_TEX(2))
/*

View File

@ -62,6 +62,7 @@
#define PMD_SECT_WT (_AT(pmdval_t, 2) << 2) /* normal inner write-through */
#define PMD_SECT_WB (_AT(pmdval_t, 3) << 2) /* normal inner write-back */
#define PMD_SECT_WBWA (_AT(pmdval_t, 7) << 2) /* normal inner write-alloc */
#define PMD_SECT_CACHE_MASK (_AT(pmdval_t, 7) << 2)
/*
* + Level 3 descriptor (PTE)

View File

@ -158,8 +158,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
{
int i;
kvm_free_stage2_pgd(kvm);
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
if (kvm->vcpus[i]) {
kvm_arch_vcpu_free(kvm->vcpus[i]);

View File

@ -1714,7 +1714,8 @@ int kvm_mmu_init(void)
kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL));
if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
hyp_idmap_start < kern_hyp_va(~0UL)) {
hyp_idmap_start < kern_hyp_va(~0UL) &&
hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
/*
* The idmap page is intersecting with the VA space,
* it is not safe to continue further.
@ -1893,6 +1894,7 @@ void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
kvm_free_stage2_pgd(kvm);
}
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,

View File

@ -255,6 +255,12 @@ static int __init exynos_pmu_irq_init(struct device_node *node,
return -ENOMEM;
}
/*
* Clear the OF_POPULATED flag set in of_irq_init so that
* later the Exynos PMU platform device won't be skipped.
*/
of_node_clear_flag(node, OF_POPULATED);
return 0;
}

View File

@ -137,6 +137,18 @@ static struct pxa2xx_udc_mach_info udc_info __initdata = {
// no D+ pullup; lubbock can't connect/disconnect in software
};
static void lubbock_init_pcmcia(void)
{
struct clk *clk;
/* Add an alias for the SA1111 PCMCIA clock */
clk = clk_get_sys("pxa2xx-pcmcia", NULL);
if (!IS_ERR(clk)) {
clkdev_create(clk, NULL, "1800");
clk_put(clk);
}
}
static struct resource sa1111_resources[] = {
[0] = {
.start = 0x10000000,
@ -467,6 +479,8 @@ static void __init lubbock_init(void)
pxa_set_btuart_info(NULL);
pxa_set_stuart_info(NULL);
lubbock_init_pcmcia();
clk_add_alias("SA1111_CLK", NULL, "GPIO11_CLK", NULL);
pxa_set_udc_info(&udc_info);
pxa_set_fb_info(NULL, &sharp_lm8v31);

View File

@ -41,40 +41,27 @@
#define REGULATOR_IRQ_MASK BIT(2) /* IRQ2, active low */
/* start of DA9210 System Control and Event Registers */
#define DA9210_REG_MASK_A 0x54
static void __iomem *irqc;
static const u8 da9063_mask_regs[] = {
DA9063_REG_IRQ_MASK_A,
DA9063_REG_IRQ_MASK_B,
DA9063_REG_IRQ_MASK_C,
DA9063_REG_IRQ_MASK_D,
/* first byte sets the memory pointer, following are consecutive reg values */
static u8 da9063_irq_clr[] = { DA9063_REG_IRQ_MASK_A, 0xff, 0xff, 0xff, 0xff };
static u8 da9210_irq_clr[] = { DA9210_REG_MASK_A, 0xff, 0xff };
static struct i2c_msg da9xxx_msgs[2] = {
{
.addr = 0x58,
.len = ARRAY_SIZE(da9063_irq_clr),
.buf = da9063_irq_clr,
}, {
.addr = 0x68,
.len = ARRAY_SIZE(da9210_irq_clr),
.buf = da9210_irq_clr,
},
};
/* DA9210 System Control and Event Registers */
#define DA9210_REG_MASK_A 0x54
#define DA9210_REG_MASK_B 0x55
static const u8 da9210_mask_regs[] = {
DA9210_REG_MASK_A,
DA9210_REG_MASK_B,
};
static void da9xxx_mask_irqs(struct i2c_client *client, const u8 regs[],
unsigned int nregs)
{
unsigned int i;
dev_info(&client->dev, "Masking %s interrupt sources\n", client->name);
for (i = 0; i < nregs; i++) {
int error = i2c_smbus_write_byte_data(client, regs[i], ~0);
if (error) {
dev_err(&client->dev, "i2c error %d\n", error);
return;
}
}
}
static int regulator_quirk_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
@ -93,12 +80,15 @@ static int regulator_quirk_notify(struct notifier_block *nb,
client = to_i2c_client(dev);
dev_dbg(dev, "Detected %s\n", client->name);
if ((client->addr == 0x58 && !strcmp(client->name, "da9063")))
da9xxx_mask_irqs(client, da9063_mask_regs,
ARRAY_SIZE(da9063_mask_regs));
else if (client->addr == 0x68 && !strcmp(client->name, "da9210"))
da9xxx_mask_irqs(client, da9210_mask_regs,
ARRAY_SIZE(da9210_mask_regs));
if ((client->addr == 0x58 && !strcmp(client->name, "da9063")) ||
(client->addr == 0x68 && !strcmp(client->name, "da9210"))) {
int ret;
dev_info(&client->dev, "clearing da9063/da9210 interrupts\n");
ret = i2c_transfer(client->adapter, da9xxx_msgs, ARRAY_SIZE(da9xxx_msgs));
if (ret != ARRAY_SIZE(da9xxx_msgs))
dev_err(&client->dev, "i2c error %d\n", ret);
}
mon = ioread32(irqc + IRQC_MONITOR);
if (mon & REGULATOR_IRQ_MASK)

View File

@ -137,7 +137,7 @@ void __init init_default_cache_policy(unsigned long pmd)
initial_pmd_value = pmd;
pmd &= PMD_SECT_TEX(1) | PMD_SECT_BUFFERABLE | PMD_SECT_CACHEABLE;
pmd &= PMD_SECT_CACHE_MASK;
for (i = 0; i < ARRAY_SIZE(cache_policies); i++)
if (cache_policies[i].pmd == pmd) {

View File

@ -170,9 +170,6 @@ static int xen_starting_cpu(unsigned int cpu)
pr_info("Xen: initializing cpu%d\n", cpu);
vcpup = per_cpu_ptr(xen_vcpu_info, cpu);
/* Direct vCPU id mapping for ARM guests. */
per_cpu(xen_vcpu_id, cpu) = cpu;
info.mfn = virt_to_gfn(vcpup);
info.offset = xen_offset_in_page(vcpup);
@ -330,6 +327,7 @@ static int __init xen_guest_init(void)
{
struct xen_add_to_physmap xatp;
struct shared_info *shared_info_page = NULL;
int cpu;
if (!xen_domain())
return 0;
@ -380,7 +378,8 @@ static int __init xen_guest_init(void)
return -ENOMEM;
/* Direct vCPU id mapping for ARM guests. */
per_cpu(xen_vcpu_id, 0) = 0;
for_each_possible_cpu(cpu)
per_cpu(xen_vcpu_id, cpu) = cpu;
xen_auto_xlat_grant_frames.count = gnttab_max_grant_frames();
if (xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn,

View File

@ -255,10 +255,10 @@
/* Local timer */
timer {
compatible = "arm,armv8-timer";
interrupts = <1 13 0xf01>,
<1 14 0xf01>,
<1 11 0xf01>,
<1 10 0xf01>;
interrupts = <1 13 0xf08>,
<1 14 0xf08>,
<1 11 0xf08>,
<1 10 0xf08>;
};
timer0: timer0@ffc03000 {

View File

@ -102,13 +102,13 @@
timer {
compatible = "arm,armv8-timer";
interrupts = <GIC_PPI 13
(GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_EDGE_RISING)>,
(GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 14
(GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_EDGE_RISING)>,
(GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 11
(GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_EDGE_RISING)>,
(GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 10
(GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_EDGE_RISING)>;
(GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>;
};
xtal: xtal-clk {

View File

@ -110,10 +110,10 @@
timer {
compatible = "arm,armv8-timer";
interrupts = <1 0 0xff01>, /* Secure Phys IRQ */
<1 13 0xff01>, /* Non-secure Phys IRQ */
<1 14 0xff01>, /* Virt IRQ */
<1 15 0xff01>; /* Hyp IRQ */
interrupts = <1 0 0xff08>, /* Secure Phys IRQ */
<1 13 0xff08>, /* Non-secure Phys IRQ */
<1 14 0xff08>, /* Virt IRQ */
<1 15 0xff08>; /* Hyp IRQ */
clock-frequency = <50000000>;
};
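The many arm64 timer hunks in this merge all adjust the third interrupt cell, which packs a PPI CPU-affinity mask into bits [15:8] and the trigger type into bits [3:0]; the old values encoded an edge-rising trigger, while the architected timer PPIs are level-triggered (8 = level low, 4 = level high). A small sketch of the arithmetic, with the constants copied from the real dt-bindings headers and the assertions purely illustrative:

#include <assert.h>

/* From include/dt-bindings/interrupt-controller/{arm-gic,irq}.h */
#define GIC_CPU_MASK_RAW(x)	((x) << 8)
#define GIC_CPU_MASK_SIMPLE(n)	GIC_CPU_MASK_RAW((1 << (n)) - 1)
#define IRQ_TYPE_EDGE_RISING	1
#define IRQ_TYPE_LEVEL_HIGH	4
#define IRQ_TYPE_LEVEL_LOW	8

int main(void)
{
	/* old, wrong: edge-rising, routed to four CPUs */
	assert((GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING) == 0xf01);
	/* new: level-low, as the CPU actually drives its timer PPI */
	assert((GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW) == 0xf08);
	/* the 0xff08 variants simply widen the mask to eight CPUs */
	assert((GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW) == 0xff08);
	return 0;
}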

View File

@ -0,0 +1 @@
../../../../arm/boot/dts/bcm2835-rpi.dtsi

View File

@ -1,7 +1,7 @@
/dts-v1/;
#include "bcm2837.dtsi"
#include "../../../../arm/boot/dts/bcm2835-rpi.dtsi"
#include "../../../../arm/boot/dts/bcm283x-rpi-smsc9514.dtsi"
#include "bcm2835-rpi.dtsi"
#include "bcm283x-rpi-smsc9514.dtsi"
/ {
compatible = "raspberrypi,3-model-b", "brcm,bcm2837";

View File

@ -1,4 +1,4 @@
#include "../../../../arm/boot/dts/bcm283x.dtsi"
#include "bcm283x.dtsi"
/ {
compatible = "brcm,bcm2836";

View File

@ -0,0 +1 @@
../../../../arm/boot/dts/bcm283x-rpi-smsc9514.dtsi

View File

@ -0,0 +1 @@
../../../../arm/boot/dts/bcm283x.dtsi

View File

@ -88,13 +88,13 @@
timer {
compatible = "arm,armv8-timer";
interrupts = <GIC_PPI 13 (GIC_CPU_MASK_RAW(0xff) |
IRQ_TYPE_EDGE_RISING)>,
IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 14 (GIC_CPU_MASK_RAW(0xff) |
IRQ_TYPE_EDGE_RISING)>,
IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 11 (GIC_CPU_MASK_RAW(0xff) |
IRQ_TYPE_EDGE_RISING)>,
IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 10 (GIC_CPU_MASK_RAW(0xff) |
IRQ_TYPE_EDGE_RISING)>;
IRQ_TYPE_LEVEL_LOW)>;
};
pmu {

View File

@ -354,10 +354,10 @@
timer {
compatible = "arm,armv8-timer";
interrupts = <1 13 0xff01>,
<1 14 0xff01>,
<1 11 0xff01>,
<1 10 0xff01>;
interrupts = <1 13 4>,
<1 14 4>,
<1 11 4>,
<1 10 4>;
};
pmu {

View File

@ -473,10 +473,10 @@
timer {
compatible = "arm,armv8-timer";
interrupts = <1 13 0xff01>,
<1 14 0xff01>,
<1 11 0xff01>,
<1 10 0xff01>;
interrupts = <1 13 0xff08>,
<1 14 0xff08>,
<1 11 0xff08>,
<1 10 0xff08>;
};
pmu_system_controller: system-controller@105c0000 {

View File

@ -119,10 +119,10 @@
timer {
compatible = "arm,armv8-timer";
interrupts = <1 13 0x1>, /* Physical Secure PPI */
<1 14 0x1>, /* Physical Non-Secure PPI */
<1 11 0x1>, /* Virtual PPI */
<1 10 0x1>; /* Hypervisor PPI */
interrupts = <1 13 0xf08>, /* Physical Secure PPI */
<1 14 0xf08>, /* Physical Non-Secure PPI */
<1 11 0xf08>, /* Virtual PPI */
<1 10 0xf08>; /* Hypervisor PPI */
};
pmu {

View File

@ -191,10 +191,10 @@
timer {
compatible = "arm,armv8-timer";
interrupts = <1 13 0x8>, /* Physical Secure PPI, active-low */
<1 14 0x8>, /* Physical Non-Secure PPI, active-low */
<1 11 0x8>, /* Virtual PPI, active-low */
<1 10 0x8>; /* Hypervisor PPI, active-low */
interrupts = <1 13 4>, /* Physical Secure PPI, active-low */
<1 14 4>, /* Physical Non-Secure PPI, active-low */
<1 11 4>, /* Virtual PPI, active-low */
<1 10 4>; /* Hypervisor PPI, active-low */
};
pmu {

View File

@ -122,10 +122,10 @@
timer {
compatible = "arm,armv8-timer";
interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>,
<GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>,
<GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>,
<GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
};
odmi: odmi@300000 {

View File

@ -129,10 +129,10 @@
timer {
compatible = "arm,armv8-timer";
interrupts = <1 13 0xf01>,
<1 14 0xf01>,
<1 11 0xf01>,
<1 10 0xf01>;
interrupts = <1 13 4>,
<1 14 4>,
<1 11 4>,
<1 10 4>;
};
soc {

View File

@ -65,10 +65,10 @@
timer {
compatible = "arm,armv8-timer";
interrupt-parent = <&gic>;
interrupts = <1 13 0xf01>,
<1 14 0xf01>,
<1 11 0xf01>,
<1 10 0xf01>;
interrupts = <1 13 0xf08>,
<1 14 0xf08>,
<1 11 0xf08>,
<1 10 0xf08>;
};
amba_apu {

View File

@ -216,7 +216,7 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
err = blkcipher_walk_done(desc, &walk,
walk.nbytes % AES_BLOCK_SIZE);
}
if (nbytes) {
if (walk.nbytes % AES_BLOCK_SIZE) {
u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
u8 __aligned(8) tail[AES_BLOCK_SIZE];

View File

@ -74,7 +74,7 @@ extern __kernel_size_t __copy_user(void *to, const void *from,
extern __kernel_size_t copy_to_user(void __user *to, const void *from,
__kernel_size_t n);
extern __kernel_size_t copy_from_user(void *to, const void __user *from,
extern __kernel_size_t ___copy_from_user(void *to, const void __user *from,
__kernel_size_t n);
static inline __kernel_size_t __copy_to_user(void __user *to, const void *from,
@ -88,6 +88,15 @@ static inline __kernel_size_t __copy_from_user(void *to,
{
return __copy_user(to, (const void __force *)from, n);
}
static inline __kernel_size_t copy_from_user(void *to,
const void __user *from,
__kernel_size_t n)
{
size_t res = ___copy_from_user(to, from, n);
if (unlikely(res))
memset(to + (n - res), 0, res);
return res;
}
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

View File

@ -36,7 +36,7 @@ EXPORT_SYMBOL(copy_page);
/*
* Userspace access stuff.
*/
EXPORT_SYMBOL(copy_from_user);
EXPORT_SYMBOL(___copy_from_user);
EXPORT_SYMBOL(copy_to_user);
EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(strncpy_from_user);

View File

@ -23,13 +23,13 @@
*/
.text
.align 1
.global copy_from_user
.type copy_from_user, @function
copy_from_user:
.global ___copy_from_user
.type ___copy_from_user, @function
___copy_from_user:
branch_if_kernel r8, __copy_user
ret_if_privileged r8, r11, r10, r10
rjmp __copy_user
.size copy_from_user, . - copy_from_user
.size ___copy_from_user, . - ___copy_from_user
.global copy_to_user
.type copy_to_user, @function

View File

@ -171,11 +171,12 @@ static inline int bad_user_access_length(void)
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n))
if (likely(access_ok(VERIFY_READ, from, n))) {
memcpy(to, (const void __force *)from, n);
else
return n;
return 0;
return 0;
}
memset(to, 0, n);
return n;
}
static inline unsigned long __must_check

View File

@ -194,30 +194,6 @@ extern unsigned long __copy_user(void __user *to, const void *from, unsigned lon
extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n);
extern unsigned long __do_clear_user(void __user *to, unsigned long n);
static inline unsigned long
__generic_copy_to_user(void __user *to, const void *from, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n))
return __copy_user(to, from, n);
return n;
}
static inline unsigned long
__generic_copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n))
return __copy_user_zeroing(to, from, n);
return n;
}
static inline unsigned long
__generic_clear_user(void __user *to, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n))
return __do_clear_user(to, n);
return n;
}
static inline long
__strncpy_from_user(char *dst, const char __user *src, long count)
{
@ -282,7 +258,7 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n)
else if (n == 24)
__asm_copy_from_user_24(to, from, ret);
else
ret = __generic_copy_from_user(to, from, n);
ret = __copy_user_zeroing(to, from, n);
return ret;
}
@ -333,7 +309,7 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
else if (n == 24)
__asm_copy_to_user_24(to, from, ret);
else
ret = __generic_copy_to_user(to, from, n);
ret = __copy_user(to, from, n);
return ret;
}
@ -366,26 +342,43 @@ __constant_clear_user(void __user *to, unsigned long n)
else if (n == 24)
__asm_clear_24(to, ret);
else
ret = __generic_clear_user(to, n);
ret = __do_clear_user(to, n);
return ret;
}
#define clear_user(to, n) \
(__builtin_constant_p(n) ? \
__constant_clear_user(to, n) : \
__generic_clear_user(to, n))
static inline size_t clear_user(void __user *to, size_t n)
{
if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
return n;
if (__builtin_constant_p(n))
return __constant_clear_user(to, n);
else
return __do_clear_user(to, n);
}
#define copy_from_user(to, from, n) \
(__builtin_constant_p(n) ? \
__constant_copy_from_user(to, from, n) : \
__generic_copy_from_user(to, from, n))
static inline size_t copy_from_user(void *to, const void __user *from, size_t n)
{
if (unlikely(!access_ok(VERIFY_READ, from, n))) {
memset(to, 0, n);
return n;
}
if (__builtin_constant_p(n))
return __constant_copy_from_user(to, from, n);
else
return __copy_user_zeroing(to, from, n);
}
#define copy_to_user(to, from, n) \
(__builtin_constant_p(n) ? \
__constant_copy_to_user(to, from, n) : \
__generic_copy_to_user(to, from, n))
static inline size_t copy_to_user(void __user *to, const void *from, size_t n)
{
if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
return n;
if (__builtin_constant_p(n))
return __constant_copy_to_user(to, from, n);
else
return __copy_user(to, from, n);
}
/* We let the __ versions of copy_from/to_user inline, because they're often
* used in fast paths and have only a small space overhead.

View File

@ -263,19 +263,25 @@ do { \
extern long __memset_user(void *dst, unsigned long count);
extern long __memcpy_user(void *dst, const void *src, unsigned long count);
#define clear_user(dst,count) __memset_user(____force(dst), (count))
#define __clear_user(dst,count) __memset_user(____force(dst), (count))
#define __copy_from_user_inatomic(to, from, n) __memcpy_user((to), ____force(from), (n))
#define __copy_to_user_inatomic(to, from, n) __memcpy_user(____force(to), (from), (n))
#else
#define clear_user(dst,count) (memset(____force(dst), 0, (count)), 0)
#define __clear_user(dst,count) (memset(____force(dst), 0, (count)), 0)
#define __copy_from_user_inatomic(to, from, n) (memcpy((to), ____force(from), (n)), 0)
#define __copy_to_user_inatomic(to, from, n) (memcpy(____force(to), (from), (n)), 0)
#endif
#define __clear_user clear_user
static inline unsigned long __must_check
clear_user(void __user *to, unsigned long n)
{
if (likely(__access_ok(to, n)))
n = __clear_user(to, n);
return n;
}
static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)

View File

@ -103,7 +103,8 @@ static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
{
long res = __strnlen_user(src, n);
/* return from strnlen can't be zero -- that would be rubbish. */
if (unlikely(!res))
return -EFAULT;
if (res > n) {
copy_from_user(dst, src, n);

View File

@ -269,19 +269,16 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
__cu_len; \
})
#define copy_from_user(to, from, n) \
({ \
void *__cu_to = (to); \
const void __user *__cu_from = (from); \
long __cu_len = (n); \
\
__chk_user_ptr(__cu_from); \
if (__access_ok(__cu_from, __cu_len, get_fs())) { \
check_object_size(__cu_to, __cu_len, false); \
__cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
} \
__cu_len; \
})
static inline unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
check_object_size(to, n, false);
if (likely(__access_ok(from, n, get_fs())))
n = __copy_user((__force void __user *) to, from, n);
else
memset(to, 0, n);
return n;
}
#define __copy_in_user(to, from, size) __copy_user((to), (from), (size))

View File

@ -219,7 +219,7 @@ extern int fixup_exception(struct pt_regs *regs);
#define __get_user_nocheck(x, ptr, size) \
({ \
long __gu_err = 0; \
unsigned long __gu_val; \
unsigned long __gu_val = 0; \
might_fault(); \
__get_user_size(__gu_val, (ptr), (size), __gu_err); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
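The __gu_val = 0 initializations here and in the microblaze, score, s390, and sh hunks below close a sibling leak: (x) is assigned from __gu_val even when the access faults, so an uninitialized __gu_val would hand stale kernel stack contents to the caller alongside the -EFAULT. A toy standalone illustration of the failure mode, not kernel code:

#include <stdio.h>

/* Sketch: gu_val plays the role of __gu_val; 'fault' simulates the
 * exception path in which the user load never happens. */
static int sketch_get_user(unsigned long *dst, int fault)
{
	unsigned long gu_val = 0;	/* the fix: defined even on fault */

	if (!fault)
		gu_val = 42;		/* stands in for the user-space load */
	*dst = gu_val;			/* assignment happens unconditionally */
	return fault ? -14 : 0;		/* -EFAULT */
}

int main(void)
{
	unsigned long val;

	if (sketch_get_user(&val, 1) < 0)
		printf("faulted, val = %lu (not stack garbage)\n", val);
	return 0;
}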

View File

@ -204,8 +204,9 @@ extern unsigned long __must_check __copy_user_zeroing(void *to,
static inline unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n))
if (likely(access_ok(VERIFY_READ, from, n)))
return __copy_user_zeroing(to, from, n);
memset(to, 0, n);
return n;
}

View File

@ -227,7 +227,7 @@ extern long __user_bad(void);
#define __get_user(x, ptr) \
({ \
unsigned long __gu_val; \
unsigned long __gu_val = 0; \
/*unsigned long __gu_ptr = (unsigned long)(ptr);*/ \
long __gu_err; \
switch (sizeof(*(ptr))) { \
@ -373,10 +373,13 @@ extern long __user_bad(void);
static inline long copy_from_user(void *to,
const void __user *from, unsigned long n)
{
unsigned long res = n;
might_fault();
if (access_ok(VERIFY_READ, from, n))
return __copy_from_user(to, from, n);
return n;
if (likely(access_ok(VERIFY_READ, from, n)))
res = __copy_from_user(to, from, n);
if (unlikely(res))
memset(to + (n - res), 0, res);
return res;
}
#define __copy_to_user(to, from, n) \

View File

@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm-eva.h>
/*
@ -1170,6 +1171,8 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
__cu_len = __invoke_copy_from_user(__cu_to, \
__cu_from, \
__cu_len); \
} else { \
memset(__cu_to, 0, __cu_len); \
} \
} \
__cu_len; \

View File

@ -166,6 +166,7 @@ struct __large_struct { unsigned long buf[100]; };
"2:\n" \
" .section .fixup,\"ax\"\n" \
"3:\n\t" \
" mov 0,%1\n" \
" mov %3,%0\n" \
" jmp 2b\n" \
" .previous\n" \

View File

@ -9,7 +9,7 @@
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <asm/uaccess.h>
#include <linux/uaccess.h>
unsigned long
__generic_copy_to_user(void *to, const void *from, unsigned long n)
@ -24,6 +24,8 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n))
__copy_user_zeroing(to, from, n);
else
memset(to, 0, n);
return n;
}

View File

@ -102,9 +102,12 @@ extern long __copy_to_user(void __user *to, const void *from, unsigned long n);
static inline long copy_from_user(void *to, const void __user *from,
unsigned long n)
{
if (!access_ok(VERIFY_READ, from, n))
return n;
return __copy_from_user(to, from, n);
unsigned long res = n;
if (access_ok(VERIFY_READ, from, n))
res = __copy_from_user(to, from, n);
if (unlikely(res))
memset(to + (n - res), 0, res);
return res;
}
static inline long copy_to_user(void __user *to, const void *from,
@ -139,7 +142,7 @@ extern long strnlen_user(const char __user *s, long n);
#define __get_user_unknown(val, size, ptr, err) do { \
err = 0; \
if (copy_from_user(&(val), ptr, size)) { \
if (__copy_from_user(&(val), ptr, size)) { \
err = -EFAULT; \
} \
} while (0)
@ -166,7 +169,7 @@ do { \
({ \
long __gu_err = -EFAULT; \
const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
unsigned long __gu_val; \
unsigned long __gu_val = 0; \
__get_user_common(__gu_val, sizeof(*(ptr)), __gu_ptr, __gu_err);\
(x) = (__force __typeof__(x))__gu_val; \
__gu_err; \

View File

@ -273,28 +273,20 @@ __copy_tofrom_user(void *to, const void *from, unsigned long size);
static inline unsigned long
copy_from_user(void *to, const void *from, unsigned long n)
{
unsigned long over;
unsigned long res = n;
if (access_ok(VERIFY_READ, from, n))
return __copy_tofrom_user(to, from, n);
if ((unsigned long)from < TASK_SIZE) {
over = (unsigned long)from + n - TASK_SIZE;
return __copy_tofrom_user(to, from, n - over) + over;
}
return n;
if (likely(access_ok(VERIFY_READ, from, n)))
res = __copy_tofrom_user(to, from, n);
if (unlikely(res))
memset(to + (n - res), 0, res);
return res;
}
static inline unsigned long
copy_to_user(void *to, const void *from, unsigned long n)
{
unsigned long over;
if (access_ok(VERIFY_WRITE, to, n))
return __copy_tofrom_user(to, from, n);
if ((unsigned long)to < TASK_SIZE) {
over = (unsigned long)to + n - TASK_SIZE;
return __copy_tofrom_user(to, from, n - over) + over;
}
if (likely(access_ok(VERIFY_WRITE, to, n)))
n = __copy_tofrom_user(to, from, n);
return n;
}
@ -303,13 +295,8 @@ extern unsigned long __clear_user(void *addr, unsigned long size);
static inline __must_check unsigned long
clear_user(void *addr, unsigned long size)
{
if (access_ok(VERIFY_WRITE, addr, size))
return __clear_user(addr, size);
if ((unsigned long)addr < TASK_SIZE) {
unsigned long over = (unsigned long)addr + size - TASK_SIZE;
return __clear_user(addr, size - over) + over;
}
if (likely(access_ok(VERIFY_WRITE, addr, size)))
size = __clear_user(addr, size);
return size;
}

View File

@ -10,6 +10,7 @@
#include <asm-generic/uaccess-unaligned.h>
#include <linux/bug.h>
#include <linux/string.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
@ -221,7 +222,7 @@ static inline unsigned long __must_check copy_from_user(void *to,
unsigned long n)
{
int sz = __compiletime_object_size(to);
int ret = -EFAULT;
unsigned long ret = n;
if (likely(sz == -1 || sz >= n))
ret = __copy_from_user(to, from, n);
@ -230,6 +231,8 @@ static inline unsigned long __must_check copy_from_user(void *to,
else
__bad_copy_user();
if (unlikely(ret))
memset(to + (n - ret), 0, ret);
return ret;
}

View File

@ -15,7 +15,7 @@ static inline bool early_cpu_has_feature(unsigned long feature)
#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
#include <linux/jump_label.h>
#define NUM_CPU_FTR_KEYS 64
#define NUM_CPU_FTR_KEYS BITS_PER_LONG
extern struct static_key_true cpu_feature_keys[NUM_CPU_FTR_KEYS];

View File

@ -308,36 +308,21 @@ extern unsigned long __copy_tofrom_user(void __user *to,
static inline unsigned long copy_from_user(void *to,
const void __user *from, unsigned long n)
{
unsigned long over;
if (access_ok(VERIFY_READ, from, n)) {
if (likely(access_ok(VERIFY_READ, from, n))) {
check_object_size(to, n, false);
return __copy_tofrom_user((__force void __user *)to, from, n);
}
if ((unsigned long)from < TASK_SIZE) {
over = (unsigned long)from + n - TASK_SIZE;
check_object_size(to, n - over, false);
return __copy_tofrom_user((__force void __user *)to, from,
n - over) + over;
}
memset(to, 0, n);
return n;
}
static inline unsigned long copy_to_user(void __user *to,
const void *from, unsigned long n)
{
unsigned long over;
if (access_ok(VERIFY_WRITE, to, n)) {
check_object_size(from, n, true);
return __copy_tofrom_user(to, (__force void __user *)from, n);
}
if ((unsigned long)to < TASK_SIZE) {
over = (unsigned long)to + n - TASK_SIZE;
check_object_size(from, n - over, true);
return __copy_tofrom_user(to, (__force void __user *)from,
n - over) + over;
}
return n;
}
@ -434,10 +419,6 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size)
might_fault();
if (likely(access_ok(VERIFY_WRITE, addr, size)))
return __clear_user(addr, size);
if ((unsigned long)addr < TASK_SIZE) {
unsigned long over = (unsigned long)addr + size - TASK_SIZE;
return __clear_user(addr, size - over) + over;
}
return size;
}

View File

@ -411,7 +411,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
*
* r13 - PACA
* cr3 - gt if waking up with partial/complete hypervisor state loss
* cr4 - eq if waking up from complete hypervisor state loss.
* cr4 - gt or eq if waking up from complete hypervisor state loss.
*/
_GLOBAL(pnv_wakeup_tb_loss)
ld r1,PACAR1(r13)
@ -453,7 +453,7 @@ lwarx_loop2:
* At this stage
* cr2 - eq if first thread to wakeup in core
* cr3- gt if waking up with partial/complete hypervisor state loss
* cr4 - eq if waking up from complete hypervisor state loss.
* cr4 - gt or eq if waking up from complete hypervisor state loss.
*/
ori r15,r15,PNV_CORE_IDLE_LOCK_BIT
@ -481,7 +481,7 @@ first_thread_in_subcore:
* If waking up from sleep, subcore state is not lost. Hence
* skip subcore state restore
*/
bne cr4,subcore_state_restored
blt cr4,subcore_state_restored
/* Restore per-subcore state */
ld r4,_SDR1(r1)
@ -526,7 +526,7 @@ timebase_resync:
* If waking up from sleep, per core state is not lost, skip to
* clear_lock.
*/
bne cr4,clear_lock
blt cr4,clear_lock
/*
* First thread in the core to wake up and its waking up with
@ -557,7 +557,7 @@ common_exit:
* If waking up from sleep, hypervisor state is not lost. Hence
* skip hypervisor state restore.
*/
bne cr4,hypervisor_state_restored
blt cr4,hypervisor_state_restored
/* Waking up from winkle */

View File

@ -2217,7 +2217,7 @@ static long pnv_pci_ioda2_set_window(struct iommu_table_group *table_group,
pnv_pci_link_table_and_group(phb->hose->node, num,
tbl, &pe->table_group);
pnv_pci_phb3_tce_invalidate_pe(pe);
pnv_pci_ioda2_tce_invalidate_pe(pe);
return 0;
}
@ -2355,7 +2355,7 @@ static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
if (ret)
pe_warn(pe, "Unmapping failed, ret = %ld\n", ret);
else
pnv_pci_phb3_tce_invalidate_pe(pe);
pnv_pci_ioda2_tce_invalidate_pe(pe);
pnv_pci_unlink_table_and_group(table_group->tables[num], table_group);
@ -3426,7 +3426,17 @@ static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
}
}
pnv_ioda_free_pe(pe);
/*
* The PE for root bus can be removed because of hotplug in EEH
* recovery for fenced PHB error. We need to mark the PE dead so
* that it can be populated again in PCI hot add path. The PE
* shouldn't be destroyed as it's the global reserved resource.
*/
if (phb->ioda.root_pe_populated &&
phb->ioda.root_pe_idx == pe->pe_number)
phb->ioda.root_pe_populated = false;
else
pnv_ioda_free_pe(pe);
}
static void pnv_pci_release_device(struct pci_dev *pdev)
@ -3442,7 +3452,17 @@ static void pnv_pci_release_device(struct pci_dev *pdev)
if (!pdn || pdn->pe_number == IODA_INVALID_PE)
return;
/*
* PCI hotplug can happen as part of EEH error recovery. The @pdn
* isn't removed and added afterwards in this scenario. We should
* set the PE number in @pdn to an invalid one. Otherwise, the PE's
* device count is decreased on removing devices while failing to
be increased on adding devices. That leads to an unbalanced PE device
count and eventually breaks the normal PCI hotplug path.
*/
pe = &phb->ioda.pe_array[pdn->pe_number];
pdn->pe_number = IODA_INVALID_PE;
WARN_ON(--pe->device_count < 0);
if (pe->device_count == 0)
pnv_ioda_release_pe(pe);

View File

@ -266,28 +266,28 @@ int __put_user_bad(void) __attribute__((noreturn));
__chk_user_ptr(ptr); \
switch (sizeof(*(ptr))) { \
case 1: { \
unsigned char __x; \
unsigned char __x = 0; \
__gu_err = __get_user_fn(&__x, ptr, \
sizeof(*(ptr))); \
(x) = *(__force __typeof__(*(ptr)) *) &__x; \
break; \
}; \
case 2: { \
unsigned short __x; \
unsigned short __x = 0; \
__gu_err = __get_user_fn(&__x, ptr, \
sizeof(*(ptr))); \
(x) = *(__force __typeof__(*(ptr)) *) &__x; \
break; \
}; \
case 4: { \
unsigned int __x; \
unsigned int __x = 0; \
__gu_err = __get_user_fn(&__x, ptr, \
sizeof(*(ptr))); \
(x) = *(__force __typeof__(*(ptr)) *) &__x; \
break; \
}; \
case 8: { \
unsigned long long __x; \
unsigned long long __x = 0; \
__gu_err = __get_user_fn(&__x, ptr, \
sizeof(*(ptr))); \
(x) = *(__force __typeof__(*(ptr)) *) &__x; \

View File

@ -2231,9 +2231,10 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
return -EINVAL;
current->thread.fpu.fpc = fpu->fpc;
if (MACHINE_HAS_VX)
convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
(freg_t *) fpu->fprs);
else
memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
return 0;
}
@ -2242,9 +2243,10 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
/* make sure we have the latest values */
save_fpu_regs();
if (MACHINE_HAS_VX)
convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
convert_vx_to_fp((freg_t *) fpu->fprs,
(__vector128 *) vcpu->run->s.regs.vrs);
else
memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
fpu->fpc = current->thread.fpu.fpc;
return 0;
}

View File

@ -584,7 +584,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
/* Validity 0x0044 will be checked by SIE */
if (rc)
goto unpin;
scb_s->gvrd = hpa;
scb_s->riccbd = hpa;
}
return 0;
unpin:

View File

@ -163,7 +163,7 @@ do { \
__get_user_asm(val, "lw", ptr); \
break; \
case 8: \
if ((copy_from_user((void *)&val, ptr, 8)) == 0) \
if (__copy_from_user((void *)&val, ptr, 8) == 0) \
__gu_err = 0; \
else \
__gu_err = -EFAULT; \
@ -188,6 +188,8 @@ do { \
\
if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
__get_user_common((x), size, __gu_ptr); \
else \
(x) = 0; \
\
__gu_err; \
})
@ -201,6 +203,7 @@ do { \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3:li %0, %4\n" \
"li %1, 0\n" \
"j 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
@ -298,35 +301,34 @@ extern int __copy_tofrom_user(void *to, const void *from, unsigned long len);
static inline unsigned long
copy_from_user(void *to, const void *from, unsigned long len)
{
unsigned long over;
unsigned long res = len;
if (access_ok(VERIFY_READ, from, len))
return __copy_tofrom_user(to, from, len);
if (likely(access_ok(VERIFY_READ, from, len)))
res = __copy_tofrom_user(to, from, len);
if ((unsigned long)from < TASK_SIZE) {
over = (unsigned long)from + len - TASK_SIZE;
return __copy_tofrom_user(to, from, len - over) + over;
}
return len;
if (unlikely(res))
memset(to + (len - res), 0, res);
return res;
}
static inline unsigned long
copy_to_user(void *to, const void *from, unsigned long len)
{
unsigned long over;
if (likely(access_ok(VERIFY_WRITE, to, len)))
len = __copy_tofrom_user(to, from, len);
if (access_ok(VERIFY_WRITE, to, len))
return __copy_tofrom_user(to, from, len);
if ((unsigned long)to < TASK_SIZE) {
over = (unsigned long)to + len - TASK_SIZE;
return __copy_tofrom_user(to, from, len - over) + over;
}
return len;
}
#define __copy_from_user(to, from, len) \
__copy_tofrom_user((to), (from), (len))
static inline unsigned long
__copy_from_user(void *to, const void *from, unsigned long len)
{
unsigned long left = __copy_tofrom_user(to, from, len);
if (unlikely(left))
memset(to + (len - left), 0, left);
return left;
}
#define __copy_to_user(to, from, len) \
__copy_tofrom_user((to), (from), (len))
@ -340,17 +342,17 @@ __copy_to_user_inatomic(void *to, const void *from, unsigned long len)
static inline unsigned long
__copy_from_user_inatomic(void *to, const void *from, unsigned long len)
{
return __copy_from_user(to, from, len);
return __copy_tofrom_user(to, from, len);
}
#define __copy_in_user(to, from, len) __copy_from_user(to, from, len)
#define __copy_in_user(to, from, len) __copy_tofrom_user(to, from, len)
static inline unsigned long
copy_in_user(void *to, const void *from, unsigned long len)
{
if (access_ok(VERIFY_READ, from, len) &&
access_ok(VERIFY_WRITE, to, len))
return copy_from_user(to, from, len);
return __copy_tofrom_user(to, from, len);
}
/*

View File

@ -151,7 +151,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
__kernel_size_t __copy_size = (__kernel_size_t) n;
if (__copy_size && __access_ok(__copy_from, __copy_size))
return __copy_user(to, from, __copy_size);
__copy_size = __copy_user(to, from, __copy_size);
if (unlikely(__copy_size))
memset(to + (n - __copy_size), 0, __copy_size);
return __copy_size;
}

View File

@ -24,6 +24,7 @@
#define __get_user_size(x,ptr,size,retval) \
do { \
retval = 0; \
x = 0; \
switch (size) { \
case 1: \
retval = __get_user_asm_b((void *)&x, \

View File

@ -266,8 +266,10 @@ static inline unsigned long copy_from_user(void *to, const void __user *from, un
if (n && __access_ok((unsigned long) from, n)) {
check_object_size(to, n, false);
return __copy_user((__force void __user *) to, from, n);
} else
} else {
memset(to, 0, n);
return n;
}
}
static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)

View File

@ -1004,79 +1004,87 @@ static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext,
return status;
}
struct exit_boot_struct {
struct boot_params *boot_params;
struct efi_info *efi;
struct setup_data *e820ext;
__u32 e820ext_size;
bool is64;
};
static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
struct efi_boot_memmap *map,
void *priv)
{
static bool first = true;
const char *signature;
__u32 nr_desc;
efi_status_t status;
struct exit_boot_struct *p = priv;
if (first) {
nr_desc = *map->buff_size / *map->desc_size;
if (nr_desc > ARRAY_SIZE(p->boot_params->e820_map)) {
u32 nr_e820ext = nr_desc -
ARRAY_SIZE(p->boot_params->e820_map);
status = alloc_e820ext(nr_e820ext, &p->e820ext,
&p->e820ext_size);
if (status != EFI_SUCCESS)
return status;
}
first = false;
}
signature = p->is64 ? EFI64_LOADER_SIGNATURE : EFI32_LOADER_SIGNATURE;
memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32));
p->efi->efi_systab = (unsigned long)sys_table_arg;
p->efi->efi_memdesc_size = *map->desc_size;
p->efi->efi_memdesc_version = *map->desc_ver;
p->efi->efi_memmap = (unsigned long)*map->map;
p->efi->efi_memmap_size = *map->map_size;
#ifdef CONFIG_X86_64
p->efi->efi_systab_hi = (unsigned long)sys_table_arg >> 32;
p->efi->efi_memmap_hi = (unsigned long)*map->map >> 32;
#endif
return EFI_SUCCESS;
}
static efi_status_t exit_boot(struct boot_params *boot_params,
void *handle, bool is64)
{
struct efi_info *efi = &boot_params->efi_info;
unsigned long map_sz, key, desc_size;
unsigned long map_sz, key, desc_size, buff_size;
efi_memory_desc_t *mem_map;
struct setup_data *e820ext;
const char *signature;
__u32 e820ext_size;
__u32 nr_desc, prev_nr_desc;
efi_status_t status;
__u32 desc_version;
bool called_exit = false;
u8 nr_entries;
int i;
struct efi_boot_memmap map;
struct exit_boot_struct priv;
nr_desc = 0;
e820ext = NULL;
e820ext_size = 0;
get_map:
status = efi_get_memory_map(sys_table, &mem_map, &map_sz, &desc_size,
&desc_version, &key);
map.map = &mem_map;
map.map_size = &map_sz;
map.desc_size = &desc_size;
map.desc_ver = &desc_version;
map.key_ptr = &key;
map.buff_size = &buff_size;
priv.boot_params = boot_params;
priv.efi = &boot_params->efi_info;
priv.e820ext = NULL;
priv.e820ext_size = 0;
priv.is64 = is64;
/* Might as well exit boot services now */
status = efi_exit_boot_services(sys_table, handle, &map, &priv,
exit_boot_func);
if (status != EFI_SUCCESS)
return status;
prev_nr_desc = nr_desc;
nr_desc = map_sz / desc_size;
if (nr_desc > prev_nr_desc &&
nr_desc > ARRAY_SIZE(boot_params->e820_map)) {
u32 nr_e820ext = nr_desc - ARRAY_SIZE(boot_params->e820_map);
status = alloc_e820ext(nr_e820ext, &e820ext, &e820ext_size);
if (status != EFI_SUCCESS)
goto free_mem_map;
efi_call_early(free_pool, mem_map);
goto get_map; /* Allocated memory, get map again */
}
signature = is64 ? EFI64_LOADER_SIGNATURE : EFI32_LOADER_SIGNATURE;
memcpy(&efi->efi_loader_signature, signature, sizeof(__u32));
efi->efi_systab = (unsigned long)sys_table;
efi->efi_memdesc_size = desc_size;
efi->efi_memdesc_version = desc_version;
efi->efi_memmap = (unsigned long)mem_map;
efi->efi_memmap_size = map_sz;
#ifdef CONFIG_X86_64
efi->efi_systab_hi = (unsigned long)sys_table >> 32;
efi->efi_memmap_hi = (unsigned long)mem_map >> 32;
#endif
/* Might as well exit boot services now */
status = efi_call_early(exit_boot_services, handle, key);
if (status != EFI_SUCCESS) {
/*
* ExitBootServices() will fail if any of the event
* handlers change the memory map. In which case, we
* must be prepared to retry, but only once so that
* we're guaranteed to exit on repeated failures instead
* of spinning forever.
*/
if (called_exit)
goto free_mem_map;
called_exit = true;
efi_call_early(free_pool, mem_map);
goto get_map;
}
e820ext = priv.e820ext;
e820ext_size = priv.e820ext_size;
/* Historic? */
boot_params->alt_mem_k = 32 * 1024;
@ -1085,10 +1093,6 @@ get_map:
return status;
return EFI_SUCCESS;
free_mem_map:
efi_call_early(free_pool, mem_map);
return status;
}
/*

View File

@ -119,8 +119,8 @@ static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
[PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
[PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080,
[PERF_COUNT_HW_CACHE_MISSES] = 0x0081,
[PERF_COUNT_HW_CACHE_REFERENCES] = 0x077d,
[PERF_COUNT_HW_CACHE_MISSES] = 0x077e,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */

View File

@ -29,6 +29,8 @@
#define COUNTER_SHIFT 16
static HLIST_HEAD(uncore_unused_list);
struct amd_uncore {
int id;
int refcnt;
@ -39,7 +41,7 @@ struct amd_uncore {
cpumask_t *active_mask;
struct pmu *pmu;
struct perf_event *events[MAX_COUNTERS];
struct amd_uncore *free_when_cpu_online;
struct hlist_node node;
};
static struct amd_uncore * __percpu *amd_uncore_nb;
@ -306,6 +308,7 @@ static int amd_uncore_cpu_up_prepare(unsigned int cpu)
uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
uncore_nb->active_mask = &amd_nb_active_mask;
uncore_nb->pmu = &amd_nb_pmu;
uncore_nb->id = -1;
*per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
}
@ -319,6 +322,7 @@ static int amd_uncore_cpu_up_prepare(unsigned int cpu)
uncore_l2->msr_base = MSR_F16H_L2I_PERF_CTL;
uncore_l2->active_mask = &amd_l2_active_mask;
uncore_l2->pmu = &amd_l2_pmu;
uncore_l2->id = -1;
*per_cpu_ptr(amd_uncore_l2, cpu) = uncore_l2;
}
@ -348,7 +352,7 @@ amd_uncore_find_online_sibling(struct amd_uncore *this,
continue;
if (this->id == that->id) {
that->free_when_cpu_online = this;
hlist_add_head(&this->node, &uncore_unused_list);
this = that;
break;
}
@ -388,13 +392,23 @@ static int amd_uncore_cpu_starting(unsigned int cpu)
return 0;
}
static void uncore_clean_online(void)
{
struct amd_uncore *uncore;
struct hlist_node *n;
hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
hlist_del(&uncore->node);
kfree(uncore);
}
}
static void uncore_online(unsigned int cpu,
struct amd_uncore * __percpu *uncores)
{
struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);
kfree(uncore->free_when_cpu_online);
uncore->free_when_cpu_online = NULL;
uncore_clean_online();
if (cpu == uncore->cpu)
cpumask_set_cpu(cpu, uncore->active_mask);

View File

@ -31,7 +31,17 @@
struct bts_ctx {
struct perf_output_handle handle;
struct debug_store ds_back;
int started;
int state;
};
/* BTS context states: */
enum {
/* no ongoing AUX transactions */
BTS_STATE_STOPPED = 0,
/* AUX transaction is on, BTS tracing is disabled */
BTS_STATE_INACTIVE,
/* AUX transaction is on, BTS tracing is running */
BTS_STATE_ACTIVE,
};
static DEFINE_PER_CPU(struct bts_ctx, bts_ctx);
@ -204,6 +214,15 @@ static void bts_update(struct bts_ctx *bts)
static int
bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle);
/*
* Ordering PMU callbacks wrt themselves and the PMI is done by means
* of bts::state, which:
* - is set when bts::handle::event is valid, that is, between
* perf_aux_output_begin() and perf_aux_output_end();
* - is zero otherwise;
* - is ordered against bts::handle::event with a compiler barrier.
*/
static void __bts_event_start(struct perf_event *event)
{
struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
@ -221,10 +240,13 @@ static void __bts_event_start(struct perf_event *event)
/*
* local barrier to make sure that ds configuration made it
* before we enable BTS
* before we enable BTS and bts::state goes ACTIVE
*/
wmb();
/* INACTIVE/STOPPED -> ACTIVE */
WRITE_ONCE(bts->state, BTS_STATE_ACTIVE);
intel_pmu_enable_bts(config);
}
@ -251,9 +273,6 @@ static void bts_event_start(struct perf_event *event, int flags)
__bts_event_start(event);
/* PMI handler: this counter is running and likely generating PMIs */
ACCESS_ONCE(bts->started) = 1;
return;
fail_end_stop:
@ -263,30 +282,34 @@ fail_stop:
event->hw.state = PERF_HES_STOPPED;
}
static void __bts_event_stop(struct perf_event *event)
static void __bts_event_stop(struct perf_event *event, int state)
{
struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
/* ACTIVE -> INACTIVE(PMI)/STOPPED(->stop()) */
WRITE_ONCE(bts->state, state);
/*
* No extra synchronization is mandated by the documentation to have
* BTS data stores globally visible.
*/
intel_pmu_disable_bts();
if (event->hw.state & PERF_HES_STOPPED)
return;
ACCESS_ONCE(event->hw.state) |= PERF_HES_STOPPED;
}
static void bts_event_stop(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
struct bts_buffer *buf = perf_get_aux(&bts->handle);
struct bts_buffer *buf = NULL;
int state = READ_ONCE(bts->state);
/* PMI handler: don't restart this counter */
ACCESS_ONCE(bts->started) = 0;
if (state == BTS_STATE_ACTIVE)
__bts_event_stop(event, BTS_STATE_STOPPED);
__bts_event_stop(event);
if (state != BTS_STATE_STOPPED)
buf = perf_get_aux(&bts->handle);
event->hw.state |= PERF_HES_STOPPED;
if (flags & PERF_EF_UPDATE) {
bts_update(bts);
@ -296,6 +319,7 @@ static void bts_event_stop(struct perf_event *event, int flags)
bts->handle.head =
local_xchg(&buf->data_size,
buf->nr_pages << PAGE_SHIFT);
perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
!!local_xchg(&buf->lost, 0));
}
@ -310,8 +334,20 @@ static void bts_event_stop(struct perf_event *event, int flags)
void intel_bts_enable_local(void)
{
struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
int state = READ_ONCE(bts->state);
if (bts->handle.event && bts->started)
/*
* Here we transition from INACTIVE to ACTIVE;
* if we instead are STOPPED from the interrupt handler,
* stay that way. Can't be ACTIVE here though.
*/
if (WARN_ON_ONCE(state == BTS_STATE_ACTIVE))
return;
if (state == BTS_STATE_STOPPED)
return;
if (bts->handle.event)
__bts_event_start(bts->handle.event);
}
@ -319,8 +355,15 @@ void intel_bts_disable_local(void)
{
struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
/*
* Here we transition from ACTIVE to INACTIVE;
* do nothing for STOPPED or INACTIVE.
*/
if (READ_ONCE(bts->state) != BTS_STATE_ACTIVE)
return;
if (bts->handle.event)
__bts_event_stop(bts->handle.event);
__bts_event_stop(bts->handle.event, BTS_STATE_INACTIVE);
}
static int
@ -335,8 +378,6 @@ bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
return 0;
head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
if (WARN_ON_ONCE(head != local_read(&buf->head)))
return -EINVAL;
phys = &buf->buf[buf->cur_buf];
space = phys->offset + phys->displacement + phys->size - head;
@ -403,22 +444,37 @@ bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
int intel_bts_interrupt(void)
{
struct debug_store *ds = this_cpu_ptr(&cpu_hw_events)->ds;
struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
struct perf_event *event = bts->handle.event;
struct bts_buffer *buf;
s64 old_head;
int err;
int err = -ENOSPC, handled = 0;
if (!event || !bts->started)
return 0;
/*
* The only surefire way of knowing if this NMI is ours is by checking
* the write ptr against the PMI threshold.
*/
if (ds->bts_index >= ds->bts_interrupt_threshold)
handled = 1;
/*
* this is wrapped in intel_bts_enable_local/intel_bts_disable_local,
* so we can only be INACTIVE or STOPPED
*/
if (READ_ONCE(bts->state) == BTS_STATE_STOPPED)
return handled;
buf = perf_get_aux(&bts->handle);
if (!buf)
return handled;
/*
* Skip snapshot counters: they don't use the interrupt, but
* there's no other way of telling, because the pointer will
* keep moving
*/
if (!buf || buf->snapshot)
if (buf->snapshot)
return 0;
old_head = local_read(&buf->head);
@ -426,18 +482,27 @@ int intel_bts_interrupt(void)
/* no new data */
if (old_head == local_read(&buf->head))
return 0;
return handled;
perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
!!local_xchg(&buf->lost, 0));
buf = perf_aux_output_begin(&bts->handle, event);
if (!buf)
return 1;
if (buf)
err = bts_buffer_reset(buf, &bts->handle);
err = bts_buffer_reset(buf, &bts->handle);
if (err)
perf_aux_output_end(&bts->handle, 0, false);
if (err) {
WRITE_ONCE(bts->state, BTS_STATE_STOPPED);
if (buf) {
/*
* BTS_STATE_STOPPED should be visible before
* handle::event is cleared
*/
barrier();
perf_aux_output_end(&bts->handle, 0, false);
}
}
return 1;
}
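/*
 * On the error path above, the compiler barrier orders the STOPPED store
 * against perf_aux_output_end() invalidating handle::event; distilled
 * (a sketch, not part of the patch):
 *
 *	WRITE_ONCE(bts->state, BTS_STATE_STOPPED);
 *	barrier();					// publish STOPPED first ...
 *	perf_aux_output_end(&bts->handle, 0, false);	// ... then drop the handle
 */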

View File

@ -1730,9 +1730,11 @@ static __initconst const u64 knl_hw_cache_extra_regs
* disabled state if called consecutively.
*
* During consecutive calls, the same disable value will be written to related
* registers, so the PMU state remains unchanged. hw.state in
* intel_bts_disable_local will remain PERF_HES_STOPPED too in consecutive
* calls.
* registers, so the PMU state remains unchanged.
*
* intel_bts events don't coexist with intel PMU's BTS events because of
* x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
* disabled around intel PMU's event batching etc., only inside the PMI handler.
*/
static void __intel_pmu_disable_all(void)
{
@ -1742,8 +1744,6 @@ static void __intel_pmu_disable_all(void)
if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
intel_pmu_disable_bts();
else
intel_bts_disable_local();
intel_pmu_pebs_disable_all();
}
@ -1771,8 +1771,7 @@ static void __intel_pmu_enable_all(int added, bool pmi)
return;
intel_pmu_enable_bts(event->hw.config);
} else
intel_bts_enable_local();
}
}
static void intel_pmu_enable_all(int added)
@ -2073,6 +2072,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
*/
if (!x86_pmu.late_ack)
apic_write(APIC_LVTPC, APIC_DM_NMI);
intel_bts_disable_local();
__intel_pmu_disable_all();
handled = intel_pmu_drain_bts_buffer();
handled += intel_bts_interrupt();
@ -2172,6 +2172,7 @@ done:
/* Only restore PMU state when it's active. See x86_pmu_disable(). */
if (cpuc->enabled)
__intel_pmu_enable_all(0, true);
intel_bts_enable_local();
/*
* Only unmask the NMI after the overflow counters

View File

@ -458,6 +458,11 @@ static void __intel_cqm_event_count(void *info);
static void init_mbm_sample(u32 rmid, u32 evt_type);
static void __intel_mbm_event_count(void *info);
static bool is_cqm_event(int e)
{
return (e == QOS_L3_OCCUP_EVENT_ID);
}
static bool is_mbm_event(int e)
{
return (e >= QOS_MBM_TOTAL_EVENT_ID && e <= QOS_MBM_LOCAL_EVENT_ID);
@ -1366,6 +1371,10 @@ static int intel_cqm_event_init(struct perf_event *event)
(event->attr.config > QOS_MBM_LOCAL_EVENT_ID))
return -EINVAL;
if ((is_cqm_event(event->attr.config) && !cqm_enabled) ||
(is_mbm_event(event->attr.config) && !mbm_enabled))
return -EINVAL;
/* unsupported modes and filters */
if (event->attr.exclude_user ||
event->attr.exclude_kernel ||

View File

@ -1274,18 +1274,18 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
struct pebs_record_nhm *p = at;
u64 pebs_status;
/* PEBS v3 has accurate status bits */
pebs_status = p->status & cpuc->pebs_enabled;
pebs_status &= (1ULL << x86_pmu.max_pebs_events) - 1;
/* PEBS v3 has more accurate status bits */
if (x86_pmu.intel_cap.pebs_format >= 3) {
for_each_set_bit(bit, (unsigned long *)&p->status,
MAX_PEBS_EVENTS)
for_each_set_bit(bit, (unsigned long *)&pebs_status,
x86_pmu.max_pebs_events)
counts[bit]++;
continue;
}
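/*
 * Worked through with illustrative values: if hardware reports
 * p->status = 0xb (bits 0, 1 and 3) while only counters 0 and 1 are
 * PEBS-enabled (cpuc->pebs_enabled = 0x3, max_pebs_events = 4), then
 *
 *	pebs_status = 0xb & 0x3;		// 0x3
 *	pebs_status &= (1ULL << 4) - 1;		// still 0x3
 *
 * so the loop above visits bits 0 and 1 and ignores the spurious bit 3
 * (sketch, not part of the patch).
 */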
pebs_status = p->status & cpuc->pebs_enabled;
pebs_status &= (1ULL << x86_pmu.max_pebs_events) - 1;
/*
* On some CPUs the PEBS status can be zero when PEBS is
* racing with clearing of GLOBAL_STATUS.
@ -1333,8 +1333,11 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
continue;
event = cpuc->events[bit];
WARN_ON_ONCE(!event);
WARN_ON_ONCE(!event->attr.precise_ip);
if (WARN_ON_ONCE(!event))
continue;
if (WARN_ON_ONCE(!event->attr.precise_ip))
continue;
/* log dropped samples number */
if (error[bit])

View File

@ -1074,6 +1074,11 @@ static void pt_addr_filters_fini(struct perf_event *event)
event->hw.addr_filters = NULL;
}
static inline bool valid_kernel_ip(unsigned long ip)
{
return virt_addr_valid(ip) && kernel_ip(ip);
}
static int pt_event_addr_filters_validate(struct list_head *filters)
{
struct perf_addr_filter *filter;
@ -1081,11 +1086,16 @@ static int pt_event_addr_filters_validate(struct list_head *filters)
list_for_each_entry(filter, filters, entry) {
/* PT doesn't support single address triggers */
if (!filter->range)
if (!filter->range || !filter->size)
return -EOPNOTSUPP;
if (!filter->inode && !kernel_ip(filter->offset))
return -EINVAL;
if (!filter->inode) {
if (!valid_kernel_ip(filter->offset))
return -EINVAL;
if (!valid_kernel_ip(filter->offset + filter->size))
return -EINVAL;
}
if (++range > pt_cap_get(PT_CAP_num_address_ranges))
return -EOPNOTSUPP;
@ -1111,7 +1121,7 @@ static void pt_event_addr_filters_sync(struct perf_event *event)
} else {
/* apply the offset */
msr_a = filter->offset + offs[range];
msr_b = filter->size + msr_a;
msr_b = filter->size + msr_a - 1;
}
filters->filter[range].msr_a = msr_a;
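/*
 * A worked example of the off-by-one fixed above (numbers illustrative):
 * for a filter with offset 0x1000 and size 0x1000, and offs[range] == 0,
 *
 *	msr_a = 0x1000;			// filter->offset + offs[range]
 *	msr_b = 0x1000 + 0x1000 - 1;	// 0x1fff, the inclusive last byte
 *
 * the old code programmed 0x2000 into msr_b, one byte past the range.
 */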

View File

@ -433,7 +433,11 @@ do { \
#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
asm volatile("1: mov"itype" %1,%"rtype"0\n" \
"2:\n" \
_ASM_EXTABLE_EX(1b, 2b) \
".section .fixup,\"ax\"\n" \
"3:xor"itype" %"rtype"0,%"rtype"0\n" \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE_EX(1b, 3b) \
: ltype(x) : "m" (__m(addr)))
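/*
 * In C terms the added fixup gives roughly these semantics (a sketch;
 * would_fault() is a hypothetical stand-in for the load faulting):
 *
 *	if (would_fault(addr))
 *		x = 0;		// 3: xor the output register, jmp 2b
 *	else
 *		x = *addr;	// 1: the original mov succeeded
 *
 * so a faulting __get_user_ex() now yields zero instead of stale data.
 */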
#define __put_user_nocheck(x, ptr, size) \

View File

@ -2093,7 +2093,6 @@ int generic_processor_info(int apicid, int version)
return -EINVAL;
}
num_processors++;
if (apicid == boot_cpu_physical_apicid) {
/*
* x86_bios_cpu_apicid is required to have processors listed
@ -2116,10 +2115,13 @@ int generic_processor_info(int apicid, int version)
pr_warning("APIC: Package limit reached. Processor %d/0x%x ignored.\n",
thiscpu, apicid);
disabled_cpus++;
return -ENOSPC;
}
num_processors++;
/*
* Validate version
*/

View File

@ -54,6 +54,7 @@ static LIST_HEAD(pcache);
*/
static u8 *container;
static size_t container_size;
static bool ucode_builtin;
static u32 ucode_new_rev;
static u8 amd_ucode_patch[PATCH_MAX_SIZE];
@ -281,18 +282,22 @@ static bool __init load_builtin_amd_microcode(struct cpio_data *cp,
void __init load_ucode_amd_bsp(unsigned int family)
{
struct cpio_data cp;
bool *builtin;
void **data;
size_t *size;
#ifdef CONFIG_X86_32
data = (void **)__pa_nodebug(&ucode_cpio.data);
size = (size_t *)__pa_nodebug(&ucode_cpio.size);
builtin = (bool *)__pa_nodebug(&ucode_builtin);
#else
data = &ucode_cpio.data;
size = &ucode_cpio.size;
builtin = &ucode_builtin;
#endif
if (!load_builtin_amd_microcode(&cp, family))
*builtin = load_builtin_amd_microcode(&cp, family);
if (!*builtin)
cp = find_ucode_in_initrd();
if (!(cp.data && cp.size))
@ -373,7 +378,8 @@ void load_ucode_amd_ap(void)
return;
/* Add CONFIG_RANDOMIZE_MEMORY offset. */
cont += PAGE_OFFSET - __PAGE_OFFSET_BASE;
if (!ucode_builtin)
cont += PAGE_OFFSET - __PAGE_OFFSET_BASE;
eax = cpuid_eax(0x00000001);
eq = (struct equiv_cpu_entry *)(cont + CONTAINER_HDR_SZ);
@ -439,7 +445,8 @@ int __init save_microcode_in_initrd_amd(void)
container = cont_va;
/* Add CONFIG_RANDOMIZE_MEMORY offset. */
container += PAGE_OFFSET - __PAGE_OFFSET_BASE;
if (!ucode_builtin)
container += PAGE_OFFSET - __PAGE_OFFSET_BASE;
eax = cpuid_eax(0x00000001);
eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);

View File

@ -289,6 +289,7 @@ void __init kvmclock_init(void)
put_cpu();
x86_platform.calibrate_tsc = kvm_get_tsc_khz;
x86_platform.calibrate_cpu = kvm_get_tsc_khz;
x86_platform.get_wallclock = kvm_get_wallclock;
x86_platform.set_wallclock = kvm_set_wallclock;
#ifdef CONFIG_X86_LOCAL_APIC

View File

@ -109,6 +109,7 @@ static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
bool new_val, old_val;
struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
union kvm_ioapic_redirect_entry *e;
e = &ioapic->redirtbl[RTC_GSI];
@ -117,16 +118,17 @@ static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
return;
new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
old_val = test_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map);
old_val = test_bit(vcpu->vcpu_id, dest_map->map);
if (new_val == old_val)
return;
if (new_val) {
__set_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map);
__set_bit(vcpu->vcpu_id, dest_map->map);
dest_map->vectors[vcpu->vcpu_id] = e->fields.vector;
ioapic->rtc_status.pending_eoi++;
} else {
__clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map);
__clear_bit(vcpu->vcpu_id, dest_map->map);
ioapic->rtc_status.pending_eoi--;
rtc_status_pending_eoi_check_valid(ioapic);
}

View File

@ -23,8 +23,8 @@
static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
[2] = { 0x80, 0x00, PERF_COUNT_HW_CACHE_REFERENCES },
[3] = { 0x81, 0x00, PERF_COUNT_HW_CACHE_MISSES },
[2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
[3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
[6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },

View File

@ -2743,16 +2743,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (tsc_delta < 0)
mark_tsc_unstable("KVM discovered backwards TSC");
if (kvm_lapic_hv_timer_in_use(vcpu) &&
kvm_x86_ops->set_hv_timer(vcpu,
kvm_get_lapic_tscdeadline_msr(vcpu)))
kvm_lapic_switch_to_sw_timer(vcpu);
if (check_tsc_unstable()) {
u64 offset = kvm_compute_tsc_offset(vcpu,
vcpu->arch.last_guest_tsc);
kvm_x86_ops->write_tsc_offset(vcpu, offset);
vcpu->arch.tsc_catchup = 1;
}
if (kvm_lapic_hv_timer_in_use(vcpu) &&
kvm_x86_ops->set_hv_timer(vcpu,
kvm_get_lapic_tscdeadline_msr(vcpu)))
kvm_lapic_switch_to_sw_timer(vcpu);
/*
* On a host with synchronized TSC, there is no need to update
* kvmclock on vcpu->cpu migration

View File

@ -553,15 +553,21 @@ static void twinhead_reserve_killing_zone(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
/*
* Broadwell EP Home Agent BARs erroneously return non-zero values when read.
* Device [8086:2fc0]
* Erratum HSE43
* CONFIG_TDP_NOMINAL CSR Implemented at Incorrect Offset
* http://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v3-spec-update.html
*
* See http://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v4-spec-update.html
* entry BDF2.
* Devices [8086:6f60,6fa0,6fc0]
* Erratum BDF2
* PCI BARs in the Home Agent Will Return Non-Zero Values During Enumeration
* http://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v4-spec-update.html
*/
static void pci_bdwep_bar(struct pci_dev *dev)
static void pci_invalid_bar(struct pci_dev *dev)
{
dev->non_compliant_bars = 1;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_bdwep_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_bdwep_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_bdwep_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar);

View File

@ -233,6 +233,8 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
return blkcipher_walk_done(desc, walk, -EINVAL);
}
bsize = min(walk->walk_blocksize, n);
walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
BLKCIPHER_WALK_DIFF);
if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
@ -245,7 +247,6 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
}
}
bsize = min(walk->walk_blocksize, n);
n = scatterwalk_clamp(&walk->in, n);
n = scatterwalk_clamp(&walk->out, n);

View File

@ -631,9 +631,14 @@ static int cryptd_hash_export(struct ahash_request *req, void *out)
static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct shash_desc *desc = cryptd_shash_desc(req);
return crypto_shash_import(&rctx->desc, in);
desc->tfm = ctx->child;
desc->flags = req->base.flags;
return crypto_shash_import(desc, in);
}
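/*
 * Why the two assignments matter (a sketch of the call chain, assuming
 * the shash API of this era): crypto_shash_import() dispatches through
 * desc->tfm, so the descriptor must be bound to the child transform
 * before the call:
 *
 *	desc->tfm   = ctx->child;
 *	desc->flags = req->base.flags;
 *	err = crypto_shash_import(desc, in);	// resolves ->import() via desc->tfm
 */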
static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,

View File

@ -1,8 +1,8 @@
/*
* echainiv: Encrypted Chain IV Generator
*
* This generator generates an IV based on a sequence number by xoring it
* with a salt and then encrypting it with the same key as used to encrypt
* This generator generates an IV based on a sequence number by multiplying
* it with a salt and then encrypting it with the same key as used to encrypt
* the plain text. This algorithm requires that the block size be equal
* to the IV size. It is mainly useful for CBC.
*
@ -24,81 +24,17 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/string.h>
#define MAX_IV_SIZE 16
static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv);
/* We don't care if we get preempted and read/write IVs from the next CPU. */
static void echainiv_read_iv(u8 *dst, unsigned size)
{
u32 *a = (u32 *)dst;
u32 __percpu *b = echainiv_iv;
for (; size >= 4; size -= 4) {
*a++ = this_cpu_read(*b);
b++;
}
}
static void echainiv_write_iv(const u8 *src, unsigned size)
{
const u32 *a = (const u32 *)src;
u32 __percpu *b = echainiv_iv;
for (; size >= 4; size -= 4) {
this_cpu_write(*b, *a);
a++;
b++;
}
}
static void echainiv_encrypt_complete2(struct aead_request *req, int err)
{
struct aead_request *subreq = aead_request_ctx(req);
struct crypto_aead *geniv;
unsigned int ivsize;
if (err == -EINPROGRESS)
return;
if (err)
goto out;
geniv = crypto_aead_reqtfm(req);
ivsize = crypto_aead_ivsize(geniv);
echainiv_write_iv(subreq->iv, ivsize);
if (req->iv != subreq->iv)
memcpy(req->iv, subreq->iv, ivsize);
out:
if (req->iv != subreq->iv)
kzfree(subreq->iv);
}
static void echainiv_encrypt_complete(struct crypto_async_request *base,
int err)
{
struct aead_request *req = base->data;
echainiv_encrypt_complete2(req, err);
aead_request_complete(req, err);
}
static int echainiv_encrypt(struct aead_request *req)
{
struct crypto_aead *geniv = crypto_aead_reqtfm(req);
struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
struct aead_request *subreq = aead_request_ctx(req);
crypto_completion_t compl;
void *data;
__be64 nseqno;
u64 seqno;
u8 *info;
unsigned int ivsize = crypto_aead_ivsize(geniv);
int err;
@ -108,8 +44,6 @@ static int echainiv_encrypt(struct aead_request *req)
aead_request_set_tfm(subreq, ctx->child);
compl = echainiv_encrypt_complete;
data = req;
info = req->iv;
if (req->src != req->dst) {
@ -127,29 +61,30 @@ static int echainiv_encrypt(struct aead_request *req)
return err;
}
if (unlikely(!IS_ALIGNED((unsigned long)info,
crypto_aead_alignmask(geniv) + 1))) {
info = kmalloc(ivsize, req->base.flags &
CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
GFP_ATOMIC);
if (!info)
return -ENOMEM;
memcpy(info, req->iv, ivsize);
}
aead_request_set_callback(subreq, req->base.flags, compl, data);
aead_request_set_callback(subreq, req->base.flags,
req->base.complete, req->base.data);
aead_request_set_crypt(subreq, req->dst, req->dst,
req->cryptlen, info);
aead_request_set_ad(subreq, req->assoclen);
crypto_xor(info, ctx->salt, ivsize);
scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
echainiv_read_iv(info, ivsize);
memcpy(&nseqno, info + ivsize - 8, 8);
seqno = be64_to_cpu(nseqno);
memset(info, 0, ivsize);
err = crypto_aead_encrypt(subreq);
echainiv_encrypt_complete2(req, err);
return err;
scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
do {
u64 a;
memcpy(&a, ctx->salt + ivsize - 8, 8);
a |= 1;
a *= seqno;
memcpy(info + ivsize - 8, &a, 8);
} while ((ivsize -= 8));
return crypto_aead_encrypt(subreq);
}
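/*
 * A self-contained rendering of the IV construction above (a sketch:
 * names below are illustrative, and ivsize is assumed to be a nonzero
 * multiple of 8, which the ivsize check added further down enforces).
 * Forcing each 64-bit salt word odd makes the multiply by seqno
 * invertible mod 2^64, so distinct sequence numbers yield distinct IVs.
 */
#include <stdint.h>
#include <string.h>

static void echainiv_style_iv(uint8_t *iv, const uint8_t *salt,
			      unsigned int ivsize, uint64_t seqno)
{
	do {
		uint64_t a;

		memcpy(&a, salt + ivsize - 8, 8);
		a |= 1;			/* odd multiplier, invertible mod 2^64 */
		a *= seqno;
		memcpy(iv + ivsize - 8, &a, 8);
	} while ((ivsize -= 8));
}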
static int echainiv_decrypt(struct aead_request *req)
@ -196,8 +131,7 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
alg = crypto_spawn_aead_alg(spawn);
err = -EINVAL;
if (inst->alg.ivsize & (sizeof(u32) - 1) ||
inst->alg.ivsize > MAX_IV_SIZE)
if (inst->alg.ivsize & (sizeof(u64) - 1) || !inst->alg.ivsize)
goto free_inst;
inst->alg.encrypt = echainiv_encrypt;
@ -206,7 +140,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
inst->alg.init = aead_init_geniv;
inst->alg.exit = aead_exit_geniv;
inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
inst->alg.base.cra_ctxsize += inst->alg.ivsize;

View File

@ -419,7 +419,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
struct device *parent = NULL;
int retval;
trace_rpm_suspend(dev, rpmflags);
trace_rpm_suspend_rcuidle(dev, rpmflags);
repeat:
retval = rpm_check_suspend_allowed(dev);
@ -549,7 +549,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
}
out:
trace_rpm_return_int(dev, _THIS_IP_, retval);
trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
return retval;

View File

@ -783,14 +783,14 @@ static struct ccu_reset_map sun8i_h3_ccu_resets[] = {
[RST_BUS_I2S1] = { 0x2d0, BIT(13) },
[RST_BUS_I2S2] = { 0x2d0, BIT(14) },
[RST_BUS_I2C0] = { 0x2d4, BIT(0) },
[RST_BUS_I2C1] = { 0x2d4, BIT(1) },
[RST_BUS_I2C2] = { 0x2d4, BIT(2) },
[RST_BUS_UART0] = { 0x2d4, BIT(16) },
[RST_BUS_UART1] = { 0x2d4, BIT(17) },
[RST_BUS_UART2] = { 0x2d4, BIT(18) },
[RST_BUS_UART3] = { 0x2d4, BIT(19) },
[RST_BUS_SCR] = { 0x2d4, BIT(20) },
[RST_BUS_I2C0] = { 0x2d8, BIT(0) },
[RST_BUS_I2C1] = { 0x2d8, BIT(1) },
[RST_BUS_I2C2] = { 0x2d8, BIT(2) },
[RST_BUS_UART0] = { 0x2d8, BIT(16) },
[RST_BUS_UART1] = { 0x2d8, BIT(17) },
[RST_BUS_UART2] = { 0x2d8, BIT(18) },
[RST_BUS_UART3] = { 0x2d8, BIT(19) },
[RST_BUS_SCR] = { 0x2d8, BIT(20) },
};
static const struct sunxi_ccu_desc sun8i_h3_ccu_desc = {

View File

@ -14,9 +14,9 @@
#include "ccu_gate.h"
#include "ccu_nk.h"
void ccu_nk_find_best(unsigned long parent, unsigned long rate,
unsigned int max_n, unsigned int max_k,
unsigned int *n, unsigned int *k)
static void ccu_nk_find_best(unsigned long parent, unsigned long rate,
unsigned int max_n, unsigned int max_k,
unsigned int *n, unsigned int *k)
{
unsigned long best_rate = 0;
unsigned int best_k = 0, best_n = 0;

View File

@ -73,7 +73,7 @@ static void __init sun4i_pll2_setup(struct device_node *node,
SUN4I_PLL2_PRE_DIV_WIDTH,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
&sun4i_a10_pll2_lock);
if (!prediv_clk) {
if (IS_ERR(prediv_clk)) {
pr_err("Couldn't register the prediv clock\n");
goto err_free_array;
}
@ -106,7 +106,7 @@ static void __init sun4i_pll2_setup(struct device_node *node,
&mult->hw, &clk_multiplier_ops,
&gate->hw, &clk_gate_ops,
CLK_SET_RATE_PARENT);
if (!base_clk) {
if (IS_ERR(base_clk)) {
pr_err("Couldn't register the base multiplier clock\n");
goto err_free_multiplier;
}

View File

@ -48,7 +48,7 @@ static void __init sun8i_a23_mbus_setup(struct device_node *node)
return;
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
if (!reg) {
if (IS_ERR(reg)) {
pr_err("Could not get registers for sun8i-mbus-clk\n");
goto err_free_parents;
}

View File

@ -657,9 +657,12 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
}
if (subnode) {
node = of_get_flat_dt_subnode_by_name(node, subnode);
if (node < 0)
int err = of_get_flat_dt_subnode_by_name(node, subnode);
if (err < 0)
return 0;
node = err;
}
return __find_uefi_params(node, info, dt_params[i].params);

View File

@ -41,6 +41,8 @@ static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
#define EFI_ALLOC_ALIGN EFI_PAGE_SIZE
#endif
#define EFI_MMAP_NR_SLACK_SLOTS 8
struct file_info {
efi_file_handle_t *handle;
u64 size;
@ -63,49 +65,62 @@ void efi_printk(efi_system_table_t *sys_table_arg, char *str)
}
}
static inline bool mmap_has_headroom(unsigned long buff_size,
unsigned long map_size,
unsigned long desc_size)
{
unsigned long slack = buff_size - map_size;
return slack / desc_size >= EFI_MMAP_NR_SLACK_SLOTS;
}
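/*
 * Worked through with illustrative numbers (48-byte descriptors, a
 * 10-entry map): a buffer sized with the slack slots added below gives
 *
 *	map_size  = 10 * 48;			// 480
 *	buff_size = 480 + 8 * 48;		// 864
 *	(864 - 480) / 48 == 8 >= EFI_MMAP_NR_SLACK_SLOTS
 *
 * so the helper reports headroom for up to eight new descriptors.
 */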
efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
efi_memory_desc_t **map,
unsigned long *map_size,
unsigned long *desc_size,
u32 *desc_ver,
unsigned long *key_ptr)
struct efi_boot_memmap *map)
{
efi_memory_desc_t *m = NULL;
efi_status_t status;
unsigned long key;
u32 desc_version;
*map_size = sizeof(*m) * 32;
*map->desc_size = sizeof(*m);
*map->map_size = *map->desc_size * 32;
*map->buff_size = *map->map_size;
again:
/*
* Add an additional efi_memory_desc_t because we're doing an
* allocation which may be in a new descriptor region.
*/
*map_size += sizeof(*m);
status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
*map_size, (void **)&m);
*map->map_size, (void **)&m);
if (status != EFI_SUCCESS)
goto fail;
*desc_size = 0;
*map->desc_size = 0;
key = 0;
status = efi_call_early(get_memory_map, map_size, m,
&key, desc_size, &desc_version);
if (status == EFI_BUFFER_TOO_SMALL) {
status = efi_call_early(get_memory_map, map->map_size, m,
&key, map->desc_size, &desc_version);
if (status == EFI_BUFFER_TOO_SMALL ||
!mmap_has_headroom(*map->buff_size, *map->map_size,
*map->desc_size)) {
efi_call_early(free_pool, m);
/*
* Make sure there are some entries of headroom so that the
* buffer can be reused for a new map after allocations are
* no longer permitted. It's unlikely that the map will grow to
* exceed this headroom once we are ready to trigger
* ExitBootServices().
*/
*map->map_size += *map->desc_size * EFI_MMAP_NR_SLACK_SLOTS;
*map->buff_size = *map->map_size;
goto again;
}
if (status != EFI_SUCCESS)
efi_call_early(free_pool, m);
if (key_ptr && status == EFI_SUCCESS)
*key_ptr = key;
if (desc_ver && status == EFI_SUCCESS)
*desc_ver = desc_version;
if (map->key_ptr && status == EFI_SUCCESS)
*map->key_ptr = key;
if (map->desc_ver && status == EFI_SUCCESS)
*map->desc_ver = desc_version;
fail:
*map = m;
*map->map = m;
return status;
}
@ -113,13 +128,20 @@ fail:
unsigned long get_dram_base(efi_system_table_t *sys_table_arg)
{
efi_status_t status;
unsigned long map_size;
unsigned long map_size, buff_size;
unsigned long membase = EFI_ERROR;
struct efi_memory_map map;
efi_memory_desc_t *md;
struct efi_boot_memmap boot_map;
status = efi_get_memory_map(sys_table_arg, (efi_memory_desc_t **)&map.map,
&map_size, &map.desc_size, NULL, NULL);
boot_map.map = (efi_memory_desc_t **)&map.map;
boot_map.map_size = &map_size;
boot_map.desc_size = &map.desc_size;
boot_map.desc_ver = NULL;
boot_map.key_ptr = NULL;
boot_map.buff_size = &buff_size;
status = efi_get_memory_map(sys_table_arg, &boot_map);
if (status != EFI_SUCCESS)
return membase;
@ -144,15 +166,22 @@ efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
unsigned long size, unsigned long align,
unsigned long *addr, unsigned long max)
{
unsigned long map_size, desc_size;
unsigned long map_size, desc_size, buff_size;
efi_memory_desc_t *map;
efi_status_t status;
unsigned long nr_pages;
u64 max_addr = 0;
int i;
struct efi_boot_memmap boot_map;
status = efi_get_memory_map(sys_table_arg, &map, &map_size, &desc_size,
NULL, NULL);
boot_map.map = &map;
boot_map.map_size = &map_size;
boot_map.desc_size = &desc_size;
boot_map.desc_ver = NULL;
boot_map.key_ptr = NULL;
boot_map.buff_size = &buff_size;
status = efi_get_memory_map(sys_table_arg, &boot_map);
if (status != EFI_SUCCESS)
goto fail;
@ -230,14 +259,21 @@ efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
unsigned long size, unsigned long align,
unsigned long *addr)
{
unsigned long map_size, desc_size;
unsigned long map_size, desc_size, buff_size;
efi_memory_desc_t *map;
efi_status_t status;
unsigned long nr_pages;
int i;
struct efi_boot_memmap boot_map;
status = efi_get_memory_map(sys_table_arg, &map, &map_size, &desc_size,
NULL, NULL);
boot_map.map = &map;
boot_map.map_size = &map_size;
boot_map.desc_size = &desc_size;
boot_map.desc_ver = NULL;
boot_map.key_ptr = NULL;
boot_map.buff_size = &buff_size;
status = efi_get_memory_map(sys_table_arg, &boot_map);
if (status != EFI_SUCCESS)
goto fail;
@ -704,3 +740,76 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
*cmd_line_len = options_bytes;
return (char *)cmdline_addr;
}
/*
* Handle calling ExitBootServices according to the requirements set out by the
* spec. Obtains the current memory map, and returns that info after calling
* ExitBootServices. The client must specify a function to perform any
* processing of the memory map data prior to ExitBootServices. A
* client-specific structure may be passed to the function via priv. The client
* function may be called multiple times.
*/
efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg,
void *handle,
struct efi_boot_memmap *map,
void *priv,
efi_exit_boot_map_processing priv_func)
{
efi_status_t status;
status = efi_get_memory_map(sys_table_arg, map);
if (status != EFI_SUCCESS)
goto fail;
status = priv_func(sys_table_arg, map, priv);
if (status != EFI_SUCCESS)
goto free_map;
status = efi_call_early(exit_boot_services, handle, *map->key_ptr);
if (status == EFI_INVALID_PARAMETER) {
/*
* The memory map changed between efi_get_memory_map() and
* exit_boot_services(). Per the UEFI Spec v2.6, Section 6.4
* (EFI_BOOT_SERVICES.ExitBootServices), we need to get the
* updated map and try again. The spec implies one retry
* should be sufficient, which is confirmed against the EDK2
* implementation. Per the spec, we can only invoke
* get_memory_map() and exit_boot_services() - we cannot alloc,
* so efi_get_memory_map() cannot be used, and we must reuse
* the buffer. For all practical purposes, the headroom in the
* buffer should account for any changes in the map so the call
* to get_memory_map() is expected to succeed here.
*/
*map->map_size = *map->buff_size;
status = efi_call_early(get_memory_map,
map->map_size,
*map->map,
map->key_ptr,
map->desc_size,
map->desc_ver);
/* exit_boot_services() was called, thus cannot free */
if (status != EFI_SUCCESS)
goto fail;
status = priv_func(sys_table_arg, map, priv);
/* exit_boot_services() was called, thus cannot free */
if (status != EFI_SUCCESS)
goto fail;
status = efi_call_early(exit_boot_services, handle, *map->key_ptr);
}
/* exit_boot_services() was called, thus cannot free */
if (status != EFI_SUCCESS)
goto fail;
return EFI_SUCCESS;
free_map:
efi_call_early(free_pool, *map->map);
fail:
return status;
}
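/*
 * The FDT path below is the real user of this helper; the minimal caller
 * shape, assuming a hook with nothing to record (noop_func and the unused
 * priv argument are illustrative, not from the patch):
 */
static efi_status_t noop_func(efi_system_table_t *sys_table_arg,
			      struct efi_boot_memmap *map, void *priv)
{
	return EFI_SUCCESS;	/* no client-specific processing before exit */
}

/* with every pointer field of 'map' populated as in the callers above:	*/
/*	status = efi_exit_boot_services(sys_table, handle, &map, NULL, noop_func); */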

View File

@ -152,6 +152,27 @@ fdt_set_fail:
#define EFI_FDT_ALIGN EFI_PAGE_SIZE
#endif
struct exit_boot_struct {
efi_memory_desc_t *runtime_map;
int *runtime_entry_count;
};
static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
struct efi_boot_memmap *map,
void *priv)
{
struct exit_boot_struct *p = priv;
/*
* Update the memory map with virtual addresses. The function will also
* populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME
* entries so that we can pass it straight to SetVirtualAddressMap()
*/
efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
p->runtime_map, p->runtime_entry_count);
return EFI_SUCCESS;
}
/*
* Allocate memory for a new FDT, then add EFI, commandline, and
* initrd related fields to the FDT. This routine increases the
@ -175,13 +196,22 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
unsigned long fdt_addr,
unsigned long fdt_size)
{
unsigned long map_size, desc_size;
unsigned long map_size, desc_size, buff_size;
u32 desc_ver;
unsigned long mmap_key;
efi_memory_desc_t *memory_map, *runtime_map;
unsigned long new_fdt_size;
efi_status_t status;
int runtime_entry_count = 0;
struct efi_boot_memmap map;
struct exit_boot_struct priv;
map.map = &runtime_map;
map.map_size = &map_size;
map.desc_size = &desc_size;
map.desc_ver = &desc_ver;
map.key_ptr = &mmap_key;
map.buff_size = &buff_size;
/*
* Get a copy of the current memory map that we will use to prepare
@ -189,8 +219,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
* subsequent allocations adding entries, since they could not affect
* the number of EFI_MEMORY_RUNTIME regions.
*/
status = efi_get_memory_map(sys_table, &runtime_map, &map_size,
&desc_size, &desc_ver, &mmap_key);
status = efi_get_memory_map(sys_table, &map);
if (status != EFI_SUCCESS) {
pr_efi_err(sys_table, "Unable to retrieve UEFI memory map.\n");
return status;
@ -199,6 +228,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
pr_efi(sys_table,
"Exiting boot services and installing virtual address map...\n");
map.map = &memory_map;
/*
* Estimate size of new FDT, and allocate memory for it. We
* will allocate a bigger buffer if this ends up being too
@ -218,8 +248,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
* we can get the memory map key needed for
* exit_boot_services().
*/
status = efi_get_memory_map(sys_table, &memory_map, &map_size,
&desc_size, &desc_ver, &mmap_key);
status = efi_get_memory_map(sys_table, &map);
if (status != EFI_SUCCESS)
goto fail_free_new_fdt;
@ -250,16 +279,11 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
}
}
/*
* Update the memory map with virtual addresses. The function will also
* populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME
* entries so that we can pass it straight into SetVirtualAddressMap()
*/
efi_get_virtmap(memory_map, map_size, desc_size, runtime_map,
&runtime_entry_count);
/* Now we are ready to exit_boot_services.*/
status = sys_table->boottime->exit_boot_services(handle, mmap_key);
sys_table->boottime->free_pool(memory_map);
priv.runtime_map = runtime_map;
priv.runtime_entry_count = &runtime_entry_count;
status = efi_exit_boot_services(sys_table, handle, &map, &priv,
exit_boot_func);
if (status == EFI_SUCCESS) {
efi_set_virtual_address_map_t *svam;

View File

@ -73,12 +73,20 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
unsigned long random_seed)
{
unsigned long map_size, desc_size, total_slots = 0, target_slot;
unsigned long buff_size;
efi_status_t status;
efi_memory_desc_t *memory_map;
int map_offset;
struct efi_boot_memmap map;
status = efi_get_memory_map(sys_table_arg, &memory_map, &map_size,
&desc_size, NULL, NULL);
map.map = &memory_map;
map.map_size = &map_size;
map.desc_size = &desc_size;
map.desc_ver = NULL;
map.key_ptr = NULL;
map.buff_size = &buff_size;
status = efi_get_memory_map(sys_table_arg, &map);
if (status != EFI_SUCCESS)
return status;

View File

@ -387,7 +387,7 @@ void atmel_hlcdc_crtc_irq(struct drm_crtc *c)
atmel_hlcdc_crtc_finish_page_flip(drm_crtc_to_atmel_hlcdc_crtc(c));
}
void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc)
static void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc)
{
struct atmel_hlcdc_crtc_state *state;

View File

@ -320,19 +320,19 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
u32 *coeff_tab = heo_upscaling_ycoef;
u32 max_memsize;
if (state->crtc_w < state->src_w)
if (state->crtc_h < state->src_h)
coeff_tab = heo_downscaling_ycoef;
for (i = 0; i < ARRAY_SIZE(heo_upscaling_ycoef); i++)
atmel_hlcdc_layer_update_cfg(&plane->layer,
33 + i,
0xffffffff,
coeff_tab[i]);
factor = ((8 * 256 * state->src_w) - (256 * 4)) /
state->crtc_w;
factor = ((8 * 256 * state->src_h) - (256 * 4)) /
state->crtc_h;
factor++;
max_memsize = ((factor * state->crtc_w) + (256 * 4)) /
max_memsize = ((factor * state->crtc_h) + (256 * 4)) /
2048;
if (max_memsize > state->src_w)
if (max_memsize > state->src_h)
factor--;
factor_reg |= (factor << 16) | 0x80000000;
}
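/*
 * Plugging illustrative numbers into the corrected vertical path
 * (src_h = 1080 downscaled to crtc_h = 720):
 *
 *	factor = ((8 * 256 * 1080) - (256 * 4)) / 720;		// 3070
 *	factor++;						// 3071
 *	max_memsize = ((3071 * 720) + (256 * 4)) / 2048;	// 1080
 *
 * max_memsize equals src_h rather than exceeding it, so the incremented
 * factor is kept (sketch, not part of the patch).
 */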

Some files were not shown because too many files have changed in this diff.