This is the 5.4.133 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmD1IXQACgkQONu9yGCS
 aT50sBAAtueT2WsCuD1psEN219cK0e7AuLrHXtAnkHFJIPWnzS0vyBy7/hmFwWtq
 sBSenqG8qufqtLVnMkeEAhu2/sk/5NHRPQJtK0k4hCzt8FQuiVQco1raOtetIJx+
 +wBfE4FAGDmiYGkcuzh61n1euvpBetXd9HFfWtSfQq7Q3nN+sfv0q1V9ZK0MUJ8v
 ipvSY1hTSDEQQJ7cU48DDJtZUGNxrfEFzi4CLI2YVphzoHEbowd5nxtHUL5cwDhx
 3sHYJoN+5RAkRinzGyviDlRpodNUUkLusBzs54xNIzgdzkckEKniKotZ2lUGsEu+
 QQgj0paNB95GLkY/Rgn6AL03AQdYBgGIjHQkSaYJ+UM9TlacqgFMiGugn28bj0o/
 1F4s6zCWG5tuhM5zNcnTsJmwSPA3eZ0uI6NCkjKC/RAyD5SC6JQqcf5zYCzygdT/
 PpeFRcZGoxyQqmOjW2e+tpNAbHuIeayExx/6/3rw3b/xaR9Ju9mYxNDiIpYZwdc6
 FIWOsHG+bEEZANiWv6Ju7DfOTKg8F7mbm4Zrd00euIWEsxuUZO/lAzxPR8pPzsn7
 2k46PDrhah25Y/tbSE5hdKrLqSorSjIg+7CxLAk7LWPmq13zzEd8y+e/Bk5rFJ4T
 7vPLUb23OYFdrVMOXd1UyKhcP4CKyOf7IvG4SsZwj9WfWoNEDNg=
 =2WCO
 -----END PGP SIGNATURE-----
gpgsig -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEdQaENiSDAlGTDEbB7G51OISzHs0FAmD25jEACgkQ7G51OISz
 Hs1QgxAAl7FiNoI3XJ2GbS0bQAxYXeUtcQ6lJmHRD/6IEikDr12bQ9XMvF1k4o+P
 15MzTOndSRSW0Yxi5QXRTGcBuqbC+HhnUVHggJpUJugUt9Q5TtO6ZxVX0dkbMhp3
 AXK55Zm465y4dX6ys59IWL138xMKd0pBIfchlb3oSaiQ9qBFmKDXMEYDGOxDC3e9
 VR1EO5PfpzEgrGONO+Xxu+2IDWRiWfKGyIaZCIWsRqlsrjdOFMKbTL2iBwMgrqmi
 D3kndN6kGvxqoHCe3P9chKqNfU+P3durBNomhhXyBZBRNT2XW19UVpk3VIhqa5Dw
 7DA6zHihFuZlI9XEZKAr4cokxS9IRFWZBayYE4diMu4+BA45mKIS+1BRyPDozgRG
 cyp6QaGI8IEzdI1oa6WW/CR/zkhQKyIj/lhwlx98XJlkGoDtfSgGMx6QdmtH8Pfk
 Gmgg3aHV/AQMRasSfKPDLGWD0f3nVzneHh9ceK9/j8gjY+T6msVYw/p4kIhXZQCE
 cZplDehOsFtJubB5lXxe1PZzHedM5p0mrYousngjVhHjbe/5h243fj3gBsTJK3zZ
 XP74VPZyqwBtEUrMEL2nPlsQSDfeZRnNSCiXNq1vJg+skqJTMicv2TUP0134ME8K
 yUjmJxd46diY/bOPBL737MuWMlpm9Bpg31qRe92jSHovKrbZVTk=
 =rRdV
 -----END PGP SIGNATURE-----

Merge tag 'v5.4.133' into 5.4-2.3.x-imx

This is the 5.4.133 stable release

Signed-off-by: Andrey Zhizhikin <andrey.zhizhikin@leica-geosystems.com>
Commit: 0615afea9f
Author: Andrey Zhizhikin
Date:   2021-07-20 15:05:19 +00:00

143 changed files with 898 additions and 441 deletions

View File

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 132
+SUBLEVEL = 133
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus

View File

@@ -5,6 +5,7 @@
  * Very small subset of simple string routines
  */

+#include <linux/compiler_attributes.h>
 #include <linux/types.h>

 void *memcpy(void *dest, const void *src, size_t n)
@@ -27,3 +28,19 @@ void *memset(void *s, int c, size_t n)
         ss[i] = c;
     return s;
 }
+
+void * __weak memmove(void *dest, const void *src, size_t n)
+{
+    unsigned int i;
+    const char *s = src;
+    char *d = dest;
+
+    if ((uintptr_t)dest < (uintptr_t)src) {
+        for (i = 0; i < n; i++)
+            d[i] = s[i];
+    } else {
+        for (i = n; i > 0; i--)
+            d[i - 1] = s[i - 1];
+    }
+    return dest;
+}

View File

@@ -53,7 +53,13 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
                                          unsigned long addr, pte_t *ptep)
 {
-    flush_tlb_page(vma, addr & huge_page_mask(hstate_vma(vma)));
+    /*
+     * clear the huge pte entry firstly, so that the other smp threads will
+     * not get old pte entry after finishing flush_tlb_page and before
+     * setting new huge pte entry
+     */
+    huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+    flush_tlb_page(vma, addr);
 }

 #define __HAVE_ARCH_HUGE_PTE_NONE

View File

@@ -2007,7 +2007,7 @@ _ASM_MACRO_0(tlbginvf, _ASM_INSN_IF_MIPS(0x4200000c)
 ({ int __res; \
     __asm__ __volatile__( \
         ".set\tpush\n\t" \
-        ".set\tmips32r2\n\t" \
+        ".set\tmips32r5\n\t" \
         _ASM_SET_VIRT \
         "mfgc0\t%0, " #source ", %1\n\t" \
         ".set\tpop" \
@@ -2020,7 +2020,7 @@ _ASM_MACRO_0(tlbginvf, _ASM_INSN_IF_MIPS(0x4200000c)
 ({ unsigned long long __res; \
     __asm__ __volatile__( \
         ".set\tpush\n\t" \
-        ".set\tmips64r2\n\t" \
+        ".set\tmips64r5\n\t" \
         _ASM_SET_VIRT \
         "dmfgc0\t%0, " #source ", %1\n\t" \
         ".set\tpop" \
@@ -2033,7 +2033,7 @@ _ASM_MACRO_0(tlbginvf, _ASM_INSN_IF_MIPS(0x4200000c)
 do { \
     __asm__ __volatile__( \
         ".set\tpush\n\t" \
-        ".set\tmips32r2\n\t" \
+        ".set\tmips32r5\n\t" \
         _ASM_SET_VIRT \
         "mtgc0\t%z0, " #register ", %1\n\t" \
         ".set\tpop" \
@@ -2045,7 +2045,7 @@ do { \
 do { \
     __asm__ __volatile__( \
         ".set\tpush\n\t" \
-        ".set\tmips64r2\n\t" \
+        ".set\tmips64r5\n\t" \
         _ASM_SET_VIRT \
         "dmtgc0\t%z0, " #register ", %1\n\t" \
         ".set\tpop" \

View File

@@ -62,11 +62,15 @@ do { \
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-    pmd_t *pmd;
+    pmd_t *pmd = NULL;
+    struct page *pg;

-    pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ORDER);
-    if (pmd)
+    pg = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, PMD_ORDER);
+    if (pg) {
+        pgtable_pmd_page_ctor(pg);
+        pmd = (pmd_t *)page_address(pg);
         pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
+    }
     return pmd;
 }

View File

@@ -200,6 +200,9 @@ static void __init node_mem_init(unsigned int node)
         if (node_end_pfn(0) >= (0xffffffff >> PAGE_SHIFT))
             memblock_reserve((node_addrspace_offset | 0xfe000000),
                              32 << 20);
+
+        /* Reserve pfn range 0~node[0]->node_start_pfn */
+        memblock_reserve(0, PAGE_SIZE * start_pfn);
     }
 }

View File

@@ -44,6 +44,8 @@
 # define SMPWMB eieio
 #endif

+/* clang defines this macro for a builtin, which will not work with runtime patching */
+#undef __lwsync
 #define __lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
 #define dma_rmb() __lwsync()
 #define dma_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")

View File

@@ -204,9 +204,7 @@ static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
 {
     int is_exec = TRAP(regs) == 0x400;

-    /* NX faults set DSISR_PROTFAULT on the 8xx, DSISR_NOEXEC_OR_G on others */
-    if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT |
-                                  DSISR_PROTFAULT))) {
+    if (is_exec) {
         pr_crit_ratelimited("kernel tried to execute %s page (%lx) - exploit attempt? (uid: %d)\n",
                             address >= TASK_SIZE ? "exec-protected" : "user",
                             address,

View File

@@ -266,8 +266,8 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data,
     if (!has_sleeper && acquire_inflight_cb(rqw, private_data))
         return;

-    prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
-    has_sleeper = !wq_has_single_sleeper(&rqw->wait);
+    has_sleeper = !prepare_to_wait_exclusive(&rqw->wait, &data.wq,
+                                             TASK_UNINTERRUPTIBLE);
     do {
         /* The memory barrier in set_task_state saves us here. */
         if (data.got_token)

View File

@@ -200,7 +200,7 @@ static void ahci_sunxi_start_engine(struct ata_port *ap)
 }

 static const struct ata_port_info ahci_sunxi_port_info = {
-    .flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ,
+    .flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ | ATA_FLAG_NO_DIPM,
     .pio_mask = ATA_PIO4,
     .udma_mask = ATA_UDMA6,
     .port_ops = &ahci_platform_ops,

View File

@@ -3295,7 +3295,7 @@ static void __exit ia_module_exit(void)
 {
     pci_unregister_driver(&ia_driver);

-    del_timer(&ia_timer);
+    del_timer_sync(&ia_timer);
 }

 module_init(ia_module_init);

View File

@@ -297,7 +297,7 @@ static void __exit nicstar_cleanup(void)
 {
     XPRINTK("nicstar: nicstar_cleanup() called.\n");

-    del_timer(&ns_timer);
+    del_timer_sync(&ns_timer);
     pci_unregister_driver(&nicstar_driver);
@@ -525,6 +525,15 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
     /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */
     writel(0x00000000, card->membase + VPM);

+    card->intcnt = 0;
+    if (request_irq
+        (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) {
+        pr_err("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
+        error = 9;
+        ns_init_card_error(card, error);
+        return error;
+    }
+
     /* Initialize TSQ */
     card->tsq.org = dma_alloc_coherent(&card->pcidev->dev,
                                        NS_TSQSIZE + NS_TSQ_ALIGNMENT,
@@ -751,15 +760,6 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
     card->efbie = 1;

-    card->intcnt = 0;
-    if (request_irq
-        (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) {
-        printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
-        error = 9;
-        ns_init_card_error(card, error);
-        return error;
-    }
-
     /* Register device */
     card->atmdev = atm_dev_register("nicstar", &card->pcidev->dev, &atm_ops,
                                     -1, NULL);
@@ -837,10 +837,12 @@ static void ns_init_card_error(ns_dev *card, int error)
             dev_kfree_skb_any(hb);
     }
     if (error >= 12) {
-        kfree(card->rsq.org);
+        dma_free_coherent(&card->pcidev->dev, NS_RSQSIZE + NS_RSQ_ALIGNMENT,
+                          card->rsq.org, card->rsq.dma);
     }
     if (error >= 11) {
-        kfree(card->tsq.org);
+        dma_free_coherent(&card->pcidev->dev, NS_TSQSIZE + NS_TSQ_ALIGNMENT,
+                          card->tsq.org, card->tsq.dma);
     }
     if (error >= 10) {
         free_irq(card->pcidev->irq, card);

View File

@@ -2701,11 +2701,6 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
     struct btmtk_wmt_hdr *hdr;
     int err;

-    /* Submit control IN URB on demand to process the WMT event */
-    err = btusb_mtk_submit_wmt_recv_urb(hdev);
-    if (err < 0)
-        return err;
-
     /* Send the WMT command and wait until the WMT event returns */
     hlen = sizeof(*hdr) + wmt_params->dlen;
     if (hlen > 255)
@@ -2727,6 +2722,11 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
         return err;
     }

+    /* Submit control IN URB on demand to process the WMT event */
+    err = btusb_mtk_submit_wmt_recv_urb(hdev);
+    if (err < 0)
+        return err;
+
     /* The vendor specific WMT commands are all answered by a vendor
      * specific event and will have the Command Status or Command
      * Complete as with usual HCI command flow control.
@@ -3264,6 +3264,11 @@ static int btusb_setup_qca_download_fw(struct hci_dev *hdev,
         sent += size;
         count -= size;

+        /* ep2 need time to switch from function acl to function dfu,
+         * so we add 20ms delay here.
+         */
+        msleep(20);
+
         while (count) {
             size = min_t(size_t, count, QCA_DFU_PACKET_LEN);

View File

@@ -366,16 +366,18 @@ static int __ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
     data[0] = 0;
     WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS);

-    if ((ipmi_version_major > 1)
-        || ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) {
-        /* This is an IPMI 1.5-only feature. */
-        data[0] |= WDOG_DONT_STOP_ON_SET;
-    } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
-        /*
-         * In ipmi 1.0, setting the timer stops the watchdog, we
-         * need to start it back up again.
-         */
-        hbnow = 1;
+    if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
+        if ((ipmi_version_major > 1) ||
+            ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) {
+            /* This is an IPMI 1.5-only feature. */
+            data[0] |= WDOG_DONT_STOP_ON_SET;
+        } else {
+            /*
+             * In ipmi 1.0, setting the timer stops the watchdog, we
+             * need to start it back up again.
+             */
+            hbnow = 1;
+        }
     }

     data[1] = 0;

View File

@@ -75,6 +75,7 @@ static const struct cpg_core_clk r8a77995_core_clks[] __initconst = {
     DEF_RATE(".oco", CLK_OCO, 8 * 1000 * 1000),

     /* Core Clock Outputs */
+    DEF_FIXED("za2", R8A77995_CLK_ZA2, CLK_PLL0D3, 2, 1),
     DEF_FIXED("z2", R8A77995_CLK_Z2, CLK_PLL0D3, 1, 1),
     DEF_FIXED("ztr", R8A77995_CLK_ZTR, CLK_PLL1, 6, 1),
     DEF_FIXED("zt", R8A77995_CLK_ZT, CLK_PLL1, 4, 1),

View File

@@ -1089,7 +1089,8 @@ static int clk_pllu_enable(struct clk_hw *hw)
     if (pll->lock)
         spin_lock_irqsave(pll->lock, flags);

-    _clk_pll_enable(hw);
+    if (!clk_pll_is_enabled(hw))
+        _clk_pll_enable(hw);

     ret = clk_pll_wait_for_lock(pll);
     if (ret < 0)
@@ -1706,15 +1707,13 @@ static int clk_pllu_tegra114_enable(struct clk_hw *hw)
         return -EINVAL;
     }

-    if (clk_pll_is_enabled(hw))
-        return 0;
-
     input_rate = clk_hw_get_rate(__clk_get_hw(osc));

     if (pll->lock)
         spin_lock_irqsave(pll->lock, flags);

-    _clk_pll_enable(hw);
+    if (!clk_pll_is_enabled(hw))
+        _clk_pll_enable(hw);

     ret = clk_pll_wait_for_lock(pll);
     if (ret < 0)

View File

@@ -348,7 +348,7 @@ static u64 notrace arm64_858921_read_cntvct_el0(void)
     do { \
         _val = read_sysreg(reg); \
         _retries--; \
-    } while (((_val + 1) & GENMASK(9, 0)) <= 1 && _retries); \
+    } while (((_val + 1) & GENMASK(8, 0)) <= 1 && _retries); \
 \
     WARN_ON_ONCE(!_retries); \
     _val; \

View File

@@ -40,6 +40,10 @@ static int psp_probe_timeout = 5;
 module_param(psp_probe_timeout, int, 0644);
 MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");

+MODULE_FIRMWARE("amd/amd_sev_fam17h_model0xh.sbin"); /* 1st gen EPYC */
+MODULE_FIRMWARE("amd/amd_sev_fam17h_model3xh.sbin"); /* 2nd gen EPYC */
+MODULE_FIRMWARE("amd/amd_sev_fam19h_model0xh.sbin"); /* 3rd gen EPYC */
+
 static bool psp_dead;
 static int psp_timeout;

View File

@@ -197,6 +197,7 @@ static int mrfld_extcon_probe(struct platform_device *pdev)
     struct intel_soc_pmic *pmic = dev_get_drvdata(dev->parent);
     struct regmap *regmap = pmic->regmap;
     struct mrfld_extcon_data *data;
+    unsigned int status;
     unsigned int id;
     int irq, ret;
@@ -244,6 +245,14 @@ static int mrfld_extcon_probe(struct platform_device *pdev)
     /* Get initial state */
     mrfld_extcon_role_detect(data);

+    /*
+     * Cached status value is used for cable detection, see comments
+     * in mrfld_extcon_cable_detect(), we need to sync cached value
+     * with a real state of the hardware.
+     */
+    regmap_read(regmap, BCOVE_SCHGRIRQ1, &status);
+    data->status = status;
+
     mrfld_extcon_clear(data, BCOVE_MIRQLVL1, BCOVE_LVL1_CHGR);
     mrfld_extcon_clear(data, BCOVE_MCHGRIRQ1, BCOVE_CHGRIRQ_ALL);

View File

@@ -296,15 +296,13 @@ static int fw_cfg_do_platform_probe(struct platform_device *pdev)
     return 0;
 }

-static ssize_t fw_cfg_showrev(struct kobject *k, struct attribute *a, char *buf)
+static ssize_t fw_cfg_showrev(struct kobject *k, struct kobj_attribute *a,
+                              char *buf)
 {
     return sprintf(buf, "%u\n", fw_cfg_rev);
 }

-static const struct {
-    struct attribute attr;
-    ssize_t (*show)(struct kobject *k, struct attribute *a, char *buf);
-} fw_cfg_rev_attr = {
+static const struct kobj_attribute fw_cfg_rev_attr = {
     .attr = { .name = "rev", .mode = S_IRUSR },
     .show = fw_cfg_showrev,
 };

View File

@@ -476,6 +476,7 @@ static int s10_remove(struct platform_device *pdev)
     struct s10_priv *priv = mgr->priv;

     fpga_mgr_unregister(mgr);
+    fpga_mgr_free(mgr);
     stratix10_svc_free_channel(priv->chan);

     return 0;

View File

@@ -55,12 +55,6 @@ static struct {
     spinlock_t mem_limit_lock;
 } kfd_mem_limit;

-/* Struct used for amdgpu_amdkfd_bo_validate */
-struct amdgpu_vm_parser {
-    uint32_t domain;
-    bool wait;
-};
-
 static const char * const domain_bit_to_string[] = {
     "CPU",
     "GTT",
@@ -293,11 +287,9 @@ validate_fail:
     return ret;
 }

-static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
+static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
 {
-    struct amdgpu_vm_parser *p = param;
-
-    return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
+    return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
 }

 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
@@ -311,20 +303,15 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
 {
     struct amdgpu_bo *pd = vm->root.base.bo;
     struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
-    struct amdgpu_vm_parser param;
     int ret;

-    param.domain = AMDGPU_GEM_DOMAIN_VRAM;
-    param.wait = false;
-
-    ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
-                                    &param);
+    ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL);
     if (ret) {
         pr_err("amdgpu: failed to validate PT BOs\n");
         return ret;
     }

-    ret = amdgpu_amdkfd_validate(&param, pd);
+    ret = amdgpu_amdkfd_validate_vm_bo(NULL, pd);
     if (ret) {
         pr_err("amdgpu: failed to validate PD\n");
         return ret;

View File

@@ -2291,7 +2291,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
         AMD_IP_BLOCK_TYPE_IH,
     };

-    for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
+    for (i = 0; i < adev->num_ip_blocks; i++) {
         int j;
         struct amdgpu_ip_block *block;

View File

@@ -1584,7 +1584,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
                                      struct qcm_process_device *qpd)
 {
     int retval;
-    struct queue *q, *next;
+    struct queue *q;
     struct kernel_queue *kq, *kq_next;
     struct mqd_manager *mqd_mgr;
     struct device_process_node *cur, *next_dpn;
@@ -1639,6 +1639,19 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
         qpd->reset_wavefronts = false;
     }

+    /* Lastly, free mqd resources.
+     * Do free_mqd() after dqm_unlock to avoid circular locking.
+     */
+    while (!list_empty(&qpd->queues_list)) {
+        q = list_first_entry(&qpd->queues_list, struct queue, list);
+        mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
+                q->properties.type)];
+        list_del(&q->list);
+        qpd->queue_count--;
+        dqm_unlock(dqm);
+        mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
+        dqm_lock(dqm);
+    }
     dqm_unlock(dqm);

     /* Outside the DQM lock because under the DQM lock we can't do
@@ -1647,17 +1660,6 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
     if (found)
         kfd_dec_compute_active(dqm->dev);

-    /* Lastly, free mqd resources.
-     * Do free_mqd() after dqm_unlock to avoid circular locking.
-     */
-    list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
-        mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
-                q->properties.type)];
-        list_del(&q->list);
-        qpd->queue_count--;
-        mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
-    }
-
     return retval;
 }

View File

@@ -2632,6 +2632,23 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
     scaling_info->src_rect.x = state->src_x >> 16;
     scaling_info->src_rect.y = state->src_y >> 16;

+    /*
+     * For reasons we don't (yet) fully understand a non-zero
+     * src_y coordinate into an NV12 buffer can cause a
+     * system hang. To avoid hangs (and maybe be overly cautious)
+     * let's reject both non-zero src_x and src_y.
+     *
+     * We currently know of only one use-case to reproduce a
+     * scenario with non-zero src_x and src_y for NV12, which
+     * is to gesture the YouTube Android app into full screen
+     * on ChromeOS.
+     */
+    if (state->fb &&
+        state->fb->format->format == DRM_FORMAT_NV12 &&
+        (scaling_info->src_rect.x != 0 ||
+         scaling_info->src_rect.y != 0))
+        return -EINVAL;
+
     /*
      * For reasons we don't (yet) fully understand a non-zero
      * src_y coordinate into an NV12 buffer can cause a
@@ -6832,7 +6849,8 @@ skip_modeset:
         BUG_ON(dm_new_crtc_state->stream == NULL);

         /* Scaling or underscan settings */
-        if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
+        if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
+            drm_atomic_crtc_needs_modeset(new_crtc_state))
             update_stream_scaling_settings(
                 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
@@ -7406,6 +7424,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
             old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
             continue;

+        ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
+        if (ret)
+            goto fail;
+
         if (!new_crtc_state->enable)
             continue;

View File

@@ -387,6 +387,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
 #define MAX_COLOR_LEGACY_LUT_ENTRIES 256

 void amdgpu_dm_init_color_mod(void);
+int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state);
 int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
 int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
                                       struct dc_plane_state *dc_plane_state);

View File

@@ -277,6 +277,37 @@ static int __set_input_tf(struct dc_transfer_func *func,
     return res ? 0 : -ENOMEM;
 }

+/**
+ * Verifies that the Degamma and Gamma LUTs attached to the |crtc_state| are of
+ * the expected size.
+ * Returns 0 on success.
+ */
+int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state)
+{
+    const struct drm_color_lut *lut = NULL;
+    uint32_t size = 0;
+
+    lut = __extract_blob_lut(crtc_state->degamma_lut, &size);
+    if (lut && size != MAX_COLOR_LUT_ENTRIES) {
+        DRM_DEBUG_DRIVER(
+            "Invalid Degamma LUT size. Should be %u but got %u.\n",
+            MAX_COLOR_LUT_ENTRIES, size);
+        return -EINVAL;
+    }
+
+    lut = __extract_blob_lut(crtc_state->gamma_lut, &size);
+    if (lut && size != MAX_COLOR_LUT_ENTRIES &&
+        size != MAX_COLOR_LEGACY_LUT_ENTRIES) {
+        DRM_DEBUG_DRIVER(
+            "Invalid Gamma LUT size. Should be %u (or %u for legacy) but got %u.\n",
+            MAX_COLOR_LUT_ENTRIES, MAX_COLOR_LEGACY_LUT_ENTRIES,
+            size);
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
 /**
  * amdgpu_dm_update_crtc_color_mgmt: Maps DRM color management to DC stream.
  * @crtc: amdgpu_dm crtc state
@@ -311,14 +342,12 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
     bool is_legacy;
     int r;

-    degamma_lut = __extract_blob_lut(crtc->base.degamma_lut, &degamma_size);
-    if (degamma_lut && degamma_size != MAX_COLOR_LUT_ENTRIES)
-        return -EINVAL;
+    r = amdgpu_dm_verify_lut_sizes(&crtc->base);
+    if (r)
+        return r;

+    degamma_lut = __extract_blob_lut(crtc->base.degamma_lut, &degamma_size);
     regamma_lut = __extract_blob_lut(crtc->base.gamma_lut, &regamma_size);
-    if (regamma_lut && regamma_size != MAX_COLOR_LUT_ENTRIES &&
-        regamma_size != MAX_COLOR_LEGACY_LUT_ENTRIES)
-        return -EINVAL;

     has_degamma =
         degamma_lut && !__is_lut_linear(degamma_lut, degamma_size);

View File

@@ -1284,6 +1284,8 @@ static void set_dp_mst_mode(struct dc_link *link, bool mst_enable)
         link->type = dc_connection_single;
         link->local_sink = link->remote_sinks[0];
         link->local_sink->sink_signal = SIGNAL_TYPE_DISPLAY_PORT;
+        dc_sink_retain(link->local_sink);
+        dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
     } else if (mst_enable == true &&
                link->type == dc_connection_single &&
                link->remote_sinks[0] != NULL) {

View File

@@ -484,10 +484,13 @@ static enum lb_memory_config dpp1_dscl_find_lb_memory_config(struct dcn10_dpp *d
     int vtaps_c = scl_data->taps.v_taps_c;
     int ceil_vratio = dc_fixpt_ceil(scl_data->ratios.vert);
     int ceil_vratio_c = dc_fixpt_ceil(scl_data->ratios.vert_c);
-    enum lb_memory_config mem_cfg = LB_MEMORY_CONFIG_0;

-    if (dpp->base.ctx->dc->debug.use_max_lb)
-        return mem_cfg;
+    if (dpp->base.ctx->dc->debug.use_max_lb) {
+        if (scl_data->format == PIXEL_FORMAT_420BPP8
+                || scl_data->format == PIXEL_FORMAT_420BPP10)
+            return LB_MEMORY_CONFIG_3;
+        return LB_MEMORY_CONFIG_0;
+    }

     dpp->base.caps->dscl_calc_lb_num_partitions(
             scl_data, LB_MEMORY_CONFIG_1, &num_part_y, &num_part_c);

View File

@@ -126,7 +126,7 @@ void dcn20_dccg_init(struct dce_hwseq *hws)
     REG_WRITE(MILLISECOND_TIME_BASE_DIV, 0x1186a0);

     /* This value is dependent on the hardware pipeline delay so set once per SOC */
-    REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0x801003c);
+    REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0xe01003c);
 }

 void dcn20_display_init(struct dc *dc)
 {

View File

@@ -163,7 +163,7 @@ enum irq_type
 };

 #define DAL_VALID_IRQ_SRC_NUM(src) \
-    ((src) <= DAL_IRQ_SOURCES_NUMBER && (src) > DC_IRQ_SOURCE_INVALID)
+    ((src) < DAL_IRQ_SOURCES_NUMBER && (src) > DC_IRQ_SOURCE_INVALID)

 /* Number of Page Flip IRQ Sources. */
 #define DAL_PFLIP_IRQ_SRC_NUM \

View File

@@ -430,7 +430,7 @@ ARRAY_2D_DEPTH = 0x00000001,
 */

 typedef enum ENUM_NUM_SIMD_PER_CU {
-NUM_SIMD_PER_CU = 0x00000004,
+NUM_SIMD_PER_CU = 0x00000002,
 } ENUM_NUM_SIMD_PER_CU;

 /*

View File

@@ -922,6 +922,11 @@ static const struct drm_plane_helper_funcs malidp_de_plane_helper_funcs = {
     .atomic_disable = malidp_de_plane_disable,
 };

+static const uint64_t linear_only_modifiers[] = {
+    DRM_FORMAT_MOD_LINEAR,
+    DRM_FORMAT_MOD_INVALID
+};
+
 int malidp_de_planes_init(struct drm_device *drm)
 {
     struct malidp_drm *malidp = drm->dev_private;
@@ -985,8 +990,8 @@ int malidp_de_planes_init(struct drm_device *drm)
          */
         ret = drm_universal_plane_init(drm, &plane->base, crtcs,
                 &malidp_de_plane_funcs, formats, n,
-                (id == DE_SMART) ? NULL : modifiers, plane_type,
-                NULL);
+                (id == DE_SMART) ? linear_only_modifiers : modifiers,
+                plane_type, NULL);
         if (ret < 0)
             goto cleanup;

View File

@@ -1026,7 +1026,7 @@ static ssize_t cdns_dsi_transfer(struct mipi_dsi_host *host,
     struct mipi_dsi_packet packet;
     int ret, i, tx_len, rx_len;

-    ret = pm_runtime_get_sync(host->dev);
+    ret = pm_runtime_resume_and_get(host->dev);
     if (ret < 0)
         return ret;

View File

@@ -240,7 +240,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
         drm_connector_list_iter_end(&conn_iter);
     }

-    ret = pm_runtime_get_sync(crtc->dev->dev);
+    ret = pm_runtime_resume_and_get(crtc->dev->dev);
     if (ret < 0) {
         DRM_ERROR("Failed to enable power domain: %d\n", ret);
         return ret;

View File

@@ -88,8 +88,6 @@ static int mdp4_hw_init(struct msm_kms *kms)
     if (mdp4_kms->rev > 1)
         mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);

-    dev->mode_config.allow_fb_modifiers = true;
-
 out:
     pm_runtime_put_sync(dev->dev);

View File

@@ -347,6 +347,12 @@ enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane)
     return mdp4_plane->pipe;
 }

+static const uint64_t supported_format_modifiers[] = {
+    DRM_FORMAT_MOD_SAMSUNG_64_32_TILE,
+    DRM_FORMAT_MOD_LINEAR,
+    DRM_FORMAT_MOD_INVALID
+};
+
 /* initialize plane */
 struct drm_plane *mdp4_plane_init(struct drm_device *dev,
         enum mdp4_pipe pipe_id, bool private_plane)
@@ -375,7 +381,7 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
     type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
     ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
             mdp4_plane->formats, mdp4_plane->nformats,
-            NULL, type, NULL);
+            supported_format_modifiers, type, NULL);
     if (ret)
         goto fail;

View File

@@ -10,7 +10,6 @@ config DRM_MXSFB
     depends on COMMON_CLK
     select DRM_MXS
     select DRM_KMS_HELPER
-    select DRM_KMS_FB_HELPER
     select DRM_KMS_CMA_HELPER
     select DRM_PANEL
     help

View File

@@ -1333,6 +1333,7 @@ radeon_user_framebuffer_create(struct drm_device *dev,
     /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
     if (obj->import_attach) {
         DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
+        drm_gem_object_put(obj);
         return ERR_PTR(-EINVAL);
     }

View File

@@ -231,7 +231,6 @@ struct dw_mipi_dsi_rockchip {
     struct dw_mipi_dsi *dmd;
     const struct rockchip_dw_dsi_chip_data *cdata;
     struct dw_mipi_dsi_plat_data pdata;
-    int devcnt;
 };

 struct dphy_pll_parameter_map {
@@ -1001,9 +1000,6 @@ static int dw_mipi_dsi_rockchip_remove(struct platform_device *pdev)
 {
     struct dw_mipi_dsi_rockchip *dsi = platform_get_drvdata(pdev);

-    if (dsi->devcnt == 0)
-        component_del(dsi->dev, &dw_mipi_dsi_rockchip_ops);
-
     dw_mipi_dsi_remove(dsi->dmd);

     return 0;

View File

@@ -235,11 +235,16 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
 static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
 {
     struct drm_sched_job *job;
+    struct dma_fence *f;
     int r;

     while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
         struct drm_sched_fence *s_fence = job->s_fence;

+        /* Wait for all dependencies to avoid data corruptions */
+        while ((f = job->sched->ops->dependency(job, entity)))
+            dma_fence_wait(f, false);
+
         drm_sched_fence_scheduled(s_fence);
         dma_fence_set_error(&s_fence->finished, -ESRCH);

View File

@@ -919,6 +919,11 @@ static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = {
     .atomic_disable = tegra_cursor_atomic_disable,
 };

+static const uint64_t linear_modifiers[] = {
+    DRM_FORMAT_MOD_LINEAR,
+    DRM_FORMAT_MOD_INVALID
+};
+
 static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
                                                       struct tegra_dc *dc)
 {
@@ -947,7 +952,7 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
     err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
                                    &tegra_plane_funcs, formats,
-                                   num_formats, NULL,
+                                   num_formats, linear_modifiers,
                                    DRM_PLANE_TYPE_CURSOR, NULL);
     if (err < 0) {
         kfree(plane);
@@ -1065,7 +1070,8 @@ static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
     err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
                                    &tegra_plane_funcs, formats,
-                                   num_formats, NULL, type, NULL);
+                                   num_formats, linear_modifiers,
+                                   type, NULL);
     if (err < 0) {
         kfree(plane);
         return ERR_PTR(err);

View File

@@ -122,8 +122,6 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
     drm->mode_config.max_width = 4096;
     drm->mode_config.max_height = 4096;

-    drm->mode_config.allow_fb_modifiers = true;
-
     drm->mode_config.normalize_zpos = true;

     drm->mode_config.funcs = &tegra_drm_mode_config_funcs;

View File

@@ -750,7 +750,7 @@ bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
 void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
 void vc4_crtc_txp_armed(struct drm_crtc_state *state);
 void vc4_crtc_get_margins(struct drm_crtc_state *state,
-                          unsigned int *right, unsigned int *left,
+                          unsigned int *left, unsigned int *right,
                           unsigned int *top, unsigned int *bottom);

 /* vc4_debugfs.c */

View File

@@ -218,6 +218,7 @@ err_ttm:
 err_vbufs:
     vgdev->vdev->config->del_vqs(vgdev->vdev);
 err_vqs:
+    dev->dev_private = NULL;
     kfree(vgdev);
     return ret;
 }

View File

@@ -3,7 +3,6 @@ config DRM_ZTE
     tristate "DRM Support for ZTE SoCs"
     depends on DRM && ARCH_ZX
     select DRM_KMS_CMA_HELPER
-    select DRM_KMS_FB_HELPER
     select DRM_KMS_HELPER
     select SND_SOC_HDMI_CODEC if SND_SOC
     select VIDEOMODE_HELPERS

View File

@@ -528,7 +528,7 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
         buf_ptr = buf->data_pages[cur] + offset;
         *buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

-        if (lost && *barrier) {
+        if (lost && i < CORESIGHT_BARRIER_PKT_SIZE) {
             *buf_ptr = *barrier;
             barrier++;
         }

View File

@@ -2719,7 +2719,8 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv,

     cma_init_resolve_route_work(work, id_priv);

-    route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
+    if (!route->path_rec)
+        route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
     if (!route->path_rec) {
         ret = -ENOMEM;
         goto err1;

View File

@@ -295,6 +295,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
     if (user && (!wq->sq.bar2_pa || (need_rq && !wq->rq.bar2_pa))) {
         pr_warn("%s: sqid %u or rqid %u not in BAR2 range\n",
             pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
+        ret = -EINVAL;
         goto free_dma;
     }

View File

@@ -173,7 +173,7 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
     if (IS_ERR(umem)) {
         pr_warn("err %d from rxe_umem_get\n",
             (int)PTR_ERR(umem));
-        err = -EINVAL;
+        err = PTR_ERR(umem);
         goto err1;
     }

View File

@@ -596,8 +596,11 @@ static int tpci200_pci_probe(struct pci_dev *pdev,

 out_err_bus_register:
     tpci200_uninstall(tpci200);
+    /* tpci200->info->cfg_regs is unmapped in tpci200_uninstall */
+    tpci200->info->cfg_regs = NULL;
 out_err_install:
-    iounmap(tpci200->info->cfg_regs);
+    if (tpci200->info->cfg_regs)
+        iounmap(tpci200->info->cfg_regs);
 out_err_ioremap:
     pci_release_region(pdev, TPCI200_CFG_MEM_BAR);
 out_err_pci_request:

View File

@@ -2341,7 +2341,7 @@ static void __exit
 HFC_cleanup(void)
 {
     if (timer_pending(&hfc_tl))
-        del_timer(&hfc_tl);
+        del_timer_sync(&hfc_tl);

     pci_unregister_driver(&hfc_driver);
 }

View File

@@ -549,7 +549,8 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
         delete_at(n, index);
     }

-    *new_root = shadow_root(&spine);
+    if (!r)
+        *new_root = shadow_root(&spine);
     exit_shadow_spine(&spine);

     return r;

View File

@@ -171,6 +171,14 @@ static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
      * Any block we allocate has to be free in both the old and current ll.
      */
     r = sm_ll_find_common_free_block(&smd->old_ll, &smd->ll, smd->begin, smd->ll.nr_blocks, b);
+    if (r == -ENOSPC) {
+        /*
+         * There's no free block between smd->begin and the end of the metadata device.
+         * We search before smd->begin in case something has been freed.
+         */
+        r = sm_ll_find_common_free_block(&smd->old_ll, &smd->ll, 0, smd->begin, b);
+    }
+
     if (r)
         return r;
@@ -199,7 +207,6 @@ static int sm_disk_commit(struct dm_space_map *sm)
         return r;

     memcpy(&smd->old_ll, &smd->ll, sizeof(smd->old_ll));
-    smd->begin = 0;
     smd->nr_allocated_this_transaction = 0;

     r = sm_disk_get_nr_free(sm, &nr_free);

View File

@@ -452,6 +452,14 @@ static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b)
      * Any block we allocate has to be free in both the old and current ll.
      */
     r = sm_ll_find_common_free_block(&smm->old_ll, &smm->ll, smm->begin, smm->ll.nr_blocks, b);
+    if (r == -ENOSPC) {
+        /*
+         * There's no free block between smm->begin and the end of the metadata device.
+         * We search before smm->begin in case something has been freed.
+         */
+        r = sm_ll_find_common_free_block(&smm->old_ll, &smm->ll, 0, smm->begin, b);
+    }
+
     if (r)
         return r;
@@ -503,7 +511,6 @@ static int sm_metadata_commit(struct dm_space_map *sm)
         return r;

     memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
-    smm->begin = 0;
     smm->allocated_this_transaction = 0;

     return 0;

View File

@@ -380,7 +380,7 @@ static void saa6588_configure(struct saa6588 *s)

 /* ---------------------------------------------------------------------- */

-static long saa6588_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+static long saa6588_command(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
 {
     struct saa6588 *s = to_saa6588(sd);
     struct saa6588_command *a = arg;
@@ -433,7 +433,7 @@ static int saa6588_s_tuner(struct v4l2_subdev *sd, const struct v4l2_tuner *vt)
 /* ----------------------------------------------------------------------- */

 static const struct v4l2_subdev_core_ops saa6588_core_ops = {
-    .ioctl = saa6588_ioctl,
+    .command = saa6588_command,
 };

 static const struct v4l2_subdev_tuner_ops saa6588_tuner_ops = {

View File

@@ -3187,7 +3187,7 @@ static int radio_release(struct file *file)

     btv->radio_user--;

-    bttv_call_all(btv, core, ioctl, SAA6588_CMD_CLOSE, &cmd);
+    bttv_call_all(btv, core, command, SAA6588_CMD_CLOSE, &cmd);

     if (btv->radio_user == 0)
         btv->has_radio_tuner = 0;
@@ -3268,7 +3268,7 @@ static ssize_t radio_read(struct file *file, char __user *data,
     cmd.result = -ENODEV;
     radio_enable(btv);

-    bttv_call_all(btv, core, ioctl, SAA6588_CMD_READ, &cmd);
+    bttv_call_all(btv, core, command, SAA6588_CMD_READ, &cmd);

     return cmd.result;
 }
@@ -3289,7 +3289,7 @@ static __poll_t radio_poll(struct file *file, poll_table *wait)
     cmd.instance = file;
     cmd.event_list = wait;
     cmd.poll_mask = res;
-    bttv_call_all(btv, core, ioctl, SAA6588_CMD_POLL, &cmd);
+    bttv_call_all(btv, core, command, SAA6588_CMD_POLL, &cmd);

     return cmd.poll_mask;
 }

View File

@@ -1179,7 +1179,7 @@ static int video_release(struct file *file)

     saa_call_all(dev, tuner, standby);
     if (vdev->vfl_type == VFL_TYPE_RADIO)
-        saa_call_all(dev, core, ioctl, SAA6588_CMD_CLOSE, &cmd);
+        saa_call_all(dev, core, command, SAA6588_CMD_CLOSE, &cmd);
     mutex_unlock(&dev->lock);

     return 0;
@@ -1198,7 +1198,7 @@ static ssize_t radio_read(struct file *file, char __user *data,
     cmd.result = -ENODEV;

     mutex_lock(&dev->lock);
-    saa_call_all(dev, core, ioctl, SAA6588_CMD_READ, &cmd);
+    saa_call_all(dev, core, command, SAA6588_CMD_READ, &cmd);
     mutex_unlock(&dev->lock);

     return cmd.result;
@@ -1214,7 +1214,7 @@ static __poll_t radio_poll(struct file *file, poll_table *wait)
     cmd.event_list = wait;
     cmd.poll_mask = 0;
     mutex_lock(&dev->lock);
-    saa_call_all(dev, core, ioctl, SAA6588_CMD_POLL, &cmd);
+    saa_call_all(dev, core, command, SAA6588_CMD_POLL, &cmd);
     mutex_unlock(&dev->lock);

     return rc | cmd.poll_mask;

View File

@@ -48,7 +48,7 @@ static int venc_is_second_field(struct vpbe_display *disp_dev)
     ret = v4l2_subdev_call(vpbe_dev->venc,
                            core,
-                           ioctl,
+                           command,
                            VENC_GET_FLD,
                            &val);
     if (ret < 0) {

View File

@@ -521,9 +521,7 @@ static int venc_s_routing(struct v4l2_subdev *sd, u32 input, u32 output,
     return ret;
 }

-static long venc_ioctl(struct v4l2_subdev *sd,
-                       unsigned int cmd,
-                       void *arg)
+static long venc_command(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
 {
     u32 val;
@@ -542,7 +540,7 @@ static long venc_ioctl(struct v4l2_subdev *sd,
 }

 static const struct v4l2_subdev_core_ops venc_core_ops = {
-    .ioctl = venc_ioctl,
+    .command = venc_command,
 };

 static const struct v4l2_subdev_video_ops venc_video_ops = {

View File

@@ -329,7 +329,8 @@ int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
     }

     if (attr->query.prog_cnt != 0 && prog_ids && cnt)
-        ret = bpf_prog_array_copy_to_user(progs, prog_ids, cnt);
+        ret = bpf_prog_array_copy_to_user(progs, prog_ids,
+                                          attr->query.prog_cnt);

 unlock:
     mutex_unlock(&ir_raw_handler_lock);

View File

@@ -26,6 +26,7 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
                            u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
 {
     struct dtv5100_state *st = d->priv;
+    unsigned int pipe;
     u8 request;
     u8 type;
     u16 value;
@@ -34,6 +35,7 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
     switch (wlen) {
     case 1:
         /* write { reg }, read { value } */
+        pipe = usb_rcvctrlpipe(d->udev, 0);
         request = (addr == DTV5100_DEMOD_ADDR ? DTV5100_DEMOD_READ :
                    DTV5100_TUNER_READ);
         type = USB_TYPE_VENDOR | USB_DIR_IN;
@@ -41,6 +43,7 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
         break;
     case 2:
         /* write { reg, value } */
+        pipe = usb_sndctrlpipe(d->udev, 0);
         request = (addr == DTV5100_DEMOD_ADDR ? DTV5100_DEMOD_WRITE :
                    DTV5100_TUNER_WRITE);
         type = USB_TYPE_VENDOR | USB_DIR_OUT;
@@ -54,7 +57,7 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
     memcpy(st->data, rbuf, rlen);
     msleep(1); /* avoid I2C errors */
-    return usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), request,
+    return usb_control_msg(d->udev, pipe, request,
                            type, value, index, st->data, rlen,
                            DTV5100_USB_TIMEOUT);
 }
@@ -141,7 +144,7 @@ static int dtv5100_probe(struct usb_interface *intf,
     /* initialize non qt1010/zl10353 part? */
     for (i = 0; dtv5100_init[i].request; i++) {
-        ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+        ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                               dtv5100_init[i].request,
                               USB_TYPE_VENDOR | USB_DIR_OUT,
                               dtv5100_init[i].value,

View File

@@ -116,7 +116,7 @@ static int sq905_command(struct gspca_dev *gspca_dev, u16 index)
     }

     ret = usb_control_msg(gspca_dev->dev,
-                          usb_sndctrlpipe(gspca_dev->dev, 0),
+                          usb_rcvctrlpipe(gspca_dev->dev, 0),
                           USB_REQ_SYNCH_FRAME, /* request */
                           USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                           SQ905_PING, 0, gspca_dev->usb_buf, 1,

View File

@@ -242,6 +242,10 @@ static void reg_r(struct gspca_dev *gspca_dev,
         gspca_err(gspca_dev, "reg_r: buffer overflow\n");
         return;
     }
+    if (len == 0) {
+        gspca_err(gspca_dev, "reg_r: zero-length read\n");
+        return;
+    }
     if (gspca_dev->usb_err < 0)
         return;
     ret = usb_control_msg(gspca_dev->dev,
@@ -250,7 +254,7 @@ static void reg_r(struct gspca_dev *gspca_dev,
             USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
             0, /* value */
             index,
-            len ? gspca_dev->usb_buf : NULL, len,
+            gspca_dev->usb_buf, len,
             500);
     if (ret < 0) {
         pr_err("reg_r err %d\n", ret);
@@ -727,7 +731,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
     case MegaImageVI:
         reg_w_riv(gspca_dev, 0xf0, 0, 0);
         spca504B_WaitCmdStatus(gspca_dev);
-        reg_r(gspca_dev, 0xf0, 4, 0);
+        reg_w_riv(gspca_dev, 0xf0, 4, 0);
         spca504B_WaitCmdStatus(gspca_dev);
         break;
     default:

View File

@@ -124,10 +124,37 @@ int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
 static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
     struct uvc_streaming_control *ctrl)
 {
+    static const struct usb_device_id elgato_cam_link_4k = {
+        USB_DEVICE(0x0fd9, 0x0066)
+    };
     struct uvc_format *format = NULL;
     struct uvc_frame *frame = NULL;
     unsigned int i;

+    /*
+     * The response of the Elgato Cam Link 4K is incorrect: The second byte
+     * contains bFormatIndex (instead of being the second byte of bmHint).
+     * The first byte is always zero. The third byte is always 1.
+     *
+     * The UVC 1.5 class specification defines the first five bits in the
+     * bmHint bitfield. The remaining bits are reserved and should be zero.
+     * Therefore a valid bmHint will be less than 32.
+     *
+     * Latest Elgato Cam Link 4K firmware as of 2021-03-23 needs this fix.
+     * MCU: 20.02.19, FPGA: 67
+     */
+    if (usb_match_one_id(stream->dev->intf, &elgato_cam_link_4k) &&
+        ctrl->bmHint > 255) {
+        u8 corrected_format_index = ctrl->bmHint >> 8;
+
+        /* uvc_dbg(stream->dev, VIDEO,
+           "Correct USB video probe response from {bmHint: 0x%04x, bFormatIndex: %u} to {bmHint: 0x%04x, bFormatIndex: %u}\n",
+           ctrl->bmHint, ctrl->bFormatIndex,
+           1, corrected_format_index); */
+        ctrl->bmHint = 1;
+        ctrl->bFormatIndex = corrected_format_index;
+    }
+
     for (i = 0; i < stream->nformats; ++i) {
         if (stream->format[i].index == ctrl->bFormatIndex) {
             format = &stream->format[i];

View File

@@ -1037,6 +1037,7 @@ static int zr364xx_start_readpipe(struct zr364xx_camera *cam)
     DBG("submitting URB %p\n", pipe_info->stream_urb);
     retval = usb_submit_urb(pipe_info->stream_urb, GFP_KERNEL);
     if (retval) {
+        usb_free_urb(pipe_info->stream_urb);
         printk(KERN_ERR KBUILD_MODNAME ": start read pipe failed\n");
         return retval;
     }

View File

@@ -955,11 +955,14 @@ int mmc_execute_tuning(struct mmc_card *card)

     err = host->ops->execute_tuning(host, opcode);

-    if (err)
+    if (err) {
         pr_err("%s: tuning execution failed: %d\n",
             mmc_hostname(host), err);
-    else
+    } else {
+        host->retune_now = 0;
+        host->need_retune = 0;
         mmc_retune_enable(host);
+    }

     return err;
 }

View File

@@ -799,11 +799,13 @@ try_again:
         return err;

     /*
-     * In case CCS and S18A in the response is set, start Signal Voltage
-     * Switch procedure. SPI mode doesn't support CMD11.
+     * In case the S18A bit is set in the response, let's start the signal
+     * voltage switch procedure. SPI mode doesn't support CMD11.
+     * Note that, according to the spec, the S18A bit is not valid unless
+     * the CCS bit is set as well. We deliberately deviate from the spec in
+     * regards to this, which allows UHS-I to be supported for SDSC cards.
      */
-    if (!mmc_host_is_spi(host) && rocr &&
-        ((*rocr & 0x41000000) == 0x41000000)) {
+    if (!mmc_host_is_spi(host) && rocr && (*rocr & 0x01000000)) {
         err = mmc_set_uhs_voltage(host, pocr);
         if (err == -EAGAIN) {
             retries--;

View File

@@ -1514,6 +1514,10 @@ static u16 sdhci_get_preset_value(struct sdhci_host *host)
     u16 preset = 0;

     switch (host->timing) {
+    case MMC_TIMING_MMC_HS:
+    case MMC_TIMING_SD_HS:
+        preset = sdhci_readw(host, SDHCI_PRESET_FOR_HIGH_SPEED);
+        break;
     case MMC_TIMING_UHS_SDR12:
         preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
         break;

View File

@@ -261,6 +261,7 @@

 /* 60-FB reserved */

+#define SDHCI_PRESET_FOR_HIGH_SPEED 0x64
 #define SDHCI_PRESET_FOR_SDR12 0x66
 #define SDHCI_PRESET_FOR_SDR25 0x68
 #define SDHCI_PRESET_FOR_SDR50 0x6A

View File

@@ -426,6 +426,10 @@ static int bcmgenet_mii_register(struct bcmgenet_priv *priv)
     int id, ret;

     pres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+    if (!pres) {
+        dev_err(&pdev->dev, "Invalid resource\n");
+        return -EINVAL;
+    }
     memset(&res, 0, sizeof(res));
     memset(&ppd, 0, sizeof(ppd));

View File

@@ -1398,7 +1398,7 @@ static int e100_phy_check_without_mii(struct nic *nic)
 	u8 phy_type;
 	int without_mii;
 
-	phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;
+	phy_type = (le16_to_cpu(nic->eeprom[eeprom_phy_iface]) >> 8) & 0x0f;
 
 	switch (phy_type) {
 	case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
@@ -1518,7 +1518,7 @@ static int e100_phy_init(struct nic *nic)
 		mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
 	} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
 	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
-	   (nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
+	   (le16_to_cpu(nic->eeprom[eeprom_cnfg_mdix]) & eeprom_mdix_enabled))) {
 		/* enable/disable MDI/MDI-X auto-switching. */
 		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
 			nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
@@ -2266,9 +2266,9 @@ static int e100_asf(struct nic *nic)
 {
 	/* ASF can be enabled from eeprom */
 	return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
-	   (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
-	   !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
-	   ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE);
+	   (le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_asf) &&
+	   !(le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_gcl) &&
+	   ((le16_to_cpu(nic->eeprom[eeprom_smbus_addr]) & 0xFF) != 0xFE);
 }
 
 static int e100_up(struct nic *nic)
@@ -2924,7 +2924,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	/* Wol magic packet can be enabled from eeprom */
 	if ((nic->mac >= mac_82558_D101_A4) &&
-	   (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
+	   (le16_to_cpu(nic->eeprom[eeprom_id]) & eeprom_id_wol)) {
 		nic->flags |= wol_magic;
 		device_set_wakeup_enable(&pdev->dev, true);
 	}
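
(Aside, not part of the patch above: the fix works because the e100 EEPROM words are stored little-endian, so on big-endian hosts they must be byte-swapped before any flag test. A minimal portable-C sketch of that conversion, with invented names, reading the field from raw bytes:)

#include <stdint.h>

/* Decode a 16-bit value stored little-endian in a byte buffer; this is the
 * effect le16_to_cpu() has on the stored representation, regardless of the
 * host's own endianness. */
static uint16_t le16_decode(const uint8_t *p)
{
	return (uint16_t)(p[0] | ((uint16_t)p[1] << 8));
}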

@@ -48,7 +48,7 @@ enum ice_aq_res_ids {
 /* FW update timeout definitions are in milliseconds */
 #define ICE_NVM_TIMEOUT			180000
 #define ICE_CHANGE_LOCK_TIMEOUT		1000
-#define ICE_GLOBAL_CFG_LOCK_TIMEOUT	3000
+#define ICE_GLOBAL_CFG_LOCK_TIMEOUT	5000
 
 enum ice_aq_res_access_type {
 	ICE_RES_READ = 1,

@@ -2651,7 +2651,8 @@ static int igb_parse_cls_flower(struct igb_adapter *adapter,
 		}
 
 		input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI;
-		input->filter.vlan_tci = match.key->vlan_priority;
+		input->filter.vlan_tci =
+			(__force __be16)match.key->vlan_priority;
 	}
 }
@@ -8255,7 +8256,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
 		if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
 		    test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
-			vid = be16_to_cpu(rx_desc->wb.upper.vlan);
+			vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan);
 		else
 			vid = le16_to_cpu(rx_desc->wb.upper.vlan);

@@ -83,14 +83,14 @@ static int igbvf_desc_unused(struct igbvf_ring *ring)
 static void igbvf_receive_skb(struct igbvf_adapter *adapter,
 			      struct net_device *netdev,
 			      struct sk_buff *skb,
-			      u32 status, u16 vlan)
+			      u32 status, __le16 vlan)
 {
 	u16 vid;
 
 	if (status & E1000_RXD_STAT_VP) {
 		if ((adapter->flags & IGBVF_FLAG_RX_LB_VLAN_BSWAP) &&
 		    (status & E1000_RXDEXT_STATERR_LB))
-			vid = be16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
+			vid = be16_to_cpu((__force __be16)vlan) & E1000_RXD_SPC_VLAN_MASK;
 		else
 			vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
 		if (test_bit(vid, adapter->active_vlans))

@@ -5740,6 +5740,10 @@ static int mvpp2_probe(struct platform_device *pdev)
 			return PTR_ERR(priv->lms_base);
 	} else {
 		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+		if (!res) {
+			dev_err(&pdev->dev, "Invalid resource\n");
+			return -EINVAL;
+		}
 		if (has_acpi_companion(&pdev->dev)) {
 			/* In case the MDIO memory region is declared in
 			 * the ACPI, it can already appear as 'in-use'

@@ -1136,6 +1136,10 @@ static int ks8842_probe(struct platform_device *pdev)
 	unsigned i;
 
 	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!iomem) {
+		dev_err(&pdev->dev, "Invalid resource\n");
+		return -EINVAL;
+	}
 	if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME))
 		goto err_mem_region;

@@ -107,7 +107,7 @@ static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
 {
 	u8 *data = skb->data;
 	unsigned int offset;
-	u16 *hi, *id;
+	u16 hi, id;
 	u32 lo;
 
 	if (ptp_classify_raw(skb) == PTP_CLASS_NONE)
@@ -118,14 +118,11 @@ static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
 	if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
 		return 0;
 
-	hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
-	id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
-
-	memcpy(&lo, &hi[1], sizeof(lo));
+	hi = get_unaligned_be16(data + offset + OFF_PTP_SOURCE_UUID + 0);
+	lo = get_unaligned_be32(data + offset + OFF_PTP_SOURCE_UUID + 2);
+	id = get_unaligned_be16(data + offset + OFF_PTP_SEQUENCE_ID);
 
-	return (uid_hi == *hi &&
-		uid_lo == lo &&
-		seqid == *id);
+	return (uid_hi == hi && uid_lo == lo && seqid == id);
 }
 
 static void
@@ -135,7 +132,6 @@ pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
 	struct pci_dev *pdev;
 	u64 ns;
 	u32 hi, lo, val;
-	u16 uid, seq;
 
 	if (!adapter->hwts_rx_en)
 		return;
@@ -151,10 +147,7 @@ pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
 	lo = pch_src_uuid_lo_read(pdev);
 	hi = pch_src_uuid_hi_read(pdev);
 
-	uid = hi & 0xffff;
-	seq = (hi >> 16) & 0xffff;
-
-	if (!pch_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
+	if (!pch_ptp_match(skb, hi, lo, hi >> 16))
 		goto out;
 
 	ns = pch_rx_snap_read(pdev);
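
(For context only, not part of the patch: get_unaligned_be16()/get_unaligned_be32() fetch big-endian fields from packet data without assuming any alignment, which is exactly what the removed u16-pointer casts could not guarantee. A rough portable-C equivalent, with invented names:)

#include <stdint.h>

/* Assemble big-endian values byte by byte so no aligned load is required. */
static uint16_t read_be16(const uint8_t *p)
{
	return (uint16_t)(((uint16_t)p[0] << 8) | p[1]);
}

static uint32_t read_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}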

@@ -5190,7 +5190,6 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
 	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
 
 	rtl_pcie_state_l2l3_disable(tp);
-	rtl_hw_aspm_clkreq_enable(tp, true);
 }
 
 DECLARE_RTL_COND(rtl_mac_ocp_e00e_cond)

@@ -403,12 +403,17 @@ fail1:
 	return rc;
 }
 
+/* Disable SRIOV and remove VFs
+ * If some VFs are attached to a guest (using Xen, only) nothing is
+ * done if force=false, and vports are freed if force=true (for the non
+ * attachedc ones, only) but SRIOV is not disabled and VFs are not
+ * removed in either case.
+ */
 static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force)
 {
 	struct pci_dev *dev = efx->pci_dev;
-	unsigned int vfs_assigned = 0;
-
-	vfs_assigned = pci_vfs_assigned(dev);
+	unsigned int vfs_assigned = pci_vfs_assigned(dev);
+	int rc = 0;
 
 	if (vfs_assigned && !force) {
 		netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; "
@@ -418,10 +423,12 @@ static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force)
 
 	if (!vfs_assigned)
 		pci_disable_sriov(dev);
+	else
+		rc = -EBUSY;
 
 	efx_ef10_sriov_free_vf_vswitching(efx);
 	efx->vf_count = 0;
-	return 0;
+	return rc;
 }
 
 int efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs)
@@ -440,7 +447,6 @@ int efx_ef10_sriov_init(struct efx_nic *efx)
 void efx_ef10_sriov_fini(struct efx_nic *efx)
 {
 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
-	unsigned int i;
 	int rc;
 
 	if (!nic_data->vf) {
@@ -450,14 +456,7 @@ void efx_ef10_sriov_fini(struct efx_nic *efx)
 		return;
 	}
 
-	/* Remove any VFs in the host */
-	for (i = 0; i < efx->vf_count; ++i) {
-		struct efx_nic *vf_efx = nic_data->vf[i].efx;
-
-		if (vf_efx)
-			vf_efx->pci_dev->driver->remove(vf_efx->pci_dev);
-	}
-
+	/* Disable SRIOV and remove any VFs in the host */
 	rc = efx_ef10_pci_sriov_disable(efx, true);
 	if (rc)
 		netif_dbg(efx, drv, efx->net_dev,

@@ -1262,6 +1262,10 @@ static int fjes_probe(struct platform_device *plat_dev)
 	adapter->interrupt_watch_enable = false;
 
 	res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
+	if (!res) {
+		err = -EINVAL;
+		goto err_free_control_wq;
+	}
 	hw->hw_res.start = res->start;
 	hw->hw_res.size = resource_size(res);
 	hw->hw_res.irq = platform_get_irq(plat_dev, 0);

@@ -1548,7 +1548,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
 	if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
 				    virtio_is_little_endian(vi->vdev), false,
 				    0))
-		BUG();
+		return -EPROTO;
 
 	if (vi->mergeable_rx_bufs)
 		hdr->num_buffers = 0;

@@ -3725,6 +3725,7 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct cfg80211_chan_def chandef;
 	struct iwl_mvm_phy_ctxt *phy_ctxt;
+	bool band_change_removal;
 	int ret, i;
 
 	IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
@@ -3794,19 +3795,30 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
 	cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
 
 	/*
-	 * Change the PHY context configuration as it is currently referenced
-	 * only by the P2P Device MAC
+	 * Check if the remain-on-channel is on a different band and that
+	 * requires context removal, see iwl_mvm_phy_ctxt_changed(). If
+	 * so, we'll need to release and then re-configure here, since we
+	 * must not remove a PHY context that's part of a binding.
 	 */
-	if (mvmvif->phy_ctxt->ref == 1) {
+	band_change_removal =
+		fw_has_capa(&mvm->fw->ucode_capa,
+			    IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
+		mvmvif->phy_ctxt->channel->band != chandef.chan->band;
+
+	if (mvmvif->phy_ctxt->ref == 1 && !band_change_removal) {
+		/*
+		 * Change the PHY context configuration as it is currently
+		 * referenced only by the P2P Device MAC (and we can modify it)
+		 */
 		ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt,
 					       &chandef, 1, 1);
 		if (ret)
 			goto out_unlock;
 	} else {
 		/*
-		 * The PHY context is shared with other MACs. Need to remove the
-		 * P2P Device from the binding, allocate an new PHY context and
-		 * create a new binding
+		 * The PHY context is shared with other MACs (or we're trying to
+		 * switch bands), so remove the P2P Device from the binding,
+		 * allocate an new PHY context and create a new binding.
 		 */
 		phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
 		if (!phy_ctxt) {

@@ -63,7 +63,6 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
 	struct iwl_prph_scratch *prph_scratch;
 	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
 	struct iwl_prph_info *prph_info;
-	void *iml_img;
 	u32 control_flags = 0;
 	int ret;
 	int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
@@ -162,14 +161,15 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
 	trans_pcie->prph_scratch = prph_scratch;
 
 	/* Allocate IML */
-	iml_img = dma_alloc_coherent(trans->dev, trans->iml_len,
-				     &trans_pcie->iml_dma_addr, GFP_KERNEL);
-	if (!iml_img) {
+	trans_pcie->iml = dma_alloc_coherent(trans->dev, trans->iml_len,
+					     &trans_pcie->iml_dma_addr,
+					     GFP_KERNEL);
+	if (!trans_pcie->iml) {
 		ret = -ENOMEM;
 		goto err_free_ctxt_info;
 	}
 
-	memcpy(iml_img, trans->iml, trans->iml_len);
+	memcpy(trans_pcie->iml, trans->iml, trans->iml_len);
 
 	iwl_enable_fw_load_int_ctx_info(trans);
@@ -242,6 +242,11 @@ void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans)
 	trans_pcie->ctxt_info_dma_addr = 0;
 	trans_pcie->ctxt_info_gen3 = NULL;
 
+	dma_free_coherent(trans->dev, trans->iml_len, trans_pcie->iml,
+			  trans_pcie->iml_dma_addr);
+	trans_pcie->iml_dma_addr = 0;
+	trans_pcie->iml = NULL;
+
 	iwl_pcie_ctxt_info_free_fw_img(trans);
 
 	dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch),

@@ -475,6 +475,8 @@ struct cont_rec {
  *	Context information addresses will be taken from here.
  *	This is driver's local copy for keeping track of size and
  *	count for allocating and freeing the memory.
+ * @iml: image loader image virtual address
+ * @iml_dma_addr: image loader image DMA address
  * @trans: pointer to the generic transport area
  * @scd_base_addr: scheduler sram base address in SRAM
  * @scd_bc_tbls: pointer to the byte count table of the scheduler
@@ -522,6 +524,7 @@ struct iwl_trans_pcie {
 	};
 	struct iwl_prph_info *prph_info;
 	struct iwl_prph_scratch *prph_scratch;
+	void *iml;
 	dma_addr_t ctxt_info_dma_addr;
 	dma_addr_t prph_info_dma_addr;
 	dma_addr_t prph_scratch_dma_addr;

@@ -269,7 +269,8 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
 	/* now that we got alive we can free the fw image & the context info.
 	 * paging memory cannot be freed included since FW will still use it
 	 */
-	iwl_pcie_ctxt_info_free(trans);
+	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+		iwl_pcie_ctxt_info_free(trans);
 
 	/*
 	 * Re-enable all the interrupts, including the RF-Kill one, now that

@@ -840,22 +840,20 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
 	int first_idx = 0, last_idx;
 	int i, idx, count;
 	bool fixed_rate, ack_timeout;
-	bool probe, ampdu, cck = false;
+	bool ampdu, cck = false;
 	bool rs_idx;
 	u32 rate_set_tsf;
 	u32 final_rate, final_rate_flags, final_nss, txs;
 
-	fixed_rate = info->status.rates[0].count;
-	probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
-
 	txs = le32_to_cpu(txs_data[1]);
-	ampdu = !fixed_rate && (txs & MT_TXS1_AMPDU);
+	ampdu = txs & MT_TXS1_AMPDU;
 
 	txs = le32_to_cpu(txs_data[3]);
 	count = FIELD_GET(MT_TXS3_TX_COUNT, txs);
 	last_idx = FIELD_GET(MT_TXS3_LAST_TX_RATE, txs);
 
 	txs = le32_to_cpu(txs_data[0]);
+	fixed_rate = txs & MT_TXS0_FIXED_RATE;
 	final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
 	ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;
@@ -877,7 +875,7 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
 
 	first_idx = max_t(int, 0, last_idx - (count + 1) / MT7615_RATE_RETRY);
 
-	if (fixed_rate && !probe) {
+	if (fixed_rate) {
 		info->status.rates[0].count = count;
 		i = 0;
 		goto out;

@@ -853,15 +853,10 @@ struct rtl8192eu_efuse {
 	u8 usb_optional_function;
 	u8 res9[2];
 	u8 mac_addr[ETH_ALEN];	/* 0xd7 */
-	u8 res10[2];
-	u8 vendor_name[7];
-	u8 res11[2];
-	u8 device_name[0x0b];	/* 0xe8 */
-	u8 res12[2];
-	u8 serial[0x0b];	/* 0xf5 */
-	u8 res13[0x30];
+	u8 device_info[80];
+	u8 res11[3];
 	u8 unknown[0x0d];	/* 0x130 */
-	u8 res14[0xc3];
+	u8 res12[0xc3];
 };
 
 struct rtl8xxxu_reg8val {

@@ -554,9 +554,43 @@ rtl8192e_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
 	}
 }
 
+static void rtl8192eu_log_next_device_info(struct rtl8xxxu_priv *priv,
+					   char *record_name,
+					   char *device_info,
+					   unsigned int *record_offset)
+{
+	char *record = device_info + *record_offset;
+	/* A record is [ total length | 0x03 | value ] */
+	unsigned char l = record[0];
+
+	/*
+	 * The whole device info section seems to be 80 characters, make sure
+	 * we don't read further.
+	 */
+	if (*record_offset + l > 80) {
+		dev_warn(&priv->udev->dev,
+			 "invalid record length %d while parsing \"%s\" at offset %u.\n",
+			 l, record_name, *record_offset);
+		return;
+	}
+
+	if (l >= 2) {
+		char value[80];
+
+		memcpy(value, &record[2], l - 2);
+		value[l - 2] = '\0';
+		dev_info(&priv->udev->dev, "%s: %s\n", record_name, value);
+		*record_offset = *record_offset + l;
+	} else {
+		dev_info(&priv->udev->dev, "%s not available.\n", record_name);
+	}
+}
+
 static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv)
 {
 	struct rtl8192eu_efuse *efuse = &priv->efuse_wifi.efuse8192eu;
+	unsigned int record_offset;
 	int i;
 
 	if (efuse->rtl_id != cpu_to_le16(0x8129))
@@ -604,12 +638,25 @@ static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv)
 	priv->has_xtalk = 1;
 	priv->xtalk = priv->efuse_wifi.efuse8192eu.xtal_k & 0x3f;
 
-	dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name);
-	dev_info(&priv->udev->dev, "Product: %.11s\n", efuse->device_name);
-	if (memchr_inv(efuse->serial, 0xff, 11))
-		dev_info(&priv->udev->dev, "Serial: %.11s\n", efuse->serial);
-	else
-		dev_info(&priv->udev->dev, "Serial not available.\n");
+	/*
+	 * device_info section seems to be laid out as records
+	 * [ total length | 0x03 | value ] so:
+	 * - vendor length + 2
+	 * - 0x03
+	 * - vendor string (not null terminated)
+	 * - product length + 2
+	 * - 0x03
+	 * - product string (not null terminated)
+	 * Then there is one or 2 0x00 on all the 4 devices I own or found
+	 * dumped online.
+	 * As previous version of the code handled an optional serial
+	 * string, I now assume there may be a third record if the
+	 * length is not 0.
+	 */
+	record_offset = 0;
+	rtl8192eu_log_next_device_info(priv, "Vendor", efuse->device_info, &record_offset);
+	rtl8192eu_log_next_device_info(priv, "Product", efuse->device_info, &record_offset);
+	rtl8192eu_log_next_device_info(priv, "Serial", efuse->device_info, &record_offset);
 
 	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
 		unsigned char *raw = priv->efuse_wifi.raw;
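
(Illustration only, not driver code: the [ total length | 0x03 | value ] record walk described in the comment above can be sketched in stand-alone C as follows. The function and variable names are invented for the example; the 80-byte section size comes from the patch.)

#include <stdio.h>
#include <string.h>

/* Print one record starting at 'off' inside an 80-byte section and return
 * the offset of the next record; short or overlong records are reported as
 * "not available" and the offset is left unchanged. */
static unsigned int print_record(const unsigned char *info, unsigned int off,
				 const char *name)
{
	unsigned char len = (off < 80) ? info[off] : 0;
	char value[80];

	if (len < 2 || off + len > 80) {
		printf("%s not available.\n", name);
		return off;
	}
	memcpy(value, &info[off + 2], len - 2);
	value[len - 2] = '\0';
	printf("%s: %s\n", name, value);
	return off + len;
}

Calling it three times with the running offset yields the Vendor, Product and optional Serial strings, which mirrors how the parse function above uses its helper.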

@@ -60,6 +60,7 @@ static const struct sdio_device_id cw1200_sdio_ids[] = {
 	{ SDIO_DEVICE(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200) },
 	{ /* end: all zeroes */ },
 };
+MODULE_DEVICE_TABLE(sdio, cw1200_sdio_ids);
 
 /* hwbus_ops implemetation */

@@ -466,9 +466,12 @@ int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
 		cmd->channels[i].channel = channels[i]->hw_value;
 	}
 
-	cmd->params.ssid_len = ssid_len;
-	if (ssid)
-		memcpy(cmd->params.ssid, ssid, ssid_len);
+	if (ssid) {
+		int len = clamp_val(ssid_len, 0, IEEE80211_MAX_SSID_LEN);
+
+		cmd->params.ssid_len = len;
+		memcpy(cmd->params.ssid, ssid, len);
+	}
 
 	ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd));
 	if (ret < 0) {

@@ -1503,6 +1503,13 @@ static int wl12xx_get_fuse_mac(struct wl1271 *wl)
 	u32 mac1, mac2;
 	int ret;
 
+	/* Device may be in ELP from the bootloader or kexec */
+	ret = wlcore_write32(wl, WL12XX_WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
+	if (ret < 0)
+		goto out;
+
+	usleep_range(500000, 700000);
+
 	ret = wlcore_set_partition(wl, &wl->ptable[PART_DRPW]);
 	if (ret < 0)
 		goto out;

@@ -318,15 +318,17 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
 			continue;
 		if (len < 2 * sizeof(u32)) {
 			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
+			of_node_put(child);
 			return -EINVAL;
 		}
 
 		cell = kzalloc(sizeof(*cell), GFP_KERNEL);
-		if (!cell)
+		if (!cell) {
+			of_node_put(child);
 			return -ENOMEM;
+		}
 
 		cell->nvmem = nvmem;
-		cell->np = of_node_get(child);
 		cell->offset = be32_to_cpup(addr++);
 		cell->bytes = be32_to_cpup(addr);
 		cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);
@@ -347,11 +349,12 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
 				cell->name, nvmem->stride);
 			/* Cells already added will be freed later. */
 			kfree_const(cell->name);
-			of_node_put(cell->np);
 			kfree(cell);
+			of_node_put(child);
 			return -EINVAL;
 		}
 
+		cell->np = of_node_get(child);
 		nvmem_cell_add(cell);
 	}

@@ -61,7 +61,7 @@
 #define PIO_COMPLETION_STATUS_UR		1
 #define PIO_COMPLETION_STATUS_CRS		2
 #define PIO_COMPLETION_STATUS_CA		4
-#define PIO_NON_POSTED_REQ			BIT(0)
+#define PIO_NON_POSTED_REQ			BIT(10)
 #define PIO_ADDR_LS				(PIO_BASE_ADDR + 0x8)
 #define PIO_ADDR_MS				(PIO_BASE_ADDR + 0xc)
 #define PIO_WR_DATA				(PIO_BASE_ADDR + 0x10)
@@ -127,6 +127,7 @@
 #define LTSSM_MASK				0x3f
 #define LTSSM_L0				0x10
 #define RC_BAR_CONFIG				0x300
+#define VENDOR_ID_REG				(LMI_BASE_ADDR + 0x44)
 
 /* PCIe core controller registers */
 #define CTRL_CORE_BASE_ADDR			0x18000
@@ -268,6 +269,16 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
 	reg |= (IS_RC_MSK << IS_RC_SHIFT);
 	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
 
+	/*
+	 * Replace incorrect PCI vendor id value 0x1b4b by correct value 0x11ab.
+	 * VENDOR_ID_REG contains vendor id in low 16 bits and subsystem vendor
+	 * id in high 16 bits. Updating this register changes readback value of
+	 * read-only vendor id bits in PCIE_CORE_DEV_ID_REG register. Workaround
+	 * for erratum 4.1: "The value of device and vendor ID is incorrect".
+	 */
+	reg = (PCI_VENDOR_ID_MARVELL << 16) | PCI_VENDOR_ID_MARVELL;
+	advk_writel(pcie, reg, VENDOR_ID_REG);
+
 	/* Set Advanced Error Capabilities and Control PF0 register */
 	reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX |
 		PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN |

@@ -27,6 +27,7 @@
 #include <linux/nvme.h>
 #include <linux/platform_data/x86/apple.h>
 #include <linux/pm_runtime.h>
+#include <linux/suspend.h>
 #include <linux/switchtec.h>
 #include <asm/dma.h>	/* isa_dma_bridge_buggy */
 #include "pci.h"
@@ -3703,6 +3704,16 @@ static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev)
 		return;
 	if (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM)
 		return;
+
+	/*
+	 * SXIO/SXFP/SXLF turns off power to the Thunderbolt controller.
+	 * We don't know how to turn it back on again, but firmware does,
+	 * so we can only use SXIO/SXFP/SXLF if we're suspending via
+	 * firmware.
+	 */
+	if (!pm_suspend_via_firmware())
+		return;
+
 	bridge = ACPI_HANDLE(&dev->dev);
 	if (!bridge)
 		return;

@@ -958,6 +958,7 @@ static int amd_gpio_remove(struct platform_device *pdev)
 static const struct acpi_device_id amd_gpio_acpi_match[] = {
 	{ "AMD0030", 0 },
 	{ "AMDI0030", 0},
+	{ "AMDI0031", 0},
 	{ },
 };
 MODULE_DEVICE_TABLE(acpi, amd_gpio_acpi_match);

@@ -459,6 +459,11 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
 	if (mcp_read(mcp, MCP_INTF, &intf))
 		goto unlock;
 
+	if (intf == 0) {
+		/* There is no interrupt pending */
+		goto unlock;
+	}
+
 	if (mcp_read(mcp, MCP_INTCAP, &intcap))
 		goto unlock;
@@ -476,11 +481,6 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
 	mcp->cached_gpio = gpio;
 	mutex_unlock(&mcp->lock);
 
-	if (intf == 0) {
-		/* There is no interrupt pending */
-		return IRQ_HANDLED;
-	}
-
 	dev_dbg(mcp->chip.parent,
 		"intcap 0x%04X intf 0x%04X gpio_orig 0x%04X gpio 0x%04X\n",
 		intcap, intf, gpio_orig, gpio);

@@ -150,24 +150,27 @@ static ssize_t tcc_offset_degree_celsius_show(struct device *dev,
 	if (err)
 		return err;
 
-	val = (val >> 24) & 0xff;
+	val = (val >> 24) & 0x3f;
 	return sprintf(buf, "%d\n", (int)val);
 }
 
-static int tcc_offset_update(int tcc)
+static int tcc_offset_update(unsigned int tcc)
 {
 	u64 val;
 	int err;
 
-	if (!tcc)
+	if (tcc > 63)
 		return -EINVAL;
 
 	err = rdmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, &val);
 	if (err)
 		return err;
 
-	val &= ~GENMASK_ULL(31, 24);
-	val |= (tcc & 0xff) << 24;
+	if (val & BIT(31))
+		return -EPERM;
+
+	val &= ~GENMASK_ULL(29, 24);
+	val |= (tcc & 0x3f) << 24;
 
 	err = wrmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, val);
 	if (err)
@@ -176,14 +179,15 @@ static int tcc_offset_update(int tcc)
 	return 0;
 }
 
-static int tcc_offset_save;
+static unsigned int tcc_offset_save;
 
 static ssize_t tcc_offset_degree_celsius_store(struct device *dev,
 				struct device_attribute *attr, const char *buf,
 				size_t count)
 {
+	unsigned int tcc;
 	u64 val;
-	int tcc, err;
+	int err;
 
 	err = rdmsrl_safe(MSR_PLATFORM_INFO, &val);
 	if (err)
@@ -192,7 +196,7 @@ static ssize_t tcc_offset_degree_celsius_store(struct device *dev,
 	if (!(val & BIT(30)))
 		return -EACCES;
 
-	if (kstrtoint(buf, 0, &tcc))
+	if (kstrtouint(buf, 0, &tcc))
 		return -EINVAL;
 
 	err = tcc_offset_update(tcc);

@@ -273,13 +273,8 @@ int fscrypt_fname_disk_to_usr(struct inode *inode,
 					   oname->name);
 		return 0;
 	}
-	if (hash) {
-		digested_name.hash = hash;
-		digested_name.minor_hash = minor_hash;
-	} else {
-		digested_name.hash = 0;
-		digested_name.minor_hash = 0;
-	}
+	digested_name.hash = hash;
+	digested_name.minor_hash = minor_hash;
 	memcpy(digested_name.digest,
 	       FSCRYPT_FNAME_DIGEST(iname->name, iname->len),
 	       FSCRYPT_FNAME_DIGEST_SIZE);

@@ -151,7 +151,8 @@ void jfs_evict_inode(struct inode *inode)
 			if (test_cflag(COMMIT_Freewmap, inode))
 				jfs_free_zero_link(inode);
 
-			diFree(inode);
+			if (JFS_SBI(inode->i_sb)->ipimap)
+				diFree(inode);
 
 			/*
 			 * Free the inode from the quota allocation.

Some files were not shown because too many files have changed in this diff.