This is the 5.4.144 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmEx2AAACgkQONu9yGCS
 aT7csg//ZhXXfRkPNMhpkkMjcV7F825mLAPs1vsluIEIZ0oInOpegu8SyDENOfui
 HyFLZ/2Stewa0mn7kNS1caAUXLpFvZ087sIz/SipzupFjLTUHFsNcMYrd19R1M4h
 UK/owAJeoq/pgR4kUck4o/r+47lo8CMqkscbEdKSvwxYUeANIcbGVB5Sf2UaJr5S
 lqBZeliWY/jYGvLWBoSc7mvUwWRbkKLnQu2JkfvGKM4ODOzpbh8TUhq8NxEL7ZFn
 mZxtNmWPvG2PHHvNP89pwKnKQx70ySKrlQdDv10gL6nIHhKuqwLxBo28Q+KcKMYr
 vfoOFS5Vk35jA7Xt8LhNF+lQtDTbN+2YLeDtoAq+aWMmEW/RUYXSU/3thh+WFuO5
 uZZAbrh4r3bew+PLFpEtnVjxkpMsU9EC33KuIZXIGlDEkFlEneJ9pMQYH7XIwQnV
 5sSSOnbyzkajxv9Kpu6XEg3kKyJf+gk/AB/psgfMR0v/jQ4PXVk9+cZDZxKFcxjj
 wGywDkgIb+/sPrABWici/yXjIup0OSG1fK9/Ki9uLgNzxXZ0h+4e3DcXNMxs1B/p
 GpBPP773qIff2lEDhAI+SbP8pHj5Mnc1j77WUQTU9vsIJcftYm4i0G+POpXnynzx
 gzbjJjOhTBL57OciLQlmL2s5ZZUPgPvu5VoHsRfwOu/bbarRADE=
 =RA6W
 -----END PGP SIGNATURE-----
gpgsig -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEdQaENiSDAlGTDEbB7G51OISzHs0FAmEx8s8ACgkQ7G51OISz
 Hs1JZA/8Cj7g56QymgMuXHEB1PecU7pLpO5egRK3X6xHxJwksD7Xp2LfpaRxjzGw
 XNQsp+4mbJX4oHiZPjD/RsFOdVuNU3ff3mliSmoH2Tdepa2TuKFt7T8V3GE7FN6K
 ns52rvIzbhF762nL1Vs+LE0YBq1w6rTvL7eenNxMo9pwUxJv95X91v7BpRQjTAY5
 /ngvj8tRKN10dSokwrCpzk47Sj/jhSoLlckJL7+iOopQdhOo/HTfWj1aPCaZC/AX
 q2EUg/L2GB1Ij342lDNEZSWn2xAvuAT6+45R8p3GxyG6TMihwiKGXQM922MJDZAV
 T3Chxgu//OlB/spPMAuFgfBNqaX1z+zxv3Dc1EvEbSNPhn6PwEZ2ck9hYkuPmvI3
 78dkyqj3x3AR5VKvc/CpnqSokXBjV7B1TOxJlHKvJ77lvWuDwujir+chmULjahA8
 bVPpbBC9BfF/nX0cYsjQuDNyddqTpt3cv1Cp9w5gXhs/Nj5MsNDRyZxaVHlGaI/W
 h3N6rAU2cNDDtI4Zqr8Lo5IgBLMVUPuj9ZUNUJBKq3YX5CooEmjCtBKZch55Ou5h
 6xmcaMgrFre3FHKfvVhJ5ACK/DoPuWLvr4Af4Q0v6kgif81Is4LQQ+EEXRLMryi2
 fTv2X5r2GLoMHlUH0WgBW8pY0NuoCJiZuxCY5T1c61pCbDCpRss=
 =yagG
 -----END PGP SIGNATURE-----

Merge tag 'v5.4.144' into 5.4-2.3.x-imx

This is the 5.4.144 stable release

Signed-off-by: Andrey Zhizhikin <andrey.zhizhikin@leica-geosystems.com>
This commit is contained in:
Andrey Zhizhikin 2021-09-03 10:02:52 +00:00
commit 79c30f58eb
61 changed files with 418 additions and 325 deletions

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0 # SPDX-License-Identifier: GPL-2.0
VERSION = 5 VERSION = 5
PATCHLEVEL = 4 PATCHLEVEL = 4
SUBLEVEL = 143 SUBLEVEL = 144
EXTRAVERSION = EXTRAVERSION =
NAME = Kleptomaniac Octopus NAME = Kleptomaniac Octopus

View File

@ -88,6 +88,8 @@ SECTIONS
CPUIDLE_TEXT CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
KPROBES_TEXT KPROBES_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
*(.fixup) *(.fixup)
*(.gnu.warning) *(.gnu.warning)
} }

View File

@ -30,3 +30,7 @@
}; };
}; };
}; };
&msmgpio {
gpio-reserved-ranges = <85 4>;
};

View File

@ -8,19 +8,4 @@ extern void * memset(void *, int, size_t);
#define __HAVE_ARCH_MEMCPY #define __HAVE_ARCH_MEMCPY
void * memcpy(void * dest,const void *src,size_t count); void * memcpy(void * dest,const void *src,size_t count);
#define __HAVE_ARCH_STRLEN
extern size_t strlen(const char *s);
#define __HAVE_ARCH_STRCPY
extern char *strcpy(char *dest, const char *src);
#define __HAVE_ARCH_STRNCPY
extern char *strncpy(char *dest, const char *src, size_t count);
#define __HAVE_ARCH_STRCAT
extern char *strcat(char *dest, const char *src);
#define __HAVE_ARCH_MEMSET
extern void *memset(void *, int, size_t);
#endif #endif

View File

@ -17,10 +17,6 @@
#include <linux/string.h> #include <linux/string.h>
EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strncpy);
EXPORT_SYMBOL(strcat);
#include <linux/atomic.h> #include <linux/atomic.h>
EXPORT_SYMBOL(__xchg8); EXPORT_SYMBOL(__xchg8);

View File

@ -3,7 +3,7 @@
# Makefile for parisc-specific library files # Makefile for parisc-specific library files
# #
lib-y := lusercopy.o bitops.o checksum.o io.o memcpy.o \ lib-y := lusercopy.o bitops.o checksum.o io.o memset.o memcpy.o \
ucmpdi2.o delay.o string.o ucmpdi2.o delay.o
obj-y := iomap.o obj-y := iomap.o

72
arch/parisc/lib/memset.c Normal file
View File

@ -0,0 +1,72 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Generic C memset() for parisc, derived from the classic glibc
 * word-at-a-time implementation: align the destination to a word
 * boundary byte-by-byte, fill with whole words (unrolled 8x), then
 * mop up the remaining tail bytes.
 */
#include <linux/types.h>
#include <asm/string.h>
/* Bytes per machine word: 4 on 32-bit kernels, 8 on 64-bit kernels. */
#define OPSIZ (BITS_PER_LONG/8)
typedef unsigned long op_t;
/*
 * memset - fill a memory region with a constant byte
 * @dstpp: destination buffer
 * @sc:    fill value (only the low 8 bits are used)
 * @len:   number of bytes to fill
 *
 * Returns @dstpp, as the standard memset() contract requires.
 */
void *
memset (void *dstpp, int sc, size_t len)
{
unsigned int c = sc;
long int dstp = (long int) dstpp;
if (len >= 8)
{
size_t xlen;
op_t cccc;
/* Replicate the fill byte into every byte position of a word. */
cccc = (unsigned char) c;
cccc |= cccc << 8;
cccc |= cccc << 16;
if (OPSIZ > 4)
/* Do the shift in two steps to avoid warning if long has 32 bits. */
cccc |= (cccc << 16) << 16;
/* There are at least some bytes to set.
No need to test for LEN == 0 in this alignment loop. */
while (dstp % OPSIZ != 0)
{
((unsigned char *) dstp)[0] = c;
dstp += 1;
len -= 1;
}
/* Write 8 `op_t' per iteration until less than 8 `op_t' remain. */
xlen = len / (OPSIZ * 8);
while (xlen > 0)
{
((op_t *) dstp)[0] = cccc;
((op_t *) dstp)[1] = cccc;
((op_t *) dstp)[2] = cccc;
((op_t *) dstp)[3] = cccc;
((op_t *) dstp)[4] = cccc;
((op_t *) dstp)[5] = cccc;
((op_t *) dstp)[6] = cccc;
((op_t *) dstp)[7] = cccc;
dstp += 8 * OPSIZ;
xlen -= 1;
}
/* Keep only the bytes not covered by the unrolled loop above. */
len %= OPSIZ * 8;
/* Write 1 `op_t' per iteration until less than OPSIZ bytes remain. */
xlen = len / OPSIZ;
while (xlen > 0)
{
((op_t *) dstp)[0] = cccc;
dstp += OPSIZ;
xlen -= 1;
}
len %= OPSIZ;
}
/* Write the last few bytes. */
while (len > 0)
{
((unsigned char *) dstp)[0] = c;
dstp += 1;
len -= 1;
}
return dstpp;
}

View File

@ -1,136 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* PA-RISC assembly string functions
*
* Copyright (C) 2019 Helge Deller <deller@gmx.de>
*/
#include <asm/assembly.h>
#include <linux/linkage.h>
.section .text.hot
.level PA_ASM_LEVEL
t0 = r20
t1 = r21
t2 = r22
ENTRY_CFI(strlen, frame=0,no_calls)
or,COND(<>) arg0,r0,ret0
b,l,n .Lstrlen_null_ptr,r0
depwi 0,31,2,ret0
cmpb,COND(<>) arg0,ret0,.Lstrlen_not_aligned
ldw,ma 4(ret0),t0
cmpib,tr 0,r0,.Lstrlen_loop
uxor,nbz r0,t0,r0
.Lstrlen_not_aligned:
uaddcm arg0,ret0,t1
shladd t1,3,r0,t1
mtsar t1
depwi -1,%sar,32,t0
uxor,nbz r0,t0,r0
.Lstrlen_loop:
b,l,n .Lstrlen_end_loop,r0
ldw,ma 4(ret0),t0
cmpib,tr 0,r0,.Lstrlen_loop
uxor,nbz r0,t0,r0
.Lstrlen_end_loop:
extrw,u,<> t0,7,8,r0
addib,tr,n -3,ret0,.Lstrlen_out
extrw,u,<> t0,15,8,r0
addib,tr,n -2,ret0,.Lstrlen_out
extrw,u,<> t0,23,8,r0
addi -1,ret0,ret0
.Lstrlen_out:
bv r0(rp)
uaddcm ret0,arg0,ret0
.Lstrlen_null_ptr:
bv,n r0(rp)
ENDPROC_CFI(strlen)
ENTRY_CFI(strcpy, frame=0,no_calls)
ldb 0(arg1),t0
stb t0,0(arg0)
ldo 0(arg0),ret0
ldo 1(arg1),t1
cmpb,= r0,t0,2f
ldo 1(arg0),t2
1: ldb 0(t1),arg1
stb arg1,0(t2)
ldo 1(t1),t1
cmpb,<> r0,arg1,1b
ldo 1(t2),t2
2: bv,n r0(rp)
ENDPROC_CFI(strcpy)
ENTRY_CFI(strncpy, frame=0,no_calls)
ldb 0(arg1),t0
stb t0,0(arg0)
ldo 1(arg1),t1
ldo 0(arg0),ret0
cmpb,= r0,t0,2f
ldo 1(arg0),arg1
1: ldo -1(arg2),arg2
cmpb,COND(=),n r0,arg2,2f
ldb 0(t1),arg0
stb arg0,0(arg1)
ldo 1(t1),t1
cmpb,<> r0,arg0,1b
ldo 1(arg1),arg1
2: bv,n r0(rp)
ENDPROC_CFI(strncpy)
ENTRY_CFI(strcat, frame=0,no_calls)
ldb 0(arg0),t0
cmpb,= t0,r0,2f
ldo 0(arg0),ret0
ldo 1(arg0),arg0
1: ldb 0(arg0),t1
cmpb,<>,n r0,t1,1b
ldo 1(arg0),arg0
2: ldb 0(arg1),t2
stb t2,0(arg0)
ldo 1(arg0),arg0
ldb 0(arg1),t0
cmpb,<> r0,t0,2b
ldo 1(arg1),arg1
bv,n r0(rp)
ENDPROC_CFI(strcat)
ENTRY_CFI(memset, frame=0,no_calls)
copy arg0,ret0
cmpb,COND(=) r0,arg0,4f
copy arg0,t2
cmpb,COND(=) r0,arg2,4f
ldo -1(arg2),arg3
subi -1,arg3,t0
subi 0,t0,t1
cmpiclr,COND(>=) 0,t1,arg2
ldo -1(t1),arg2
extru arg2,31,2,arg0
2: stb arg1,0(t2)
ldo 1(t2),t2
addib,>= -1,arg0,2b
ldo -1(arg3),arg3
cmpiclr,COND(<=) 4,arg2,r0
b,l,n 4f,r0
#ifdef CONFIG_64BIT
depd,* r0,63,2,arg2
#else
depw r0,31,2,arg2
#endif
ldo 1(t2),t2
3: stb arg1,-1(t2)
stb arg1,0(t2)
stb arg1,1(t2)
stb arg1,2(t2)
addib,COND(>) -4,arg2,3b
ldo 4(t2),t2
4: bv,n r0(rp)
ENDPROC_CFI(memset)
.end

View File

@ -4382,7 +4382,7 @@ static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
return; return;
pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword); pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
addr = (pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23; addr = ((resource_size_t)pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
pci_read_config_dword(pdev, SNR_IMC_MMIO_MEM0_OFFSET, &pci_dword); pci_read_config_dword(pdev, SNR_IMC_MMIO_MEM0_OFFSET, &pci_dword);
addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12; addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;

View File

@ -4666,7 +4666,15 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
void void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
{ {
bool uses_nx = context->nx || /*
* KVM uses NX when TDP is disabled to handle a variety of scenarios,
* notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
* to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
* The iTLB multi-hit workaround can be toggled at any time, so assume
* NX can be used by any non-nested shadow MMU to avoid having to reset
* MMU contexts. Note, KVM forces EFER.NX=1 when TDP is disabled.
*/
bool uses_nx = context->nx || !tdp_enabled ||
context->mmu_role.base.smep_andnot_wp; context->mmu_role.base.smep_andnot_wp;
struct rsvd_bits_validate *shadow_zero_check; struct rsvd_bits_validate *shadow_zero_check;
int i; int i;

View File

@ -4063,22 +4063,21 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
if (UFDCS->rawcmd == 1) if (UFDCS->rawcmd == 1)
UFDCS->rawcmd = 2; UFDCS->rawcmd = 2;
if (mode & (FMODE_READ|FMODE_WRITE)) { if (!(mode & FMODE_NDELAY)) {
UDRS->last_checked = 0; if (mode & (FMODE_READ|FMODE_WRITE)) {
clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); UDRS->last_checked = 0;
check_disk_change(bdev); clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags)) check_disk_change(bdev);
goto out; if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags)) goto out;
if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
goto out;
}
res = -EROFS;
if ((mode & FMODE_WRITE) &&
!test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
goto out; goto out;
} }
res = -EROFS;
if ((mode & FMODE_WRITE) &&
!test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
goto out;
mutex_unlock(&open_lock); mutex_unlock(&open_lock);
mutex_unlock(&floppy_mutex); mutex_unlock(&floppy_mutex);
return 0; return 0;

View File

@ -863,8 +863,6 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
req.request.sequence = req32.request.sequence; req.request.sequence = req32.request.sequence;
req.request.signal = req32.request.signal; req.request.signal = req32.request.signal;
err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, DRM_UNLOCKED); err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, DRM_UNLOCKED);
if (err)
return err;
req32.reply.type = req.reply.type; req32.reply.type = req.reply.type;
req32.reply.sequence = req.reply.sequence; req32.reply.sequence = req.reply.sequence;
@ -873,7 +871,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
if (copy_to_user(argp, &req32, sizeof(req32))) if (copy_to_user(argp, &req32, sizeof(req32)))
return -EFAULT; return -EFAULT;
return 0; return err;
} }
#if defined(CONFIG_X86) #if defined(CONFIG_X86)

View File

@ -289,6 +289,14 @@ void intel_timeline_fini(struct intel_timeline *timeline)
i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj); i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);
i915_vma_put(timeline->hwsp_ggtt); i915_vma_put(timeline->hwsp_ggtt);
/*
* A small race exists between intel_gt_retire_requests_timeout and
* intel_timeline_exit which could result in the syncmap not getting
* free'd. Rather than work to hard to seal this race, simply cleanup
* the syncmap on fini.
*/
i915_syncmap_free(&timeline->sync);
} }
struct intel_timeline * struct intel_timeline *

View File

@ -419,7 +419,7 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
return ret; return ret;
} }
static void void
nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior) nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
{ {
struct nvkm_dp *dp = nvkm_dp(outp); struct nvkm_dp *dp = nvkm_dp(outp);

View File

@ -32,6 +32,7 @@ struct nvkm_dp {
int nvkm_dp_new(struct nvkm_disp *, int index, struct dcb_output *, int nvkm_dp_new(struct nvkm_disp *, int index, struct dcb_output *,
struct nvkm_outp **); struct nvkm_outp **);
void nvkm_dp_disable(struct nvkm_outp *, struct nvkm_ior *);
/* DPCD Receiver Capabilities */ /* DPCD Receiver Capabilities */
#define DPCD_RC00_DPCD_REV 0x00000 #define DPCD_RC00_DPCD_REV 0x00000

View File

@ -22,6 +22,7 @@
* Authors: Ben Skeggs * Authors: Ben Skeggs
*/ */
#include "outp.h" #include "outp.h"
#include "dp.h"
#include "ior.h" #include "ior.h"
#include <subdev/bios.h> #include <subdev/bios.h>
@ -216,6 +217,14 @@ nvkm_outp_init_route(struct nvkm_outp *outp)
if (!ior->arm.head || ior->arm.proto != proto) { if (!ior->arm.head || ior->arm.proto != proto) {
OUTP_DBG(outp, "no heads (%x %d %d)", ior->arm.head, OUTP_DBG(outp, "no heads (%x %d %d)", ior->arm.head,
ior->arm.proto, proto); ior->arm.proto, proto);
/* The EFI GOP driver on Ampere can leave unused DP links routed,
* which we don't expect. The DisableLT IED script *should* get
* us back to where we need to be.
*/
if (ior->func->route.get && !ior->arm.head && outp->info.type == DCB_OUTPUT_DP)
nvkm_dp_disable(outp, ior);
return; return;
} }

View File

@ -1404,6 +1404,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
if (nq) if (nq)
nq->budget++; nq->budget++;
atomic_inc(&rdev->srq_count); atomic_inc(&rdev->srq_count);
spin_lock_init(&srq->lock);
return 0; return 0;

View File

@ -340,6 +340,7 @@ static int efa_enable_msix(struct efa_dev *dev)
} }
if (irq_num != msix_vecs) { if (irq_num != msix_vecs) {
efa_disable_msix(dev);
dev_err(&dev->pdev->dev, dev_err(&dev->pdev->dev,
"Allocated %d MSI-X (out of %d requested)\n", "Allocated %d MSI-X (out of %d requested)\n",
irq_num, msix_vecs); irq_num, msix_vecs);

View File

@ -3056,6 +3056,7 @@ static void __sdma_process_event(struct sdma_engine *sde,
static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx) static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{ {
int i; int i;
struct sdma_desc *descp;
/* Handle last descriptor */ /* Handle last descriptor */
if (unlikely((tx->num_desc == (MAX_DESC - 1)))) { if (unlikely((tx->num_desc == (MAX_DESC - 1)))) {
@ -3076,12 +3077,10 @@ static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
if (unlikely(tx->num_desc == MAX_DESC)) if (unlikely(tx->num_desc == MAX_DESC))
goto enomem; goto enomem;
tx->descp = kmalloc_array( descp = kmalloc_array(MAX_DESC, sizeof(struct sdma_desc), GFP_ATOMIC);
MAX_DESC, if (!descp)
sizeof(struct sdma_desc),
GFP_ATOMIC);
if (!tx->descp)
goto enomem; goto enomem;
tx->descp = descp;
/* reserve last descriptor for coalescing */ /* reserve last descriptor for coalescing */
tx->desc_limit = MAX_DESC - 1; tx->desc_limit = MAX_DESC - 1;

View File

@ -1589,6 +1589,23 @@ out:
__sdhci_msm_set_clock(host, clock); __sdhci_msm_set_clock(host, clock);
} }
static void sdhci_msm_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
u32 count, start = 15;
__sdhci_set_timeout(host, cmd);
count = sdhci_readb(host, SDHCI_TIMEOUT_CONTROL);
/*
* Update software timeout value if its value is less than hardware data
* timeout value. Qcom SoC hardware data timeout value was calculated
* using 4 * MCLK * 2^(count + 13). where MCLK = 1 / host->clock.
*/
if (cmd && cmd->data && host->clock > 400000 &&
host->clock <= 50000000 &&
((1 << (count + start)) > (10 * host->clock)))
host->data_timeout = 22LL * NSEC_PER_SEC;
}
/* /*
* Platform specific register write functions. This is so that, if any * Platform specific register write functions. This is so that, if any
* register write needs to be followed up by platform specific actions, * register write needs to be followed up by platform specific actions,
@ -1753,6 +1770,7 @@ static const struct sdhci_ops sdhci_msm_ops = {
.set_uhs_signaling = sdhci_msm_set_uhs_signaling, .set_uhs_signaling = sdhci_msm_set_uhs_signaling,
.write_w = sdhci_msm_writew, .write_w = sdhci_msm_writew,
.write_b = sdhci_msm_writeb, .write_b = sdhci_msm_writeb,
.set_timeout = sdhci_msm_set_timeout,
}; };
static const struct sdhci_pltfm_data sdhci_msm_pdata = { static const struct sdhci_pltfm_data sdhci_msm_pdata = {

View File

@ -224,8 +224,8 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
if (id == ESD_EV_CAN_ERROR_EXT) { if (id == ESD_EV_CAN_ERROR_EXT) {
u8 state = msg->msg.rx.data[0]; u8 state = msg->msg.rx.data[0];
u8 ecc = msg->msg.rx.data[1]; u8 ecc = msg->msg.rx.data[1];
u8 txerr = msg->msg.rx.data[2]; u8 rxerr = msg->msg.rx.data[2];
u8 rxerr = msg->msg.rx.data[3]; u8 txerr = msg->msg.rx.data[3];
skb = alloc_can_err_skb(priv->netdev, &cf); skb = alloc_can_err_skb(priv->netdev, &cf);
if (skb == NULL) { if (skb == NULL) {

View File

@ -842,11 +842,8 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
/* Remove this port from the port matrix of the other ports /* Remove this port from the port matrix of the other ports
* in the same bridge. If the port is disabled, port matrix * in the same bridge. If the port is disabled, port matrix
* is kept and not being setup until the port becomes enabled. * is kept and not being setup until the port becomes enabled.
* And the other port's port matrix cannot be broken when the
* other port is still a VLAN-aware port.
*/ */
if (dsa_is_user_port(ds, i) && i != port && if (dsa_is_user_port(ds, i) && i != port) {
!dsa_port_is_vlan_filtering(&ds->ports[i])) {
if (dsa_to_port(ds, i)->bridge_dev != bridge) if (dsa_to_port(ds, i)->bridge_dev != bridge)
continue; continue;
if (priv->ports[i].enable) if (priv->ports[i].enable)

View File

@ -677,11 +677,13 @@ static int xge_probe(struct platform_device *pdev)
ret = register_netdev(ndev); ret = register_netdev(ndev);
if (ret) { if (ret) {
netdev_err(ndev, "Failed to register netdev\n"); netdev_err(ndev, "Failed to register netdev\n");
goto err; goto err_mdio_remove;
} }
return 0; return 0;
err_mdio_remove:
xge_mdio_remove(ndev);
err: err:
free_netdev(ndev); free_netdev(ndev);

View File

@ -257,6 +257,9 @@ enum hclge_opcode_type {
/* Led command */ /* Led command */
HCLGE_OPC_LED_STATUS_CFG = 0xB000, HCLGE_OPC_LED_STATUS_CFG = 0xB000,
/* clear hardware resource command */
HCLGE_OPC_CLEAR_HW_RESOURCE = 0x700B,
/* NCL config command */ /* NCL config command */
HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011, HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011,
/* M7 stats command */ /* M7 stats command */

View File

@ -281,21 +281,12 @@ static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC]; u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC];
struct hclge_vport *vport = hclge_get_vport(h); struct hclge_vport *vport = hclge_get_vport(h);
struct hclge_dev *hdev = vport->back; struct hclge_dev *hdev = vport->back;
u8 i, j, pfc_map, *prio_tc;
int ret; int ret;
u8 i;
memset(pfc, 0, sizeof(*pfc)); memset(pfc, 0, sizeof(*pfc));
pfc->pfc_cap = hdev->pfc_max; pfc->pfc_cap = hdev->pfc_max;
prio_tc = hdev->tm_info.prio_tc; pfc->pfc_en = hdev->tm_info.pfc_en;
pfc_map = hdev->tm_info.hw_pfc_map;
/* Pfc setting is based on TC */
for (i = 0; i < hdev->tm_info.num_tc; i++) {
for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
if ((prio_tc[j] == i) && (pfc_map & BIT(i)))
pfc->pfc_en |= BIT(j);
}
}
ret = hclge_pfc_tx_stats_get(hdev, requests); ret = hclge_pfc_tx_stats_get(hdev, requests);
if (ret) if (ret)

View File

@ -8006,7 +8006,11 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
bool writen_to_tbl) bool writen_to_tbl)
{ {
struct hclge_vport_vlan_cfg *vlan; struct hclge_vport_vlan_cfg *vlan, *tmp;
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
if (vlan->vlan_id == vlan_id)
return;
vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
if (!vlan) if (!vlan)
@ -9165,6 +9169,28 @@ static void hclge_clear_resetting_state(struct hclge_dev *hdev)
} }
} }
static int hclge_clear_hw_resource(struct hclge_dev *hdev)
{
struct hclge_desc desc;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
/* This new command is only supported by new firmware, it will
* fail with older firmware. Error value -EOPNOSUPP can only be
* returned by older firmware running this command, to keep code
* backward compatible we will override this value and return
* success.
*/
if (ret && ret != -EOPNOTSUPP) {
dev_err(&hdev->pdev->dev,
"failed to clear hw resource, ret = %d\n", ret);
return ret;
}
return 0;
}
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{ {
struct pci_dev *pdev = ae_dev->pdev; struct pci_dev *pdev = ae_dev->pdev;
@ -9206,6 +9232,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret) if (ret)
goto err_cmd_uninit; goto err_cmd_uninit;
ret = hclge_clear_hw_resource(hdev);
if (ret)
goto err_cmd_uninit;
ret = hclge_get_cap(hdev); ret = hclge_get_cap(hdev);
if (ret) { if (ret) {
dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",

View File

@ -995,6 +995,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
{ {
u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) | u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND; link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
u16 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */
u16 lat_enc_d = 0; /* latency decoded */
u16 lat_enc = 0; /* latency encoded */ u16 lat_enc = 0; /* latency encoded */
if (link) { if (link) {
@ -1048,7 +1050,17 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop); E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
max_ltr_enc = max_t(u16, max_snoop, max_nosnoop); max_ltr_enc = max_t(u16, max_snoop, max_nosnoop);
if (lat_enc > max_ltr_enc) lat_enc_d = (lat_enc & E1000_LTRV_VALUE_MASK) *
(1U << (E1000_LTRV_SCALE_FACTOR *
((lat_enc & E1000_LTRV_SCALE_MASK)
>> E1000_LTRV_SCALE_SHIFT)));
max_ltr_enc_d = (max_ltr_enc & E1000_LTRV_VALUE_MASK) *
(1U << (E1000_LTRV_SCALE_FACTOR *
((max_ltr_enc & E1000_LTRV_SCALE_MASK)
>> E1000_LTRV_SCALE_SHIFT)));
if (lat_enc_d > max_ltr_enc_d)
lat_enc = max_ltr_enc; lat_enc = max_ltr_enc;
} }

View File

@ -274,8 +274,11 @@
/* Latency Tolerance Reporting */ /* Latency Tolerance Reporting */
#define E1000_LTRV 0x000F8 #define E1000_LTRV 0x000F8
#define E1000_LTRV_VALUE_MASK 0x000003FF
#define E1000_LTRV_SCALE_MAX 5 #define E1000_LTRV_SCALE_MAX 5
#define E1000_LTRV_SCALE_FACTOR 5 #define E1000_LTRV_SCALE_FACTOR 5
#define E1000_LTRV_SCALE_SHIFT 10
#define E1000_LTRV_SCALE_MASK 0x00001C00
#define E1000_LTRV_REQ_SHIFT 15 #define E1000_LTRV_REQ_SHIFT 15
#define E1000_LTRV_NOSNOOP_SHIFT 16 #define E1000_LTRV_NOSNOOP_SHIFT 16
#define E1000_LTRV_SEND (1 << 30) #define E1000_LTRV_SEND (1 << 30)

View File

@ -101,7 +101,7 @@
#define MVNETA_DESC_SWAP BIT(6) #define MVNETA_DESC_SWAP BIT(6)
#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22) #define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
#define MVNETA_PORT_STATUS 0x2444 #define MVNETA_PORT_STATUS 0x2444
#define MVNETA_TX_IN_PRGRS BIT(1) #define MVNETA_TX_IN_PRGRS BIT(0)
#define MVNETA_TX_FIFO_EMPTY BIT(8) #define MVNETA_TX_FIFO_EMPTY BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE 0x247c #define MVNETA_RX_MIN_FRAME_SIZE 0x247c
/* Only exists on Armada XP and Armada 370 */ /* Only exists on Armada XP and Armada 370 */

View File

@ -353,6 +353,9 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
unsigned long flags; unsigned long flags;
int rc = -EINVAL; int rc = -EINVAL;
if (!p_ll2_conn)
return rc;
spin_lock_irqsave(&p_tx->lock, flags); spin_lock_irqsave(&p_tx->lock, flags);
if (p_tx->b_completing_packet) { if (p_tx->b_completing_packet) {
rc = -EBUSY; rc = -EBUSY;
@ -526,7 +529,16 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
unsigned long flags = 0; unsigned long flags = 0;
int rc = 0; int rc = 0;
if (!p_ll2_conn)
return rc;
spin_lock_irqsave(&p_rx->lock, flags); spin_lock_irqsave(&p_rx->lock, flags);
if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) {
spin_unlock_irqrestore(&p_rx->lock, flags);
return 0;
}
cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons); cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
@ -847,6 +859,9 @@ static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie; struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
int rc; int rc;
if (!p_ll2_conn)
return 0;
if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
return 0; return 0;
@ -870,6 +885,9 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
u16 new_idx = 0, num_bds = 0; u16 new_idx = 0, num_bds = 0;
int rc; int rc;
if (!p_ll2_conn)
return 0;
if (!QED_LL2_TX_REGISTERED(p_ll2_conn)) if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
return 0; return 0;
@ -1642,6 +1660,8 @@ int qed_ll2_post_rx_buffer(void *cxt,
if (!p_ll2_conn) if (!p_ll2_conn)
return -EINVAL; return -EINVAL;
p_rx = &p_ll2_conn->rx_queue; p_rx = &p_ll2_conn->rx_queue;
if (!p_rx->set_prod_addr)
return -EIO;
spin_lock_irqsave(&p_rx->lock, flags); spin_lock_irqsave(&p_rx->lock, flags);
if (!list_empty(&p_rx->free_descq)) if (!list_empty(&p_rx->free_descq))

View File

@ -1245,8 +1245,7 @@ qed_rdma_create_qp(void *rdma_cxt,
if (!rdma_cxt || !in_params || !out_params || if (!rdma_cxt || !in_params || !out_params ||
!p_hwfn->p_rdma_info->active) { !p_hwfn->p_rdma_info->active) {
DP_ERR(p_hwfn->cdev, pr_err("qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
"qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
rdma_cxt, in_params, out_params); rdma_cxt, in_params, out_params);
return NULL; return NULL;
} }

View File

@ -682,8 +682,9 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
} }
} }
/* There should be one of more OPP defined */ /* There should be one or more OPPs defined */
if (WARN_ON(!count)) { if (!count) {
dev_err(dev, "%s: no supported OPPs", __func__);
ret = -ENOENT; ret = -ENOENT;
goto remove_static_opp; goto remove_static_opp;
} }

View File

@ -788,12 +788,15 @@ store_state_field(struct device *dev, struct device_attribute *attr,
ret = scsi_device_set_state(sdev, state); ret = scsi_device_set_state(sdev, state);
/* /*
* If the device state changes to SDEV_RUNNING, we need to * If the device state changes to SDEV_RUNNING, we need to
* rescan the device to revalidate it, and run the queue to * run the queue to avoid I/O hang, and rescan the device
* avoid I/O hang. * to revalidate it. Running the queue first is necessary
* because another thread may be waiting inside
* blk_mq_freeze_queue_wait() and because that call may be
* waiting for pending I/O to finish.
*/ */
if (ret == 0 && state == SDEV_RUNNING) { if (ret == 0 && state == SDEV_RUNNING) {
scsi_rescan_device(dev);
blk_mq_run_hw_queues(sdev->request_queue, true); blk_mq_run_hw_queues(sdev->request_queue, true);
scsi_rescan_device(dev);
} }
mutex_unlock(&sdev->state_mutex); mutex_unlock(&sdev->state_mutex);

View File

@ -484,16 +484,19 @@ int vt_ioctl(struct tty_struct *tty,
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
} }
/* FIXME: this needs the console lock extending */ console_lock();
if (vc->vc_mode == (unsigned char) arg) if (vc->vc_mode == (unsigned char) arg) {
console_unlock();
break; break;
}
vc->vc_mode = (unsigned char) arg; vc->vc_mode = (unsigned char) arg;
if (console != fg_console) if (console != fg_console) {
console_unlock();
break; break;
}
/* /*
* explicitly blank/unblank the screen if switching modes * explicitly blank/unblank the screen if switching modes
*/ */
console_lock();
if (arg == KD_TEXT) if (arg == KD_TEXT)
do_unblank_screen(1); do_unblank_screen(1);
else else

View File

@ -898,19 +898,19 @@ static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep) static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
{ {
struct dwc3_trb *tmp;
u8 trbs_left; u8 trbs_left;
/* /*
* If enqueue & dequeue are equal than it is either full or empty. * If the enqueue & dequeue are equal then the TRB ring is either full
* * or empty. It's considered full when there are DWC3_TRB_NUM-1 of TRBs
* One way to know for sure is if the TRB right before us has HWO bit * pending to be processed by the driver.
* set or not. If it has, then we're definitely full and can't fit any
* more transfers in our ring.
*/ */
if (dep->trb_enqueue == dep->trb_dequeue) { if (dep->trb_enqueue == dep->trb_dequeue) {
tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue); /*
if (tmp->ctrl & DWC3_TRB_CTRL_HWO) * If there is any request remained in the started_list at
* this point, that means there is no TRB available.
*/
if (!list_empty(&dep->started_list))
return 0; return 0;
return DWC3_TRB_NUM - 1; return DWC3_TRB_NUM - 1;
@ -2016,10 +2016,8 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
ret = wait_for_completion_timeout(&dwc->ep0_in_setup, ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT)); msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
if (ret == 0) { if (ret == 0)
dev_err(dwc->dev, "timed out waiting for SETUP phase\n"); dev_warn(dwc->dev, "timed out waiting for SETUP phase\n");
return -ETIMEDOUT;
}
} }
/* /*
@ -2221,6 +2219,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
/* begin to receive SETUP packets */ /* begin to receive SETUP packets */
dwc->ep0state = EP0_SETUP_PHASE; dwc->ep0state = EP0_SETUP_PHASE;
dwc->link_state = DWC3_LINK_STATE_SS_DIS; dwc->link_state = DWC3_LINK_STATE_SS_DIS;
dwc->delayed_status = false;
dwc3_ep0_out_start(dwc); dwc3_ep0_out_start(dwc);
dwc3_gadget_enable_irq(dwc); dwc3_gadget_enable_irq(dwc);

View File

@ -349,8 +349,6 @@ static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep)
if (!prm->ep_enabled) if (!prm->ep_enabled)
return; return;
prm->ep_enabled = false;
audio_dev = uac->audio_dev; audio_dev = uac->audio_dev;
params = &audio_dev->params; params = &audio_dev->params;
@ -368,11 +366,12 @@ static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep)
} }
} }
prm->ep_enabled = false;
if (usb_ep_disable(ep)) if (usb_ep_disable(ep))
dev_err(uac->card->dev, "%s:%d Error!\n", __func__, __LINE__); dev_err(uac->card->dev, "%s:%d Error!\n", __func__, __LINE__);
} }
int u_audio_start_capture(struct g_audio *audio_dev) int u_audio_start_capture(struct g_audio *audio_dev)
{ {
struct snd_uac_chip *uac = audio_dev->uac; struct snd_uac_chip *uac = audio_dev->uac;

View File

@ -678,7 +678,6 @@ static struct usb_serial_driver ch341_device = {
.owner = THIS_MODULE, .owner = THIS_MODULE,
.name = "ch341-uart", .name = "ch341-uart",
}, },
.bulk_in_size = 512,
.id_table = id_table, .id_table = id_table,
.num_ports = 1, .num_ports = 1,
.open = ch341_open, .open = ch341_open,

View File

@ -2074,6 +2074,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(4) | RSVD(5) }, .driver_info = RSVD(4) | RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
.driver_info = RSVD(6) }, .driver_info = RSVD(6) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */ { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */

View File

@ -331,7 +331,7 @@ __vringh_iov(struct vringh *vrh, u16 i,
iov = wiov; iov = wiov;
else { else {
iov = riov; iov = riov;
if (unlikely(wiov && wiov->i)) { if (unlikely(wiov && wiov->used)) {
vringh_bad("Readable desc %p after writable", vringh_bad("Readable desc %p after writable",
&descs[i]); &descs[i]);
err = -EINVAL; err = -EINVAL;

View File

@ -576,6 +576,13 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
struct device *dev = get_device(&vp_dev->vdev.dev); struct device *dev = get_device(&vp_dev->vdev.dev);
/*
* Device is marked broken on surprise removal so that virtio upper
* layers can abort any ongoing operation.
*/
if (!pci_device_is_present(pci_dev))
virtio_break_device(&vp_dev->vdev);
pci_disable_sriov(pci_dev); pci_disable_sriov(pci_dev);
unregister_virtio_device(&vp_dev->vdev); unregister_virtio_device(&vp_dev->vdev);

View File

@ -2268,7 +2268,7 @@ bool virtqueue_is_broken(struct virtqueue *_vq)
{ {
struct vring_virtqueue *vq = to_vvq(_vq); struct vring_virtqueue *vq = to_vvq(_vq);
return vq->broken; return READ_ONCE(vq->broken);
} }
EXPORT_SYMBOL_GPL(virtqueue_is_broken); EXPORT_SYMBOL_GPL(virtqueue_is_broken);
@ -2283,7 +2283,9 @@ void virtio_break_device(struct virtio_device *dev)
spin_lock(&dev->vqs_list_lock); spin_lock(&dev->vqs_list_lock);
list_for_each_entry(_vq, &dev->vqs, list) { list_for_each_entry(_vq, &dev->vqs, list) {
struct vring_virtqueue *vq = to_vvq(_vq); struct vring_virtqueue *vq = to_vvq(_vq);
vq->broken = true;
/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
WRITE_ONCE(vq->broken, true);
} }
spin_unlock(&dev->vqs_list_lock); spin_unlock(&dev->vqs_list_lock);
} }

View File

@ -268,6 +268,21 @@ static inline void btrfs_mod_outstanding_extents(struct btrfs_inode *inode,
mod); mod);
} }
/*
* Called every time after doing a buffered, direct IO or memory mapped write.
*
* This is to ensure that if we write to a file that was previously fsynced in
* the current transaction, then try to fsync it again in the same transaction,
* we will know that there were changes in the file and that it needs to be
* logged.
*/
static inline void btrfs_set_inode_last_sub_trans(struct btrfs_inode *inode)
{
spin_lock(&inode->lock);
inode->last_sub_trans = inode->root->log_transid;
spin_unlock(&inode->lock);
}
static inline int btrfs_inode_in_log(struct btrfs_inode *inode, u64 generation) static inline int btrfs_inode_in_log(struct btrfs_inode *inode, u64 generation)
{ {
int ret = 0; int ret = 0;

View File

@ -2004,14 +2004,8 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
inode_unlock(inode); inode_unlock(inode);
/* btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
* We also have to set last_sub_trans to the current log transid,
* otherwise subsequent syncs to a file that's been synced in this
* transaction will appear to have already occurred.
*/
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->last_sub_trans = root->log_transid;
spin_unlock(&BTRFS_I(inode)->lock);
if (num_written > 0) if (num_written > 0)
num_written = generic_write_sync(iocb, num_written); num_written = generic_write_sync(iocb, num_written);

View File

@ -9250,9 +9250,7 @@ again:
set_page_dirty(page); set_page_dirty(page);
SetPageUptodate(page); SetPageUptodate(page);
BTRFS_I(inode)->last_trans = fs_info->generation; btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
unlock_extent_cached(io_tree, page_start, page_end, &cached_state); unlock_extent_cached(io_tree, page_start, page_end, &cached_state);

View File

@ -160,7 +160,7 @@ static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
spin_lock(&BTRFS_I(inode)->lock); spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->last_trans = trans->transaction->transid; BTRFS_I(inode)->last_trans = trans->transaction->transid;
BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid; BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit; BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans - 1;
spin_unlock(&BTRFS_I(inode)->lock); spin_unlock(&BTRFS_I(inode)->lock);
} }

View File

@ -2168,7 +2168,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
if (IS_ERR(device)) { if (IS_ERR(device)) {
if (PTR_ERR(device) == -ENOENT && if (PTR_ERR(device) == -ENOENT &&
strcmp(device_path, "missing") == 0) device_path && strcmp(device_path, "missing") == 0)
ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND; ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
else else
ret = PTR_ERR(device); ret = PTR_ERR(device);

View File

@ -395,6 +395,7 @@ static struct dentry *ovl_lookup_real_one(struct dentry *connected,
*/ */
take_dentry_name_snapshot(&name, real); take_dentry_name_snapshot(&name, real);
this = lookup_one_len(name.name.name, connected, name.name.len); this = lookup_one_len(name.name.name, connected, name.name.len);
release_dentry_name_snapshot(&name);
err = PTR_ERR(this); err = PTR_ERR(this);
if (IS_ERR(this)) { if (IS_ERR(this)) {
goto fail; goto fail;
@ -409,7 +410,6 @@ static struct dentry *ovl_lookup_real_one(struct dentry *connected,
} }
out: out:
release_dentry_name_snapshot(&name);
dput(parent); dput(parent);
inode_unlock(dir); inode_unlock(dir);
return this; return this;

View File

@ -549,8 +549,17 @@ static int proc_oom_score(struct seq_file *m, struct pid_namespace *ns,
{ {
unsigned long totalpages = totalram_pages() + total_swap_pages; unsigned long totalpages = totalram_pages() + total_swap_pages;
unsigned long points = 0; unsigned long points = 0;
long badness;
badness = oom_badness(task, totalpages);
/*
* Special case OOM_SCORE_ADJ_MIN for all others scale the
* badness value into [0, 2000] range which we have been
* exporting for a long time so userspace might depend on it.
*/
if (badness != LONG_MIN)
points = (1000 + badness * 1000 / (long)totalpages) * 2 / 3;
points = oom_badness(task, totalpages) * 1000 / totalpages;
seq_printf(m, "%lu\n", points); seq_printf(m, "%lu\n", points);
return 0; return 0;

View File

@ -3684,6 +3684,10 @@ int netdev_rx_handler_register(struct net_device *dev,
void netdev_rx_handler_unregister(struct net_device *dev); void netdev_rx_handler_unregister(struct net_device *dev);
bool dev_valid_name(const char *name); bool dev_valid_name(const char *name);
static inline bool is_socket_ioctl_cmd(unsigned int cmd)
{
return _IOC_TYPE(cmd) == SOCK_IOC_TYPE;
}
int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
bool *need_copyout); bool *need_copyout);
int dev_ifconf(struct net *net, struct ifconf *, int); int dev_ifconf(struct net *net, struct ifconf *, int);

View File

@ -7,7 +7,7 @@
bool __do_once_start(bool *done, unsigned long *flags); bool __do_once_start(bool *done, unsigned long *flags);
void __do_once_done(bool *done, struct static_key_true *once_key, void __do_once_done(bool *done, struct static_key_true *once_key,
unsigned long *flags); unsigned long *flags, struct module *mod);
/* Call a function exactly once. The idea of DO_ONCE() is to perform /* Call a function exactly once. The idea of DO_ONCE() is to perform
* a function call such as initialization of random seeds, etc, only * a function call such as initialization of random seeds, etc, only
@ -46,7 +46,7 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
if (unlikely(___ret)) { \ if (unlikely(___ret)) { \
func(__VA_ARGS__); \ func(__VA_ARGS__); \
__do_once_done(&___done, &___once_key, \ __do_once_done(&___done, &___once_key, \
&___flags); \ &___flags, THIS_MODULE); \
} \ } \
} \ } \
___ret; \ ___ret; \

View File

@ -48,7 +48,7 @@ struct oom_control {
/* Used by oom implementation, do not set */ /* Used by oom implementation, do not set */
unsigned long totalpages; unsigned long totalpages;
struct task_struct *chosen; struct task_struct *chosen;
unsigned long chosen_points; long chosen_points;
/* Used to print the constraint info. */ /* Used to print the constraint info. */
enum oom_constraint constraint; enum oom_constraint constraint;
@ -108,7 +108,7 @@ static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
bool __oom_reap_task_mm(struct mm_struct *mm); bool __oom_reap_task_mm(struct mm_struct *mm);
extern unsigned long oom_badness(struct task_struct *p, long oom_badness(struct task_struct *p,
unsigned long totalpages); unsigned long totalpages);
extern bool out_of_memory(struct oom_control *oc); extern bool out_of_memory(struct oom_control *oc);

View File

@ -595,7 +595,6 @@ static void prune_tree_chunks(struct audit_tree *victim, bool tagged)
spin_lock(&hash_lock); spin_lock(&hash_lock);
} }
spin_unlock(&hash_lock); spin_unlock(&hash_lock);
put_tree(victim);
} }
/* /*
@ -604,6 +603,7 @@ static void prune_tree_chunks(struct audit_tree *victim, bool tagged)
static void prune_one(struct audit_tree *victim) static void prune_one(struct audit_tree *victim)
{ {
prune_tree_chunks(victim, false); prune_tree_chunks(victim, false);
put_tree(victim);
} }
/* trim the uncommitted chunks from tree */ /* trim the uncommitted chunks from tree */

View File

@ -2778,6 +2778,41 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
reg->smax_value = reg->umax_value; reg->smax_value = reg->umax_value;
} }
static bool bpf_map_is_rdonly(const struct bpf_map *map)
{
return (map->map_flags & BPF_F_RDONLY_PROG) && map->frozen;
}
static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
{
void *ptr;
u64 addr;
int err;
err = map->ops->map_direct_value_addr(map, &addr, off);
if (err)
return err;
ptr = (void *)(long)addr + off;
switch (size) {
case sizeof(u8):
*val = (u64)*(u8 *)ptr;
break;
case sizeof(u16):
*val = (u64)*(u16 *)ptr;
break;
case sizeof(u32):
*val = (u64)*(u32 *)ptr;
break;
case sizeof(u64):
*val = *(u64 *)ptr;
break;
default:
return -EINVAL;
}
return 0;
}
/* check whether memory at (regno + off) is accessible for t = (read | write) /* check whether memory at (regno + off) is accessible for t = (read | write)
* if t==write, value_regno is a register which value is stored into memory * if t==write, value_regno is a register which value is stored into memory
* if t==read, value_regno is a register which will receive the value from memory * if t==read, value_regno is a register which will receive the value from memory
@ -2815,9 +2850,27 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
if (err) if (err)
return err; return err;
err = check_map_access(env, regno, off, size, false); err = check_map_access(env, regno, off, size, false);
if (!err && t == BPF_READ && value_regno >= 0) if (!err && t == BPF_READ && value_regno >= 0) {
mark_reg_unknown(env, regs, value_regno); struct bpf_map *map = reg->map_ptr;
/* if map is read-only, track its contents as scalars */
if (tnum_is_const(reg->var_off) &&
bpf_map_is_rdonly(map) &&
map->ops->map_direct_value_addr) {
int map_off = off + reg->var_off.value;
u64 val = 0;
err = bpf_map_direct_read(map, map_off, size,
&val);
if (err)
return err;
regs[value_regno].type = SCALAR_VALUE;
__mark_reg_known(&regs[value_regno], val);
} else {
mark_reg_unknown(env, regs, value_regno);
}
}
} else if (reg->type == PTR_TO_CTX) { } else if (reg->type == PTR_TO_CTX) {
enum bpf_reg_type reg_type = SCALAR_VALUE; enum bpf_reg_type reg_type = SCALAR_VALUE;

View File

@ -3,10 +3,12 @@
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/once.h> #include <linux/once.h>
#include <linux/random.h> #include <linux/random.h>
#include <linux/module.h>
struct once_work { struct once_work {
struct work_struct work; struct work_struct work;
struct static_key_true *key; struct static_key_true *key;
struct module *module;
}; };
static void once_deferred(struct work_struct *w) static void once_deferred(struct work_struct *w)
@ -16,10 +18,11 @@ static void once_deferred(struct work_struct *w)
work = container_of(w, struct once_work, work); work = container_of(w, struct once_work, work);
BUG_ON(!static_key_enabled(work->key)); BUG_ON(!static_key_enabled(work->key));
static_branch_disable(work->key); static_branch_disable(work->key);
module_put(work->module);
kfree(work); kfree(work);
} }
static void once_disable_jump(struct static_key_true *key) static void once_disable_jump(struct static_key_true *key, struct module *mod)
{ {
struct once_work *w; struct once_work *w;
@ -29,6 +32,8 @@ static void once_disable_jump(struct static_key_true *key)
INIT_WORK(&w->work, once_deferred); INIT_WORK(&w->work, once_deferred);
w->key = key; w->key = key;
w->module = mod;
__module_get(mod);
schedule_work(&w->work); schedule_work(&w->work);
} }
@ -53,11 +58,11 @@ bool __do_once_start(bool *done, unsigned long *flags)
EXPORT_SYMBOL(__do_once_start); EXPORT_SYMBOL(__do_once_start);
void __do_once_done(bool *done, struct static_key_true *once_key, void __do_once_done(bool *done, struct static_key_true *once_key,
unsigned long *flags) unsigned long *flags, struct module *mod)
__releases(once_lock) __releases(once_lock)
{ {
*done = true; *done = true;
spin_unlock_irqrestore(&once_lock, *flags); spin_unlock_irqrestore(&once_lock, *flags);
once_disable_jump(once_key); once_disable_jump(once_key, mod);
} }
EXPORT_SYMBOL(__do_once_done); EXPORT_SYMBOL(__do_once_done);

View File

@ -197,17 +197,17 @@ static bool is_dump_unreclaim_slabs(void)
* predictable as possible. The goal is to return the highest value for the * predictable as possible. The goal is to return the highest value for the
* task consuming the most memory to avoid subsequent oom failures. * task consuming the most memory to avoid subsequent oom failures.
*/ */
unsigned long oom_badness(struct task_struct *p, unsigned long totalpages) long oom_badness(struct task_struct *p, unsigned long totalpages)
{ {
long points; long points;
long adj; long adj;
if (oom_unkillable_task(p)) if (oom_unkillable_task(p))
return 0; return LONG_MIN;
p = find_lock_task_mm(p); p = find_lock_task_mm(p);
if (!p) if (!p)
return 0; return LONG_MIN;
/* /*
* Do not even consider tasks which are explicitly marked oom * Do not even consider tasks which are explicitly marked oom
@ -219,7 +219,7 @@ unsigned long oom_badness(struct task_struct *p, unsigned long totalpages)
test_bit(MMF_OOM_SKIP, &p->mm->flags) || test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
in_vfork(p)) { in_vfork(p)) {
task_unlock(p); task_unlock(p);
return 0; return LONG_MIN;
} }
/* /*
@ -234,11 +234,7 @@ unsigned long oom_badness(struct task_struct *p, unsigned long totalpages)
adj *= totalpages / 1000; adj *= totalpages / 1000;
points += adj; points += adj;
/* return points;
* Never return 0 for an eligible task regardless of the root bonus and
* oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
*/
return points > 0 ? points : 1;
} }
static const char * const oom_constraint_text[] = { static const char * const oom_constraint_text[] = {
@ -311,7 +307,7 @@ static enum oom_constraint constrained_alloc(struct oom_control *oc)
static int oom_evaluate_task(struct task_struct *task, void *arg) static int oom_evaluate_task(struct task_struct *task, void *arg)
{ {
struct oom_control *oc = arg; struct oom_control *oc = arg;
unsigned long points; long points;
if (oom_unkillable_task(task)) if (oom_unkillable_task(task))
goto next; goto next;
@ -337,12 +333,12 @@ static int oom_evaluate_task(struct task_struct *task, void *arg)
* killed first if it triggers an oom, then select it. * killed first if it triggers an oom, then select it.
*/ */
if (oom_task_origin(task)) { if (oom_task_origin(task)) {
points = ULONG_MAX; points = LONG_MAX;
goto select; goto select;
} }
points = oom_badness(task, oc->totalpages); points = oom_badness(task, oc->totalpages);
if (!points || points < oc->chosen_points) if (points == LONG_MIN || points < oc->chosen_points)
goto next; goto next;
select: select:
@ -366,6 +362,8 @@ abort:
*/ */
static void select_bad_process(struct oom_control *oc) static void select_bad_process(struct oom_control *oc)
{ {
oc->chosen_points = LONG_MIN;
if (is_memcg_oom(oc)) if (is_memcg_oom(oc))
mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc); mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
else { else {

View File

@ -2414,6 +2414,7 @@ static int do_setlink(const struct sk_buff *skb,
return err; return err;
if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) { if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) {
const char *pat = ifname && ifname[0] ? ifname : NULL;
struct net *net = rtnl_link_get_net_capable(skb, dev_net(dev), struct net *net = rtnl_link_get_net_capable(skb, dev_net(dev),
tb, CAP_NET_ADMIN); tb, CAP_NET_ADMIN);
if (IS_ERR(net)) { if (IS_ERR(net)) {
@ -2421,7 +2422,7 @@ static int do_setlink(const struct sk_buff *skb,
goto errout; goto errout;
} }
err = dev_change_net_namespace(dev, net, ifname); err = dev_change_net_namespace(dev, net, pat);
put_net(net); put_net(net);
if (err) if (err)
goto errout; goto errout;

View File

@ -446,6 +446,8 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
static int gre_handle_offloads(struct sk_buff *skb, bool csum) static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{ {
if (csum && skb_checksum_start(skb) < skb->data)
return -EINVAL;
return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE); return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
} }

View File

@ -66,22 +66,17 @@ EXPORT_SYMBOL_GPL(nf_conntrack_hash);
struct conntrack_gc_work { struct conntrack_gc_work {
struct delayed_work dwork; struct delayed_work dwork;
u32 last_bucket; u32 next_bucket;
bool exiting; bool exiting;
bool early_drop; bool early_drop;
long next_gc_run;
}; };
static __read_mostly struct kmem_cache *nf_conntrack_cachep; static __read_mostly struct kmem_cache *nf_conntrack_cachep;
static DEFINE_SPINLOCK(nf_conntrack_locks_all_lock); static DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
static __read_mostly bool nf_conntrack_locks_all; static __read_mostly bool nf_conntrack_locks_all;
/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */ #define GC_SCAN_INTERVAL (120u * HZ)
#define GC_MAX_BUCKETS_DIV 128u #define GC_SCAN_MAX_DURATION msecs_to_jiffies(10)
/* upper bound of full table scan */
#define GC_MAX_SCAN_JIFFIES (16u * HZ)
/* desired ratio of entries found to be expired */
#define GC_EVICT_RATIO 50u
static struct conntrack_gc_work conntrack_gc_work; static struct conntrack_gc_work conntrack_gc_work;
@ -1226,17 +1221,13 @@ static void nf_ct_offload_timeout(struct nf_conn *ct)
static void gc_worker(struct work_struct *work) static void gc_worker(struct work_struct *work)
{ {
unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u); unsigned long end_time = jiffies + GC_SCAN_MAX_DURATION;
unsigned int i, goal, buckets = 0, expired_count = 0; unsigned int i, hashsz, nf_conntrack_max95 = 0;
unsigned int nf_conntrack_max95 = 0; unsigned long next_run = GC_SCAN_INTERVAL;
struct conntrack_gc_work *gc_work; struct conntrack_gc_work *gc_work;
unsigned int ratio, scanned = 0;
unsigned long next_run;
gc_work = container_of(work, struct conntrack_gc_work, dwork.work); gc_work = container_of(work, struct conntrack_gc_work, dwork.work);
goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV; i = gc_work->next_bucket;
i = gc_work->last_bucket;
if (gc_work->early_drop) if (gc_work->early_drop)
nf_conntrack_max95 = nf_conntrack_max / 100u * 95u; nf_conntrack_max95 = nf_conntrack_max / 100u * 95u;
@ -1244,22 +1235,21 @@ static void gc_worker(struct work_struct *work)
struct nf_conntrack_tuple_hash *h; struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_head *ct_hash; struct hlist_nulls_head *ct_hash;
struct hlist_nulls_node *n; struct hlist_nulls_node *n;
unsigned int hashsz;
struct nf_conn *tmp; struct nf_conn *tmp;
i++;
rcu_read_lock(); rcu_read_lock();
nf_conntrack_get_ht(&ct_hash, &hashsz); nf_conntrack_get_ht(&ct_hash, &hashsz);
if (i >= hashsz) if (i >= hashsz) {
i = 0; rcu_read_unlock();
break;
}
hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) { hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
struct net *net; struct net *net;
tmp = nf_ct_tuplehash_to_ctrack(h); tmp = nf_ct_tuplehash_to_ctrack(h);
scanned++;
if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) { if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
nf_ct_offload_timeout(tmp); nf_ct_offload_timeout(tmp);
continue; continue;
@ -1267,7 +1257,6 @@ static void gc_worker(struct work_struct *work)
if (nf_ct_is_expired(tmp)) { if (nf_ct_is_expired(tmp)) {
nf_ct_gc_expired(tmp); nf_ct_gc_expired(tmp);
expired_count++;
continue; continue;
} }
@ -1299,7 +1288,14 @@ static void gc_worker(struct work_struct *work)
*/ */
rcu_read_unlock(); rcu_read_unlock();
cond_resched(); cond_resched();
} while (++buckets < goal); i++;
if (time_after(jiffies, end_time) && i < hashsz) {
gc_work->next_bucket = i;
next_run = 0;
break;
}
} while (i < hashsz);
if (gc_work->exiting) if (gc_work->exiting)
return; return;
@ -1310,40 +1306,17 @@ static void gc_worker(struct work_struct *work)
* *
* This worker is only here to reap expired entries when system went * This worker is only here to reap expired entries when system went
* idle after a busy period. * idle after a busy period.
*
* The heuristics below are supposed to balance conflicting goals:
*
* 1. Minimize time until we notice a stale entry
* 2. Maximize scan intervals to not waste cycles
*
* Normally, expire ratio will be close to 0.
*
* As soon as a sizeable fraction of the entries have expired
* increase scan frequency.
*/ */
ratio = scanned ? expired_count * 100 / scanned : 0; if (next_run) {
if (ratio > GC_EVICT_RATIO) { gc_work->early_drop = false;
gc_work->next_gc_run = min_interval; gc_work->next_bucket = 0;
} else {
unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;
BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0);
gc_work->next_gc_run += min_interval;
if (gc_work->next_gc_run > max)
gc_work->next_gc_run = max;
} }
next_run = gc_work->next_gc_run;
gc_work->last_bucket = i;
gc_work->early_drop = false;
queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run); queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run);
} }
static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work) static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
{ {
INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker); INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker);
gc_work->next_gc_run = HZ;
gc_work->exiting = false; gc_work->exiting = false;
} }

View File

@ -314,7 +314,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
goto err; goto err;
} }
if (len != ALIGN(size, 4) + hdrlen) if (!size || len != ALIGN(size, 4) + hdrlen)
goto err; goto err;
if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA) if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA)

View File

@ -131,9 +131,9 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
cpu_relax(); cpu_relax();
} }
ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_len, ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_dma_len,
&off, PAGE_SIZE); &off, PAGE_SIZE);
if (unlikely(ret != ibmr->sg_len)) if (unlikely(ret != ibmr->sg_dma_len))
return ret < 0 ? ret : -EINVAL; return ret < 0 ? ret : -EINVAL;
if (cmpxchg(&frmr->fr_state, if (cmpxchg(&frmr->fr_state,

View File

@ -1053,7 +1053,7 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
rtnl_unlock(); rtnl_unlock();
if (!err && copy_to_user(argp, &ifc, sizeof(struct ifconf))) if (!err && copy_to_user(argp, &ifc, sizeof(struct ifconf)))
err = -EFAULT; err = -EFAULT;
} else { } else if (is_socket_ioctl_cmd(cmd)) {
struct ifreq ifr; struct ifreq ifr;
bool need_copyout; bool need_copyout;
if (copy_from_user(&ifr, argp, sizeof(struct ifreq))) if (copy_from_user(&ifr, argp, sizeof(struct ifreq)))
@ -1062,6 +1062,8 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
if (!err && need_copyout) if (!err && need_copyout)
if (copy_to_user(argp, &ifr, sizeof(struct ifreq))) if (copy_to_user(argp, &ifr, sizeof(struct ifreq)))
return -EFAULT; return -EFAULT;
} else {
err = -ENOTTY;
} }
return err; return err;
} }
@ -3228,6 +3230,8 @@ static int compat_ifr_data_ioctl(struct net *net, unsigned int cmd,
struct ifreq ifreq; struct ifreq ifreq;
u32 data32; u32 data32;
if (!is_socket_ioctl_cmd(cmd))
return -ENOTTY;
if (copy_from_user(ifreq.ifr_name, u_ifreq32->ifr_name, IFNAMSIZ)) if (copy_from_user(ifreq.ifr_name, u_ifreq32->ifr_name, IFNAMSIZ))
return -EFAULT; return -EFAULT;
if (get_user(data32, &u_ifreq32->ifr_data)) if (get_user(data32, &u_ifreq32->ifr_data))