This is the 5.4.18 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl47Mi4ACgkQONu9yGCS
 aT4zURAAiRhPVfht+amxkX2ebcimGIDO7oMICICF4vAOn9zAHHzEuxjtQms0LWRN
 l91qduujbVHg9/81BzCe0qp5GwYsgJRAIB5sJwuCrtM5u8zSSQXIY58sU1ZUXLha
 UC7Pnez+f420RO/U4Xh3BnEM6GOj97VrLw3gePdmI+e4imwP+czloYR/sVLhHES3
 vHr7OFhYx8GF303RomFo4kuG9dz11ZUuPdrcHWxDRBHIf3bNCITGpjOV9ICZPL9+
 UqmrFZKyqXcT29pgwtUMIxobnkgQm9KekS8iYGS7pblu5BXTuvU6TvfKwpOMOdF3
 FIDV9km6LRFyydCLowuiA5gVuNQwfcXCIfPZfhX0ua0vC8e7q/DKItf7QBaChYyp
 tD4mjXGkOvIrZad94kSw3qVWr1bv9I4D5w+BmDoi5/zchjAcphUeij+QXEhmkD9k
 PB+zu3NbeY0J69QaVgCbPPHAmimIwsCKlA9FAJNMeuDwBIk7LXF7J0Y0ehlxwdTy
 h9/miG0UXYkeo5BhhdvvlZ9jdCVHI/fux+McObsasBL2xArmAA059GRgDag2qEo9
 X+rtszl8x1JIcPZ3lfm5aSsZ/8nQhtpfsl+mZKSM/x+Kl+SoryvgYtDJHg9Rf5Cp
 WPx5mD82fS9g1RaQmIWa7iZj4iNvLDf4I+ppe2JbEGUq5IYzZGY=
 =cfYj
 -----END PGP SIGNATURE-----

Merge tag 'v5.4.18' into 5.4-1.0.0-imx

This is the 5.4.18 stable release
Gary Bisson 2020-04-15 14:28:36 +02:00
commit a48a1e55f9
136 changed files with 870 additions and 397 deletions

View File

@ -7,6 +7,13 @@ Description:
The name of devfreq object denoted as ... is same as the
name of device using devfreq.
What: /sys/class/devfreq/.../name
Date: November 2019
Contact: Chanwoo Choi <cw00.choi@samsung.com>
Description:
The /sys/class/devfreq/.../name shows the name of device
of the corresponding devfreq object.
What: /sys/class/devfreq/.../governor
Date: September 2011
Contact: MyungJoo Ham <myungjoo.ham@samsung.com>

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 17
SUBLEVEL = 18
EXTRAVERSION =
NAME = Kleptomaniac Octopus

View File

@ -131,6 +131,11 @@
};
/ {
memory@80000000 {
device_type = "memory";
reg = <0x80000000 0x20000000>; /* 512 MB */
};
clk_mcasp0_fixed: clk_mcasp0_fixed {
#clock-cells = <0>;
compatible = "fixed-clock";

View File

@ -848,6 +848,7 @@
pinctrl-names = "default", "sleep";
pinctrl-0 = <&spi0_pins_default>;
pinctrl-1 = <&spi0_pins_sleep>;
ti,pindir-d0-out-d1-in = <1>;
};
&spi1 {
@ -855,6 +856,7 @@
pinctrl-names = "default", "sleep";
pinctrl-0 = <&spi1_pins_default>;
pinctrl-1 = <&spi1_pins_sleep>;
ti,pindir-d0-out-d1-in = <1>;
};
&usb2_phy1 {

View File

@ -170,10 +170,6 @@
gpios = <&gpio5 18 GPIO_ACTIVE_HIGH>;
};
&pcie1_ep {
gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
};
&mmc1 {
pinctrl-names = "default", "hs";
pinctrl-0 = <&mmc1_pins_default_no_clk_pu>;

View File

@ -147,10 +147,6 @@
gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
};
&pcie1_ep {
gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
};
&mailbox5 {
status = "okay";
mbox_ipu1_ipc3x: mbox_ipu1_ipc3x {

View File

@ -29,6 +29,27 @@
reg = <0x0 0x80000000 0x0 0x80000000>;
};
main_12v0: fixedregulator-main_12v0 {
/* main supply */
compatible = "regulator-fixed";
regulator-name = "main_12v0";
regulator-min-microvolt = <12000000>;
regulator-max-microvolt = <12000000>;
regulator-always-on;
regulator-boot-on;
};
evm_5v0: fixedregulator-evm_5v0 {
/* Output of TPS54531D */
compatible = "regulator-fixed";
regulator-name = "evm_5v0";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
vin-supply = <&main_12v0>;
regulator-always-on;
regulator-boot-on;
};
vdd_3v3: fixedregulator-vdd_3v3 {
compatible = "regulator-fixed";
regulator-name = "vdd_3v3";
@ -547,10 +568,6 @@
gpios = <&gpio2 8 GPIO_ACTIVE_LOW>;
};
&pcie1_ep {
gpios = <&gpio2 8 GPIO_ACTIVE_LOW>;
};
&mcasp3 {
#sound-dai-cells = <0>;
assigned-clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 24>;

View File

@ -101,7 +101,7 @@
initial-mode = <1>; /* initialize in HUB mode */
disabled-ports = <1>;
intn-gpios = <&pio 7 5 GPIO_ACTIVE_HIGH>; /* PH5 */
reset-gpios = <&pio 4 16 GPIO_ACTIVE_HIGH>; /* PE16 */
reset-gpios = <&pio 4 16 GPIO_ACTIVE_LOW>; /* PE16 */
connect-gpios = <&pio 4 17 GPIO_ACTIVE_HIGH>; /* PE17 */
refclk-frequency = <19200000>;
};

View File

@ -146,10 +146,9 @@ ARM_BE8(orr r7, r7, #(1 << 25)) @ HSCTLR.EE
#if !defined(ZIMAGE) && defined(CONFIG_ARM_ARCH_TIMER)
@ make CNTP_* and CNTPCT accessible from PL1
mrc p15, 0, r7, c0, c1, 1 @ ID_PFR1
lsr r7, #16
and r7, #0xf
cmp r7, #1
bne 1f
ubfx r7, r7, #16, #4
teq r7, #0
beq 1f
mrc p15, 4, r7, c14, c1, 0 @ CNTHCTL
orr r7, r7, #3 @ PL1PCEN | PL1PCTEN
mcr p15, 4, r7, c14, c1, 0 @ CNTHCTL

View File

@ -16,7 +16,7 @@
OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
targets := Image Image.gz
targets := Image Image.bz2 Image.gz Image.lz4 Image.lzma Image.lzo
$(obj)/Image: vmlinux FORCE
$(call if_changed,objcopy)

View File

@ -361,6 +361,8 @@
bluetooth {
compatible = "brcm,bcm43438-bt";
interrupt-parent = <&gpio_intc>;
interrupts = <95 IRQ_TYPE_LEVEL_HIGH>;
shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>;
max-speed = <2000000>;
clocks = <&wifi32k>;

View File

@ -889,8 +889,8 @@ static void print_parisc_device(struct parisc_device *dev)
static int count;
print_pa_hwpath(dev, hw_path);
pr_info("%d. %s at 0x%px [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
++count, dev->name, (void*) dev->hpa.start, hw_path, dev->id.hw_type,
pr_info("%d. %s at %pap [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
++count, dev->name, &(dev->hpa.start), hw_path, dev->id.hw_type,
dev->id.hversion_rev, dev->id.hversion, dev->id.sversion);
if (dev->num_addrs) {

View File

@ -63,6 +63,7 @@ fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe1000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */
pcsphy0: ethernet-phy@0 {
reg = <0x0>;

View File

@ -60,6 +60,7 @@ fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xf1000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */
pcsphy6: ethernet-phy@0 {
reg = <0x0>;

View File

@ -63,6 +63,7 @@ fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe3000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */
pcsphy1: ethernet-phy@0 {
reg = <0x0>;

View File

@ -60,6 +60,7 @@ fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xf3000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */
pcsphy7: ethernet-phy@0 {
reg = <0x0>;

View File

@ -59,6 +59,7 @@ fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe1000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */
pcsphy0: ethernet-phy@0 {
reg = <0x0>;

View File

@ -59,6 +59,7 @@ fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe3000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */
pcsphy1: ethernet-phy@0 {
reg = <0x0>;

View File

@ -59,6 +59,7 @@ fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe5000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */
pcsphy2: ethernet-phy@0 {
reg = <0x0>;

View File

@ -59,6 +59,7 @@ fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe7000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */
pcsphy3: ethernet-phy@0 {
reg = <0x0>;

View File

@ -59,6 +59,7 @@ fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe9000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */
pcsphy4: ethernet-phy@0 {
reg = <0x0>;

View File

@ -59,6 +59,7 @@ fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xeb000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */
pcsphy5: ethernet-phy@0 {
reg = <0x0>;

View File

@ -60,6 +60,7 @@ fman@500000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xf1000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */
pcsphy14: ethernet-phy@0 {
reg = <0x0>;

View File

@ -60,6 +60,7 @@ fman@500000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xf3000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */
pcsphy15: ethernet-phy@0 {
reg = <0x0>;

View File

@ -59,6 +59,7 @@ fman@500000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe1000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */
pcsphy8: ethernet-phy@0 {
reg = <0x0>;

View File

@ -59,6 +59,7 @@ fman@500000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe3000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */
pcsphy9: ethernet-phy@0 {
reg = <0x0>;

View File

@ -59,6 +59,7 @@ fman@500000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe5000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */
pcsphy10: ethernet-phy@0 {
reg = <0x0>;

View File

@ -59,6 +59,7 @@ fman@500000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe7000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */
pcsphy11: ethernet-phy@0 {
reg = <0x0>;

View File

@ -59,6 +59,7 @@ fman@500000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe9000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */
pcsphy12: ethernet-phy@0 {
reg = <0x0>;

View File

@ -59,6 +59,7 @@ fman@500000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xeb000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */
pcsphy13: ethernet-phy@0 {
reg = <0x0>;

View File

@ -58,7 +58,8 @@ quiet_cmd_vdsold = VDSOLD $@
cmd_vdsold = $(CC) $(KBUILD_CFLAGS) $(call cc-option, -no-pie) -nostdlib -nostartfiles $(SYSCFLAGS_$(@F)) \
-Wl,-T,$(filter-out FORCE,$^) -o $@.tmp && \
$(CROSS_COMPILE)objcopy \
$(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@
$(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \
rm $@.tmp
# install commands for the unstripped file
quiet_cmd_vdso_install = INSTALL $@

View File

@ -15,6 +15,7 @@
#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910
#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f
#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f
#define PCI_DEVICE_ID_INTEL_SKL_E3_IMC 0x1918
#define PCI_DEVICE_ID_INTEL_KBL_Y_IMC 0x590c
#define PCI_DEVICE_ID_INTEL_KBL_U_IMC 0x5904
#define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC 0x5914
@ -657,6 +658,10 @@ static const struct pci_device_id skl_uncore_pci_ids[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
{ /* IMC */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_E3_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
{ /* IMC */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
@ -826,6 +831,7 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */
IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */
IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */
IMC_DEV(SKL_E3_IMC, &skl_uncore_pci_driver), /* Xeon E3 V5 Gen Core processor */
IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver), /* 7th Gen Core Y */
IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U */
IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U Quad Core */

View File

@ -369,11 +369,6 @@
#define SNR_M2M_PCI_PMON_BOX_CTL 0x438
#define SNR_M2M_PCI_PMON_UMASK_EXT 0xff
/* SNR PCIE3 */
#define SNR_PCIE3_PCI_PMON_CTL0 0x508
#define SNR_PCIE3_PCI_PMON_CTR0 0x4e8
#define SNR_PCIE3_PCI_PMON_BOX_CTL 0x4e4
/* SNR IMC */
#define SNR_IMC_MMIO_PMON_FIXED_CTL 0x54
#define SNR_IMC_MMIO_PMON_FIXED_CTR 0x38
@ -4328,27 +4323,12 @@ static struct intel_uncore_type snr_uncore_m2m = {
.format_group = &snr_m2m_uncore_format_group,
};
static struct intel_uncore_type snr_uncore_pcie3 = {
.name = "pcie3",
.num_counters = 4,
.num_boxes = 1,
.perf_ctr_bits = 48,
.perf_ctr = SNR_PCIE3_PCI_PMON_CTR0,
.event_ctl = SNR_PCIE3_PCI_PMON_CTL0,
.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
.box_ctl = SNR_PCIE3_PCI_PMON_BOX_CTL,
.ops = &ivbep_uncore_pci_ops,
.format_group = &ivbep_uncore_format_group,
};
enum {
SNR_PCI_UNCORE_M2M,
SNR_PCI_UNCORE_PCIE3,
};
static struct intel_uncore_type *snr_pci_uncores[] = {
[SNR_PCI_UNCORE_M2M] = &snr_uncore_m2m,
[SNR_PCI_UNCORE_PCIE3] = &snr_uncore_pcie3,
NULL,
};
@ -4357,10 +4337,6 @@ static const struct pci_device_id snr_uncore_pci_ids[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
},
{ /* PCIe3 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
},
{ /* end: all zeroes */ }
};

View File

@ -1970,7 +1970,7 @@ static int rdt_get_tree(struct fs_context *fc)
if (rdt_mon_capable) {
ret = mongroup_create_dir(rdtgroup_default.kn,
NULL, "mon_groups",
&rdtgroup_default, "mon_groups",
&kn_mongrp);
if (ret < 0)
goto out_info;
@ -2205,7 +2205,11 @@ static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) {
free_rmid(sentry->mon.rmid);
list_del(&sentry->mon.crdtgrp_list);
kfree(sentry);
if (atomic_read(&sentry->waitcount) != 0)
sentry->flags = RDT_DELETED;
else
kfree(sentry);
}
}
@ -2243,7 +2247,11 @@ static void rmdir_all_sub(void)
kernfs_remove(rdtgrp->kn);
list_del(&rdtgrp->rdtgroup_list);
kfree(rdtgrp);
if (atomic_read(&rdtgrp->waitcount) != 0)
rdtgrp->flags = RDT_DELETED;
else
kfree(rdtgrp);
}
/* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
update_closid_rmid(cpu_online_mask, &rdtgroup_default);
@ -2446,7 +2454,7 @@ static int mkdir_mondata_all(struct kernfs_node *parent_kn,
/*
* Create the mon_data directory first.
*/
ret = mongroup_create_dir(parent_kn, NULL, "mon_data", &kn);
ret = mongroup_create_dir(parent_kn, prgrp, "mon_data", &kn);
if (ret)
return ret;
@ -2645,7 +2653,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
uint files = 0;
int ret;
prdtgrp = rdtgroup_kn_lock_live(prgrp_kn);
prdtgrp = rdtgroup_kn_lock_live(parent_kn);
if (!prdtgrp) {
ret = -ENODEV;
goto out_unlock;
@ -2718,7 +2726,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
kernfs_activate(kn);
/*
* The caller unlocks the prgrp_kn upon success.
* The caller unlocks the parent_kn upon success.
*/
return 0;
@ -2729,7 +2737,7 @@ out_destroy:
out_free_rgrp:
kfree(rdtgrp);
out_unlock:
rdtgroup_kn_unlock(prgrp_kn);
rdtgroup_kn_unlock(parent_kn);
return ret;
}
@ -2767,7 +2775,7 @@ static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn,
*/
list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list);
rdtgroup_kn_unlock(prgrp_kn);
rdtgroup_kn_unlock(parent_kn);
return ret;
}
@ -2810,7 +2818,7 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
* Create an empty mon_groups directory to hold the subset
* of tasks and cpus to monitor.
*/
ret = mongroup_create_dir(kn, NULL, "mon_groups", NULL);
ret = mongroup_create_dir(kn, rdtgrp, "mon_groups", NULL);
if (ret) {
rdt_last_cmd_puts("kernfs subdir error\n");
goto out_del_list;
@ -2826,7 +2834,7 @@ out_id_free:
out_common_fail:
mkdir_rdt_prepare_clean(rdtgrp);
out_unlock:
rdtgroup_kn_unlock(prgrp_kn);
rdtgroup_kn_unlock(parent_kn);
return ret;
}
@ -2952,13 +2960,13 @@ static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
closid_free(rdtgrp->closid);
free_rmid(rdtgrp->mon.rmid);
rdtgroup_ctrl_remove(kn, rdtgrp);
/*
* Free all the child monitor group rmids.
*/
free_all_child_rdtgrp(rdtgrp);
rdtgroup_ctrl_remove(kn, rdtgrp);
return 0;
}

View File

@ -15,10 +15,11 @@
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/module.h>
#include <linux/spinlock.h>
struct ttyprintk_port {
struct tty_port port;
struct mutex port_write_mutex;
spinlock_t spinlock;
};
static struct ttyprintk_port tpk_port;
@ -99,11 +100,12 @@ static int tpk_open(struct tty_struct *tty, struct file *filp)
static void tpk_close(struct tty_struct *tty, struct file *filp)
{
struct ttyprintk_port *tpkp = tty->driver_data;
unsigned long flags;
mutex_lock(&tpkp->port_write_mutex);
spin_lock_irqsave(&tpkp->spinlock, flags);
/* flush tpk_printk buffer */
tpk_printk(NULL, 0);
mutex_unlock(&tpkp->port_write_mutex);
spin_unlock_irqrestore(&tpkp->spinlock, flags);
tty_port_close(&tpkp->port, tty, filp);
}
@ -115,13 +117,14 @@ static int tpk_write(struct tty_struct *tty,
const unsigned char *buf, int count)
{
struct ttyprintk_port *tpkp = tty->driver_data;
unsigned long flags;
int ret;
/* exclusive use of tpk_printk within this tty */
mutex_lock(&tpkp->port_write_mutex);
spin_lock_irqsave(&tpkp->spinlock, flags);
ret = tpk_printk(buf, count);
mutex_unlock(&tpkp->port_write_mutex);
spin_unlock_irqrestore(&tpkp->spinlock, flags);
return ret;
}
@ -171,7 +174,7 @@ static int __init ttyprintk_init(void)
{
int ret = -ENOMEM;
mutex_init(&tpk_port.port_write_mutex);
spin_lock_init(&tpk_port.spinlock);
ttyprintk_driver = tty_alloc_driver(1,
TTY_DRIVER_RESET_TERMIOS |

View File

@ -134,7 +134,7 @@ static DEFINE_SPINLOCK(ssp3_lock);
static const char *ssp_parent_names[] = {"vctcxo_4", "vctcxo_2", "vctcxo", "pll1_16"};
static DEFINE_SPINLOCK(timer_lock);
static const char *timer_parent_names[] = {"clk32", "vctcxo_2", "vctcxo_4", "vctcxo"};
static const char *timer_parent_names[] = {"clk32", "vctcxo_4", "vctcxo_2", "vctcxo"};
static DEFINE_SPINLOCK(reset_lock);

View File

@ -23,9 +23,9 @@
*/
static const char * const ar100_r_apb2_parents[] = { "osc24M", "osc32k",
"pll-periph0", "iosc" };
"iosc", "pll-periph0" };
static const struct ccu_mux_var_prediv ar100_r_apb2_predivs[] = {
{ .index = 2, .shift = 0, .width = 5 },
{ .index = 3, .shift = 0, .width = 5 },
};
static struct ccu_div ar100_clk = {

View File

@ -51,19 +51,7 @@ static struct ccu_div ar100_clk = {
static CLK_FIXED_FACTOR_HW(ahb0_clk, "ahb0", &ar100_clk.common.hw, 1, 1, 0);
static struct ccu_div apb0_clk = {
.div = _SUNXI_CCU_DIV_FLAGS(0, 2, CLK_DIVIDER_POWER_OF_TWO),
.common = {
.reg = 0x0c,
.hw.init = CLK_HW_INIT_HW("apb0",
&ahb0_clk.hw,
&ccu_div_ops,
0),
},
};
static SUNXI_CCU_M(a83t_apb0_clk, "apb0", "ahb0", 0x0c, 0, 2, 0);
static SUNXI_CCU_M(apb0_clk, "apb0", "ahb0", 0x0c, 0, 2, 0);
/*
* Define the parent as an array that can be reused to save space
@ -127,7 +115,7 @@ static struct ccu_mp a83t_ir_clk = {
static struct ccu_common *sun8i_a83t_r_ccu_clks[] = {
&ar100_clk.common,
&a83t_apb0_clk.common,
&apb0_clk.common,
&apb0_pio_clk.common,
&apb0_ir_clk.common,
&apb0_timer_clk.common,
@ -167,7 +155,7 @@ static struct clk_hw_onecell_data sun8i_a83t_r_hw_clks = {
.hws = {
[CLK_AR100] = &ar100_clk.common.hw,
[CLK_AHB0] = &ahb0_clk.hw,
[CLK_APB0] = &a83t_apb0_clk.common.hw,
[CLK_APB0] = &apb0_clk.common.hw,
[CLK_APB0_PIO] = &apb0_pio_clk.common.hw,
[CLK_APB0_IR] = &apb0_ir_clk.common.hw,
[CLK_APB0_TIMER] = &apb0_timer_clk.common.hw,
@ -282,9 +270,6 @@ static void __init sunxi_r_ccu_init(struct device_node *node,
static void __init sun8i_a83t_r_ccu_setup(struct device_node *node)
{
/* Fix apb0 bus gate parents here */
apb0_gate_parent[0] = &a83t_apb0_clk.common.hw;
sunxi_r_ccu_init(node, &sun8i_a83t_r_ccu_desc);
}
CLK_OF_DECLARE(sun8i_a83t_r_ccu, "allwinner,sun8i-a83t-r-ccu",

View File

@ -618,7 +618,7 @@ static struct clk_hw_onecell_data sun8i_v3s_hw_clks = {
[CLK_MBUS] = &mbus_clk.common.hw,
[CLK_MIPI_CSI] = &mipi_csi_clk.common.hw,
},
.num = CLK_NUMBER,
.num = CLK_PLL_DDR1 + 1,
};
static struct clk_hw_onecell_data sun8i_v3_hw_clks = {
@ -700,7 +700,7 @@ static struct clk_hw_onecell_data sun8i_v3_hw_clks = {
[CLK_MBUS] = &mbus_clk.common.hw,
[CLK_MIPI_CSI] = &mipi_csi_clk.common.hw,
},
.num = CLK_NUMBER,
.num = CLK_I2S0 + 1,
};
static struct ccu_reset_map sun8i_v3s_ccu_resets[] = {

View File

@ -51,6 +51,4 @@
#define CLK_PLL_DDR1 74
#define CLK_NUMBER (CLK_I2S0 + 1)
#endif /* _CCU_SUN8I_H3_H_ */

View File

@ -234,7 +234,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
int latency_req = cpuidle_governor_latency_req(dev->cpu);
unsigned int duration_us, hits, misses, early_hits;
int max_early_idx, constraint_idx, idx, i;
int max_early_idx, prev_max_early_idx, constraint_idx, idx, i;
ktime_t delta_tick;
if (dev->last_state_idx >= 0) {
@ -251,6 +251,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
misses = 0;
early_hits = 0;
max_early_idx = -1;
prev_max_early_idx = -1;
constraint_idx = drv->state_count;
idx = -1;
@ -303,6 +304,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
*/
if (!(tick_nohz_tick_stopped() &&
drv->states[idx].target_residency < TICK_USEC)) {
prev_max_early_idx = max_early_idx;
early_hits = cpu_data->states[i].early_hits;
max_early_idx = idx;
}
@ -329,6 +331,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
if (early_hits < cpu_data->states[i].early_hits &&
!(tick_nohz_tick_stopped() &&
drv->states[i].target_residency < TICK_USEC)) {
prev_max_early_idx = max_early_idx;
early_hits = cpu_data->states[i].early_hits;
max_early_idx = i;
}
@ -342,9 +345,19 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* "early hits" metric, but if that cannot be determined, just use the
* state selected so far.
*/
if (hits <= misses && max_early_idx >= 0) {
idx = max_early_idx;
duration_us = drv->states[idx].target_residency;
if (hits <= misses) {
/*
* The current candidate state is not suitable, so take the one
* whose "early hits" metric is the maximum for the range of
* shallower states.
*/
if (idx == max_early_idx)
max_early_idx = prev_max_early_idx;
if (max_early_idx >= 0) {
idx = max_early_idx;
duration_us = drv->states[idx].target_residency;
}
}
/*

View File

@ -1112,6 +1112,14 @@ err_out:
}
EXPORT_SYMBOL(devfreq_remove_governor);
static ssize_t name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct devfreq *devfreq = to_devfreq(dev);
return sprintf(buf, "%s\n", dev_name(devfreq->dev.parent));
}
static DEVICE_ATTR_RO(name);
static ssize_t governor_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@ -1440,6 +1448,7 @@ static ssize_t trans_stat_show(struct device *dev,
static DEVICE_ATTR_RO(trans_stat);
static struct attribute *devfreq_attrs[] = {
&dev_attr_name.attr,
&dev_attr_governor.attr,
&dev_attr_available_governors.attr,
&dev_attr_cur_freq.attr,

View File

@ -484,10 +484,7 @@ static int evdev_open(struct inode *inode, struct file *file)
struct evdev_client *client;
int error;
client = kzalloc(struct_size(client, buffer, bufsize),
GFP_KERNEL | __GFP_NOWARN);
if (!client)
client = vzalloc(struct_size(client, buffer, bufsize));
client = kvzalloc(struct_size(client, buffer, bufsize), GFP_KERNEL);
if (!client)
return -ENOMEM;

View File

@ -108,9 +108,16 @@ static int max77650_onkey_probe(struct platform_device *pdev)
return input_register_device(onkey->input);
}
static const struct of_device_id max77650_onkey_of_match[] = {
{ .compatible = "maxim,max77650-onkey" },
{ }
};
MODULE_DEVICE_TABLE(of, max77650_onkey_of_match);
static struct platform_driver max77650_onkey_driver = {
.driver = {
.name = "max77650-onkey",
.of_match_table = max77650_onkey_of_match,
},
.probe = max77650_onkey_probe,
};

View File

@ -135,9 +135,16 @@ err_node_put:
return rv;
}
static const struct of_device_id max77650_led_of_match[] = {
{ .compatible = "maxim,max77650-led" },
{ }
};
MODULE_DEVICE_TABLE(of, max77650_led_of_match);
static struct platform_driver max77650_led_driver = {
.driver = {
.name = "max77650-led",
.of_match_table = max77650_led_of_match,
},
.probe = max77650_led_probe,
};

View File

@ -3420,10 +3420,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (r)
goto out_flags_changed;
dm_pool_register_pre_commit_callback(pt->pool->pmd,
metadata_pre_commit_callback,
pt);
pt->callbacks.congested_fn = pool_is_congested;
dm_table_add_target_callbacks(ti->table, &pt->callbacks);
@ -3587,6 +3583,9 @@ static int pool_preresume(struct dm_target *ti)
if (r)
return r;
dm_pool_register_pre_commit_callback(pool->pmd,
metadata_pre_commit_callback, pt);
r = maybe_resize_data_dev(ti, &need_commit1);
if (r)
return r;

View File

@ -554,7 +554,7 @@ static int af9005_boot_packet(struct usb_device *udev, int type, u8 *reply,
u8 *buf, int size)
{
u16 checksum;
int act_len, i, ret;
int act_len = 0, i, ret;
memset(buf, 0, size);
buf[0] = (u8) (FW_BULKOUT_SIZE & 0xff);

View File

@ -230,18 +230,22 @@ static struct rc_map_table rc_map_digitv_table[] = {
static int digitv_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
{
int i;
int ret, i;
u8 key[5];
u8 b[4] = { 0 };
*event = 0;
*state = REMOTE_NO_KEY_PRESSED;
digitv_ctrl_msg(d,USB_READ_REMOTE,0,NULL,0,&key[1],4);
ret = digitv_ctrl_msg(d, USB_READ_REMOTE, 0, NULL, 0, &key[1], 4);
if (ret)
return ret;
/* Tell the device we've read the remote. Not sure how necessary
this is, but the Nebula SDK does it. */
digitv_ctrl_msg(d,USB_WRITE_REMOTE,0,b,4,NULL,0);
ret = digitv_ctrl_msg(d, USB_WRITE_REMOTE, 0, b, 4, NULL, 0);
if (ret)
return ret;
/* if something is inside the buffer, simulate key press */
if (key[1] != 0)

View File

@ -12,7 +12,7 @@
int dvb_usb_generic_rw(struct dvb_usb_device *d, u8 *wbuf, u16 wlen, u8 *rbuf,
u16 rlen, int delay_ms)
{
int actlen,ret = -ENOMEM;
int actlen = 0, ret = -ENOMEM;
if (!d || wbuf == NULL || wlen == 0)
return -EINVAL;

View File

@ -96,10 +96,14 @@ static int vp7045_power_ctrl(struct dvb_usb_device *d, int onoff)
static int vp7045_rc_query(struct dvb_usb_device *d)
{
int ret;
u8 key;
vp7045_usb_op(d,RC_VAL_READ,NULL,0,&key,1,20);
deb_rc("remote query key: %x %d\n",key,key);
ret = vp7045_usb_op(d, RC_VAL_READ, NULL, 0, &key, 1, 20);
if (ret)
return ret;
deb_rc("remote query key: %x\n", key);
if (key != 0x44) {
/*
@ -115,15 +119,18 @@ static int vp7045_rc_query(struct dvb_usb_device *d)
static int vp7045_read_eeprom(struct dvb_usb_device *d,u8 *buf, int len, int offset)
{
int i = 0;
u8 v,br[2];
int i, ret;
u8 v, br[2];
for (i=0; i < len; i++) {
v = offset + i;
vp7045_usb_op(d,GET_EE_VALUE,&v,1,br,2,5);
ret = vp7045_usb_op(d, GET_EE_VALUE, &v, 1, br, 2, 5);
if (ret)
return ret;
buf[i] = br[1];
}
deb_info("VP7045 EEPROM read (offs: %d, len: %d) : ",offset, i);
debug_dump(buf,i,deb_info);
deb_info("VP7045 EEPROM read (offs: %d, len: %d) : ", offset, i);
debug_dump(buf, i, deb_info);
return 0;
}

View File

@ -1461,7 +1461,7 @@ int gspca_dev_probe2(struct usb_interface *intf,
pr_err("couldn't kzalloc gspca struct\n");
return -ENOMEM;
}
gspca_dev->usb_buf = kmalloc(USB_BUF_SZ, GFP_KERNEL);
gspca_dev->usb_buf = kzalloc(USB_BUF_SZ, GFP_KERNEL);
if (!gspca_dev->usb_buf) {
pr_err("out of memory\n");
ret = -ENOMEM;

View File

@ -274,7 +274,7 @@ void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
void lkdtm_UNSET_SMEP(void)
{
#ifdef CONFIG_X86_64
#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML)
#define MOV_CR4_DEPTH 64
void (*direct_write_cr4)(unsigned long val);
unsigned char *insn;

View File

@ -70,8 +70,7 @@ static void *seq_tab_start(struct seq_file *seq, loff_t *pos)
static void *seq_tab_next(struct seq_file *seq, void *v, loff_t *pos)
{
v = seq_tab_get_idx(seq->private, *pos + 1);
if (v)
++*pos;
++(*pos);
return v;
}

View File

@ -682,8 +682,7 @@ static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
v = l2t_get_idx(seq, *pos);
if (v)
++*pos;
++(*pos);
return v;
}

View File

@ -110,7 +110,7 @@ do { \
/* Interface Mode Register (IF_MODE) */
#define IF_MODE_MASK 0x00000003 /* 30-31 Mask on i/f mode bits */
#define IF_MODE_XGMII 0x00000000 /* 30-31 XGMII (10G) interface */
#define IF_MODE_10G 0x00000000 /* 30-31 10G interface */
#define IF_MODE_GMII 0x00000002 /* 30-31 GMII (1G) interface */
#define IF_MODE_RGMII 0x00000004
#define IF_MODE_RGMII_AUTO 0x00008000
@ -440,7 +440,7 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
tmp = 0;
switch (phy_if) {
case PHY_INTERFACE_MODE_XGMII:
tmp |= IF_MODE_XGMII;
tmp |= IF_MODE_10G;
break;
default:
tmp |= IF_MODE_GMII;

View File

@ -49,6 +49,7 @@ struct tgec_mdio_controller {
struct mdio_fsl_priv {
struct tgec_mdio_controller __iomem *mdio_base;
bool is_little_endian;
bool has_a011043;
};
static u32 xgmac_read32(void __iomem *regs,
@ -226,7 +227,8 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
return ret;
/* Return all Fs if nothing was there */
if (xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) {
if ((xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) &&
!priv->has_a011043) {
dev_err(&bus->dev,
"Error while reading PHY%d reg at %d.%hhu\n",
phy_id, dev_addr, regnum);
@ -274,6 +276,9 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
priv->is_little_endian = of_property_read_bool(pdev->dev.of_node,
"little-endian");
priv->has_a011043 = of_property_read_bool(pdev->dev.of_node,
"fsl,erratum-a011043");
ret = of_mdiobus_register(bus, np);
if (ret) {
dev_err(&pdev->dev, "cannot register MDIO bus\n");

View File

@ -185,13 +185,12 @@ struct e1000_phy_regs {
/* board specific private data structure */
struct e1000_adapter {
struct timer_list watchdog_timer;
struct timer_list phy_info_timer;
struct timer_list blink_timer;
struct work_struct reset_task;
struct delayed_work watchdog_task;
struct workqueue_struct *e1000_workqueue;
struct work_struct watchdog_task;
const struct e1000_info *ei;

View File

@ -1780,8 +1780,7 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
}
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
mod_delayed_work(adapter->e1000_workqueue,
&adapter->watchdog_task, HZ);
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
/* Reset on uncorrectable ECC error */
@ -1861,8 +1860,7 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data)
}
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
mod_delayed_work(adapter->e1000_workqueue,
&adapter->watchdog_task, HZ);
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
/* Reset on uncorrectable ECC error */
@ -1907,8 +1905,7 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
hw->mac.get_link_status = true;
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
mod_delayed_work(adapter->e1000_workqueue,
&adapter->watchdog_task, HZ);
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
if (!test_bit(__E1000_DOWN, &adapter->state))
@ -4281,6 +4278,7 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset)
napi_synchronize(&adapter->napi);
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
spin_lock(&adapter->stats64_lock);
@ -5152,11 +5150,25 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
}
}
/**
* e1000_watchdog - Timer Call-back
* @data: pointer to adapter cast into an unsigned long
**/
static void e1000_watchdog(struct timer_list *t)
{
struct e1000_adapter *adapter = from_timer(adapter, t, watchdog_timer);
/* Do the rest outside of interrupt context */
schedule_work(&adapter->watchdog_task);
/* TODO: make this use queue_delayed_work() */
}
static void e1000_watchdog_task(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work,
struct e1000_adapter,
watchdog_task.work);
watchdog_task);
struct net_device *netdev = adapter->netdev;
struct e1000_mac_info *mac = &adapter->hw.mac;
struct e1000_phy_info *phy = &adapter->hw.phy;
@ -5404,9 +5416,8 @@ link_up:
/* Reset the timer */
if (!test_bit(__E1000_DOWN, &adapter->state))
queue_delayed_work(adapter->e1000_workqueue,
&adapter->watchdog_task,
round_jiffies(2 * HZ));
mod_timer(&adapter->watchdog_timer,
round_jiffies(jiffies + 2 * HZ));
}
#define E1000_TX_FLAGS_CSUM 0x00000001
@ -7259,21 +7270,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_eeprom;
}
adapter->e1000_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
e1000e_driver_name);
if (!adapter->e1000_workqueue) {
err = -ENOMEM;
goto err_workqueue;
}
INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog_task);
queue_delayed_work(adapter->e1000_workqueue, &adapter->watchdog_task,
0);
timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0);
timer_setup(&adapter->phy_info_timer, e1000_update_phy_info, 0);
INIT_WORK(&adapter->reset_task, e1000_reset_task);
INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
@ -7367,9 +7368,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_register:
flush_workqueue(adapter->e1000_workqueue);
destroy_workqueue(adapter->e1000_workqueue);
err_workqueue:
if (!(adapter->flags & FLAG_HAS_AMT))
e1000e_release_hw_control(adapter);
err_eeprom:
@ -7407,26 +7405,22 @@ static void e1000_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
bool down = test_bit(__E1000_DOWN, &adapter->state);
e1000e_ptp_remove(adapter);
/* The timers may be rescheduled, so explicitly disable them
* from being rescheduled.
*/
if (!down)
set_bit(__E1000_DOWN, &adapter->state);
set_bit(__E1000_DOWN, &adapter->state);
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
cancel_work_sync(&adapter->reset_task);
cancel_work_sync(&adapter->watchdog_task);
cancel_work_sync(&adapter->downshift_task);
cancel_work_sync(&adapter->update_phy_task);
cancel_work_sync(&adapter->print_hang_task);
cancel_delayed_work(&adapter->watchdog_task);
flush_workqueue(adapter->e1000_workqueue);
destroy_workqueue(adapter->e1000_workqueue);
if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
cancel_work_sync(&adapter->tx_hwtstamp_work);
if (adapter->tx_hwtstamp_skb) {
@ -7435,9 +7429,6 @@ static void e1000_remove(struct pci_dev *pdev)
}
}
/* Don't lie to e1000_close() down the road. */
if (!down)
clear_bit(__E1000_DOWN, &adapter->state);
unregister_netdev(netdev);
if (pci_dev_run_wake(pdev))

View File

@ -2322,6 +2322,22 @@ static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
return ret;
}
/**
* i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTHCHNL
* @vqs: virtchnl_queue_select structure containing bitmaps to validate
*
* Returns true if validation was successful, else false.
*/
static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
{
if ((!vqs->rx_queues && !vqs->tx_queues) ||
vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
return false;
return true;
}
/**
* i40e_vc_enable_queues_msg
* @vf: pointer to the VF info
@ -2347,7 +2363,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
goto error_param;
}
if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@ -2409,9 +2425,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
goto error_param;
}
if ((vqs->rx_queues == 0 && vqs->tx_queues == 0) ||
vqs->rx_queues > I40E_MAX_VF_QUEUES ||
vqs->tx_queues > I40E_MAX_VF_QUEUES) {
if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}

View File

@ -415,4 +415,6 @@ void iavf_enable_channels(struct iavf_adapter *adapter);
void iavf_disable_channels(struct iavf_adapter *adapter);
void iavf_add_cloud_filter(struct iavf_adapter *adapter);
void iavf_del_cloud_filter(struct iavf_adapter *adapter);
struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
const u8 *macaddr);
#endif /* _IAVF_H_ */

View File

@ -743,9 +743,8 @@ iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
*
* Returns ptr to the filter object or NULL when no memory available.
**/
static struct
iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
const u8 *macaddr)
struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
const u8 *macaddr)
{
struct iavf_mac_filter *f;
@ -2065,9 +2064,9 @@ static void iavf_reset_task(struct work_struct *work)
struct virtchnl_vf_resource *vfres = adapter->vf_res;
struct net_device *netdev = adapter->netdev;
struct iavf_hw *hw = &adapter->hw;
struct iavf_mac_filter *f, *ftmp;
struct iavf_vlan_filter *vlf;
struct iavf_cloud_filter *cf;
struct iavf_mac_filter *f;
u32 reg_val;
int i = 0, err;
bool running;
@ -2181,6 +2180,16 @@ continue_reset:
spin_lock_bh(&adapter->mac_vlan_list_lock);
/* Delete filter for the current MAC address, it could have
* been changed by the PF via administratively set MAC.
* Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
*/
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
list_del(&f->list);
kfree(f);
}
}
/* re-add all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) {
f->add = true;

View File

@ -1359,6 +1359,9 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
ether_addr_copy(netdev->perm_addr,
adapter->hw.mac.addr);
}
spin_lock_bh(&adapter->mac_vlan_list_lock);
iavf_add_filter(adapter, adapter->hw.mac.addr);
spin_unlock_bh(&adapter->mac_vlan_list_lock);
iavf_process_config(adapter);
}
break;

View File

@ -530,7 +530,7 @@ static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw)
dev_spec->module_plugged = true;
if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
hw->phy.media_type = e1000_media_type_internal_serdes;
} else if (eth_flags->e100_base_fx) {
} else if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) {
dev_spec->sgmii_active = true;
hw->phy.media_type = e1000_media_type_internal_serdes;
} else if (eth_flags->e1000_base_t) {
@ -657,14 +657,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
break;
}
/* do not change link mode for 100BaseFX */
if (dev_spec->eth_flags.e100_base_fx)
break;
/* change current link mode setting */
ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
if (hw->phy.media_type == e1000_media_type_copper)
if (dev_spec->sgmii_active)
ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
else
ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;

View File

@ -181,7 +181,7 @@ static int igb_get_link_ksettings(struct net_device *netdev,
advertising &= ~ADVERTISED_1000baseKX_Full;
}
}
if (eth_flags->e100_base_fx) {
if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) {
supported |= SUPPORTED_100baseT_Full;
advertising |= ADVERTISED_100baseT_Full;
}

View File

@ -5239,7 +5239,7 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct hlist_node *node2;
struct ixgbe_fdir_filter *filter;
u64 action;
u8 queue;
spin_lock(&adapter->fdir_perfect_lock);
@ -5248,17 +5248,34 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
hlist_for_each_entry_safe(filter, node2,
&adapter->fdir_filter_list, fdir_node) {
action = filter->action;
if (action != IXGBE_FDIR_DROP_QUEUE && action != 0)
action =
(action >> ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) - 1;
if (filter->action == IXGBE_FDIR_DROP_QUEUE) {
queue = IXGBE_FDIR_DROP_QUEUE;
} else {
u32 ring = ethtool_get_flow_spec_ring(filter->action);
u8 vf = ethtool_get_flow_spec_ring_vf(filter->action);
if (!vf && (ring >= adapter->num_rx_queues)) {
e_err(drv, "FDIR restore failed without VF, ring: %u\n",
ring);
continue;
} else if (vf &&
((vf > adapter->num_vfs) ||
ring >= adapter->num_rx_queues_per_pool)) {
e_err(drv, "FDIR restore failed with VF, vf: %hhu, ring: %u\n",
vf, ring);
continue;
}
/* Map the ring onto the absolute queue index */
if (!vf)
queue = adapter->rx_ring[ring]->reg_idx;
else
queue = ((vf - 1) *
adapter->num_rx_queues_per_pool) + ring;
}
ixgbe_fdir_write_perfect_filter_82599(hw,
&filter->filter,
filter->sw_idx,
(action == IXGBE_FDIR_DROP_QUEUE) ?
IXGBE_FDIR_DROP_QUEUE :
adapter->rx_ring[action]->reg_idx);
&filter->filter, filter->sw_idx, queue);
}
spin_unlock(&adapter->fdir_perfect_lock);

View File

@ -2081,11 +2081,6 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
struct ixgbe_hw *hw = &adapter->hw;
int count = 0;
if ((netdev_uc_count(netdev)) > 10) {
pr_err("Too many unicast filters - No Space\n");
return -ENOSPC;
}
if (!netdev_uc_empty(netdev)) {
struct netdev_hw_addr *ha;

View File

@ -2043,6 +2043,7 @@ static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev,
break;
}
entry += p_hdr->size;
cond_resched();
}
p_dev->ahw->reset.seq_index = index;
}

View File

@ -703,6 +703,7 @@ static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter,
addr += 16;
reg_read -= 16;
ret += 16;
cond_resched();
}
out:
mutex_unlock(&adapter->ahw->mem_lock);
@ -1383,6 +1384,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
buf_offset += entry->hdr.cap_size;
entry_offset += entry->hdr.offset;
buffer = fw_dump->data + buf_offset;
cond_resched();
}
fw_dump->clr = 1;

View File

@ -1062,6 +1062,7 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
{QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
{QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
{QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
/* 3. Combined interface devices matching on interface number */
{QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */

View File

@ -29,7 +29,7 @@
#define NETNEXT_VERSION "10"
/* Information for net */
#define NET_VERSION "10"
#define NET_VERSION "11"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@ -63,6 +63,7 @@
#define PLA_LED_FEATURE 0xdd92
#define PLA_PHYAR 0xde00
#define PLA_BOOT_CTRL 0xe004
#define PLA_LWAKE_CTRL_REG 0xe007
#define PLA_GPHY_INTR_IMR 0xe022
#define PLA_EEE_CR 0xe040
#define PLA_EEEP_CR 0xe080
@ -90,6 +91,7 @@
#define PLA_TALLYCNT 0xe890
#define PLA_SFF_STS_7 0xe8de
#define PLA_PHYSTATUS 0xe908
#define PLA_CONFIG6 0xe90a /* CONFIG6 */
#define PLA_BP_BA 0xfc26
#define PLA_BP_0 0xfc28
#define PLA_BP_1 0xfc2a
@ -102,6 +104,7 @@
#define PLA_BP_EN 0xfc38
#define USB_USB2PHY 0xb41e
#define USB_SSPHYLINK1 0xb426
#define USB_SSPHYLINK2 0xb428
#define USB_U2P3_CTRL 0xb460
#define USB_CSR_DUMMY1 0xb464
@ -286,6 +289,9 @@
#define LINK_ON_WAKE_EN 0x0010
#define LINK_OFF_WAKE_EN 0x0008
/* PLA_CONFIG6 */
#define LANWAKE_CLR_EN BIT(0)
/* PLA_CONFIG5 */
#define BWF_EN 0x0040
#define MWF_EN 0x0020
@ -298,6 +304,7 @@
/* PLA_PHY_PWR */
#define TX_10M_IDLE_EN 0x0080
#define PFM_PWM_SWITCH 0x0040
#define TEST_IO_OFF BIT(4)
/* PLA_MAC_PWR_CTRL */
#define D3_CLK_GATED_EN 0x00004000
@ -310,6 +317,7 @@
#define MAC_CLK_SPDWN_EN BIT(15)
/* PLA_MAC_PWR_CTRL3 */
#define PLA_MCU_SPDWN_EN BIT(14)
#define PKT_AVAIL_SPDWN_EN 0x0100
#define SUSPEND_SPDWN_EN 0x0004
#define U1U2_SPDWN_EN 0x0002
@ -340,6 +348,9 @@
/* PLA_BOOT_CTRL */
#define AUTOLOAD_DONE 0x0002
/* PLA_LWAKE_CTRL_REG */
#define LANWAKE_PIN BIT(7)
/* PLA_SUSPEND_FLAG */
#define LINK_CHG_EVENT BIT(0)
@ -353,6 +364,9 @@
#define USB2PHY_SUSPEND 0x0001
#define USB2PHY_L1 0x0002
/* USB_SSPHYLINK1 */
#define DELAY_PHY_PWR_CHG BIT(1)
/* USB_SSPHYLINK2 */
#define pwd_dn_scale_mask 0x3ffe
#define pwd_dn_scale(x) ((x) << 1)
@ -3175,7 +3189,6 @@ static void rtl8153b_runtime_enable(struct r8152 *tp, bool enable)
r8153b_ups_en(tp, false);
r8153_queue_wake(tp, false);
rtl_runtime_suspend_enable(tp, false);
r8153_u2p3en(tp, true);
r8153b_u1u2en(tp, true);
}
}
@ -3703,7 +3716,6 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp)
r8153_aldps_en(tp, true);
r8152b_enable_fc(tp);
r8153_u2p3en(tp, true);
set_bit(PHY_RESET, &tp->flags);
}
@ -4004,6 +4016,8 @@ static void rtl8152_down(struct r8152 *tp)
static void rtl8153_up(struct r8152 *tp)
{
u32 ocp_data;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
@ -4011,6 +4025,19 @@ static void rtl8153_up(struct r8152 *tp)
r8153_u2p3en(tp, false);
r8153_aldps_en(tp, false);
r8153_first_init(tp);
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6);
ocp_data |= LANWAKE_CLR_EN;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data);
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG);
ocp_data &= ~LANWAKE_PIN;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data);
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_SSPHYLINK1);
ocp_data &= ~DELAY_PHY_PWR_CHG;
ocp_write_word(tp, MCU_TYPE_USB, USB_SSPHYLINK1, ocp_data);
r8153_aldps_en(tp, true);
switch (tp->version) {
@ -4029,11 +4056,17 @@ static void rtl8153_up(struct r8152 *tp)
static void rtl8153_down(struct r8152 *tp)
{
u32 ocp_data;
if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
rtl_drop_queued_tx(tp);
return;
}
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6);
ocp_data &= ~LANWAKE_CLR_EN;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data);
r8153_u1u2en(tp, false);
r8153_u2p3en(tp, false);
r8153_power_cut_en(tp, false);
@ -4044,6 +4077,8 @@ static void rtl8153_down(struct r8152 *tp)
static void rtl8153b_up(struct r8152 *tp)
{
u32 ocp_data;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
@ -4054,18 +4089,27 @@ static void rtl8153b_up(struct r8152 *tp)
r8153_first_init(tp);
ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_B);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
ocp_data &= ~PLA_MCU_SPDWN_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);
r8153_aldps_en(tp, true);
r8153_u2p3en(tp, true);
r8153b_u1u2en(tp, true);
}
static void rtl8153b_down(struct r8152 *tp)
{
u32 ocp_data;
if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
rtl_drop_queued_tx(tp);
return;
}
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
ocp_data |= PLA_MCU_SPDWN_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);
r8153b_u1u2en(tp, false);
r8153_u2p3en(tp, false);
r8153b_power_cut_en(tp, false);
@ -4454,6 +4498,14 @@ static void r8153_init(struct r8152 *tp)
r8153_mac_clk_spd(tp, false);
usb_enable_lpm(tp->udev);
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6);
ocp_data |= LANWAKE_CLR_EN;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data);
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG);
ocp_data &= ~LANWAKE_PIN;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data);
/* rx aggregation */
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
@ -4526,6 +4578,19 @@ static void r8153b_init(struct r8152 *tp)
ocp_data |= MAC_CLK_SPDWN_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
ocp_data &= ~PLA_MCU_SPDWN_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);
if (tp->version == RTL_VER_09) {
/* Disable Test IO for 32QFN */
if (ocp_read_byte(tp, MCU_TYPE_PLA, 0xdc00) & BIT(5)) {
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
ocp_data |= TEST_IO_OFF;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data);
}
}
set_bit(GREEN_ETHERNET, &tp->flags);
/* rx aggregation */
@ -5690,6 +5755,11 @@ static int rtl8152_probe(struct usb_interface *intf,
intf->needs_remote_wakeup = 1;
if (!rtl_can_wakeup(tp))
__rtl_set_wol(tp, 0);
else
tp->saved_wolopts = __rtl_get_wol(tp);
tp->rtl_ops.init(tp);
queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
set_ethernet_addr(tp);
@ -5703,10 +5773,6 @@ static int rtl8152_probe(struct usb_interface *intf,
goto out1;
}
if (!rtl_can_wakeup(tp))
__rtl_set_wol(tp, 0);
tp->saved_wolopts = __rtl_get_wol(tp);
if (tp->saved_wolopts)
device_set_wakeup_enable(&udev->dev, true);
else

View File

@ -267,7 +267,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_station_priv *sta_priv = NULL;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwl_device_cmd *dev_cmd;
struct iwl_device_tx_cmd *dev_cmd;
struct iwl_tx_cmd *tx_cmd;
__le16 fc;
u8 hdr_len;
@ -348,7 +348,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
if (unlikely(!dev_cmd))
goto drop_unlock_priv;
memset(dev_cmd, 0, sizeof(*dev_cmd));
dev_cmd->hdr.cmd = REPLY_TX;
tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;

View File

@ -2490,12 +2490,7 @@ int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
{
int ret = 0;
/* if the FW crashed or not debug monitor cfg was given, there is
* no point in changing the recording state
*/
if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status) ||
(!fwrt->trans->dbg.dest_tlv &&
fwrt->trans->dbg.ini_dest == IWL_FW_INI_LOCATION_INVALID))
if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
return 0;
if (fw_has_capa(&fwrt->fw->ucode_capa,

View File

@ -225,6 +225,34 @@ enum iwl_nvm_channel_flags {
NVM_CHANNEL_DC_HIGH = BIT(12),
};
/**
* enum iwl_reg_capa_flags - global flags applied for the whole regulatory
* domain.
* @REG_CAPA_BF_CCD_LOW_BAND: Beam-forming or Cyclic Delay Diversity in the
* 2.4Ghz band is allowed.
* @REG_CAPA_BF_CCD_HIGH_BAND: Beam-forming or Cyclic Delay Diversity in the
* 5Ghz band is allowed.
* @REG_CAPA_160MHZ_ALLOWED: 11ac channel with a width of 160Mhz is allowed
* for this regulatory domain (valid only in 5Ghz).
* @REG_CAPA_80MHZ_ALLOWED: 11ac channel with a width of 80Mhz is allowed
* for this regulatory domain (valid only in 5Ghz).
* @REG_CAPA_MCS_8_ALLOWED: 11ac with MCS 8 is allowed.
* @REG_CAPA_MCS_9_ALLOWED: 11ac with MCS 9 is allowed.
* @REG_CAPA_40MHZ_FORBIDDEN: 11n channel with a width of 40Mhz is forbidden
* for this regulatory domain (valid only in 5Ghz).
* @REG_CAPA_DC_HIGH_ENABLED: DC HIGH allowed.
*/
enum iwl_reg_capa_flags {
REG_CAPA_BF_CCD_LOW_BAND = BIT(0),
REG_CAPA_BF_CCD_HIGH_BAND = BIT(1),
REG_CAPA_160MHZ_ALLOWED = BIT(2),
REG_CAPA_80MHZ_ALLOWED = BIT(3),
REG_CAPA_MCS_8_ALLOWED = BIT(4),
REG_CAPA_MCS_9_ALLOWED = BIT(5),
REG_CAPA_40MHZ_FORBIDDEN = BIT(7),
REG_CAPA_DC_HIGH_ENABLED = BIT(9),
};
static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
int chan, u32 flags)
{
@ -1031,6 +1059,7 @@ IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
int ch_idx, u16 nvm_flags,
u16 cap_flags,
const struct iwl_cfg *cfg)
{
u32 flags = NL80211_RRF_NO_HT40;
@ -1069,13 +1098,27 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
(flags & NL80211_RRF_NO_IR))
flags |= NL80211_RRF_GO_CONCURRENT;
/*
* cap_flags is per regulatory domain so apply it for every channel
*/
if (ch_idx >= NUM_2GHZ_CHANNELS) {
if (cap_flags & REG_CAPA_40MHZ_FORBIDDEN)
flags |= NL80211_RRF_NO_HT40;
if (!(cap_flags & REG_CAPA_80MHZ_ALLOWED))
flags |= NL80211_RRF_NO_80MHZ;
if (!(cap_flags & REG_CAPA_160MHZ_ALLOWED))
flags |= NL80211_RRF_NO_160MHZ;
}
return flags;
}
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc,
u16 geo_info)
u16 geo_info, u16 cap)
{
int ch_idx;
u16 ch_flags;
@ -1133,7 +1176,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
}
reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
ch_flags, cfg);
ch_flags, cap,
cfg);
/* we can't continue the same rule */
if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags ||

View File

@ -7,7 +7,7 @@
*
* Copyright(c) 2008 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -29,7 +29,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -103,7 +103,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc,
u16 geo_info);
u16 geo_info, u16 cap);
/**
* struct iwl_nvm_section - describes an NVM section in memory.

View File

@ -66,7 +66,9 @@
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
const struct iwl_trans_ops *ops)
const struct iwl_trans_ops *ops,
unsigned int cmd_pool_size,
unsigned int cmd_pool_align)
{
struct iwl_trans *trans;
#ifdef CONFIG_LOCKDEP
@ -90,10 +92,8 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
"iwl_cmd_pool:%s", dev_name(trans->dev));
trans->dev_cmd_pool =
kmem_cache_create(trans->dev_cmd_pool_name,
sizeof(struct iwl_device_cmd),
sizeof(void *),
SLAB_HWCACHE_ALIGN,
NULL);
cmd_pool_size, cmd_pool_align,
SLAB_HWCACHE_ALIGN, NULL);
if (!trans->dev_cmd_pool)
return NULL;

View File

@ -193,6 +193,18 @@ struct iwl_device_cmd {
};
} __packed;
/**
* struct iwl_device_tx_cmd - buffer for TX command
* @hdr: the header
* @payload: the payload placeholder
*
* The actual structure is sized dynamically according to need.
*/
struct iwl_device_tx_cmd {
struct iwl_cmd_header hdr;
u8 payload[];
} __packed;
#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
/*
@ -544,7 +556,7 @@ struct iwl_trans_ops {
int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, int queue);
struct iwl_device_tx_cmd *dev_cmd, int queue);
void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
struct sk_buff_head *skbs);
@ -921,22 +933,22 @@ iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask)
return trans->ops->dump_data(trans, dump_mask);
}
static inline struct iwl_device_cmd *
static inline struct iwl_device_tx_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
return kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
}
int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
struct iwl_device_cmd *dev_cmd)
struct iwl_device_tx_cmd *dev_cmd)
{
kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}
static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, int queue)
struct iwl_device_tx_cmd *dev_cmd, int queue)
{
if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
return -EIO;
@ -1239,7 +1251,9 @@ static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
*****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
const struct iwl_trans_ops *ops);
const struct iwl_trans_ops *ops,
unsigned int cmd_pool_size,
unsigned int cmd_pool_align);
void iwl_trans_free(struct iwl_trans *trans);
/*****************************************************

View File

@ -256,7 +256,8 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
__le32_to_cpu(resp->n_channels),
resp->channels,
__le16_to_cpu(resp->mcc),
__le16_to_cpu(resp->geo_info));
__le16_to_cpu(resp->geo_info),
__le16_to_cpu(resp->cap));
/* Store the return source id */
src_id = resp->source_id;
kfree(resp);

View File

@ -487,13 +487,13 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
/*
* Allocates and sets the Tx cmd the driver data pointers in the skb
*/
static struct iwl_device_cmd *
static struct iwl_device_tx_cmd *
iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_tx_info *info, int hdrlen,
struct ieee80211_sta *sta, u8 sta_id)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct iwl_device_cmd *dev_cmd;
struct iwl_device_tx_cmd *dev_cmd;
struct iwl_tx_cmd *tx_cmd;
dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);
@ -501,11 +501,6 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
if (unlikely(!dev_cmd))
return NULL;
/* Make sure we zero enough of dev_cmd */
BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd));
BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) > sizeof(*tx_cmd));
memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd));
dev_cmd->hdr.cmd = TX_CMD;
if (iwl_mvm_has_new_tx_api(mvm)) {
@ -594,7 +589,7 @@ out:
}
static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
struct iwl_device_cmd *cmd)
struct iwl_device_tx_cmd *cmd)
{
struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
@ -713,7 +708,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info info;
struct iwl_device_cmd *dev_cmd;
struct iwl_device_tx_cmd *dev_cmd;
u8 sta_id;
int hdrlen = ieee80211_hdrlen(hdr->frame_control);
__le16 fc = hdr->frame_control;
@ -1075,7 +1070,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct iwl_mvm_sta *mvmsta;
struct iwl_device_cmd *dev_cmd;
struct iwl_device_tx_cmd *dev_cmd;
__le16 fc;
u16 seq_number = 0;
u8 tid = IWL_MAX_TID_COUNT;

View File

@ -305,7 +305,7 @@ struct iwl_cmd_meta {
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
struct iwl_pcie_txq_entry {
struct iwl_device_cmd *cmd;
void *cmd;
struct sk_buff *skb;
/* buffer to free after command completes */
const void *free_buf;
@ -690,7 +690,7 @@ void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, int txq_id);
struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx);
@ -1111,7 +1111,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, int txq_id);
struct iwl_device_tx_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);

View File

@ -79,6 +79,7 @@
#include "iwl-agn-hw.h"
#include "fw/error-dump.h"
#include "fw/dbg.h"
#include "fw/api/tx.h"
#include "internal.h"
#include "iwl-fh.h"
@ -3462,19 +3463,34 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
{
struct iwl_trans_pcie *trans_pcie;
struct iwl_trans *trans;
int ret, addr_size;
int ret, addr_size, txcmd_size, txcmd_align;
const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
if (!cfg_trans->gen2) {
ops = &trans_ops_pcie;
txcmd_size = sizeof(struct iwl_tx_cmd);
txcmd_align = sizeof(void *);
} else if (cfg_trans->device_family < IWL_DEVICE_FAMILY_AX210) {
txcmd_size = sizeof(struct iwl_tx_cmd_gen2);
txcmd_align = 64;
} else {
txcmd_size = sizeof(struct iwl_tx_cmd_gen3);
txcmd_align = 128;
}
txcmd_size += sizeof(struct iwl_cmd_header);
txcmd_size += 36; /* biggest possible 802.11 header */
/* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
if (WARN_ON(cfg_trans->gen2 && txcmd_size >= txcmd_align))
return ERR_PTR(-EINVAL);
ret = pcim_enable_device(pdev);
if (ret)
return ERR_PTR(ret);
if (cfg_trans->gen2)
trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
&pdev->dev, &trans_ops_pcie_gen2);
else
trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
&pdev->dev, &trans_ops_pcie);
trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops,
txcmd_size, txcmd_align);
if (!trans)
return ERR_PTR(-ENOMEM);
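A note on the txcmd_size/txcmd_align computation and the WARN_ON() above: the page-boundary guarantee comes from the slab allocator. When a pool is created with a power-of-two alignment larger than the object size, each object sits entirely inside one aligned slot and can never straddle a page. A minimal sketch of such a pool, with a hypothetical cache name and helper (the kmem_cache_create() call itself is not part of this diff excerpt):

#include <linux/bug.h>
#include <linux/slab.h>

static struct kmem_cache *example_txcmd_pool;

static int example_txcmd_pool_create(unsigned int txcmd_size,
				     unsigned int txcmd_align)
{
	/* Mirrors the check above: the object must fit inside a single
	 * txcmd_align-sized slot, otherwise it could cross a page. */
	if (WARN_ON(txcmd_size >= txcmd_align))
		return -EINVAL;

	example_txcmd_pool = kmem_cache_create("example_txcmd", txcmd_size,
					       txcmd_align, 0, NULL);
	return example_txcmd_pool ? 0 : -ENOMEM;
}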

View File

@ -243,7 +243,8 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
struct sk_buff *skb,
struct iwl_tfh_tfd *tfd, int start_len,
u8 hdr_len, struct iwl_device_cmd *dev_cmd)
u8 hdr_len,
struct iwl_device_tx_cmd *dev_cmd)
{
#ifdef CONFIG_INET
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@ -371,7 +372,7 @@ out_err:
static struct
iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
struct iwl_txq *txq,
struct iwl_device_cmd *dev_cmd,
struct iwl_device_tx_cmd *dev_cmd,
struct sk_buff *skb,
struct iwl_cmd_meta *out_meta,
int hdr_len,
@ -403,6 +404,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
goto out_err;
/*
* No need for _with_wa(), we ensure (via alignment) that the data
* here can never cross or end at a page boundary.
*/
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len);
if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
@ -456,7 +461,7 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
static struct
iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
struct iwl_txq *txq,
struct iwl_device_cmd *dev_cmd,
struct iwl_device_tx_cmd *dev_cmd,
struct sk_buff *skb,
struct iwl_cmd_meta *out_meta,
int hdr_len,
@ -496,6 +501,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
goto out_err;
/*
* No need for _with_wa(), we ensure (via alignment) that the data
* here can never cross or end at a page boundary.
*/
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
@ -540,7 +549,7 @@ out_err:
static
struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
struct iwl_txq *txq,
struct iwl_device_cmd *dev_cmd,
struct iwl_device_tx_cmd *dev_cmd,
struct sk_buff *skb,
struct iwl_cmd_meta *out_meta)
{
@ -580,7 +589,7 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
}
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, int txq_id)
struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_cmd_meta *out_meta;
@ -605,7 +614,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
/* don't put the packet on the ring, if there is no room */
if (unlikely(iwl_queue_space(trans, txq) < 3)) {
struct iwl_device_cmd **dev_cmd_ptr;
struct iwl_device_tx_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
trans_pcie->dev_cmd_offs);

View File

@ -213,8 +213,8 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
u8 sec_ctl = 0;
u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
__le16 bc_ent;
struct iwl_tx_cmd *tx_cmd =
(void *)txq->entries[txq->write_ptr].cmd->payload;
struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
u8 sta_id = tx_cmd->sta_id;
scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
@ -257,8 +257,8 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
int read_ptr = txq->read_ptr;
u8 sta_id = 0;
__le16 bc_ent;
struct iwl_tx_cmd *tx_cmd =
(void *)txq->entries[read_ptr].cmd->payload;
struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
@ -1196,7 +1196,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
while (!skb_queue_empty(&overflow_skbs)) {
struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
struct iwl_device_cmd *dev_cmd_ptr;
struct iwl_device_tx_cmd *dev_cmd_ptr;
dev_cmd_ptr = *(void **)((u8 *)skb->cb +
trans_pcie->dev_cmd_offs);
@ -2099,7 +2099,8 @@ static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph,
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_txq *txq, u8 hdr_len,
struct iwl_cmd_meta *out_meta,
struct iwl_device_cmd *dev_cmd, u16 tb1_len)
struct iwl_device_tx_cmd *dev_cmd,
u16 tb1_len)
{
struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
@ -2281,7 +2282,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_txq *txq, u8 hdr_len,
struct iwl_cmd_meta *out_meta,
struct iwl_device_cmd *dev_cmd, u16 tb1_len)
struct iwl_device_tx_cmd *dev_cmd,
u16 tb1_len)
{
/* No A-MSDU without CONFIG_INET */
WARN_ON(1);
@ -2291,7 +2293,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
#endif /* CONFIG_INET */
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, int txq_id)
struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct ieee80211_hdr *hdr;
@ -2348,7 +2350,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
/* don't put the packet on the ring, if there is no room */
if (unlikely(iwl_queue_space(trans, txq) < 3)) {
struct iwl_device_cmd **dev_cmd_ptr;
struct iwl_device_tx_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
trans_pcie->dev_cmd_offs);

View File

@ -805,7 +805,7 @@ static int rsi_probe(struct usb_interface *pfunction,
adapter->device_model = RSI_DEV_9116;
} else {
rsi_dbg(ERR_ZONE, "%s: Unsupported RSI device id 0x%x\n",
__func__, id ? id->idProduct : 0x0);
__func__, id->idProduct);
goto err1;
}

View File

@ -127,7 +127,7 @@ static int gpd_pocket_fan_probe(struct platform_device *pdev)
int i;
for (i = 0; i < ARRAY_SIZE(temp_limits); i++) {
if (temp_limits[i] < 40000 || temp_limits[i] > 70000) {
if (temp_limits[i] < 20000 || temp_limits[i] > 90000) {
dev_err(&pdev->dev, "Invalid temp-limit %d (must be between 40000 and 70000)\n",
temp_limits[i]);
temp_limits[0] = TEMP_LIMIT0_DEFAULT;

View File

@ -44,6 +44,8 @@ static const struct x86_cpu_id intel_pmc_core_platform_ids[] = {
INTEL_CPU_FAM6(KABYLAKE, pmc_core_device),
INTEL_CPU_FAM6(CANNONLAKE_L, pmc_core_device),
INTEL_CPU_FAM6(ICELAKE_L, pmc_core_device),
INTEL_CPU_FAM6(COMETLAKE, pmc_core_device),
INTEL_CPU_FAM6(COMETLAKE_L, pmc_core_device),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_platform_ids);

View File

@ -793,8 +793,6 @@ static int ap_device_probe(struct device *dev)
drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
if (!!devres != !!drvres)
return -ENODEV;
/* (re-)init queue's state machine */
ap_queue_reinit_state(to_ap_queue(dev));
}
/* Add queue/card to list of active queues/cards */

View File

@ -261,7 +261,7 @@ void ap_queue_prepare_remove(struct ap_queue *aq);
void ap_queue_remove(struct ap_queue *aq);
void ap_queue_suspend(struct ap_device *ap_dev);
void ap_queue_resume(struct ap_device *ap_dev);
void ap_queue_reinit_state(struct ap_queue *aq);
void ap_queue_init_state(struct ap_queue *aq);
struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
int comp_device_type, unsigned int functions);

View File

@ -638,7 +638,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
aq->ap_dev.device.type = &ap_queue_type;
aq->ap_dev.device_type = device_type;
aq->qid = qid;
aq->state = AP_STATE_RESET_START;
aq->state = AP_STATE_UNBOUND;
aq->interrupt = AP_INTR_DISABLED;
spin_lock_init(&aq->lock);
INIT_LIST_HEAD(&aq->list);
@ -771,10 +771,11 @@ void ap_queue_remove(struct ap_queue *aq)
spin_unlock_bh(&aq->lock);
}
void ap_queue_reinit_state(struct ap_queue *aq)
void ap_queue_init_state(struct ap_queue *aq)
{
spin_lock_bh(&aq->lock);
aq->state = AP_STATE_RESET_START;
ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_state);

View File

@ -175,6 +175,7 @@ static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
zq->queue = aq;
zq->online = 1;
atomic_set(&zq->load, 0);
ap_queue_init_state(aq);
ap_queue_init_reply(aq, &zq->reply);
aq->request_timeout = CEX2A_CLEANUP_TIME,
aq->private = zq;

View File

@ -220,6 +220,7 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
zq->queue = aq;
zq->online = 1;
atomic_set(&zq->load, 0);
ap_rapq(aq->qid);
rc = zcrypt_cex2c_rng_supported(aq);
if (rc < 0) {
zcrypt_queue_free(zq);
@ -231,6 +232,7 @@ static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
else
zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
MSGTYPE06_VARIANT_NORNG);
ap_queue_init_state(aq);
ap_queue_init_reply(aq, &zq->reply);
aq->request_timeout = CEX2C_CLEANUP_TIME;
aq->private = zq;

View File

@ -381,6 +381,7 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
zq->queue = aq;
zq->online = 1;
atomic_set(&zq->load, 0);
ap_queue_init_state(aq);
ap_queue_init_reply(aq, &zq->reply);
aq->request_timeout = CEX4_CLEANUP_TIME,
aq->private = zq;

View File

@ -439,6 +439,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
return SCSI_MLQUEUE_HOST_BUSY;
if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET)))
return SCSI_MLQUEUE_HOST_BUSY;
rport = starget_to_rport(scsi_target(sc->device));
if (!rport) {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,

View File

@ -419,6 +419,8 @@ static void wkup_m3_rproc_boot_thread(struct wkup_m3_ipc *m3_ipc)
ret = rproc_boot(m3_ipc->rproc);
if (ret)
dev_err(dev, "rproc_boot failed\n");
else
m3_ipc_state = m3_ipc;
do_exit(0);
}
@ -505,8 +507,6 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
goto err_put_rproc;
}
m3_ipc_state = m3_ipc;
return 0;
err_put_rproc:

View File

@ -3,6 +3,7 @@
config OPTEE
tristate "OP-TEE"
depends on HAVE_ARM_SMCCC
depends on MMU
help
This implements the OP-TEE Trusted Execution Environment (TEE)
driver.

View File

@ -2102,7 +2102,15 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
*/
thresh = SZ_4M;
if (!mixed && total_free_meta - thresh < block_rsv->size)
/*
* We only want to claim there's no available space if we can no longer
* allocate chunks for our metadata profile and our global reserve will
* not fit in the free metadata space. If we aren't ->full then we
* still can allocate chunks and thus are fine using the currently
* calculated f_bavail.
*/
if (!mixed && block_rsv->space_info->full &&
total_free_meta - thresh < block_rsv->size)
buf->f_bavail = 0;
buf->f_type = BTRFS_SUPER_MAGIC;

View File

@ -312,7 +312,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
if (server->tcpStatus != CifsNeedReconnect)
break;
if (--retries)
if (retries && --retries)
continue;
/*
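On the retries guard above: with the old test, a counter that has already reached 0 is still decremented, and the result (-1 as a signed int, or a wrapped value if unsigned) is truthy, so the reconnect loop keeps spinning; the added "retries &&" short-circuits before the decrement once the budget is exhausted. A tiny standalone illustration, not the cifs code itself:

#include <stdio.h>

int main(void)
{
	int retries = 0;

	/* The old form would evaluate --retries (now -1, truthy) and retry.
	 * The new form stops at the left operand and never decrements. */
	if (retries && --retries)
		printf("retry, %d attempts left\n", retries);
	else
		printf("give up, retries still %d\n", retries);

	return 0;
}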

View File

@ -264,7 +264,7 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
struct super_block *sb = sdp->sd_vfs;
struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9);
bio->bi_iter.bi_sector = blkno << (sb->s_blocksize_bits - 9);
bio_set_dev(bio, sb->s_bdev);
bio->bi_end_io = end_io;
bio->bi_private = sdp;
@ -471,6 +471,20 @@ static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
put_page(page); /* Once more for find_or_create_page */
}
static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
{
struct bio *new;
new = bio_alloc(GFP_NOIO, nr_iovecs);
bio_copy_dev(new, prev);
new->bi_iter.bi_sector = bio_end_sector(prev);
new->bi_opf = prev->bi_opf;
new->bi_write_hint = prev->bi_write_hint;
bio_chain(new, prev);
submit_bio(prev);
return new;
}
/**
* gfs2_find_jhead - find the head of a log
* @jd: The journal descriptor
@ -487,15 +501,15 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
struct address_space *mapping = jd->jd_inode->i_mapping;
unsigned int block = 0, blocks_submitted = 0, blocks_read = 0;
unsigned int bsize = sdp->sd_sb.sb_bsize;
unsigned int bsize = sdp->sd_sb.sb_bsize, off;
unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
unsigned int shift = PAGE_SHIFT - bsize_shift;
unsigned int readhead_blocks = BIO_MAX_PAGES << shift;
unsigned int readahead_blocks = BIO_MAX_PAGES << shift;
struct gfs2_journal_extent *je;
int sz, ret = 0;
struct bio *bio = NULL;
struct page *page = NULL;
bool done = false;
bool bio_chained = false, done = false;
errseq_t since;
memset(head, 0, sizeof(*head));
@ -504,9 +518,9 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
since = filemap_sample_wb_err(mapping);
list_for_each_entry(je, &jd->extent_list, list) {
for (; block < je->lblock + je->blocks; block++) {
u64 dblock;
u64 dblock = je->dblock;
for (; block < je->lblock + je->blocks; block++, dblock++) {
if (!page) {
page = find_or_create_page(mapping,
block >> shift, GFP_NOFS);
@ -515,35 +529,41 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
done = true;
goto out;
}
off = 0;
}
if (!bio || (bio_chained && !off)) {
/* start new bio */
} else {
sz = bio_add_page(bio, page, bsize, off);
if (sz == bsize)
goto block_added;
if (off) {
unsigned int blocks =
(PAGE_SIZE - off) >> bsize_shift;
bio = gfs2_chain_bio(bio, blocks);
bio_chained = true;
goto add_block_to_new_bio;
}
}
if (bio) {
unsigned int off;
off = (block << bsize_shift) & ~PAGE_MASK;
sz = bio_add_page(bio, page, bsize, off);
if (sz == bsize) { /* block added */
if (off + bsize == PAGE_SIZE) {
page = NULL;
goto page_added;
}
continue;
}
blocks_submitted = block + 1;
submit_bio(bio);
bio = NULL;
}
dblock = je->dblock + (block - je->lblock);
bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
bio->bi_opf = REQ_OP_READ;
sz = bio_add_page(bio, page, bsize, 0);
gfs2_assert_warn(sdp, sz == bsize);
if (bsize == PAGE_SIZE)
bio_chained = false;
add_block_to_new_bio:
sz = bio_add_page(bio, page, bsize, off);
BUG_ON(sz != bsize);
block_added:
off += bsize;
if (off == PAGE_SIZE)
page = NULL;
page_added:
if (blocks_submitted < blocks_read + readhead_blocks) {
if (blocks_submitted < blocks_read + readahead_blocks) {
/* Keep at least one bio in flight */
continue;
}
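On the first hunk of this file: because the block size is a power of two, blkno << (s_blocksize_bits - 9) and blkno * (s_blocksize >> 9) name the same 512-byte sector; the shift form simply works from the block-size exponent. A quick standalone check with an assumed 4 KiB block size (the values are illustrative, not taken from gfs2):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t blkno = 123456;              /* example block number */
	unsigned int s_blocksize = 4096;      /* equals 1 << s_blocksize_bits */
	unsigned int s_blocksize_bits = 12;

	uint64_t by_multiply = blkno * (s_blocksize >> 9);
	uint64_t by_shift = blkno << (s_blocksize_bits - 9);

	assert(by_multiply == by_shift);
	printf("sector %llu\n", (unsigned long long)by_shift);
	return 0;
}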

View File

@ -3249,8 +3249,8 @@ static int do_last(struct nameidata *nd,
struct file *file, const struct open_flags *op)
{
struct dentry *dir = nd->path.dentry;
kuid_t dir_uid = dir->d_inode->i_uid;
umode_t dir_mode = dir->d_inode->i_mode;
kuid_t dir_uid = nd->inode->i_uid;
umode_t dir_mode = nd->inode->i_mode;
int open_flag = op->open_flag;
bool will_truncate = (open_flag & O_TRUNC) != 0;
bool got_write = false;

View File

@ -629,6 +629,7 @@ static void reiserfs_put_super(struct super_block *s)
reiserfs_write_unlock(s);
mutex_destroy(&REISERFS_SB(s)->lock);
destroy_workqueue(REISERFS_SB(s)->commit_wq);
kfree(REISERFS_SB(s)->s_jdev);
kfree(s->s_fs_info);
s->s_fs_info = NULL;
}
@ -2240,6 +2241,7 @@ error_unlocked:
kfree(qf_names[j]);
}
#endif
kfree(sbi->s_jdev);
kfree(sbi);
s->s_fs_info = NULL;

View File

@ -1915,11 +1915,11 @@ static inline void rseq_migrate(struct task_struct *t)
/*
* If parent process has a registered restartable sequences area, the
* child inherits. Only applies when forking a process, not a thread.
* child inherits. Unregister rseq for a clone with CLONE_VM set.
*/
static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
if (clone_flags & CLONE_THREAD) {
if (clone_flags & CLONE_VM) {
t->rseq = NULL;
t->rseq_sig = 0;
t->rseq_event_mask = 0;

Some files were not shown because too many files have changed in this diff.