This is the 5.4.29 stable release

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl6EWJoACgkQONu9yGCS
aT6ZpBAApiZRWKEHEnsYCbcfR3Kgvz9hQoIVjQWhXyCGVwJISbviBgw+GZB34j4j
UrI/Y7av/b7YRjvTm9+HkfmqEZSmlOnc8Z5jr4/u6jTSlAyMEluFT/OPm4eUuwJt
ea0D4piISja770zfO7SZ4qsuO2QxzIhyJcmlOgnxnk+keyYBE/ZVtrJalSZs7hc9
eUKqLpN1YG/RztcjFK/v7mIXLBQ317s6ZbfuMHvUSmkvPrweQjj3Tm36K9mQqVwB
22YrirWFLBBuotTSZJmU4YCFgrCHLo4ByI6Of2PDY3eBWui88h47frKIcxLExA9c
2F9ZM1TzV5MbtkMLmhakG4ugd2bVKbxGlKU9JosgdseaTHu2v53itDqmKuDGzh12
yktnA/3gmhusfQu6uUACXgXeyTSxMK6WsKLI7qYKrF3ZL8IUfhMVofhjS/yDSznd
nt+8K4aBD0ZTn+CQFNNidX+hAeCZCo9gVK7XmUniuPNq4hpH4vRt+a54zyi4vXGX
2U3fsz4bvES4EYQeJQiRvhcAQlO+X2icD7VU5b1N3S8KbntzA3QOAiedvfJXSYE9
lEPmZKbJreY1c+jrCJZluAltmY9uNIu/NdwdPp5S1puFuuZAljPeuuSqVvE/xyPA
9pIwqQ7X/WIJEvI/z5UOVGPq6WQ81TsA12LRwSi6eZONqbJ0740=
=Vc7I
-----END PGP SIGNATURE-----

Merge tag 'v5.4.29' into 5.4-1.0.0-imx

This is the 5.4.29 stable release
commit 3acdf2bdfa
@ -110,6 +110,13 @@ PROPERTIES
Usage: required
Definition: See soc/fsl/qman.txt and soc/fsl/bman.txt

- fsl,erratum-a050385
Usage: optional
Value type: boolean
Definition: A boolean property. Indicates the presence of the
erratum A050385 which indicates that DMA transactions that are
split can result in a FMan lock.

=============================================================================
FMan MURAM Node

Makefile

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 28
SUBLEVEL = 29
EXTRAVERSION =
NAME = Kleptomaniac Octopus

@ -148,6 +148,7 @@
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x0 0x0 0xc0000000>;
dma-ranges = <0x80000000 0x0 0x80000000 0x80000000>;
ti,hwmods = "l3_main_1", "l3_main_2";
reg = <0x0 0x44000000 0x0 0x1000000>,
<0x0 0x45000000 0x0 0x1000>;

@ -143,6 +143,7 @@
#address-cells = <1>;
#size-cells = <1>;
ranges = <0 0 0 0xc0000000>;
dma-ranges = <0x80000000 0x0 0x80000000 0x80000000>;
ti,hwmods = "l3_main_1", "l3_main_2", "l3_main_3";
reg = <0 0x44000000 0 0x2000>,
<0 0x44800000 0 0x3000>,

@ -482,7 +482,8 @@
};

&usbphy {
usb0_id_det-gpios = <&pio 7 11 GPIO_ACTIVE_HIGH>; /* PH11 */
usb0_id_det-gpios = <&pio 7 11 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH11 */
usb0_vbus_power-supply = <&usb_power_supply>;
usb0_vbus-supply = <&reg_drivevbus>;
usb1_vbus-supply = <&reg_vmain>;
usb2_vbus-supply = <&reg_vmain>;

@ -115,6 +115,9 @@ static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *des
if (!sev_active())
return;

if (!IS_ENABLED(CONFIG_EFI))
return;

if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA)
desc->flags |= IORES_MAP_ENCRYPTED;
}

@ -2039,10 +2039,12 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
}
/* and dreg_lo,sreg_lo */
EMIT2(0x23, add_2reg(0xC0, sreg_lo, dreg_lo));
/* and dreg_hi,sreg_hi */
EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi));
/* or dreg_lo,dreg_hi */
EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi));
if (is_jmp64) {
/* and dreg_hi,sreg_hi */
EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi));
/* or dreg_lo,dreg_hi */
EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi));
}
goto emit_cond_jmp;
}
case BPF_JMP | BPF_JSET | BPF_K:

@ -977,6 +977,16 @@ static int acpi_s2idle_prepare_late(void)
return 0;
}

static void acpi_s2idle_sync(void)
{
/*
* The EC driver uses the system workqueue and an additional special
* one, so those need to be flushed too.
*/
acpi_ec_flush_work();
acpi_os_wait_events_complete(); /* synchronize Notify handling */
}

static bool acpi_s2idle_wake(void)
{
if (!acpi_sci_irq_valid())

@ -1021,13 +1031,8 @@ static bool acpi_s2idle_wake(void)
* should be missed by canceling the wakeup here.
*/
pm_system_cancel_wakeup();
/*
* The EC driver uses the system workqueue and an additional
* special one, so those need to be flushed too.
*/
acpi_os_wait_events_complete(); /* synchronize EC GPE processing */
acpi_ec_flush_work();
acpi_os_wait_events_complete(); /* synchronize Notify handling */

acpi_s2idle_sync();

/*
* The SCI is in the "suspended" state now and it cannot produce

@ -1055,6 +1060,13 @@ static void acpi_s2idle_restore_early(void)

static void acpi_s2idle_restore(void)
{
/*
* Drain pending events before restoring the working-state configuration
* of GPEs.
*/
acpi_os_wait_events_complete(); /* synchronize GPE processing */
acpi_s2idle_sync();

s2idle_wakeup = false;

acpi_enable_all_runtime_gpes();

@ -393,6 +393,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa356), board_ahci }, /* Cannon Lake PCH-H RAID */
{ PCI_VDEVICE(INTEL, 0x06d7), board_ahci }, /* Comet Lake-H RAID */
{ PCI_VDEVICE(INTEL, 0x0f22), board_ahci_mobile }, /* Bay Trail AHCI */
{ PCI_VDEVICE(INTEL, 0x0f23), board_ahci_mobile }, /* Bay Trail AHCI */
{ PCI_VDEVICE(INTEL, 0x22a3), board_ahci_mobile }, /* Cherry Tr. AHCI */

@ -114,30 +114,13 @@ static ssize_t phys_index_show(struct device *dev,
}

/*
* Show whether the memory block is likely to be offlineable (or is already
* offline). Once offline, the memory block could be removed. The return
* value does, however, not indicate that there is a way to remove the
* memory block.
* Legacy interface that we cannot remove. Always indicate "removable"
* with CONFIG_MEMORY_HOTREMOVE - bad heuristic.
*/
static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct memory_block *mem = to_memory_block(dev);
unsigned long pfn;
int ret = 1, i;

if (mem->state != MEM_ONLINE)
goto out;

for (i = 0; i < sections_per_block; i++) {
if (!present_section_nr(mem->start_section_nr + i))
continue;
pfn = section_nr_to_pfn(mem->start_section_nr + i);
ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
}

out:
return sprintf(buf, "%d\n", ret);
return sprintf(buf, "%d\n", (int)IS_ENABLED(CONFIG_MEMORY_HOTREMOVE));
}

/*

@ -233,7 +233,8 @@ static u64 notrace read_hv_clock_tsc(struct clocksource *arg)

static u64 read_hv_sched_clock_tsc(void)
{
return read_hv_clock_tsc(NULL) - hv_sched_clock_offset;
return (read_hv_clock_tsc(NULL) - hv_sched_clock_offset) *
(NSEC_PER_SEC / HV_CLOCK_HZ);
}

static struct clocksource hyperv_cs_tsc = {

@ -258,7 +259,8 @@ static u64 notrace read_hv_clock_msr(struct clocksource *arg)

static u64 read_hv_sched_clock_msr(void)
{
return read_hv_clock_msr(NULL) - hv_sched_clock_offset;
return (read_hv_clock_msr(NULL) - hv_sched_clock_offset) *
(NSEC_PER_SEC / HV_CLOCK_HZ);
}

static struct clocksource hyperv_cs_msr = {

@ -21,18 +21,21 @@
#include "gpiolib.h"
#include "gpiolib-acpi.h"

#define QUIRK_NO_EDGE_EVENTS_ON_BOOT 0x01l
#define QUIRK_NO_WAKEUP 0x02l

static int run_edge_events_on_boot = -1;
module_param(run_edge_events_on_boot, int, 0444);
MODULE_PARM_DESC(run_edge_events_on_boot,
"Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");

static int honor_wakeup = -1;
module_param(honor_wakeup, int, 0444);
MODULE_PARM_DESC(honor_wakeup,
"Honor the ACPI wake-capable flag: 0=no, 1=yes, -1=auto");
static char *ignore_wake;
module_param(ignore_wake, charp, 0444);
MODULE_PARM_DESC(ignore_wake,
"controller@pin combos on which to ignore the ACPI wake flag "
"ignore_wake=controller@pin[,controller@pin[,...]]");

struct acpi_gpiolib_dmi_quirk {
bool no_edge_events_on_boot;
char *ignore_wake;
};

/**
* struct acpi_gpio_event - ACPI GPIO event handler data

@ -202,6 +205,57 @@ static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
acpi_gpiochip_request_irq(acpi_gpio, event);
}

static bool acpi_gpio_in_ignore_list(const char *controller_in, int pin_in)
{
const char *controller, *pin_str;
int len, pin;
char *endp;

controller = ignore_wake;
while (controller) {
pin_str = strchr(controller, '@');
if (!pin_str)
goto err;

len = pin_str - controller;
if (len == strlen(controller_in) &&
strncmp(controller, controller_in, len) == 0) {
pin = simple_strtoul(pin_str + 1, &endp, 10);
if (*endp != 0 && *endp != ',')
goto err;

if (pin == pin_in)
return true;
}

controller = strchr(controller, ',');
if (controller)
controller++;
}

return false;
err:
pr_err_once("Error invalid value for gpiolib_acpi.ignore_wake: %s\n",
ignore_wake);
return false;
}

static bool acpi_gpio_irq_is_wake(struct device *parent,
struct acpi_resource_gpio *agpio)
{
int pin = agpio->pin_table[0];

if (agpio->wake_capable != ACPI_WAKE_CAPABLE)
return false;

if (acpi_gpio_in_ignore_list(dev_name(parent), pin)) {
dev_info(parent, "Ignoring wakeup on pin %d\n", pin);
return false;
}

return true;
}

static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
void *context)
{

@ -282,7 +336,7 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
event->handle = evt_handle;
event->handler = handler;
event->irq = irq;
event->irq_is_wake = honor_wakeup && agpio->wake_capable == ACPI_WAKE_CAPABLE;
event->irq_is_wake = acpi_gpio_irq_is_wake(chip->parent, agpio);
event->pin = pin;
event->desc = desc;

@ -1321,7 +1375,9 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] = {
DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
},
.driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT,
.driver_data = &(struct acpi_gpiolib_dmi_quirk) {
.no_edge_events_on_boot = true,
},
},
{
/*

@ -1334,16 +1390,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] = {
DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"),
DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"),
},
.driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT,
.driver_data = &(struct acpi_gpiolib_dmi_quirk) {
.no_edge_events_on_boot = true,
},
},
{
/*
* Various HP X2 10 Cherry Trail models use an external
* embedded-controller connected via I2C + an ACPI GPIO
* event handler. The embedded controller generates various
* spurious wakeup events when suspended. So disable wakeup
* for its handler (it uses the only ACPI GPIO event handler).
* This breaks wakeup when opening the lid, the user needs
* HP X2 10 models with Cherry Trail SoC + TI PMIC use an
* external embedded-controller connected via I2C + an ACPI GPIO
* event handler on INT33FF:01 pin 0, causing spurious wakeups.
* When suspending by closing the LID, the power to the USB
* keyboard is turned off, causing INT0002 ACPI events to
* trigger once the XHCI controller notices the keyboard is
* gone. So INT0002 events cause spurious wakeups too. Ignoring
* EC wakes breaks wakeup when opening the lid, the user needs
* to press the power-button to wakeup the system. The
* alternative is suspend simply not working, which is worse.
*/

@ -1351,33 +1411,46 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"),
},
.driver_data = (void *)QUIRK_NO_WAKEUP,
.driver_data = &(struct acpi_gpiolib_dmi_quirk) {
.ignore_wake = "INT33FF:01@0,INT0002:00@2",
},
},
{
/*
* HP X2 10 models with Bay Trail SoC + AXP288 PMIC use an
* external embedded-controller connected via I2C + an ACPI GPIO
* event handler on INT33FC:02 pin 28, causing spurious wakeups.
*/
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
DMI_MATCH(DMI_BOARD_NAME, "815D"),
},
.driver_data = &(struct acpi_gpiolib_dmi_quirk) {
.ignore_wake = "INT33FC:02@28",
},
},
{} /* Terminating entry */
};

static int acpi_gpio_setup_params(void)
{
const struct acpi_gpiolib_dmi_quirk *quirk = NULL;
const struct dmi_system_id *id;
long quirks = 0;

id = dmi_first_match(gpiolib_acpi_quirks);
if (id)
quirks = (long)id->driver_data;
quirk = id->driver_data;

if (run_edge_events_on_boot < 0) {
if (quirks & QUIRK_NO_EDGE_EVENTS_ON_BOOT)
if (quirk && quirk->no_edge_events_on_boot)
run_edge_events_on_boot = 0;
else
run_edge_events_on_boot = 1;
}

if (honor_wakeup < 0) {
if (quirks & QUIRK_NO_WAKEUP)
honor_wakeup = 0;
else
honor_wakeup = 1;
}
if (ignore_wake == NULL && quirk && quirk->ignore_wake)
ignore_wake = quirk->ignore_wake;

return 0;
}

@ -2194,9 +2194,16 @@ static void gpiochip_irq_disable(struct irq_data *d)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(d);

/*
* Since we override .irq_disable() we need to mimic the
* behaviour of __irq_disable() in irq/chip.c.
* First call .irq_disable() if it exists, else mimic the
* behaviour of mask_irq() which calls .irq_mask() if
* it exists.
*/
if (chip->irq.irq_disable)
chip->irq.irq_disable(d);
else
else if (chip->irq.chip->irq_mask)
chip->irq.chip->irq_mask(d);
gpiochip_disable_irq(chip, d->hwirq);
}

@ -84,6 +84,13 @@
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
#define mmHDP_MEM_POWER_CTRL_BASE_IDX 0

/* for Vega20/arcturus regiter offset change */
#define mmROM_INDEX_VG20 0x00e4
#define mmROM_INDEX_VG20_BASE_IDX 0
#define mmROM_DATA_VG20 0x00e5
#define mmROM_DATA_VG20_BASE_IDX 0

/*
* Indirect registers accessor
*/

@ -304,6 +311,8 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
{
u32 *dw_ptr;
u32 i, length_dw;
uint32_t rom_index_offset;
uint32_t rom_data_offset;

if (bios == NULL)
return false;

@ -316,11 +325,23 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
dw_ptr = (u32 *)bios;
length_dw = ALIGN(length_bytes, 4) / 4;

switch (adev->asic_type) {
case CHIP_VEGA20:
case CHIP_ARCTURUS:
rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX_VG20);
rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA_VG20);
break;
default:
rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX);
rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA);
break;
}

/* set rom index to 0 */
WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
WREG32(rom_index_offset, 0);
/* read out the rom data */
for (i = 0; i < length_dw; i++)
dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));
dw_ptr[i] = RREG32(rom_data_offset);

return true;
}

@ -269,6 +269,117 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
.use_urgent_burst_bw = 0
};

struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
.clock_limits = {
{
.state = 0,
.dcfclk_mhz = 560.0,
.fabricclk_mhz = 560.0,
.dispclk_mhz = 513.0,
.dppclk_mhz = 513.0,
.phyclk_mhz = 540.0,
.socclk_mhz = 560.0,
.dscclk_mhz = 171.0,
.dram_speed_mts = 8960.0,
},
{
.state = 1,
.dcfclk_mhz = 694.0,
.fabricclk_mhz = 694.0,
.dispclk_mhz = 642.0,
.dppclk_mhz = 642.0,
.phyclk_mhz = 600.0,
.socclk_mhz = 694.0,
.dscclk_mhz = 214.0,
.dram_speed_mts = 11104.0,
},
{
.state = 2,
.dcfclk_mhz = 875.0,
.fabricclk_mhz = 875.0,
.dispclk_mhz = 734.0,
.dppclk_mhz = 734.0,
.phyclk_mhz = 810.0,
.socclk_mhz = 875.0,
.dscclk_mhz = 245.0,
.dram_speed_mts = 14000.0,
},
{
.state = 3,
.dcfclk_mhz = 1000.0,
.fabricclk_mhz = 1000.0,
.dispclk_mhz = 1100.0,
.dppclk_mhz = 1100.0,
.phyclk_mhz = 810.0,
.socclk_mhz = 1000.0,
.dscclk_mhz = 367.0,
.dram_speed_mts = 16000.0,
},
{
.state = 4,
.dcfclk_mhz = 1200.0,
.fabricclk_mhz = 1200.0,
.dispclk_mhz = 1284.0,
.dppclk_mhz = 1284.0,
.phyclk_mhz = 810.0,
.socclk_mhz = 1200.0,
.dscclk_mhz = 428.0,
.dram_speed_mts = 16000.0,
},
/*Extra state, no dispclk ramping*/
{
.state = 5,
.dcfclk_mhz = 1200.0,
.fabricclk_mhz = 1200.0,
.dispclk_mhz = 1284.0,
.dppclk_mhz = 1284.0,
.phyclk_mhz = 810.0,
.socclk_mhz = 1200.0,
.dscclk_mhz = 428.0,
.dram_speed_mts = 16000.0,
},
},
.num_states = 5,
.sr_exit_time_us = 8.6,
.sr_enter_plus_exit_time_us = 10.9,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
.urgent_latency_vm_data_only_us = 4.0,
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0,
.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
.max_avg_sdp_bw_use_normal_percent = 40.0,
.max_avg_dram_bw_use_normal_percent = 40.0,
.writeback_latency_us = 12.0,
.ideal_dram_bw_after_urgent_percent = 40.0,
.max_request_size_bytes = 256,
.dram_channel_width_bytes = 2,
.fabric_datapath_to_dcn_data_return_bytes = 64,
.dcn_downspread_percent = 0.5,
.downspread_percent = 0.38,
.dram_page_open_time_ns = 50.0,
.dram_rw_turnaround_time_ns = 17.5,
.dram_return_buffer_per_channel_bytes = 8192,
.round_trip_ping_latency_dcfclk_cycles = 131,
.urgent_out_of_order_return_per_channel_bytes = 256,
.channel_interleave_bytes = 256,
.num_banks = 8,
.num_chans = 8,
.vmm_page_size_bytes = 4096,
.dram_clock_change_latency_us = 404.0,
.dummy_pstate_latency_us = 5.0,
.writeback_dram_clock_change_latency_us = 23.0,
.return_bus_width_bytes = 64,
.dispclk_dppclk_vco_speed_mhz = 3850,
.xfc_bus_transport_time_us = 20,
.xfc_xbuf_latency_tolerance_us = 4,
.use_urgent_burst_bw = 0
};

struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 };

#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL

@ -3135,6 +3246,9 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
uint32_t hw_internal_rev)
{
if (ASICREV_IS_NAVI14_M(hw_internal_rev))
return &dcn2_0_nv14_soc;

if (ASICREV_IS_NAVI12_P(hw_internal_rev))
return &dcn2_0_nv12_soc;

@ -55,6 +55,7 @@ static const char * const decon_clks_name[] = {
struct decon_context {
struct device *dev;
struct drm_device *drm_dev;
void *dma_priv;
struct exynos_drm_crtc *crtc;
struct exynos_drm_plane planes[WINDOWS_NR];
struct exynos_drm_plane_config configs[WINDOWS_NR];

@ -644,7 +645,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data)

decon_clear_channels(ctx->crtc);

return exynos_drm_register_dma(drm_dev, dev);
return exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
}

static void decon_unbind(struct device *dev, struct device *master, void *data)

@ -654,7 +655,7 @@ static void decon_unbind(struct device *dev, struct device *master, void *data)
decon_disable(ctx->crtc);

/* detach this sub driver from iommu mapping if supported. */
exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
}

static const struct component_ops decon_component_ops = {

@ -40,6 +40,7 @@
struct decon_context {
struct device *dev;
struct drm_device *drm_dev;
void *dma_priv;
struct exynos_drm_crtc *crtc;
struct exynos_drm_plane planes[WINDOWS_NR];
struct exynos_drm_plane_config configs[WINDOWS_NR];

@ -127,13 +128,13 @@ static int decon_ctx_initialize(struct decon_context *ctx,

decon_clear_channels(ctx->crtc);

return exynos_drm_register_dma(drm_dev, ctx->dev);
return exynos_drm_register_dma(drm_dev, ctx->dev, &ctx->dma_priv);
}

static void decon_ctx_remove(struct decon_context *ctx)
{
/* detach this sub driver from iommu mapping if supported. */
exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
}

static u32 decon_calc_clkdiv(struct decon_context *ctx,

@ -58,7 +58,7 @@ static inline void clear_dma_max_seg_size(struct device *dev)
* mapping.
*/
static int drm_iommu_attach_device(struct drm_device *drm_dev,
struct device *subdrv_dev)
struct device *subdrv_dev, void **dma_priv)
{
struct exynos_drm_private *priv = drm_dev->dev_private;
int ret;

@ -74,7 +74,14 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
return ret;

if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
if (to_dma_iommu_mapping(subdrv_dev))
/*
* Keep the original DMA mapping of the sub-device and
* restore it on Exynos DRM detach, otherwise the DMA
* framework considers it as IOMMU-less during the next
* probe (in case of deferred probe or modular build)
*/
*dma_priv = to_dma_iommu_mapping(subdrv_dev);
if (*dma_priv)
arm_iommu_detach_device(subdrv_dev);

ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);

@ -98,19 +105,21 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
* mapping
*/
static void drm_iommu_detach_device(struct drm_device *drm_dev,
struct device *subdrv_dev)
struct device *subdrv_dev, void **dma_priv)
{
struct exynos_drm_private *priv = drm_dev->dev_private;

if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
arm_iommu_detach_device(subdrv_dev);
else if (IS_ENABLED(CONFIG_IOMMU_DMA))
arm_iommu_attach_device(subdrv_dev, *dma_priv);
} else if (IS_ENABLED(CONFIG_IOMMU_DMA))
iommu_detach_device(priv->mapping, subdrv_dev);

clear_dma_max_seg_size(subdrv_dev);
}

int exynos_drm_register_dma(struct drm_device *drm, struct device *dev)
int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
void **dma_priv)
{
struct exynos_drm_private *priv = drm->dev_private;

@ -137,13 +146,14 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev)
priv->mapping = mapping;
}

return drm_iommu_attach_device(drm, dev);
return drm_iommu_attach_device(drm, dev, dma_priv);
}

void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev)
void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev,
void **dma_priv)
{
if (IS_ENABLED(CONFIG_EXYNOS_IOMMU))
drm_iommu_detach_device(drm, dev);
drm_iommu_detach_device(drm, dev, dma_priv);
}

void exynos_drm_cleanup_dma(struct drm_device *drm)

@ -223,8 +223,10 @@ static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
return priv->mapping ? true : false;
}

int exynos_drm_register_dma(struct drm_device *drm, struct device *dev);
void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev);
int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
void **dma_priv);
void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev,
void **dma_priv);
void exynos_drm_cleanup_dma(struct drm_device *drm);

#ifdef CONFIG_DRM_EXYNOS_DPI

@ -97,6 +97,7 @@ struct fimc_scaler {
struct fimc_context {
struct exynos_drm_ipp ipp;
struct drm_device *drm_dev;
void *dma_priv;
struct device *dev;
struct exynos_drm_ipp_task *task;
struct exynos_drm_ipp_formats *formats;

@ -1133,7 +1134,7 @@ static int fimc_bind(struct device *dev, struct device *master, void *data)

ctx->drm_dev = drm_dev;
ipp->drm_dev = drm_dev;
exynos_drm_register_dma(drm_dev, dev);
exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);

exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |

@ -1153,7 +1154,7 @@ static void fimc_unbind(struct device *dev, struct device *master,
struct exynos_drm_ipp *ipp = &ctx->ipp;

exynos_drm_ipp_unregister(dev, ipp);
exynos_drm_unregister_dma(drm_dev, dev);
exynos_drm_unregister_dma(drm_dev, dev, &ctx->dma_priv);
}

static const struct component_ops fimc_component_ops = {

@ -167,6 +167,7 @@ static struct fimd_driver_data exynos5420_fimd_driver_data = {
struct fimd_context {
struct device *dev;
struct drm_device *drm_dev;
void *dma_priv;
struct exynos_drm_crtc *crtc;
struct exynos_drm_plane planes[WINDOWS_NR];
struct exynos_drm_plane_config configs[WINDOWS_NR];

@ -1090,7 +1091,7 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
if (is_drm_iommu_supported(drm_dev))
fimd_clear_channels(ctx->crtc);

return exynos_drm_register_dma(drm_dev, dev);
return exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
}

static void fimd_unbind(struct device *dev, struct device *master,

@ -1100,7 +1101,7 @@ static void fimd_unbind(struct device *dev, struct device *master,

fimd_disable(ctx->crtc);

exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);

if (ctx->encoder)
exynos_dpi_remove(ctx->encoder);

@ -232,6 +232,7 @@ struct g2d_runqueue_node {

struct g2d_data {
struct device *dev;
void *dma_priv;
struct clk *gate_clk;
void __iomem *regs;
int irq;

@ -1409,7 +1410,7 @@ static int g2d_bind(struct device *dev, struct device *master, void *data)
return ret;
}

ret = exynos_drm_register_dma(drm_dev, dev);
ret = exynos_drm_register_dma(drm_dev, dev, &g2d->dma_priv);
if (ret < 0) {
dev_err(dev, "failed to enable iommu.\n");
g2d_fini_cmdlist(g2d);

@ -1434,7 +1435,7 @@ static void g2d_unbind(struct device *dev, struct device *master, void *data)
priv->g2d_dev = NULL;

cancel_work_sync(&g2d->runqueue_work);
exynos_drm_unregister_dma(g2d->drm_dev, dev);
exynos_drm_unregister_dma(g2d->drm_dev, dev, &g2d->dma_priv);
}

static const struct component_ops g2d_component_ops = {

@ -97,6 +97,7 @@ struct gsc_scaler {
struct gsc_context {
struct exynos_drm_ipp ipp;
struct drm_device *drm_dev;
void *dma_priv;
struct device *dev;
struct exynos_drm_ipp_task *task;
struct exynos_drm_ipp_formats *formats;

@ -1169,7 +1170,7 @@ static int gsc_bind(struct device *dev, struct device *master, void *data)

ctx->drm_dev = drm_dev;
ctx->drm_dev = drm_dev;
exynos_drm_register_dma(drm_dev, dev);
exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);

exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |

@ -1189,7 +1190,7 @@ static void gsc_unbind(struct device *dev, struct device *master,
struct exynos_drm_ipp *ipp = &ctx->ipp;

exynos_drm_ipp_unregister(dev, ipp);
exynos_drm_unregister_dma(drm_dev, dev);
exynos_drm_unregister_dma(drm_dev, dev, &ctx->dma_priv);
}

static const struct component_ops gsc_component_ops = {

@ -56,6 +56,7 @@ struct rot_variant {
struct rot_context {
struct exynos_drm_ipp ipp;
struct drm_device *drm_dev;
void *dma_priv;
struct device *dev;
void __iomem *regs;
struct clk *clock;

@ -243,7 +244,7 @@ static int rotator_bind(struct device *dev, struct device *master, void *data)

rot->drm_dev = drm_dev;
ipp->drm_dev = drm_dev;
exynos_drm_register_dma(drm_dev, dev);
exynos_drm_register_dma(drm_dev, dev, &rot->dma_priv);

exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE,

@ -261,7 +262,7 @@ static void rotator_unbind(struct device *dev, struct device *master,
struct exynos_drm_ipp *ipp = &rot->ipp;

exynos_drm_ipp_unregister(dev, ipp);
exynos_drm_unregister_dma(rot->drm_dev, rot->dev);
exynos_drm_unregister_dma(rot->drm_dev, rot->dev, &rot->dma_priv);
}

static const struct component_ops rotator_component_ops = {

@ -39,6 +39,7 @@ struct scaler_data {
struct scaler_context {
struct exynos_drm_ipp ipp;
struct drm_device *drm_dev;
void *dma_priv;
struct device *dev;
void __iomem *regs;
struct clk *clock[SCALER_MAX_CLK];

@ -450,7 +451,7 @@ static int scaler_bind(struct device *dev, struct device *master, void *data)

scaler->drm_dev = drm_dev;
ipp->drm_dev = drm_dev;
exynos_drm_register_dma(drm_dev, dev);
exynos_drm_register_dma(drm_dev, dev, &scaler->dma_priv);

exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |

@ -470,7 +471,8 @@ static void scaler_unbind(struct device *dev, struct device *master,
struct exynos_drm_ipp *ipp = &scaler->ipp;

exynos_drm_ipp_unregister(dev, ipp);
exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev);
exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev,
&scaler->dma_priv);
}

static const struct component_ops scaler_component_ops = {

@ -94,6 +94,7 @@ struct mixer_context {
struct platform_device *pdev;
struct device *dev;
struct drm_device *drm_dev;
void *dma_priv;
struct exynos_drm_crtc *crtc;
struct exynos_drm_plane planes[MIXER_WIN_NR];
unsigned long flags;

@ -894,12 +895,14 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
}
}

return exynos_drm_register_dma(drm_dev, mixer_ctx->dev);
return exynos_drm_register_dma(drm_dev, mixer_ctx->dev,
&mixer_ctx->dma_priv);
}

static void mixer_ctx_remove(struct mixer_context *mixer_ctx)
{
exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev);
exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev,
&mixer_ctx->dma_priv);
}

static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)

@ -477,6 +477,7 @@ static int hix5hd2_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&priv->adap);
pm_runtime_disable(priv->dev);
pm_runtime_set_suspended(priv->dev);
clk_disable_unprepare(priv->clk);

return 0;
}

@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

@ -75,20 +76,15 @@ static void gpu_enable_i2c_bus(struct gpu_i2c_dev *i2cd)

static int gpu_i2c_check_status(struct gpu_i2c_dev *i2cd)
{
unsigned long target = jiffies + msecs_to_jiffies(1000);
u32 val;
int ret;

do {
val = readl(i2cd->regs + I2C_MST_CNTL);
if (!(val & I2C_MST_CNTL_CYCLE_TRIGGER))
break;
if ((val & I2C_MST_CNTL_STATUS) !=
I2C_MST_CNTL_STATUS_BUS_BUSY)
break;
usleep_range(500, 600);
} while (time_is_after_jiffies(target));
ret = readl_poll_timeout(i2cd->regs + I2C_MST_CNTL, val,
!(val & I2C_MST_CNTL_CYCLE_TRIGGER) ||
(val & I2C_MST_CNTL_STATUS) != I2C_MST_CNTL_STATUS_BUS_BUSY,
500, 1000 * USEC_PER_MSEC);

if (time_is_before_jiffies(target)) {
if (ret) {
dev_err(i2cd->dev, "i2c timeout error %x\n", val);
return -ETIMEDOUT;
}

@ -899,7 +899,9 @@ static int add_one_compat_dev(struct ib_device *device,
cdev->dev.parent = device->dev.parent;
rdma_init_coredev(cdev, device, read_pnet(&rnet->net));
cdev->dev.release = compatdev_release;
dev_set_name(&cdev->dev, "%s", dev_name(&device->dev));
ret = dev_set_name(&cdev->dev, "%s", dev_name(&device->dev));
if (ret)
goto add_err;

ret = device_add(&cdev->dev);
if (ret)

@ -863,6 +863,10 @@ static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,

nla_strlcpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
IB_DEVICE_NAME_MAX);
if (strlen(name) == 0) {
err = -EINVAL;
goto done;
}
err = ib_device_rename(device, name);
goto done;
}

@ -1468,7 +1472,7 @@ static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,

nla_strlcpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
sizeof(ibdev_name));
if (strchr(ibdev_name, '%'))
if (strchr(ibdev_name, '%') || strlen(ibdev_name) == 0)
return -EINVAL;

nla_strlcpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type));

@ -349,16 +349,11 @@ static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
else if (qp_pps)
new_pps->main.pkey_index = qp_pps->main.pkey_index;

if ((qp_attr_mask & IB_QP_PKEY_INDEX) && (qp_attr_mask & IB_QP_PORT))
if (((qp_attr_mask & IB_QP_PKEY_INDEX) &&
(qp_attr_mask & IB_QP_PORT)) ||
(qp_pps && qp_pps->main.state != IB_PORT_PKEY_NOT_VALID))
new_pps->main.state = IB_PORT_PKEY_VALID;

if (!(qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) && qp_pps) {
new_pps->main.port_num = qp_pps->main.port_num;
new_pps->main.pkey_index = qp_pps->main.pkey_index;
if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
new_pps->main.state = IB_PORT_PKEY_VALID;
}

if (qp_attr_mask & IB_QP_ALT_PATH) {
new_pps->alt.port_num = qp_attr->alt_port_num;
new_pps->alt.pkey_index = qp_attr->alt_pkey_index;

@ -1129,17 +1129,30 @@ static const struct file_operations umad_sm_fops = {
.llseek = no_llseek,
};

static struct ib_umad_port *get_port(struct ib_device *ibdev,
struct ib_umad_device *umad_dev,
unsigned int port)
{
if (!umad_dev)
return ERR_PTR(-EOPNOTSUPP);
if (!rdma_is_port_valid(ibdev, port))
return ERR_PTR(-EINVAL);
if (!rdma_cap_ib_mad(ibdev, port))
return ERR_PTR(-EOPNOTSUPP);

return &umad_dev->ports[port - rdma_start_port(ibdev)];
}

static int ib_umad_get_nl_info(struct ib_device *ibdev, void *client_data,
struct ib_client_nl_info *res)
{
struct ib_umad_device *umad_dev = client_data;
struct ib_umad_port *port = get_port(ibdev, client_data, res->port);

if (!rdma_is_port_valid(ibdev, res->port))
return -EINVAL;
if (IS_ERR(port))
return PTR_ERR(port);

res->abi = IB_USER_MAD_ABI_VERSION;
res->cdev = &umad_dev->ports[res->port - rdma_start_port(ibdev)].dev;

res->cdev = &port->dev;
return 0;
}

@ -1154,15 +1167,13 @@ MODULE_ALIAS_RDMA_CLIENT("umad");
static int ib_issm_get_nl_info(struct ib_device *ibdev, void *client_data,
struct ib_client_nl_info *res)
{
struct ib_umad_device *umad_dev =
ib_get_client_data(ibdev, &umad_client);
struct ib_umad_port *port = get_port(ibdev, client_data, res->port);

if (!rdma_is_port_valid(ibdev, res->port))
return -EINVAL;
if (IS_ERR(port))
return PTR_ERR(port);

res->abi = IB_USER_MAD_ABI_VERSION;
res->cdev = &umad_dev->ports[res->port - rdma_start_port(ibdev)].sm_dev;

res->cdev = &port->sm_dev;
return 0;
}

@ -330,6 +330,22 @@ static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
dump_cqe(dev, cqe);
}

static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
u16 tail, u16 head)
{
u16 idx;

do {
idx = tail & (qp->sq.wqe_cnt - 1);
if (idx == head)
break;

tail = qp->sq.w_list[idx].next;
} while (1);
tail = qp->sq.w_list[idx].next;
qp->sq.last_poll = tail;
}

static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
{
mlx5_frag_buf_free(dev->mdev, &buf->frag_buf);

@ -368,7 +384,7 @@ static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
}

static void sw_comp(struct mlx5_ib_qp *qp, int num_entries, struct ib_wc *wc,
int *npolled, int is_send)
int *npolled, bool is_send)
{
struct mlx5_ib_wq *wq;
unsigned int cur;

@ -383,10 +399,16 @@ static void sw_comp(struct mlx5_ib_qp *qp, int num_entries, struct ib_wc *wc,
return;

for (i = 0; i < cur && np < num_entries; i++) {
wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
unsigned int idx;

idx = (is_send) ? wq->last_poll : wq->tail;
idx &= (wq->wqe_cnt - 1);
wc->wr_id = wq->wrid[idx];
wc->status = IB_WC_WR_FLUSH_ERR;
wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
wq->tail++;
if (is_send)
wq->last_poll = wq->w_list[idx].next;
np++;
wc->qp = &qp->ibqp;
wc++;

@ -476,6 +498,7 @@ repoll:
wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
idx = wqe_ctr & (wq->wqe_cnt - 1);
handle_good_req(wc, cqe64, wq, idx);
handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
wc->wr_id = wq->wrid[idx];
wq->tail = wq->wqe_head[idx] + 1;
wc->status = IB_WC_SUCCESS;

@ -5638,9 +5638,10 @@
mlx5_ib_counter_alloc_stats(struct rdma_counter *counter)
const struct mlx5_ib_counters *cnts =
get_counters(dev, counter->port - 1);

/* Q counters are in the beginning of all counters */
return rdma_alloc_hw_stats_struct(cnts->names,
cnts->num_q_counters,
cnts->num_q_counters +
cnts->num_cong_counters +
cnts->num_ext_ppcnt_counters,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

@ -283,6 +283,7 @@ struct mlx5_ib_wq {
unsigned head;
unsigned tail;
u16 cur_post;
u16 last_poll;
void *cur_edge;
};

@ -3725,6 +3725,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
qp->sq.cur_post = 0;
if (qp->sq.wqe_cnt)
qp->sq.cur_edge = get_sq_edge(&qp->sq, 0);
qp->sq.last_poll = 0;
qp->db.db[MLX5_RCV_DBR] = 0;
qp->db.db[MLX5_SND_DBR] = 0;
}

@ -6131,6 +6132,10 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
if (udata->outlen && udata->outlen < min_resp_len)
return ERR_PTR(-EINVAL);

if (!capable(CAP_SYS_RAWIO) &&
init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP)
return ERR_PTR(-EPERM);

dev = to_mdev(pd->device);
switch (init_attr->wq_type) {
case IB_WQT_RQ:

@ -327,7 +327,7 @@ void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
if (cq->ip)
kref_put(&cq->ip->ref, rvt_release_mmap_info);
else
vfree(cq->queue);
vfree(cq->kqueue);
}

/**

@ -190,6 +190,7 @@ static void input_repeat_key(struct timer_list *t)
input_value_sync
};

input_set_timestamp(dev, ktime_get());
input_pass_values(dev, vals, ARRAY_SIZE(vals));

if (dev->rep[REP_PERIOD])

@ -186,6 +186,7 @@ static const char * const smbus_pnp_ids[] = {
"SYN3052", /* HP EliteBook 840 G4 */
"SYN3221", /* HP 15-ay000 */
"SYN323d", /* HP Spectre X360 13-w013dx */
"SYN3257", /* HP Envy 13-ad105ng */
NULL
};

@ -432,7 +432,7 @@ static int raydium_i2c_write_object(struct i2c_client *client,
return 0;
}

static bool raydium_i2c_boot_trigger(struct i2c_client *client)
static int raydium_i2c_boot_trigger(struct i2c_client *client)
{
static const u8 cmd[7][6] = {
{ 0x08, 0x0C, 0x09, 0x00, 0x50, 0xD7 },

@ -457,10 +457,10 @@ static bool raydium_i2c_boot_trigger(struct i2c_client *client)
}
}

return false;
return 0;
}

static bool raydium_i2c_fw_trigger(struct i2c_client *client)
static int raydium_i2c_fw_trigger(struct i2c_client *client)
{
static const u8 cmd[5][11] = {
{ 0, 0x09, 0x71, 0x0C, 0x09, 0x00, 0x50, 0xD7, 0, 0, 0 },

@ -483,7 +483,7 @@ static bool raydium_i2c_fw_trigger(struct i2c_client *client)
}
}

return false;
return 0;
}

static int raydium_i2c_check_path(struct i2c_client *client)

|
|
@ -371,7 +371,8 @@ dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
|
|||
{
|
||||
struct dmar_drhd_unit *dmaru;
|
||||
|
||||
list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
|
||||
list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list,
|
||||
dmar_rcu_check())
|
||||
if (dmaru->segment == drhd->segment &&
|
||||
dmaru->reg_base_addr == drhd->address)
|
||||
return dmaru;
|
||||
|
|
|
@ -32,38 +32,42 @@ struct iommu_regset {
|
|||
|
||||
#define IOMMU_REGSET_ENTRY(_reg_) \
|
||||
{ DMAR_##_reg_##_REG, __stringify(_reg_) }
|
||||
static const struct iommu_regset iommu_regs[] = {
|
||||
|
||||
static const struct iommu_regset iommu_regs_32[] = {
|
||||
IOMMU_REGSET_ENTRY(VER),
|
||||
IOMMU_REGSET_ENTRY(CAP),
|
||||
IOMMU_REGSET_ENTRY(ECAP),
|
||||
IOMMU_REGSET_ENTRY(GCMD),
|
||||
IOMMU_REGSET_ENTRY(GSTS),
|
||||
IOMMU_REGSET_ENTRY(RTADDR),
|
||||
IOMMU_REGSET_ENTRY(CCMD),
|
||||
IOMMU_REGSET_ENTRY(FSTS),
|
||||
IOMMU_REGSET_ENTRY(FECTL),
|
||||
IOMMU_REGSET_ENTRY(FEDATA),
|
||||
IOMMU_REGSET_ENTRY(FEADDR),
|
||||
IOMMU_REGSET_ENTRY(FEUADDR),
|
||||
IOMMU_REGSET_ENTRY(AFLOG),
|
||||
IOMMU_REGSET_ENTRY(PMEN),
|
||||
IOMMU_REGSET_ENTRY(PLMBASE),
|
||||
IOMMU_REGSET_ENTRY(PLMLIMIT),
|
||||
IOMMU_REGSET_ENTRY(PHMBASE),
|
||||
IOMMU_REGSET_ENTRY(PHMLIMIT),
|
||||
IOMMU_REGSET_ENTRY(IQH),
|
||||
IOMMU_REGSET_ENTRY(IQT),
|
||||
IOMMU_REGSET_ENTRY(IQA),
|
||||
IOMMU_REGSET_ENTRY(ICS),
|
||||
IOMMU_REGSET_ENTRY(IRTA),
|
||||
IOMMU_REGSET_ENTRY(PQH),
|
||||
IOMMU_REGSET_ENTRY(PQT),
|
||||
IOMMU_REGSET_ENTRY(PQA),
|
||||
IOMMU_REGSET_ENTRY(PRS),
|
||||
IOMMU_REGSET_ENTRY(PECTL),
|
||||
IOMMU_REGSET_ENTRY(PEDATA),
|
||||
IOMMU_REGSET_ENTRY(PEADDR),
|
||||
IOMMU_REGSET_ENTRY(PEUADDR),
|
||||
};
|
||||
|
||||
static const struct iommu_regset iommu_regs_64[] = {
|
||||
IOMMU_REGSET_ENTRY(CAP),
|
||||
IOMMU_REGSET_ENTRY(ECAP),
|
||||
IOMMU_REGSET_ENTRY(RTADDR),
|
||||
IOMMU_REGSET_ENTRY(CCMD),
|
||||
IOMMU_REGSET_ENTRY(AFLOG),
|
||||
IOMMU_REGSET_ENTRY(PHMBASE),
|
||||
IOMMU_REGSET_ENTRY(PHMLIMIT),
|
||||
IOMMU_REGSET_ENTRY(IQH),
|
||||
IOMMU_REGSET_ENTRY(IQT),
|
||||
IOMMU_REGSET_ENTRY(IQA),
|
||||
IOMMU_REGSET_ENTRY(IRTA),
|
||||
IOMMU_REGSET_ENTRY(PQH),
|
||||
IOMMU_REGSET_ENTRY(PQT),
|
||||
IOMMU_REGSET_ENTRY(PQA),
|
||||
IOMMU_REGSET_ENTRY(MTRRCAP),
|
||||
IOMMU_REGSET_ENTRY(MTRRDEF),
|
||||
IOMMU_REGSET_ENTRY(MTRR_FIX64K_00000),
|
||||
|
@ -126,10 +130,16 @@ static int iommu_regset_show(struct seq_file *m, void *unused)
|
|||
* by adding the offset to the pointer (virtual address).
|
||||
*/
|
||||
raw_spin_lock_irqsave(&iommu->register_lock, flag);
|
||||
for (i = 0 ; i < ARRAY_SIZE(iommu_regs); i++) {
|
||||
value = dmar_readq(iommu->reg + iommu_regs[i].offset);
|
||||
for (i = 0 ; i < ARRAY_SIZE(iommu_regs_32); i++) {
|
||||
value = dmar_readl(iommu->reg + iommu_regs_32[i].offset);
|
||||
seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n",
|
||||
iommu_regs[i].regs, iommu_regs[i].offset,
|
||||
iommu_regs_32[i].regs, iommu_regs_32[i].offset,
|
||||
value);
|
||||
}
|
||||
for (i = 0 ; i < ARRAY_SIZE(iommu_regs_64); i++) {
|
||||
value = dmar_readq(iommu->reg + iommu_regs_64[i].offset);
|
||||
seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n",
|
||||
iommu_regs_64[i].regs, iommu_regs_64[i].offset,
|
||||
value);
|
||||
}
|
||||
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
|
||||
|
@ -271,9 +281,16 @@ static int dmar_translation_struct_show(struct seq_file *m, void *unused)
|
|||
{
|
||||
struct dmar_drhd_unit *drhd;
|
||||
struct intel_iommu *iommu;
|
||||
u32 sts;
|
||||
|
||||
rcu_read_lock();
|
||||
for_each_active_iommu(iommu, drhd) {
|
||||
sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
|
||||
if (!(sts & DMA_GSTS_TES)) {
|
||||
seq_printf(m, "DMA Remapping is not enabled on %s\n",
|
||||
iommu->name);
|
||||
continue;
|
||||
}
|
||||
root_tbl_walk(m, iommu);
|
||||
seq_putc(m, '\n');
|
||||
}
|
||||
|
@ -343,6 +360,7 @@ static int ir_translation_struct_show(struct seq_file *m, void *unused)
|
|||
struct dmar_drhd_unit *drhd;
|
||||
struct intel_iommu *iommu;
|
||||
u64 irta;
|
||||
u32 sts;
|
||||
|
||||
rcu_read_lock();
|
||||
for_each_active_iommu(iommu, drhd) {
|
||||
|
@ -352,7 +370,8 @@ static int ir_translation_struct_show(struct seq_file *m, void *unused)
|
|||
seq_printf(m, "Remapped Interrupt supported on IOMMU: %s\n",
|
||||
iommu->name);
|
||||
|
||||
if (iommu->ir_table) {
|
||||
sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
|
||||
if (iommu->ir_table && (sts & DMA_GSTS_IRES)) {
|
||||
irta = virt_to_phys(iommu->ir_table->base);
|
||||
seq_printf(m, " IR table address:%llx\n", irta);
|
||||
ir_tbl_remap_entry_show(m, iommu);
|
||||
|
|
|
@ -4961,6 +4961,9 @@ int __init intel_iommu_init(void)
|
|||
|
||||
down_write(&dmar_global_lock);
|
||||
|
||||
if (!no_iommu)
|
||||
intel_iommu_debugfs_init();
|
||||
|
||||
if (no_iommu || dmar_disabled) {
|
||||
/*
|
||||
* We exit the function here to ensure IOMMU's remapping and
|
||||
|
@ -5056,7 +5059,6 @@ int __init intel_iommu_init(void)
|
|||
pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
|
||||
|
||||
intel_iommu_enabled = 1;
|
||||
intel_iommu_debugfs_init();
|
||||
|
||||
return 0;
|
||||
|
||||
|
|
|
@ -511,6 +511,9 @@ static int flexcop_usb_init(struct flexcop_usb *fc_usb)
|
|||
return ret;
|
||||
}
|
||||
|
||||
if (fc_usb->uintf->cur_altsetting->desc.bNumEndpoints < 1)
|
||||
return -ENODEV;
|
||||
|
||||
switch (fc_usb->udev->speed) {
|
||||
case USB_SPEED_LOW:
|
||||
err("cannot handle USB speed because it is too slow.");
|
||||
|
@ -544,9 +547,6 @@ static int flexcop_usb_probe(struct usb_interface *intf,
|
|||
struct flexcop_device *fc = NULL;
|
||||
int ret;
|
||||
|
||||
if (intf->cur_altsetting->desc.bNumEndpoints < 1)
|
||||
return -ENODEV;
|
||||
|
||||
if ((fc = flexcop_device_kmalloc(sizeof(struct flexcop_usb))) == NULL) {
|
||||
err("out of memory\n");
|
||||
return -ENOMEM;
|
||||
|
|
|
@ -818,7 +818,7 @@ int dib0700_rc_setup(struct dvb_usb_device *d, struct usb_interface *intf)
|
|||
|
||||
/* Starting in firmware 1.20, the RC info is provided on a bulk pipe */
|
||||
|
||||
if (intf->altsetting[0].desc.bNumEndpoints < rc_ep + 1)
|
||||
if (intf->cur_altsetting->desc.bNumEndpoints < rc_ep + 1)
|
||||
return -ENODEV;
|
||||
|
||||
purb = usb_alloc_urb(0, GFP_KERNEL);
|
||||
|
@ -838,7 +838,7 @@ int dib0700_rc_setup(struct dvb_usb_device *d, struct usb_interface *intf)
|
|||
* Some devices like the Hauppauge NovaTD model 52009 use an interrupt
|
||||
* endpoint, while others use a bulk one.
|
||||
*/
|
||||
e = &intf->altsetting[0].endpoint[rc_ep].desc;
|
||||
e = &intf->cur_altsetting->endpoint[rc_ep].desc;
|
||||
if (usb_endpoint_dir_in(e)) {
|
||||
if (usb_endpoint_xfer_bulk(e)) {
|
||||
pipe = usb_rcvbulkpipe(d->udev, rc_ep);
|
||||
|
|
|
@ -3477,6 +3477,11 @@ static void ov511_mode_init_regs(struct sd *sd)
|
|||
return;
|
||||
}
|
||||
|
||||
if (alt->desc.bNumEndpoints < 1) {
|
||||
sd->gspca_dev.usb_err = -ENODEV;
|
||||
return;
|
||||
}
|
||||
|
||||
packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
|
||||
reg_w(sd, R51x_FIFO_PSIZE, packet_size >> 5);
|
||||
|
||||
|
@ -3603,6 +3608,11 @@ static void ov518_mode_init_regs(struct sd *sd)
|
|||
return;
|
||||
}
|
||||
|
||||
if (alt->desc.bNumEndpoints < 1) {
|
||||
sd->gspca_dev.usb_err = -ENODEV;
|
||||
return;
|
||||
}
|
||||
|
||||
packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
|
||||
ov518_reg_w32(sd, R51x_FIFO_PSIZE, packet_size & ~7, 2);
|
||||
|
||||
|
|
|
@ -282,6 +282,9 @@ static int stv06xx_start(struct gspca_dev *gspca_dev)
|
|||
return -EIO;
|
||||
}
|
||||
|
||||
if (alt->desc.bNumEndpoints < 1)
|
||||
return -ENODEV;
|
||||
|
||||
packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
|
||||
err = stv06xx_write_bridge(sd, STV_ISO_SIZE_L, packet_size);
|
||||
if (err < 0)
|
||||
|
@ -306,11 +309,21 @@ out:
|
|||
|
||||
static int stv06xx_isoc_init(struct gspca_dev *gspca_dev)
|
||||
{
|
||||
struct usb_interface_cache *intfc;
|
||||
struct usb_host_interface *alt;
|
||||
struct sd *sd = (struct sd *) gspca_dev;
|
||||
|
||||
intfc = gspca_dev->dev->actconfig->intf_cache[0];
|
||||
|
||||
if (intfc->num_altsetting < 2)
|
||||
return -ENODEV;
|
||||
|
||||
alt = &intfc->altsetting[1];
|
||||
|
||||
if (alt->desc.bNumEndpoints < 1)
|
||||
return -ENODEV;
|
||||
|
||||
/* Start isoc bandwidth "negotiation" at max isoc bandwidth */
|
||||
alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1];
|
||||
alt->endpoint[0].desc.wMaxPacketSize =
|
||||
cpu_to_le16(sd->sensor->max_packet_size[gspca_dev->curr_mode]);
|
||||
|
||||
|
@ -323,6 +336,10 @@ static int stv06xx_isoc_nego(struct gspca_dev *gspca_dev)
|
|||
struct usb_host_interface *alt;
|
||||
struct sd *sd = (struct sd *) gspca_dev;
|
||||
|
||||
/*
|
||||
* Existence of altsetting and endpoint was verified in
|
||||
* stv06xx_isoc_init()
|
||||
*/
|
||||
alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1];
|
||||
packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
|
||||
min_packet_size = sd->sensor->min_packet_size[gspca_dev->curr_mode];
|
||||
|
|
|
@ -185,6 +185,10 @@ static int pb0100_start(struct sd *sd)
|
|||
alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt);
|
||||
if (!alt)
|
||||
return -ENODEV;
|
||||
|
||||
if (alt->desc.bNumEndpoints < 1)
|
||||
return -ENODEV;
|
||||
|
||||
packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
|
||||
|
||||
/* If we don't have enough bandwidth use a lower framerate */
|
||||
|
|
|
@ -1442,6 +1442,9 @@ static int cit_get_packet_size(struct gspca_dev *gspca_dev)
|
|||
return -EIO;
|
||||
}
|
||||
|
||||
if (alt->desc.bNumEndpoints < 1)
|
||||
return -ENODEV;
|
||||
|
||||
return le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
|
||||
}
|
||||
|
||||
|
@ -2626,6 +2629,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
|
|||
|
||||
static int sd_isoc_init(struct gspca_dev *gspca_dev)
|
||||
{
|
||||
struct usb_interface_cache *intfc;
|
||||
struct usb_host_interface *alt;
|
||||
int max_packet_size;
|
||||
|
||||
|
@ -2641,8 +2645,17 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
|
|||
break;
|
||||
}
|
||||
|
||||
intfc = gspca_dev->dev->actconfig->intf_cache[0];
|
||||
|
||||
if (intfc->num_altsetting < 2)
|
||||
return -ENODEV;
|
||||
|
||||
alt = &intfc->altsetting[1];
|
||||
|
||||
if (alt->desc.bNumEndpoints < 1)
|
||||
return -ENODEV;
|
||||
|
||||
/* Start isoc bandwidth "negotiation" at max isoc bandwidth */
|
||||
alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1];
|
||||
alt->endpoint[0].desc.wMaxPacketSize = cpu_to_le16(max_packet_size);
|
||||
|
||||
return 0;
|
||||
|
@ -2665,6 +2678,9 @@ static int sd_isoc_nego(struct gspca_dev *gspca_dev)
|
|||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* Existence of altsetting and endpoint was verified in sd_isoc_init()
|
||||
*/
|
||||
alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1];
|
||||
packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
|
||||
if (packet_size <= min_packet_size)
|
||||
|
|
|
@ -56,7 +56,7 @@ int usbtv_set_regs(struct usbtv *usbtv, const u16 regs[][2], int size)
|
|||
|
||||
ret = usb_control_msg(usbtv->udev, pipe, USBTV_REQUEST_REG,
|
||||
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
|
||||
value, index, NULL, 0, 0);
|
||||
value, index, NULL, 0, USB_CTRL_GET_TIMEOUT);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -800,7 +800,8 @@ static int usbtv_s_ctrl(struct v4l2_ctrl *ctrl)
|
|||
ret = usb_control_msg(usbtv->udev,
|
||||
usb_rcvctrlpipe(usbtv->udev, 0), USBTV_CONTROL_REG,
|
||||
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
|
||||
0, USBTV_BASE + 0x0244, (void *)data, 3, 0);
|
||||
0, USBTV_BASE + 0x0244, (void *)data, 3,
|
||||
USB_CTRL_GET_TIMEOUT);
|
||||
if (ret < 0)
|
||||
goto error;
|
||||
}
|
||||
|
@ -851,7 +852,7 @@ static int usbtv_s_ctrl(struct v4l2_ctrl *ctrl)
|
|||
ret = usb_control_msg(usbtv->udev, usb_sndctrlpipe(usbtv->udev, 0),
|
||||
USBTV_CONTROL_REG,
|
||||
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
|
||||
0, index, (void *)data, size, 0);
|
||||
0, index, (void *)data, size, USB_CTRL_SET_TIMEOUT);
|
||||
|
||||
error:
|
||||
if (ret < 0)
|
||||
|
|
|
@@ -179,6 +179,7 @@ static void v4l2_subdev_release(struct v4l2_subdev *sd)

    if (sd->internal_ops && sd->internal_ops->release)
        sd->internal_ops->release(sd);
    sd->devnode = NULL;
    module_put(owner);
}

@@ -1734,8 +1734,11 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
     * the erase operation does not exceed the max_busy_timeout, we should
     * use R1B response. Or we need to prevent the host from doing hw busy
     * detection, which is done by converting to a R1 response instead.
     * Note, some hosts requires R1B, which also means they are on their own
     * when it comes to deal with the busy timeout.
     */
    if (card->host->max_busy_timeout &&
    if (!(card->host->caps & MMC_CAP_NEED_RSP_BUSY) &&
        card->host->max_busy_timeout &&
        busy_timeout > card->host->max_busy_timeout) {
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
    } else {

@@ -1911,9 +1911,12 @@ static int mmc_sleep(struct mmc_host *host)
     * If the max_busy_timeout of the host is specified, validate it against
     * the sleep cmd timeout. A failure means we need to prevent the host
     * from doing hw busy detection, which is done by converting to a R1
     * response instead of a R1B.
     * response instead of a R1B. Note, some hosts requires R1B, which also
     * means they are on their own when it comes to deal with the busy
     * timeout.
     */
    if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) {
    if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
        (timeout_ms > host->max_busy_timeout)) {
        cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
    } else {
        cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;

@@ -538,10 +538,12 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
     * If the cmd timeout and the max_busy_timeout of the host are both
     * specified, let's validate them. A failure means we need to prevent
     * the host from doing hw busy detection, which is done by converting
     * to a R1 response instead of a R1B.
     * to a R1 response instead of a R1B. Note, some hosts requires R1B,
     * which also means they are on their own when it comes to deal with the
     * busy timeout.
     */
    if (timeout_ms && host->max_busy_timeout &&
        (timeout_ms > host->max_busy_timeout))
    if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && timeout_ms &&
        host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
        use_r1b_resp = false;

    cmd.opcode = MMC_SWITCH;

@@ -1134,6 +1134,9 @@ static int sdhci_omap_probe(struct platform_device *pdev)
    host->mmc_host_ops.execute_tuning = sdhci_omap_execute_tuning;
    host->mmc_host_ops.enable_sdio_irq = sdhci_omap_enable_sdio_irq;

    /* R1B responses is required to properly manage HW busy detection. */
    mmc->caps |= MMC_CAP_NEED_RSP_BUSY;

    ret = sdhci_setup_host(host);
    if (ret)
        goto err_put_sync;

@@ -1552,6 +1552,9 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
    if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
        host->mmc->caps |= MMC_CAP_1_8V_DDR;

    /* R1B responses is required to properly manage HW busy detection. */
    host->mmc->caps |= MMC_CAP_NEED_RSP_BUSY;

    tegra_sdhci_parse_dt(host);

    tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",

@@ -106,6 +106,7 @@ config NET_FC
config IFB
    tristate "Intermediate Functional Block support"
    depends on NET_CLS_ACT
    select NET_REDIRECT
    ---help---
      This is an intermediate driver that allows sharing of
      resources.

@@ -625,7 +625,10 @@ err_free_chan:
    tty->disc_data = NULL;
    clear_bit(SLF_INUSE, &sl->flags);
    slc_free_netdev(sl->dev);
    /* do not call free_netdev before rtnl_unlock */
    rtnl_unlock();
    free_netdev(sl->dev);
    return err;

err_exit:
    rtnl_unlock();

@@ -566,7 +566,7 @@ mt7530_mib_reset(struct dsa_switch *ds)
static void
mt7530_port_set_status(struct mt7530_priv *priv, int port, int enable)
{
    u32 mask = PMCR_TX_EN | PMCR_RX_EN;
    u32 mask = PMCR_TX_EN | PMCR_RX_EN | PMCR_FORCE_LNK;

    if (enable)
        mt7530_set(priv, MT7530_PMCR_P(port), mask);

@@ -1439,7 +1439,7 @@ static void mt7530_phylink_mac_config(struct dsa_switch *ds, int port,
    mcr_new &= ~(PMCR_FORCE_SPEED_1000 | PMCR_FORCE_SPEED_100 |
                 PMCR_FORCE_FDX | PMCR_TX_FC_EN | PMCR_RX_FC_EN);
    mcr_new |= PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | PMCR_BACKOFF_EN |
               PMCR_BACKPR_EN | PMCR_FORCE_MODE | PMCR_FORCE_LNK;
               PMCR_BACKPR_EN | PMCR_FORCE_MODE;

    /* Are we connected to external phy */
    if (port == 5 && dsa_is_user_port(ds, 5))

@ -3652,13 +3652,15 @@ err_disable_device:
|
|||
|
||||
/*****************************************************************************/
|
||||
|
||||
/* ena_remove - Device Removal Routine
|
||||
/* __ena_shutoff - Helper used in both PCI remove/shutdown routines
|
||||
* @pdev: PCI device information struct
|
||||
* @shutdown: Is it a shutdown operation? If false, means it is a removal
|
||||
*
|
||||
* ena_remove is called by the PCI subsystem to alert the driver
|
||||
* that it should release a PCI device.
|
||||
* __ena_shutoff is a helper routine that does the real work on shutdown and
|
||||
* removal paths; the difference between those paths is with regards to whether
|
||||
* dettach or unregister the netdevice.
|
||||
*/
|
||||
static void ena_remove(struct pci_dev *pdev)
|
||||
static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
|
||||
{
|
||||
struct ena_adapter *adapter = pci_get_drvdata(pdev);
|
||||
struct ena_com_dev *ena_dev;
|
||||
|
@ -3677,13 +3679,17 @@ static void ena_remove(struct pci_dev *pdev)
|
|||
|
||||
cancel_work_sync(&adapter->reset_task);
|
||||
|
||||
rtnl_lock();
|
||||
rtnl_lock(); /* lock released inside the below if-else block */
|
||||
ena_destroy_device(adapter, true);
|
||||
rtnl_unlock();
|
||||
|
||||
unregister_netdev(netdev);
|
||||
|
||||
free_netdev(netdev);
|
||||
if (shutdown) {
|
||||
netif_device_detach(netdev);
|
||||
dev_close(netdev);
|
||||
rtnl_unlock();
|
||||
} else {
|
||||
rtnl_unlock();
|
||||
unregister_netdev(netdev);
|
||||
free_netdev(netdev);
|
||||
}
|
||||
|
||||
ena_com_rss_destroy(ena_dev);
|
||||
|
||||
|
@ -3698,6 +3704,30 @@ static void ena_remove(struct pci_dev *pdev)
|
|||
vfree(ena_dev);
|
||||
}
|
||||
|
||||
/* ena_remove - Device Removal Routine
|
||||
* @pdev: PCI device information struct
|
||||
*
|
||||
* ena_remove is called by the PCI subsystem to alert the driver
|
||||
* that it should release a PCI device.
|
||||
*/
|
||||
|
||||
static void ena_remove(struct pci_dev *pdev)
|
||||
{
|
||||
__ena_shutoff(pdev, false);
|
||||
}
|
||||
|
||||
/* ena_shutdown - Device Shutdown Routine
|
||||
* @pdev: PCI device information struct
|
||||
*
|
||||
* ena_shutdown is called by the PCI subsystem to alert the driver that
|
||||
* a shutdown/reboot (or kexec) is happening and device must be disabled.
|
||||
*/
|
||||
|
||||
static void ena_shutdown(struct pci_dev *pdev)
|
||||
{
|
||||
__ena_shutoff(pdev, true);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
/* ena_suspend - PM suspend callback
|
||||
* @pdev: PCI device information struct
|
||||
|
@@ -3747,6 +3777,7 @@ static struct pci_driver ena_pci_driver = {
    .id_table = ena_pci_tbl,
    .probe = ena_probe,
    .remove = ena_remove,
    .shutdown = ena_shutdown,
#ifdef CONFIG_PM
    .suspend = ena_suspend,
    .resume = ena_resume,

@ -6863,12 +6863,12 @@ skip_rdma:
|
|||
}
|
||||
ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
|
||||
rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
|
||||
if (rc)
|
||||
if (rc) {
|
||||
netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
|
||||
rc);
|
||||
else
|
||||
ctx->flags |= BNXT_CTX_FLAG_INITED;
|
||||
|
||||
return rc;
|
||||
}
|
||||
ctx->flags |= BNXT_CTX_FLAG_INITED;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -7387,14 +7387,22 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
|
|||
pri2cos = &resp2->pri0_cos_queue_id;
|
||||
for (i = 0; i < 8; i++) {
|
||||
u8 queue_id = pri2cos[i];
|
||||
u8 queue_idx;
|
||||
|
||||
/* Per port queue IDs start from 0, 10, 20, etc */
|
||||
queue_idx = queue_id % 10;
|
||||
if (queue_idx > BNXT_MAX_QUEUE) {
|
||||
bp->pri2cos_valid = false;
|
||||
goto qstats_done;
|
||||
}
|
||||
for (j = 0; j < bp->max_q; j++) {
|
||||
if (bp->q_ids[j] == queue_id)
|
||||
bp->pri2cos[i] = j;
|
||||
bp->pri2cos_idx[i] = queue_idx;
|
||||
}
|
||||
}
|
||||
bp->pri2cos_valid = 1;
|
||||
}
|
||||
qstats_done:
|
||||
mutex_unlock(&bp->hwrm_cmd_lock);
|
||||
return rc;
|
||||
}
|
||||
|
@ -11595,6 +11603,10 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
|
|||
bp->rx_nr_rings++;
|
||||
bp->cp_nr_rings++;
|
||||
}
|
||||
if (rc) {
|
||||
bp->tx_nr_rings = 0;
|
||||
bp->rx_nr_rings = 0;
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
@ -11887,12 +11899,12 @@ init_err_cleanup:
|
|||
init_err_pci_clean:
|
||||
bnxt_free_hwrm_short_cmd_req(bp);
|
||||
bnxt_free_hwrm_resources(bp);
|
||||
bnxt_free_ctx_mem(bp);
|
||||
kfree(bp->ctx);
|
||||
bp->ctx = NULL;
|
||||
kfree(bp->fw_health);
|
||||
bp->fw_health = NULL;
|
||||
bnxt_cleanup_pci(bp);
|
||||
bnxt_free_ctx_mem(bp);
|
||||
kfree(bp->ctx);
|
||||
bp->ctx = NULL;
|
||||
|
||||
init_err_free:
|
||||
free_netdev(dev);
|
||||
|
|
|
@ -1688,7 +1688,7 @@ struct bnxt {
|
|||
u16 fw_rx_stats_ext_size;
|
||||
u16 fw_tx_stats_ext_size;
|
||||
u16 hw_ring_stats_size;
|
||||
u8 pri2cos[8];
|
||||
u8 pri2cos_idx[8];
|
||||
u8 pri2cos_valid;
|
||||
|
||||
u16 hwrm_max_req_len;
|
||||
|
|
|
@ -479,24 +479,26 @@ static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
|
|||
{
|
||||
struct bnxt *bp = netdev_priv(dev);
|
||||
struct ieee_ets *my_ets = bp->ieee_ets;
|
||||
int rc;
|
||||
|
||||
ets->ets_cap = bp->max_tc;
|
||||
|
||||
if (!my_ets) {
|
||||
int rc;
|
||||
|
||||
if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
|
||||
return 0;
|
||||
|
||||
my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
|
||||
if (!my_ets)
|
||||
return 0;
|
||||
return -ENOMEM;
|
||||
rc = bnxt_hwrm_queue_cos2bw_qcfg(bp, my_ets);
|
||||
if (rc)
|
||||
return 0;
|
||||
goto error;
|
||||
rc = bnxt_hwrm_queue_pri2cos_qcfg(bp, my_ets);
|
||||
if (rc)
|
||||
return 0;
|
||||
goto error;
|
||||
|
||||
/* cache result */
|
||||
bp->ieee_ets = my_ets;
|
||||
}
|
||||
|
||||
ets->cbs = my_ets->cbs;
|
||||
|
@ -505,6 +507,9 @@ static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
|
|||
memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
|
||||
memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
|
||||
return 0;
|
||||
error:
|
||||
kfree(my_ets);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
|
||||
|
|
|
@ -589,25 +589,25 @@ skip_ring_stats:
|
|||
if (bp->pri2cos_valid) {
|
||||
for (i = 0; i < 8; i++, j++) {
|
||||
long n = bnxt_rx_bytes_pri_arr[i].base_off +
|
||||
bp->pri2cos[i];
|
||||
bp->pri2cos_idx[i];
|
||||
|
||||
buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
|
||||
}
|
||||
for (i = 0; i < 8; i++, j++) {
|
||||
long n = bnxt_rx_pkts_pri_arr[i].base_off +
|
||||
bp->pri2cos[i];
|
||||
bp->pri2cos_idx[i];
|
||||
|
||||
buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
|
||||
}
|
||||
for (i = 0; i < 8; i++, j++) {
|
||||
long n = bnxt_tx_bytes_pri_arr[i].base_off +
|
||||
bp->pri2cos[i];
|
||||
bp->pri2cos_idx[i];
|
||||
|
||||
buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
|
||||
}
|
||||
for (i = 0; i < 8; i++, j++) {
|
||||
long n = bnxt_tx_pkts_pri_arr[i].base_off +
|
||||
bp->pri2cos[i];
|
||||
bp->pri2cos_idx[i];
|
||||
|
||||
buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
|
||||
}
|
||||
|
|
|
@ -1324,8 +1324,9 @@ static inline void t6_fill_tnl_lso(struct sk_buff *skb,
|
|||
int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
|
||||
int maxreclaim)
|
||||
{
|
||||
unsigned int reclaimed, hw_cidx;
|
||||
struct sge_txq *q = &eq->q;
|
||||
unsigned int reclaimed;
|
||||
int hw_in_use;
|
||||
|
||||
if (!q->in_use || !__netif_tx_trylock(eq->txq))
|
||||
return 0;
|
||||
|
@ -1333,12 +1334,17 @@ int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
|
|||
/* Reclaim pending completed TX Descriptors. */
|
||||
reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true);
|
||||
|
||||
hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
|
||||
hw_in_use = q->pidx - hw_cidx;
|
||||
if (hw_in_use < 0)
|
||||
hw_in_use += q->size;
|
||||
|
||||
/* If the TX Queue is currently stopped and there's now more than half
|
||||
* the queue available, restart it. Otherwise bail out since the rest
|
||||
* of what we want do here is with the possibility of shipping any
|
||||
* currently buffered Coalesced TX Work Request.
|
||||
*/
|
||||
if (netif_tx_queue_stopped(eq->txq) && txq_avail(q) > (q->size / 2)) {
|
||||
if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) {
|
||||
netif_tx_wake_queue(eq->txq);
|
||||
eq->q.restarts++;
|
||||
}
|
||||
|
@ -1469,16 +1475,7 @@ out_free: dev_kfree_skb_any(skb);
|
|||
* has opened up.
|
||||
*/
|
||||
eth_txq_stop(q);
|
||||
|
||||
/* If we're using the SGE Doorbell Queue Timer facility, we
|
||||
* don't need to ask the Firmware to send us Egress Queue CIDX
|
||||
* Updates: the Hardware will do this automatically. And
|
||||
* since we send the Ingress Queue CIDX Updates to the
|
||||
* corresponding Ethernet Response Queue, we'll get them very
|
||||
* quickly.
|
||||
*/
|
||||
if (!q->dbqt)
|
||||
wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
|
||||
wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
|
||||
}
|
||||
|
||||
wr = (void *)&q->q.desc[q->q.pidx];
|
||||
|
@ -1792,16 +1789,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
|
|||
* has opened up.
|
||||
*/
|
||||
eth_txq_stop(txq);
|
||||
|
||||
/* If we're using the SGE Doorbell Queue Timer facility, we
|
||||
* don't need to ask the Firmware to send us Egress Queue CIDX
|
||||
* Updates: the Hardware will do this automatically. And
|
||||
* since we send the Ingress Queue CIDX Updates to the
|
||||
* corresponding Ethernet Response Queue, we'll get them very
|
||||
* quickly.
|
||||
*/
|
||||
if (!txq->dbqt)
|
||||
wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
|
||||
wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
|
||||
}
|
||||
|
||||
/* Start filling in our Work Request. Note that we do _not_ handle
|
||||
|
@ -2924,26 +2912,6 @@ static void t4_tx_completion_handler(struct sge_rspq *rspq,
|
|||
}
|
||||
|
||||
txq = &s->ethtxq[pi->first_qset + rspq->idx];
|
||||
|
||||
/* We've got the Hardware Consumer Index Update in the Egress Update
|
||||
* message. If we're using the SGE Doorbell Queue Timer mechanism,
|
||||
* these Egress Update messages will be our sole CIDX Updates we get
|
||||
* since we don't want to chew up PCIe bandwidth for both Ingress
|
||||
* Messages and Status Page writes. However, The code which manages
|
||||
* reclaiming successfully DMA'ed TX Work Requests uses the CIDX value
|
||||
* stored in the Status Page at the end of the TX Queue. It's easiest
|
||||
* to simply copy the CIDX Update value from the Egress Update message
|
||||
* to the Status Page. Also note that no Endian issues need to be
|
||||
* considered here since both are Big Endian and we're just copying
|
||||
* bytes consistently ...
|
||||
*/
|
||||
if (txq->dbqt) {
|
||||
struct cpl_sge_egr_update *egr;
|
||||
|
||||
egr = (struct cpl_sge_egr_update *)rsp;
|
||||
WRITE_ONCE(txq->q.stat->cidx, egr->cidx);
|
||||
}
|
||||
|
||||
t4_sge_eth_txq_egress_update(adapter, txq, -1);
|
||||
}
|
||||
|
||||
|
|
|
@@ -2949,9 +2949,7 @@ static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
    headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE +
        DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE);

    return DPAA_FD_DATA_ALIGNMENT ? ALIGN(headroom,
                                          DPAA_FD_DATA_ALIGNMENT) :
                                    headroom;
    return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT);
}

static int dpaa_eth_probe(struct platform_device *pdev)

@@ -8,3 +8,31 @@ config FSL_FMAN
    help
        Freescale Data-Path Acceleration Architecture Frame Manager
        (FMan) support

config DPAA_ERRATUM_A050385
    bool
    depends on ARM64 && FSL_DPAA
    default y
    help
        DPAA FMan erratum A050385 software workaround implementation:
        align buffers, data start, SG fragment length to avoid FMan DMA
        splits.
        FMAN DMA read or writes under heavy traffic load may cause FMAN
        internal resource leak thus stopping further packet processing.
        The FMAN internal queue can overflow when FMAN splits single
        read or write transactions into multiple smaller transactions
        such that more than 17 AXI transactions are in flight from FMAN
        to interconnect. When the FMAN internal queue overflows, it can
        stall further packet processing. The issue can occur with any
        one of the following three conditions:
        1. FMAN AXI transaction crosses 4K address boundary (Errata
        A010022)
        2. FMAN DMA address for an AXI transaction is not 16 byte
        aligned, i.e. the last 4 bits of an address are non-zero
        3. Scatter Gather (SG) frames have more than one SG buffer in
        the SG list and any one of the buffers, except the last
        buffer in the SG list has data size that is not a multiple
        of 16 bytes, i.e., other than 16, 32, 48, 64, etc.
        With any one of the above three conditions present, there is
        likelihood of stalled FMAN packet processing, especially under
        stress with multiple ports injecting line-rate traffic.

@@ -1,5 +1,6 @@
/*
 * Copyright 2008-2015 Freescale Semiconductor Inc.
 * Copyright 2020 NXP
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:

@@ -566,6 +567,10 @@ struct fman_cfg {
    u32 qmi_def_tnums_thresh;
};

#ifdef CONFIG_DPAA_ERRATUM_A050385
static bool fman_has_err_a050385;
#endif

static irqreturn_t fman_exceptions(struct fman *fman,
                                   enum fman_exceptions exception)
{

@@ -2547,6 +2552,14 @@ struct fman *fman_bind(struct device *fm_dev)
}
EXPORT_SYMBOL(fman_bind);

#ifdef CONFIG_DPAA_ERRATUM_A050385
bool fman_has_errata_a050385(void)
{
    return fman_has_err_a050385;
}
EXPORT_SYMBOL(fman_has_errata_a050385);
#endif

static irqreturn_t fman_err_irq(int irq, void *handle)
{
    struct fman *fman = (struct fman *)handle;

@@ -2874,6 +2887,11 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
        goto fman_free;
    }

#ifdef CONFIG_DPAA_ERRATUM_A050385
    fman_has_err_a050385 =
        of_property_read_bool(fm_node, "fsl,erratum-a050385");
#endif

    return fman;

fman_node_put:

@@ -1,5 +1,6 @@
/*
 * Copyright 2008-2015 Freescale Semiconductor Inc.
 * Copyright 2020 NXP
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:

@@ -402,6 +403,10 @@ u16 fman_get_max_frm(void);

int fman_get_rx_extra_headroom(void);

#ifdef CONFIG_DPAA_ERRATUM_A050385
bool fman_has_errata_a050385(void);
#endif

struct fman *fman_bind(struct device *dev);

#endif /* __FM_H */

@@ -1596,7 +1596,7 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
    netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc);

    return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
        kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
        kinfo->dcb_ops->setup_tc(h, tc ? tc : 1, prio_tc) : -EOPNOTSUPP;
}

static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,

@@ -2804,11 +2804,10 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
    /* For the case where the last mvneta_poll did not process all
     * RX packets
     */
    rx_queue = fls(((cause_rx_tx >> 8) & 0xff));

    cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
        port->cause_rx_tx;

    rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
    if (rx_queue) {
        rx_queue = rx_queue - 1;
        if (pp->bm_priv)

@@ -371,6 +371,7 @@ enum {

struct mlx5e_sq_wqe_info {
    u8 opcode;
    u8 num_wqebbs;

    /* Auxiliary data for different opcodes. */
    union {

@@ -1058,6 +1059,7 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
void mlx5e_activate_rq(struct mlx5e_rq *rq);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);
void mlx5e_activate_icosq(struct mlx5e_icosq *icosq);
void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);

@@ -10,8 +10,7 @@

static inline bool cqe_syndrome_needs_recover(u8 syndrome)
{
    return syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR ||
           syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR ||
    return syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR ||
           syndrome == MLX5_CQE_SYNDROME_LOCAL_PROT_ERR ||
           syndrome == MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
}

@@ -90,7 +90,7 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
        goto out;

    mlx5e_reset_icosq_cc_pc(icosq);
    mlx5e_free_rx_descs(rq);
    mlx5e_free_rx_in_progress_descs(rq);
    clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
    mlx5e_activate_icosq(icosq);
    mlx5e_activate_rq(rq);

@@ -181,10 +181,12 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)

static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
{
    if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
    if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
        mlx5_wq_ll_reset(&rq->mpwqe.wq);
    else
        rq->mpwqe.actual_wq_head = 0;
    } else {
        mlx5_wq_cyc_reset(&rq->wqe.wq);
    }
}

/* SW parser related functions */

@ -824,6 +824,29 @@ int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
|
|||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq)
|
||||
{
|
||||
struct mlx5_wq_ll *wq;
|
||||
u16 head;
|
||||
int i;
|
||||
|
||||
if (rq->wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
|
||||
return;
|
||||
|
||||
wq = &rq->mpwqe.wq;
|
||||
head = wq->head;
|
||||
|
||||
/* Outstanding UMR WQEs (in progress) start at wq->head */
|
||||
for (i = 0; i < rq->mpwqe.umr_in_progress; i++) {
|
||||
rq->dealloc_wqe(rq, head);
|
||||
head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
|
||||
}
|
||||
|
||||
rq->mpwqe.actual_wq_head = wq->head;
|
||||
rq->mpwqe.umr_in_progress = 0;
|
||||
rq->mpwqe.umr_completed = 0;
|
||||
}
|
||||
|
||||
void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
|
||||
{
|
||||
__be16 wqe_ix_be;
|
||||
|
@ -831,14 +854,8 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
|
|||
|
||||
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
|
||||
struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
|
||||
u16 head = wq->head;
|
||||
int i;
|
||||
|
||||
/* Outstanding UMR WQEs (in progress) start at wq->head */
|
||||
for (i = 0; i < rq->mpwqe.umr_in_progress; i++) {
|
||||
rq->dealloc_wqe(rq, head);
|
||||
head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
|
||||
}
|
||||
mlx5e_free_rx_in_progress_descs(rq);
|
||||
|
||||
while (!mlx5_wq_ll_is_empty(wq)) {
|
||||
struct mlx5e_rx_wqe_ll *wqe;
|
||||
|
|
|
@ -477,6 +477,7 @@ static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
|
|||
/* fill sq frag edge with nops to avoid wqe wrapping two pages */
|
||||
for (; wi < edge_wi; wi++) {
|
||||
wi->opcode = MLX5_OPCODE_NOP;
|
||||
wi->num_wqebbs = 1;
|
||||
mlx5e_post_nop(wq, sq->sqn, &sq->pc);
|
||||
}
|
||||
}
|
||||
|
@ -525,6 +526,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
|
|||
umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset);
|
||||
|
||||
sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
|
||||
sq->db.ico_wqe[pi].num_wqebbs = MLX5E_UMR_WQEBBS;
|
||||
sq->db.ico_wqe[pi].umr.rq = rq;
|
||||
sq->pc += MLX5E_UMR_WQEBBS;
|
||||
|
||||
|
@ -628,17 +630,14 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
|
|||
|
||||
ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
|
||||
wi = &sq->db.ico_wqe[ci];
|
||||
sqcc += wi->num_wqebbs;
|
||||
|
||||
if (likely(wi->opcode == MLX5_OPCODE_UMR)) {
|
||||
sqcc += MLX5E_UMR_WQEBBS;
|
||||
if (likely(wi->opcode == MLX5_OPCODE_UMR))
|
||||
wi->umr.rq->mpwqe.umr_completed++;
|
||||
} else if (likely(wi->opcode == MLX5_OPCODE_NOP)) {
|
||||
sqcc++;
|
||||
} else {
|
||||
else if (unlikely(wi->opcode != MLX5_OPCODE_NOP))
|
||||
netdev_WARN_ONCE(cq->channel->netdev,
|
||||
"Bad OPCODE in ICOSQ WQE info: 0x%x\n",
|
||||
wi->opcode);
|
||||
}
|
||||
|
||||
} while (!last_wqe);
|
||||
|
||||
|
|
|
@@ -78,6 +78,7 @@ void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
    u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);

    sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
    sq->db.ico_wqe[pi].num_wqebbs = 1;
    nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
    mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
}

@@ -930,7 +930,6 @@ static int dr_actions_l2_rewrite(struct mlx5dr_domain *dmn,

    action->rewrite.data = (void *)ops;
    action->rewrite.num_of_actions = i;
    action->rewrite.chunk->byte_size = i * sizeof(*ops);

    ret = mlx5dr_send_postsend_action(dmn, action);
    if (ret) {

@@ -558,7 +558,8 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
    int ret;

    send_info.write.addr = (uintptr_t)action->rewrite.data;
    send_info.write.length = action->rewrite.chunk->byte_size;
    send_info.write.length = action->rewrite.num_of_actions *
                             DR_MODIFY_ACTION_SIZE;
    send_info.write.lkey = 0;
    send_info.remote_addr = action->rewrite.chunk->mr_addr;
    send_info.rkey = action->rewrite.chunk->rkey;

@ -1318,36 +1318,64 @@ static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
|
|||
mbox->mapaddr);
|
||||
}
|
||||
|
||||
static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
|
||||
const struct pci_device_id *id)
|
||||
static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
|
||||
const struct pci_device_id *id,
|
||||
u32 *p_sys_status)
|
||||
{
|
||||
unsigned long end;
|
||||
char mrsr_pl[MLXSW_REG_MRSR_LEN];
|
||||
int err;
|
||||
u32 val;
|
||||
|
||||
mlxsw_reg_mrsr_pack(mrsr_pl);
|
||||
err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
|
||||
if (err)
|
||||
return err;
|
||||
if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) {
|
||||
msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* We must wait for the HW to become responsive once again. */
|
||||
/* We must wait for the HW to become responsive. */
|
||||
msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);
|
||||
|
||||
end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
|
||||
do {
|
||||
u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
|
||||
|
||||
val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
|
||||
if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
|
||||
return 0;
|
||||
cond_resched();
|
||||
} while (time_before(jiffies, end));
|
||||
|
||||
*p_sys_status = val & MLXSW_PCI_FW_READY_MASK;
|
||||
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
|
||||
const struct pci_device_id *id)
|
||||
{
|
||||
struct pci_dev *pdev = mlxsw_pci->pdev;
|
||||
char mrsr_pl[MLXSW_REG_MRSR_LEN];
|
||||
u32 sys_status;
|
||||
int err;
|
||||
|
||||
err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "Failed to reach system ready status before reset. Status is 0x%x\n",
|
||||
sys_status);
|
||||
return err;
|
||||
}
|
||||
|
||||
mlxsw_reg_mrsr_pack(mrsr_pl);
|
||||
err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "Failed to reach system ready status after reset. Status is 0x%x\n",
|
||||
sys_status);
|
||||
return err;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
|
||||
{
|
||||
int err;
|
||||
|
|
|
@@ -637,12 +637,12 @@ static int mlxsw_sp_mr_vif_resolve(struct mlxsw_sp_mr_table *mr_table,
    return 0;

err_erif_unresolve:
    list_for_each_entry_from_reverse(erve, &mr_vif->route_evif_list,
                                     vif_node)
    list_for_each_entry_continue_reverse(erve, &mr_vif->route_evif_list,
                                         vif_node)
        mlxsw_sp_mr_route_evif_unresolve(mr_table, erve);
err_irif_unresolve:
    list_for_each_entry_from_reverse(irve, &mr_vif->route_ivif_list,
                                     vif_node)
    list_for_each_entry_continue_reverse(irve, &mr_vif->route_ivif_list,
                                         vif_node)
        mlxsw_sp_mr_route_ivif_unresolve(mr_table, irve);
    mr_vif->rif = NULL;
    return err;

@ -6812,7 +6812,7 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
|
|||
RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
|
||||
rtl_lock_config_regs(tp);
|
||||
/* fall through */
|
||||
case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_24:
|
||||
case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_17:
|
||||
flags = PCI_IRQ_LEGACY;
|
||||
break;
|
||||
default:
|
||||
|
@ -6903,6 +6903,13 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
|
|||
if (!tp->phydev) {
|
||||
mdiobus_unregister(new_bus);
|
||||
return -ENODEV;
|
||||
} else if (!tp->phydev->drv) {
|
||||
/* Most chip versions fail with the genphy driver.
|
||||
* Therefore ensure that the dedicated PHY driver is loaded.
|
||||
*/
|
||||
dev_err(&pdev->dev, "realtek.ko not loaded, maybe it needs to be added to initramfs?\n");
|
||||
mdiobus_unregister(new_bus);
|
||||
return -EUNATCH;
|
||||
}
|
||||
|
||||
/* PHY will be woken up in rtl_open() */
|
||||
|
@ -7064,15 +7071,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
int chipset, region;
|
||||
int jumbo_max, rc;
|
||||
|
||||
/* Some tools for creating an initramfs don't consider softdeps, then
|
||||
* r8169.ko may be in initramfs, but realtek.ko not. Then the generic
|
||||
* PHY driver is used that doesn't work with most chip versions.
|
||||
*/
|
||||
if (!driver_find("RTL8201CP Ethernet", &mdio_bus_type)) {
|
||||
dev_err(&pdev->dev, "realtek.ko not loaded, maybe it needs to be added to initramfs?\n");
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp));
|
||||
if (!dev)
|
||||
return -ENOMEM;
|
||||
|
|
|
@@ -2279,7 +2279,7 @@ static int __init sxgbe_cmdline_opt(char *str)
    if (!str || !*str)
        return -EINVAL;
    while ((opt = strsep(&str, ",")) != NULL) {
        if (!strncmp(opt, "eee_timer:", 6)) {
        if (!strncmp(opt, "eee_timer:", 10)) {
            if (kstrtoint(opt + 10, 0, &eee_timer))
                goto err;
        }

@@ -1411,7 +1411,7 @@ static int rk_gmac_probe(struct platform_device *pdev)

    ret = rk_gmac_clk_init(plat_dat);
    if (ret)
        return ret;
        goto err_remove_config_dt;

    ret = rk_gmac_powerup(plat_dat->bsp_priv);
    if (ret)

@@ -1845,8 +1845,6 @@ static void geneve_destroy_tunnels(struct net *net, struct list_head *head)
        if (!net_eq(dev_net(geneve->dev), net))
            unregister_netdevice_queue(geneve->dev, head);
    }

    WARN_ON_ONCE(!list_empty(&gn->sock_list));
}

static void __net_exit geneve_exit_batch_net(struct list_head *net_list)

@@ -1861,6 +1859,12 @@ static void __net_exit geneve_exit_batch_net(struct list_head *net_list)
    /* unregister the devices gathered above */
    unregister_netdevice_many(&list);
    rtnl_unlock();

    list_for_each_entry(net, net_list, exit_list) {
        const struct geneve_net *gn = net_generic(net, geneve_net_id);

        WARN_ON_ONCE(!list_empty(&gn->sock_list));
    }
}

static struct pernet_operations geneve_net_ops = {

@@ -75,7 +75,7 @@ static void ifb_ri_tasklet(unsigned long _txp)
    }

    while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
        skb->tc_redirected = 0;
        skb->redirected = 0;
        skb->tc_skip_classify = 1;

        u64_stats_update_begin(&txp->tsync);

@@ -96,7 +96,7 @@ static void ifb_ri_tasklet(unsigned long _txp)
        rcu_read_unlock();
        skb->skb_iif = txp->dev->ifindex;

        if (!skb->tc_from_ingress) {
        if (!skb->from_ingress) {
            dev_queue_xmit(skb);
        } else {
            skb_pull_rcsum(skb, skb->mac_len);

@@ -243,7 +243,7 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
    txp->rx_bytes += skb->len;
    u64_stats_update_end(&txp->rsync);

    if (!skb->tc_redirected || !skb->skb_iif) {
    if (!skb->redirected || !skb->skb_iif) {
        dev_kfree_skb(skb);
        dev->stats.rx_dropped++;
        return NETDEV_TX_OK;

@@ -16,6 +16,7 @@
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>
#include <linux/if_arp.h>

#include <uapi/linux/if_macsec.h>

@@ -3236,6 +3237,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
    real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
    if (!real_dev)
        return -ENODEV;
    if (real_dev->type != ARPHRD_ETHER)
        return -EINVAL;

    dev->priv_flags |= IFF_MACSEC;

@ -25,7 +25,8 @@
|
|||
#define DP83867_CFG3 0x1e
|
||||
|
||||
/* Extended Registers */
|
||||
#define DP83867_CFG4 0x0031
|
||||
#define DP83867_FLD_THR_CFG 0x002e
|
||||
#define DP83867_CFG4 0x0031
|
||||
#define DP83867_CFG4_SGMII_ANEG_MASK (BIT(5) | BIT(6))
|
||||
#define DP83867_CFG4_SGMII_ANEG_TIMER_11MS (3 << 5)
|
||||
#define DP83867_CFG4_SGMII_ANEG_TIMER_800US (2 << 5)
|
||||
|
@ -74,6 +75,7 @@
|
|||
#define DP83867_STRAP_STS2_CLK_SKEW_RX_MASK GENMASK(2, 0)
|
||||
#define DP83867_STRAP_STS2_CLK_SKEW_RX_SHIFT 0
|
||||
#define DP83867_STRAP_STS2_CLK_SKEW_NONE BIT(2)
|
||||
#define DP83867_STRAP_STS2_STRAP_FLD BIT(10)
|
||||
|
||||
/* PHY CTRL bits */
|
||||
#define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14
|
||||
|
@ -103,6 +105,9 @@
|
|||
/* CFG4 bits */
|
||||
#define DP83867_CFG4_PORT_MIRROR_EN BIT(0)
|
||||
|
||||
/* FLD_THR_CFG */
|
||||
#define DP83867_FLD_THR_CFG_ENERGY_LOST_THR_MASK 0x7
|
||||
|
||||
enum {
|
||||
DP83867_PORT_MIRROING_KEEP,
|
||||
DP83867_PORT_MIRROING_EN,
|
||||
|
@ -318,6 +323,20 @@ static int dp83867_config_init(struct phy_device *phydev)
|
|||
phy_clear_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
|
||||
BIT(7));
|
||||
|
||||
bs = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_STRAP_STS2);
|
||||
if (bs & DP83867_STRAP_STS2_STRAP_FLD) {
|
||||
/* When using strap to enable FLD, the ENERGY_LOST_FLD_THR will
|
||||
* be set to 0x2. This may causes the PHY link to be unstable -
|
||||
* the default value 0x1 need to be restored.
|
||||
*/
|
||||
ret = phy_modify_mmd(phydev, DP83867_DEVADDR,
|
||||
DP83867_FLD_THR_CFG,
|
||||
DP83867_FLD_THR_CFG_ENERGY_LOST_THR_MASK,
|
||||
0x1);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (phy_interface_is_rgmii(phydev)) {
|
||||
val = phy_read(phydev, MII_DP83867_PHYCTRL);
|
||||
if (val < 0)
|
||||
|
|
|
@@ -242,11 +242,9 @@ static int unimac_mdio_probe(struct platform_device *pdev)
        return -ENOMEM;
    }

    priv->clk = devm_clk_get(&pdev->dev, NULL);
    if (PTR_ERR(priv->clk) == -EPROBE_DEFER)
    priv->clk = devm_clk_get_optional(&pdev->dev, NULL);
    if (IS_ERR(priv->clk))
        return PTR_ERR(priv->clk);
    else
        priv->clk = NULL;

    ret = clk_prepare_enable(priv->clk);
    if (ret)

@@ -288,8 +288,13 @@ static int mdio_mux_iproc_suspend(struct device *dev)
static int mdio_mux_iproc_resume(struct device *dev)
{
    struct iproc_mdiomux_desc *md = dev_get_drvdata(dev);
    int rc;

    clk_prepare_enable(md->core_clk);
    rc = clk_prepare_enable(md->core_clk);
    if (rc) {
        dev_err(md->dev, "failed to enable core clk\n");
        return rc;
    }
    mdio_mux_iproc_config(md);

    return 0;

@@ -1210,6 +1210,7 @@ static const struct usb_device_id products[] = {
    {QMI_FIXED_INTF(0x1435, 0xd182, 5)}, /* Wistron NeWeb D18 */
    {QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */
    {QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)}, /* Fibocom NL668 series */
    {QMI_FIXED_INTF(0x1690, 0x7588, 4)}, /* ASKEY WWHC050 */
    {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
    {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */
    {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */

@@ -2779,10 +2779,19 @@ static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan,
/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
{
    struct vxlan_dev *vxlan = netdev_priv(dev);
    int err;

    dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
    if (!dev->tstats)
        return -ENOMEM;

    err = gro_cells_init(&vxlan->gro_cells, dev);
    if (err) {
        free_percpu(dev->tstats);
        return err;
    }

    return 0;
}

@@ -3043,8 +3052,6 @@ static void vxlan_setup(struct net_device *dev)

    vxlan->dev = dev;

    gro_cells_init(&vxlan->gro_cells, dev);

    for (h = 0; h < FDB_HASH_SIZE; ++h) {
        spin_lock_init(&vxlan->hash_lock[h]);
        INIT_HLIST_HEAD(&vxlan->fdb_head[h]);

@@ -1181,7 +1181,7 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)

static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
{
    return -ENOENT;
    return 0;
}
#endif /* CONFIG_ACPI */

@@ -561,6 +561,7 @@ static inline void clear_pci_tx_desc_content(__le32 *__pdesc, int _size)
     rxmcs == DESC92C_RATE11M)

struct phy_status_rpt {
    u8 padding[2];
    u8 ch_corr[2];
    u8 cck_sig_qual_ofdm_pwdb_all;
    u8 cck_agc_rpt_ofdm_cfosho_a;

@@ -184,7 +184,7 @@ static int fdp_nci_send_patch(struct nci_dev *ndev, u8 conn_id, u8 type)
    const struct firmware *fw;
    struct sk_buff *skb;
    unsigned long len;
    u8 max_size, payload_size;
    int max_size, payload_size;
    int rc = 0;

    if ((type == NCI_PATCH_TYPE_OTP && !info->otp_patch) ||

@@ -207,8 +207,7 @@ static int fdp_nci_send_patch(struct nci_dev *ndev, u8 conn_id, u8 type)

    while (len) {

        payload_size = min_t(unsigned long, (unsigned long) max_size,
                             len);
        payload_size = min_t(unsigned long, max_size, len);

        skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + payload_size),
                            GFP_KERNEL);

@@ -268,6 +268,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
            rc = of_mdiobus_register_phy(mdio, child, addr);
            if (rc && rc != -ENODEV)
                goto unregister;
            break;
        }
    }
}

@ -1244,7 +1244,6 @@ static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
|
|||
if (count == 1)
|
||||
dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
|
||||
|
||||
card->qdio.default_out_queue = single ? 0 : QETH_DEFAULT_QUEUE;
|
||||
card->qdio.no_out_queues = count;
|
||||
return 0;
|
||||
}
|
||||
|
@ -2634,12 +2633,12 @@ static int qeth_init_input_buffer(struct qeth_card *card,
|
|||
buf->rx_skb = netdev_alloc_skb(card->dev,
|
||||
QETH_RX_PULL_LEN + ETH_HLEN);
|
||||
if (!buf->rx_skb)
|
||||
return 1;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
pool_entry = qeth_find_free_buffer_pool_entry(card);
|
||||
if (!pool_entry)
|
||||
return 1;
|
||||
return -ENOBUFS;
|
||||
|
||||
/*
|
||||
* since the buffer is accessed only from the input_tasklet
|
||||
|
@ -2671,10 +2670,15 @@ int qeth_init_qdio_queues(struct qeth_card *card)
|
|||
/* inbound queue */
|
||||
qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
|
||||
memset(&card->rx, 0, sizeof(struct qeth_rx));
|
||||
|
||||
qeth_initialize_working_pool_list(card);
|
||||
/*give only as many buffers to hardware as we have buffer pool entries*/
|
||||
for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
|
||||
qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
|
||||
for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; i++) {
|
||||
rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
|
||||
if (rc)
|
||||
return rc;
|
||||
}
|
||||
|
||||
card->qdio.in_q->next_buf_to_init =
|
||||
card->qdio.in_buf_pool.buf_count - 1;
|
||||
rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
|
||||
|
|
|
@ -9947,6 +9947,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
|
|||
ioa_cfg->max_devs_supported = ipr_max_devs;
|
||||
|
||||
if (ioa_cfg->sis64) {
|
||||
host->max_channel = IPR_MAX_SIS64_BUSES;
|
||||
host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
|
||||
host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
|
||||
if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
|
||||
|
@ -9955,6 +9956,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
|
|||
+ ((sizeof(struct ipr_config_table_entry64)
|
||||
* ioa_cfg->max_devs_supported)));
|
||||
} else {
|
||||
host->max_channel = IPR_VSET_BUS;
|
||||
host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
|
||||
host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
|
||||
if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
|
||||
|
@ -9964,7 +9966,6 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
|
|||
* ioa_cfg->max_devs_supported)));
|
||||
}
|
||||
|
||||
host->max_channel = IPR_VSET_BUS;
|
||||
host->unique_id = host->host_no;
|
||||
host->max_cmd_len = IPR_MAX_CDB_LEN;
|
||||
host->can_queue = ioa_cfg->max_cmds;
|
||||
|
|