- MIPS: octeon: fix minor bugs of initial merge

- MIPS: octeon: add support for QLM and PCI-E controller
 - MIPS: octeon: add support for AHCI and SATA
 - MIPS: octeon: add E1000 ethernet support
 - MIPS: octeon: add Octeon III NIC23 board
 - ata/scsi: add support for Big Endian platforms
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEiQkHUH+J02LLC9InKPlOlyTyXBgFAmCEf04ACgkQKPlOlyTy
 XBj2Aw//SxPX48XynT979cMIOoylO6Rs1saWtQr32YmycWnGC0/wvFNPU/kjNP/a
 jinOxvY2TSBNqToSuCDZxGXyijMIdAnWVCQCUXmah7bZ+GtiYWR0SHNO0TQ+qrBW
 06CC7hgQQqQswGClJe4njwTyVIzTQvyo/UdjS6RWggDZPsnGpyMlR5O56ZCNNH8o
 1XFCG8hSrQBRO38RpXciGS3dFeaqOQy96K9M5pellf9Fh5a+jHyAmK8Z/0R+T6cG
 +PoNh6qZAsXFj0PVuvu0CaXFHpP+fhdA4kEwt64PJCe8D3x7d05COuN0bL+e6D6u
 E4cSnkjmzNgYvoi4YAGaQN9f4PKPSMHuYf7YdIBFCQQ8OtxNXpUHOg0zr4SRnq/C
 ypq285f2O7Aq+3t0BrC7jTjo+Y2PzVIUR8MXpvh698ZSdJnqOlJf0y5tRwdhf/+k
 gh+rT7mWIOoRRWpb6qHMUr5gnparl0gOykspMlQ2NKB+kfmC4RP2pYxQFp82TFIq
 ZMUaXer2/g5dVptEhTU0vBsTrk7s/u239WtwyXpOCdoM6sI/FoMoiCoQoD2isKP6
 mUv/iwTpfNTtQnx5MM2Xsu+8pzcXjaVeoMaNcI55jZ2QTCoMAFA6odYTk+GHS3Vx
 Yt3TLrH6O46PXNSULROkmTR4RgiDHcGCX/HGTKkcA/+X9GDjjNc=
 =JwRN
 -----END PGP SIGNATURE-----

Merge tag 'mips-pull-2021-04-24' of https://source.denx.de/u-boot/custodians/u-boot-mips

- MIPS: octeon: fix minor bugs of initial merge
- MIPS: octeon: add support for QLM and PCI-E controller
- MIPS: octeon: add support for AHCI and SATA
- MIPS: octeon: add E1000 ethernet support
- MIPS: octeon: add Octeon III NIC23 board
- ata/scsi: add support for Big Endian platforms
This commit is contained in:
Tom Rini 2021-04-24 19:39:14 -04:00
commit 4dda435131
131 changed files with 120962 additions and 265 deletions

View File

@ -20,6 +20,7 @@ dtb-$(CONFIG_BOARD_MT7628_RFB) += mediatek,mt7628-rfb.dtb
dtb-$(CONFIG_BOARD_GARDENA_SMART_GATEWAY_MT7688) += gardena-smart-gateway-mt7688.dtb
dtb-$(CONFIG_BOARD_LINKIT_SMART_7688) += linkit-smart-7688.dtb
dtb-$(CONFIG_TARGET_OCTEON_EBB7304) += mrvl,octeon-ebb7304.dtb
dtb-$(CONFIG_TARGET_OCTEON_NIC23) += mrvl,octeon-nic23.dtb
dtb-$(CONFIG_BOARD_NETGEAR_CG3100D) += netgear,cg3100d.dtb
dtb-$(CONFIG_BOARD_NETGEAR_DGND3700V2) += netgear,dgnd3700v2.dtb
dtb-$(CONFIG_BOARD_SAGEM_FAST1704) += sagem,f@st1704.dtb

View File

@ -97,6 +97,7 @@
uart0: serial@1180000000800 {
compatible = "cavium,octeon-3860-uart","ns16550";
reg = <0x11800 0x00000800 0x0 0x400>;
clocks = <&clk OCTEON_CLK_IO>;
clock-frequency = <0>;
current-speed = <115200>;
reg-shift = <3>;
@ -106,6 +107,7 @@
uart1: serial@1180000000c00 {
compatible = "cavium,octeon-3860-uart","ns16550";
reg = <0x11800 0x00000c00 0x0 0x400>;
clocks = <&clk OCTEON_CLK_IO>;
clock-frequency = <0>;
current-speed = <115200>;
reg-shift = <3>;
@ -230,5 +232,40 @@
dr_mode = "host";
};
};
/* PCIe 0 */
pcie0: pcie@1180069000000 {
compatible = "marvell,pcie-host-octeon";
reg = <0 0xf2600000 0 0x10000>;
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
dma-coherent;
bus-range = <0 0xff>;
marvell,pcie-port = <0>;
ranges = <0x81000000 0x00000000 0xd0000000 0x00011a00 0xd0000000 0x00000000 0x01000000 /* IO */
0x02000000 0x00000000 0xe0000000 0x00011b00 0xe0000000 0x00000000 0x10000000 /* non-prefetchable memory */
0x43000000 0x00011c00 0x00000000 0x00011c00 0x00000000 0x00000010 0x00000000>;/* prefetchable memory */
};
uctl@118006c000000 {
compatible = "cavium,octeon-7130-sata-uctl", "simple-bus";
reg = <0x11800 0x6c000000 0x0 0x100>;
ranges; /* Direct mapping */
#address-cells = <2>;
#size-cells = <2>;
portmap = <0x3>;
staggered-spinup;
cavium,qlm-trim = "4,sata";
sata: sata@16c0000000000 {
compatible = "cavium,octeon-7130-ahci";
reg = <0x16c00 0x00000000 0x0 0x200>;
#address-cells = <2>;
#size-cells = <2>;
interrupts = <0x6c010 4>;
};
};
};
};

View File

@ -112,13 +112,20 @@
};
};
&uart0 {
clock-frequency = <1200000000>;
};
&i2c0 {
u-boot,dm-pre-reloc; /* Needed early for DDR SPD EEPROM */
clock-frequency = <100000>;
rtc@68 {
compatible = "dallas,ds1337";
reg = <0x68>;
};
tlv-eeprom@56 {
compatible = "atmel,24c256", "microchip,24lc256";
reg = <0x56>;
pagesize = <64>;
};
};
&i2c1 {

View File

@ -0,0 +1,162 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Marvell / Cavium Inc. NIC23
*/
/dts-v1/;
#include "mrvl,cn73xx.dtsi"
#include <dt-bindings/gpio/gpio.h>
/ {
model = "cavium,nic23";
compatible = "cavium,nic23";
aliases {
mmc0 = &mmc0;
serial0 = &uart0;
spi0 = &spi;
};
regulators {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <0>;
/* Power on GPIO 8, active high */
reg_mmc_3v3: regulator@0 {
compatible = "regulator-fixed";
reg = <0>;
regulator-name = "mmc-3v3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
gpio = <&gpio 8 GPIO_ACTIVE_HIGH>;
enable-active-high;
};
};
chosen {
stdout-path = &uart0;
};
};
&bootbus {
/*
* bootbus CS0 for CFI flash is remapped (0x1fc0.0000 -> 1f40.0000)
* as the initial size is too small for the 8MiB flash device
*/
ranges = <0 0 0 0x1f400000 0xc00000>,
<1 0 0x10000 0x10000000 0>,
<2 0 0x10000 0x20000000 0>,
<3 0 0x10000 0x30000000 0>,
<4 0 0 0x1d020000 0x10000>,
<5 0 0x10000 0x50000000 0>,
<6 0 0x10000 0x60000000 0>,
<7 0 0x10000 0x70000000 0>;
cavium,cs-config@0 {
compatible = "cavium,octeon-3860-bootbus-config";
cavium,cs-index = <0>;
cavium,t-adr = <10>;
cavium,t-ce = <50>;
cavium,t-oe = <50>;
cavium,t-we = <35>;
cavium,t-rd-hld = <25>;
cavium,t-wr-hld = <35>;
cavium,t-pause = <0>;
cavium,t-wait = <50>;
cavium,t-page = <30>;
cavium,t-rd-dly = <0>;
cavium,page-mode = <1>;
cavium,pages = <8>;
cavium,bus-width = <8>;
};
cavium,cs-config@4 {
compatible = "cavium,octeon-3860-bootbus-config";
cavium,cs-index = <4>;
cavium,t-adr = <10>;
cavium,t-ce = <10>;
cavium,t-oe = <160>;
cavium,t-we = <100>;
cavium,t-rd-hld = <10>;
cavium,t-wr-hld = <0>;
cavium,t-pause = <50>;
cavium,t-wait = <50>;
cavium,t-page = <10>;
cavium,t-rd-dly = <10>;
cavium,pages = <0>;
cavium,bus-width = <8>;
};
flash0: nor@0,0 {
compatible = "cfi-flash";
reg = <0 0 0x800000>;
#address-cells = <1>;
#size-cells = <1>;
partition@0 {
label = "bootloader";
reg = <0 0x340000>;
read-only;
};
partition@300000 {
label = "storage";
reg = <0x340000 0x4be000>;
};
partition@7fe000 {
label = "environment";
reg = <0x7fe000 0x2000>;
read-only;
};
};
};
&uart0 {
clock-frequency = <800000000>;
};
&i2c0 {
u-boot,dm-pre-reloc; /* Needed early for DDR SPD EEPROM */
clock-frequency = <100000>;
};
&i2c1 {
u-boot,dm-pre-reloc; /* Needed early for DDR SPD EEPROM */
clock-frequency = <100000>;
};
&mmc {
status = "okay";
mmc0: mmc-slot@0 {
compatible = "cavium,octeon-6130-mmc-slot", "mmc-slot";
reg = <0>;
vqmmc-supply = <&reg_mmc_3v3>;
voltage-ranges = <3300 3300>;
spi-max-frequency = <52000000>;
/* bus width can be 1, 4 or 8 */
bus-width = <8>; /* new std property */
cavium,bus-max-width = <8>; /* custom property */
non-removable;
};
};
&soc0 {
pci-console@0 {
compatible = "marvell,pci-console";
status = "okay";
};
pci-bootcmd@0 {
compatible = "marvell,pci-bootcmd";
status = "okay";
};
};
&spi {
flash@0 {
compatible = "micron,n25q128a11", "jedec,spi-nor";
spi-max-frequency = <2000000>;
reg = <0>;
};
};

View File

@ -8,6 +8,12 @@
#define __ASM_GBL_DATA_H
#include <asm/regdef.h>
#include <asm/types.h>
/*
 * Block of MAC addresses assigned to the board: a base address plus the
 * number of consecutive addresses available.
 * NOTE(review): the name suggests this is read from the board TLV EEPROM —
 * confirm against the board-level EEPROM parsing code.
 */
struct octeon_eeprom_mac_addr {
	u8 mac_addr_base[6];	/* first (base) MAC address of the block */
	u8 count;		/* number of consecutive MAC addresses */
};
/* Architecture-specific global data */
struct arch_global_data {
@ -30,6 +36,9 @@ struct arch_global_data {
#ifdef CONFIG_ARCH_MTMIPS
unsigned long timer_freq;
#endif
#ifdef CONFIG_ARCH_OCTEON
struct octeon_eeprom_mac_addr mac_desc;
#endif
};
#include <asm-generic/global_data.h>

View File

@ -41,6 +41,12 @@ config TARGET_OCTEON_EBB7304
help
Choose this for the Octeon EBB7304 board
config TARGET_OCTEON_NIC23
bool "Marvell Octeon NIC23"
select OCTEON_CN73XX
help
Choose this for the Octeon NIC23 board
endchoice
config SYS_DCACHE_SIZE
@ -55,6 +61,11 @@ config SYS_ICACHE_SIZE
config SYS_ICACHE_LINE_SIZE
default 128
config SYS_PCI_64BIT
bool
default y
source "board/Marvell/octeon_ebb7304/Kconfig"
source "board/Marvell/octeon_nic23/Kconfig"
endmenu

View File

@ -11,3 +11,14 @@ obj-y += dram.o
obj-y += cvmx-coremask.o
obj-y += cvmx-bootmem.o
obj-y += bootoctlinux.o
# QLM related code
obj-y += cvmx-helper-cfg.o
obj-y += cvmx-helper-fdt.o
obj-y += cvmx-helper-jtag.o
obj-y += cvmx-helper-util.o
obj-y += cvmx-helper.o
obj-y += cvmx-pcie.o
obj-y += cvmx-qlm.o
obj-y += octeon_fdt.o
obj-y += octeon_qlm.o

View File

@ -25,6 +25,7 @@
#include <mach/octeon-model.h>
#include <mach/octeon-feature.h>
#include <mach/bootoct_cmd.h>
#include <mach/cvmx-ciu-defs.h>
DECLARE_GLOBAL_DATA_PTR;

View File

@ -3,6 +3,10 @@
* Copyright (C) 2020 Marvell International Ltd.
*/
#include <dm.h>
#include <dm/uclass.h>
#include <env.h>
#include <iomux.h>
#include <asm/global_data.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
@ -10,9 +14,26 @@
#include <linux/io.h>
#include <mach/clock.h>
#include <mach/cavm-reg.h>
#include <mach/cvmx-bootmem.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-sata-defs.h>
DECLARE_GLOBAL_DATA_PTR;
/*
* Important:
* This address cannot be changed as the PCI console tool relies on exactly
* this value!
*/
#define BOOTLOADER_BOOTMEM_DESC_ADDR 0x6c100
#define BOOTLOADER_BOOTMEM_DESC_SPACE (BOOTLOADER_BOOTMEM_DESC_ADDR + 0x8)
#define OCTEON_RESERVED_LOW_BOOT_MEM_SIZE (1024 * 1024)
#define BOOTCMD_NAME "pci-bootcmd"
#define CONSOLE_NAME "pci-console@0"
#define OCTEON_BOOTLOADER_LOAD_MEM_NAME "__tmp_load"
/*
* TRUE for devices having registers with little-endian byte
* order, FALSE for registers with native-endian byte order.
@ -30,7 +51,6 @@ const bool octeon_should_swizzle_table[256] = {
[0x1e] = true, /* PCI mmio window */
[0x68] = true, /* OCTEON III USB */
[0x69] = true, /* OCTEON III USB */
[0x6c] = true, /* OCTEON III SATA */
[0x6f] = true, /* OCTEON II USB */
};
@ -85,3 +105,329 @@ int print_cpuinfo(void)
return 0;
}
/*
 * Initialize the legacy cvmx bootmem allocator and publish the physical
 * address of its descriptor at the fixed location BOOTLOADER_BOOTMEM_DESC_ADDR
 * (the PCI console host tool relies on exactly that address — see the
 * comment at the #define). Also reserves the low 1 MiB of DRAM.
 *
 * Return: 0 on success, -ENOSPC if the bootmem list could not be created.
 */
static int octeon_bootmem_init(void)
{
	int ret;

	/* Call old single-node func: it uses only gd->ram_size */
	ret = cvmx_bootmem_phy_mem_list_init(gd->ram_size,
					     OCTEON_RESERVED_LOW_BOOT_MEM_SIZE,
					     (void *)CKSEG0ADDR(BOOTLOADER_BOOTMEM_DESC_SPACE));
	/* NOTE: this init function returns zero on FAILURE, non-zero on success */
	if (!ret) {
		printf("FATAL: Error initializing bootmem list\n");
		return -ENOSPC;
	}

	/*
	 * Put bootmem descriptor address in known location for host.
	 * Make sure it is not in kseg0, as we want physical address
	 */
	writeq((u64)__cvmx_bootmem_internal_get_desc_ptr() & 0x7fffffffull,
	       (void *)CKSEG0ADDR(BOOTLOADER_BOOTMEM_DESC_ADDR));

	debug("Reserving first 1MB of memory\n");
	ret = cvmx_bootmem_reserve_memory(0, OCTEON_RESERVED_LOW_BOOT_MEM_SIZE,
					  "__low_reserved", 0);
	if (!ret)
		puts("Error reserving low 1MB of memory\n");

#ifdef DEBUG
	cvmx_bootmem_phy_list_print();
#endif

	return 0;
}
/*
 * Reserve a named bootmem block ("__tmp_load") used as the image load area
 * and point the "loadaddr" environment variable at its base.
 *
 * Size and base come from the environment variables
 * "octeon_reserved_mem_load_size" / "octeon_reserved_mem_load_base"
 * (hex); either may be unset or "auto", in which case heuristics based on
 * gd->ram_size are used. The placement logic works around the Octeon
 * boot-bus memory hole in the 256 MiB..512 MiB physical range.
 *
 * Return: 0 on success (also when the named-block allocation itself fails,
 *	   which is only reported via printf), -1 if the computed size is 0.
 */
static int octeon_configure_load_memory(void)
{
	char *eptr;
	u32 addr;
	u32 size;
	int ret;

	eptr = env_get("octeon_reserved_mem_load_size");
	if (!eptr || !strcmp("auto", eptr)) {
		/*
		 * Pick a size that we think is appropriate.
		 * Please note that for small memory boards this guess
		 * will likely not be ideal.
		 * Please pick a specific size for boards/applications
		 * that require it.
		 */
		if (gd->ram_size <= (256 << 20)) {
			/* <=256MiB: 2/5 of RAM, capped at 128 MiB, MiB-aligned */
			size = min_t(u64, (128 << 20),
				     ((gd->ram_size * 2) / 5) & ~0xFFFFF);
		} else {
			/* >256MiB: 1/3 of the excess, capped at 256 MiB */
			size = min_t(u64, (256 << 20),
				     ((gd->ram_size - (256 << 20)) / 3) & ~0xFFFFF);
		}
	} else {
		size = simple_strtol(eptr, NULL, 16);
		debug("octeon_reserved_mem_load_size=0x%08x\n", size);
	}

	if (size) {
		debug("Linux reserved load size 0x%08x\n", size);
		eptr = env_get("octeon_reserved_mem_load_base");
		if (!eptr || !strcmp("auto", eptr)) {
			u64 mem_top;
			/*
			 * Leave some room for previous allocations that
			 * are made starting at the top of the low
			 * 256 Mbytes of DRAM
			 */
			int adjust = (1 << 20);

			if (gd->ram_size <= (512 << 20))
				adjust = (17 << 20);

			/* Put block at the top of DDR0, or bottom of DDR2 */
			if ((gd->ram_size <= (256 << 20)) ||
			    (size > (gd->ram_size - (256 << 20)))) {
				mem_top = min_t(u64, gd->ram_size - adjust,
						(256 << 20) - adjust);
			} else if ((gd->ram_size <= (512 << 20)) ||
				   (size > (gd->ram_size - (512 << 20)))) {
				mem_top = min_t(u64, gd->ram_size - adjust,
						(512 << 20) - adjust);
			} else {
				/*
				 * We have enough room, so set
				 * mem_top so that the block is
				 * at the base of the DDR2
				 * segment
				 */
				mem_top = (512 << 20) + size;
			}

			/*
			 * Adjust for boot bus memory hole on OCTEON II
			 * and later.
			 */
			if ((gd->ram_size > (256 << 20)))
				mem_top += (256 << 20);

			debug("Adjusted memory top is 0x%llx\n", mem_top);
			addr = mem_top - size;
			if (addr > (512 << 20))
				addr = (512 << 20);
			if ((addr >= (256 << 20)) && addr < (512 << 20)) {
				/*
				 * The address landed in the boot-bus
				 * memory hole. Dig it out of the hole.
				 */
				addr = (512 << 20);
			}
		} else {
			addr = simple_strtol(eptr, NULL, 16);
		}

		ret = cvmx_bootmem_phy_named_block_alloc(size, addr,
							 addr + size, 0,
							 OCTEON_BOOTLOADER_LOAD_MEM_NAME,
							 0);
		if (ret < 0) {
			printf("ERROR: Unable to allocate bootloader reserved memory (addr: 0x%x, size: 0x%x).\n",
			       addr, size);
		} else {
			/*
			 * Set default load address to base of memory
			 * reserved for loading. The setting of the
			 * env. variable also sets the load_addr global
			 * variable.
			 * This environment variable is overridden each
			 * boot if a reserved block is created.
			 */
			char str[20];

			snprintf(str, sizeof(str), "0x%x", addr);
			env_set("loadaddr", str);
			debug("Setting load address to 0x%08x, size 0x%x\n",
			      addr, size);
		}
		return 0;
	}

	printf("WARNING: No reserved memory for image loading.\n");
	return -1;
}
/*
 * Wire the PCIe remote console device (CONSOLE_NAME, "pci-console@0") into
 * the U-Boot stdio MUX so that stdin/stdout/stderr are mirrored over PCIe.
 * Any std* environment variable that is unset is first defaulted to
 * "serial"; if the console device is already listed in a variable it is
 * kept as-is, otherwise it is appended (",<dev>").
 *
 * Fix vs. original: strncpy() does not NUL-terminate the destination when
 * the source string fills the buffer, so a long std* environment value
 * could leave iomux_name unterminated (undefined behavior in the later
 * env_set()/printf). snprintf() always terminates.
 *
 * Return: 0 if the console device is absent or was set up successfully,
 *	   negative error code otherwise.
 */
static int init_pcie_console(void)
{
	char *stdinname = env_get("stdin");
	char *stdoutname = env_get("stdout");
	char *stderrname = env_get("stderr");
	struct udevice *pcie_console_dev = NULL;
	bool stdin_set, stdout_set, stderr_set;
	char iomux_name[128];
	int ret = 0;

	debug("%s: stdin: %s, stdout: %s, stderr: %s\n", __func__, stdinname,
	      stdoutname, stderrname);

	/* Default any unset console variable to the serial device */
	if (!stdinname) {
		env_set("stdin", "serial");
		stdinname = env_get("stdin");
	}
	if (!stdoutname) {
		env_set("stdout", "serial");
		stdoutname = env_get("stdout");
	}
	if (!stderrname) {
		env_set("stderr", "serial");
		stderrname = env_get("stderr");
	}

	if (!stdinname || !stdoutname || !stderrname) {
		printf("%s: Error setting environment variables for serial\n",
		       __func__);
		return -1;
	}

	/* Is the PCIe console already part of each MUX list? */
	stdin_set = !!strstr(stdinname, CONSOLE_NAME);
	stdout_set = !!strstr(stdoutname, CONSOLE_NAME);
	stderr_set = !!strstr(stderrname, CONSOLE_NAME);

	log_debug("stdin: %d, \"%s\", stdout: %d, \"%s\", stderr: %d, \"%s\"\n",
		  stdin_set, stdinname, stdout_set, stdoutname,
		  stderr_set, stderrname);

	ret = uclass_get_device_by_name(UCLASS_SERIAL, CONSOLE_NAME,
					&pcie_console_dev);
	if (ret || !pcie_console_dev) {
		debug("%s: No PCI console device %s found\n", __func__,
		      CONSOLE_NAME);
		return 0;
	}

	/* snprintf (not strncpy) so iomux_name is always NUL-terminated */
	if (stdin_set)
		snprintf(iomux_name, sizeof(iomux_name), "%s", stdinname);
	else
		snprintf(iomux_name, sizeof(iomux_name), "%s,%s",
			 stdinname, pcie_console_dev->name);

	ret = iomux_doenv(stdin, iomux_name);
	if (ret) {
		log_err("%s: Error setting I/O stdin MUX to %s\n",
			__func__, iomux_name);
		return ret;
	}

	if (!stdin_set)
		env_set("stdin", iomux_name);

	if (stdout_set)
		snprintf(iomux_name, sizeof(iomux_name), "%s", stdoutname);
	else
		snprintf(iomux_name, sizeof(iomux_name), "%s,%s", stdoutname,
			 pcie_console_dev->name);

	ret = iomux_doenv(stdout, iomux_name);
	if (ret) {
		log_err("%s: Error setting I/O stdout MUX to %s\n",
			__func__, iomux_name);
		return ret;
	}
	if (!stdout_set)
		env_set("stdout", iomux_name);

	if (stderr_set)
		snprintf(iomux_name, sizeof(iomux_name), "%s", stderrname);
	else
		snprintf(iomux_name, sizeof(iomux_name), "%s,%s", stderrname,
			 pcie_console_dev->name);

	ret = iomux_doenv(stderr, iomux_name);
	if (ret) {
		log_err("%s: Error setting I/O stderr MUX to %s\n",
			__func__, iomux_name);
		return ret;
	}
	if (!stderr_set)
		env_set("stderr", iomux_name);

	debug("%s: stdin: %s, stdout: %s, stderr: %s, ret: %d\n",
	      __func__, env_get("stdin"), env_get("stdout"),
	      env_get("stderr"), ret);

	return ret;
}
/*
 * Wire the PCI "bootcmd" virtual serial device into the stdin MUX so a
 * host on the PCIe side can inject commands. If "stdin" already lists
 * BOOTCMD_NAME it is kept as-is, otherwise the device is appended.
 *
 * Fixes vs. original:
 *  - iomux_name was printed by the final debug() even when the uclass
 *    lookup failed and the buffer had never been written — reading an
 *    uninitialized array is undefined behavior. Zero-initialize it.
 *  - strncpy() could leave iomux_name unterminated when "stdin" fills the
 *    buffer; snprintf() always NUL-terminates.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int init_bootcmd_console(void)
{
	char *stdinname = env_get("stdin");
	struct udevice *bootcmd_dev = NULL;
	bool stdin_set;
	char iomux_name[128] = { 0 };	/* printed below even on error paths */
	int ret = 0;

	debug("%s: stdin before: %s\n", __func__,
	      stdinname ? stdinname : "NONE");

	if (!stdinname) {
		env_set("stdin", "serial");
		stdinname = env_get("stdin");
	}

	/* Is the bootcmd device already part of the stdin MUX list? */
	stdin_set = !!strstr(stdinname, BOOTCMD_NAME);

	ret = uclass_get_device_by_driver(UCLASS_SERIAL,
					  DM_DRIVER_GET(octeon_bootcmd),
					  &bootcmd_dev);
	if (ret) {
		log_err("%s: Error getting %s serial class\n", __func__,
			BOOTCMD_NAME);
	} else if (bootcmd_dev) {
		/* snprintf guarantees NUL termination, unlike strncpy */
		if (stdin_set)
			snprintf(iomux_name, sizeof(iomux_name), "%s",
				 stdinname);
		else
			snprintf(iomux_name, sizeof(iomux_name), "%s,%s",
				 stdinname, bootcmd_dev->name);

		ret = iomux_doenv(stdin, iomux_name);
		if (ret)
			log_err("%s: Error %d enabling the PCI bootcmd input console \"%s\"\n",
				__func__, ret, iomux_name);
		if (!stdin_set)
			env_set("stdin", iomux_name);
	}

	debug("%s: Set iomux and stdin to %s (ret: %d)\n",
	      __func__, iomux_name, ret);

	return ret;
}
/*
 * Late architecture init hook: bring up the bootmem allocator, carve out
 * the image load area, and optionally attach the PCIe console/bootcmd
 * devices (their errors are intentionally not propagated).
 *
 * Return: 0 on success, negative error from the bootmem/load-memory setup.
 */
int arch_misc_init(void)
{
	int err = octeon_bootmem_init();

	if (!err)
		err = octeon_configure_load_memory();
	if (err)
		return err;

	if (CONFIG_IS_ENABLED(OCTEON_SERIAL_PCIE_CONSOLE))
		init_pcie_console();
	if (CONFIG_IS_ENABLED(OCTEON_SERIAL_BOOTCMD))
		init_bootcmd_console();

	return 0;
}
/*
 * Configure the SATA UCTL shim endianness so the common little-endian AHCI
 * driver works on this big-endian platform.
 * NOTE(review): presumably the board hook called by U-Boot's AHCI support
 * before the controller is used — confirm against the AHCI uclass code.
 *
 * Return: always 0.
 */
int board_ahci_enable(void)
{
	cvmx_sata_uctl_shim_cfg_t shim_cfg;

	/*
	 * Configure proper endian swapping for the AHCI port so that the
	 * common AHCI code can be used
	 */
	shim_cfg.u64 = csr_rd(CVMX_SATA_UCTL_SHIM_CFG);
	shim_cfg.s.dma_endian_mode = 1;
	/* Use 1 for LE mode when running BE, or 3 for BE mode running BE */
	shim_cfg.s.csr_endian_mode = 3;	/* Don't byte swap */
	shim_cfg.s.dma_read_cmd = 1;	/* No allocate L2C */
	csr_wr(CVMX_SATA_UCTL_SHIM_CFG, shim_cfg.u64);

	return 0;
}

View File

@ -21,12 +21,6 @@
DECLARE_GLOBAL_DATA_PTR;
#define CVMX_MIPS32_SPACE_KSEG0 1L
#define CVMX_MIPS_SPACE_XKPHYS 2LL
#define CVMX_ADD_SEG(seg, add) ((((u64)(seg)) << 62) | (add))
#define CVMX_ADD_SEG32(seg, add) (((u32)(seg) << 31) | (u32)(add))
/**
* This is the physical location of a struct cvmx_bootmem_desc
structure in Octeon's memory. Note that due to addressing
@ -289,8 +283,8 @@ static int __cvmx_bootmem_check_version(int exact_match)
int major_version;
major_version = CVMX_BOOTMEM_DESC_GET_FIELD(major_version);
if (major_version > 3 ||
(exact_match && major_version) != exact_match) {
if ((major_version > 3) ||
(exact_match && major_version != exact_match)) {
debug("ERROR: Incompatible bootmem descriptor version: %d.%d at addr: 0x%llx\n",
major_version,
(int)CVMX_BOOTMEM_DESC_GET_FIELD(minor_version),

View File

@ -14,6 +14,7 @@
#include <mach/cvmx-fuse.h>
#include <mach/octeon-model.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-ciu-defs.h>
struct cvmx_coremask *get_coremask_override(struct cvmx_coremask *pcm)
{

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,970 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* FDT Helper functions similar to those provided to U-Boot.
*/
#include <log.h>
#include <malloc.h>
#include <net.h>
#include <linux/delay.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/octeon_fdt.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-fdt.h>
#include <mach/cvmx-helper-gpio.h>
/** Maps a device-tree "compatible" string to a GPIO controller type */
struct gpio_compat {
	char *compatible;	  /**< DT compatible string to match */
	enum cvmx_gpio_type type; /**< GPIO controller type for this chip */
	int8_t size;		  /**< (max) Number of pins on the chip */
};
#define GPIO_REG_PCA953X_IN 0
#define GPIO_REG_PCA953X_OUT 1
#define GPIO_REG_PCA953X_INVERT 2
#define GPIO_REG_PCA953X_DIR 3
#define GPIO_REG_PCA957X_IN 0
#define GPIO_REG_PCA957X_INVERT 1
#define GPIO_REG_PCA957X_CFG 4
#define GPIO_REG_PCA957X_OUT 5
/* An I2C mux routes one downstream channel; a switch can enable several */
enum cvmx_i2c_mux_type { I2C_MUX, I2C_SWITCH };

/** Maps a device-tree "compatible" string to an I2C mux/switch chip type */
struct mux_compat {
	char *compatible;		 /**< DT compatible string to match */
	enum cvmx_i2c_bus_type type;	 /**< Mux chip type */
	enum cvmx_i2c_mux_type mux_type; /**< Type of mux (mux vs. switch) */
	u8 enable;			 /**< Enable bit for mux */
	u8 size;			 /**< (max) Number of channels */
};
/**
 * Shared SE/U-Boot allocator that always returns zero-filled memory.
 *
 * @param size	number of bytes to allocate
 *
 * @return pointer to the zeroed allocation, or NULL on failure.
 *	   Alignment is set to 8-bytes.
 */
void *__cvmx_fdt_alloc(size_t size)
{
	/* calloc() provides the required zero-initialization */
	return calloc(1, size);
}
/**
 * Release memory obtained from __cvmx_fdt_alloc().
 *
 * @param ptr	pointer to memory to free (NULL is a safe no-op)
 * @param size	unused in U-Boot; only the SE build needs the size
 *
 * NOTE: This only works in U-Boot since SE does not really have a freeing
 * mechanism. In SE the memory is zeroed out.
 */
void __cvmx_fdt_free(void *ptr, size_t size)
{
	(void)size;	/* size is only meaningful for the SE allocator */
	free(ptr);
}
/**
 * Resolve a property containing a list of phandles into node offsets.
 *
 * @param[in]	fdt_addr	pointer to FDT blob
 * @param	node		node to read the phandle property from
 * @param[in]	prop_name	name of property to find
 * @param[in,out] lenp		in: capacity of @nodes, out: phandles stored
 * @param[out]	nodes		array receiving the resolved node offsets
 *
 * @return 0 for success, -FDT_ERR_NOTFOUND if the property is absent
 */
int cvmx_fdt_lookup_phandles(const void *fdt_addr, int node,
			     const char *prop_name, int *lenp,
			     int *nodes)
{
	const u32 *phandles;
	int avail;
	int idx;

	phandles = fdt_getprop(fdt_addr, node, prop_name, &avail);
	if (!phandles || avail < 0)
		return -FDT_ERR_NOTFOUND;

	avail /= 4;	/* property length in bytes -> number of u32 cells */
	if (avail > *lenp)
		avail = *lenp;	/* never overrun the caller's array */

	for (idx = 0; idx < avail; idx++)
		nodes[idx] = fdt_node_offset_by_phandle(fdt_addr,
							fdt32_to_cpu(phandles[idx]));

	*lenp = avail;
	return 0;
}
/**
 * Given a FDT node, return the CPU node number it belongs to.
 *
 * The node's ancestors are walked until a "simple-bus" compatible node is
 * found; that bus's first "ranges" translation encodes the CPU node.
 *
 * @param[in]	fdt_addr	Address of FDT
 * @param	node		FDT node number
 *
 * @return CPU node number or negative FDT error code
 */
int cvmx_fdt_get_cpu_node(const void *fdt_addr, int node)
{
	const u32 *ranges;
	int len = 0;
	int bus = node;

	/* Climb up the tree to the enclosing simple-bus node */
	for (;;) {
		if (fdt_node_check_compatible(fdt_addr, bus, "simple-bus") == 0)
			break;
		bus = fdt_parent_offset(fdt_addr, bus);
		if (bus < 0)
			return bus;
	}

	ranges = fdt_getprop(fdt_addr, bus, "ranges", &len);
	if (!ranges)
		return len;
	if (len == 0)
		return 0;	/* empty ranges: identity mapping, node 0 */
	if (len < 24)
		return -FDT_ERR_TRUNCATED;

	return fdt32_to_cpu(ranges[2]) / 0x10;
}
/**
 * Get the total size of the flat device tree.
 *
 * @param[in]	fdt_addr	Address of FDT
 *
 * @return size of the FDT in bytes, or a negative FDT error code if the
 *	   header is invalid
 */
int cvmx_fdt_get_fdt_size(const void *fdt_addr)
{
	int err = fdt_check_header(fdt_addr);

	return err ? err : fdt_totalsize(fdt_addr);
}
/**
 * Check a node against a list of compatible strings.
 *
 * @param[in]	fdt_addr	Pointer to flat device tree
 * @param	node		Node offset to check
 * @param[in]	strlist		Array of FDT compatibility strings,
 *				terminated by NULL or an empty string.
 *
 * @return 0 if at least one entry matches, 1 if none match
 */
int cvmx_fdt_node_check_compatible_list(const void *fdt_addr, int node, const char *const *strlist)
{
	const char *const *entry;

	for (entry = strlist; *entry && **entry; entry++) {
		if (fdt_node_check_compatible(fdt_addr, node, *entry) == 0)
			return 0;	/* found a match */
	}

	return 1;	/* nothing matched */
}
/**
 * Find the next node matching any entry of a compatible-string list.
 *
 * @param[in]	fdt_addr	Pointer to flat device tree
 * @param	startoffset	Starting node offset or -1 to find the first
 * @param	strlist		Array of FDT compatibility strings, terminated
 *				by NULL or an empty string.
 *
 * @return next matching node offset, or -1 if no more matches
 */
int cvmx_fdt_node_offset_by_compatible_list(const void *fdt_addr, int startoffset,
					    const char *const *strlist)
{
	int node = startoffset;

	while ((node = fdt_next_node(fdt_addr, node, NULL)) >= 0) {
		if (!cvmx_fdt_node_check_compatible_list(fdt_addr, node, strlist))
			return node;
	}

	return -1;
}
/**
 * Attaches a PHY to a SFP or QSFP, linking the two descriptors both ways.
 *
 * @param sfp		sfp to attach PHY to
 * @param phy_info	phy descriptor to attach or NULL to detach
 */
void cvmx_sfp_attach_phy(struct cvmx_fdt_sfp_info *sfp, struct cvmx_phy_info *phy_info)
{
	sfp->phy_info = phy_info;
	if (!phy_info)
		return;		/* detach: nothing to back-link */

	phy_info->sfp_info = sfp;
}
/**
 * Assigns an IPD port to a SFP slot
 *
 * For QSFP slots the interface mode decides how the four lane entries are
 * filled; for plain SFP only entry 0 is used and the rest are set to -1.
 *
 * @param sfp		Handle to SFP data structure
 * @param ipd_port	Port to assign it to
 *
 * @return 0 for success, -1 on error (unsupported QSFP interface mode)
 */
int cvmx_sfp_set_ipd_port(struct cvmx_fdt_sfp_info *sfp, int ipd_port)
{
	int i;

	if (sfp->is_qsfp) {
		int xiface;
		cvmx_helper_interface_mode_t mode;

		xiface = cvmx_helper_get_interface_num(ipd_port);
		mode = cvmx_helper_interface_get_mode(xiface);
		sfp->ipd_port[0] = ipd_port;

		switch (mode) {
		case CVMX_HELPER_INTERFACE_MODE_SGMII:
		case CVMX_HELPER_INTERFACE_MODE_XFI:
		case CVMX_HELPER_INTERFACE_MODE_10G_KR:
			/* Lanes are independent: one IPD port per lane */
			for (i = 1; i < 4; i++)
				sfp->ipd_port[i] = cvmx_helper_get_ipd_port(xiface, i);
			break;
		case CVMX_HELPER_INTERFACE_MODE_XLAUI:
		case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
			/* Aggregated 40G link: only entry 0 carries a port */
			sfp->ipd_port[0] = ipd_port;
			for (i = 1; i < 4; i++)
				sfp->ipd_port[i] = -1;
			break;
		default:
			debug("%s: Interface mode %s for interface 0x%x, ipd_port %d not supported for QSFP\n",
			      __func__, cvmx_helper_interface_mode_to_string(mode), xiface,
			      ipd_port);
			return -1;
		}
	} else {
		/* Plain SFP: single port, remaining entries invalid */
		sfp->ipd_port[0] = ipd_port;
		for (i = 1; i < 4; i++)
			sfp->ipd_port[i] = -1;
	}

	return 0;
}
/**
 * Parses all of the channels assigned to a VSC7224 device
 *
 * Walks the "vitesse,vsc7224-channel" child nodes of the given device,
 * allocates a cvmx_vsc7224_chan per channel (up to 4, one per "reg" 0..3),
 * reads the equalization tap table, resolves the channel's MAC via the
 * "sfp-mac"/"qsfp-mac" phandle and links the channel into the matching
 * SFP slot's channel list.
 *
 * @param[in]	fdt_addr	Address of flat device tree
 * @param	of_offset	Offset of vsc7224 node
 * @param[in,out] vsc7224	Data structure to hold the data
 *
 * @return	0 for success, -1 on error
 */
static int cvmx_fdt_parse_vsc7224_channels(const void *fdt_addr, int of_offset,
					   struct cvmx_vsc7224 *vsc7224)
{
	int parent_offset = of_offset;
	int err = 0;
	int reg;
	/*
	 * NOTE(review): num_chan is never incremented, so the
	 * "num_chan < 4" loop guard is effectively always true and
	 * termination relies on the FDT walk/parent check — confirm intent.
	 */
	int num_chan = 0;
	struct cvmx_vsc7224_chan *channel;
	struct cvmx_fdt_sfp_info *sfp_info;
	int len;
	int num_taps;
	int i;
	const u32 *tap_values;
	int of_mac;
	int xiface, index;
	bool is_tx;
	bool is_qsfp;
	const char *mac_str;

	debug("%s(%p, %d, %s)\n", __func__, fdt_addr, of_offset, vsc7224->name);
	do {
		/* Walk through all channels */
		of_offset = fdt_node_offset_by_compatible(fdt_addr, of_offset,
							  "vitesse,vsc7224-channel");
		if (of_offset == -FDT_ERR_NOTFOUND) {
			break;
		} else if (of_offset < 0) {
			debug("%s: Failed finding compatible channel\n",
			      __func__);
			err = -1;
			break;
		}
		/* Stop once the walk leaves this device's children */
		if (fdt_parent_offset(fdt_addr, of_offset) != parent_offset)
			break;
		reg = cvmx_fdt_get_int(fdt_addr, of_offset, "reg", -1);
		if (reg < 0 || reg > 3) {
			debug("%s: channel reg is either not present or out of range\n",
			      __func__);
			err = -1;
			break;
		}
		is_tx = cvmx_fdt_get_bool(fdt_addr, of_offset, "direction-tx");
		debug("%s(%s): Adding %cx channel %d\n",
		      __func__, vsc7224->name, is_tx ? 't' : 'r',
		      reg);
		/* "taps" is a table of (len, main, pre, post) u32 quads */
		tap_values = (const uint32_t *)fdt_getprop(fdt_addr, of_offset, "taps", &len);
		if (!tap_values) {
			debug("%s: Error: no taps defined for vsc7224 channel %d\n",
			      __func__, reg);
			err = -1;
			break;
		}
		if (vsc7224->channel[reg]) {
			debug("%s: Error: channel %d already assigned at %p\n",
			      __func__, reg,
			      vsc7224->channel[reg]);
			err = -1;
			break;
		}
		/* Each tap entry is 4 u32 cells = 16 bytes */
		if (len % 16) {
			debug("%s: Error: tap format error for channel %d\n",
			      __func__, reg);
			err = -1;
			break;
		}
		num_taps = len / 16;
		debug("%s: Adding %d taps\n", __func__, num_taps);

		/* Channel struct has a trailing flexible tap array */
		channel = __cvmx_fdt_alloc(sizeof(*channel) +
					   num_taps * sizeof(struct cvmx_vsc7224_tap));
		if (!channel) {
			debug("%s: Out of memory\n", __func__);
			err = -1;
			break;
		}
		vsc7224->channel[reg] = channel;
		channel->num_taps = num_taps;
		channel->lane = reg;
		channel->of_offset = of_offset;
		channel->is_tx = is_tx;
		channel->pretap_disable = cvmx_fdt_get_bool(fdt_addr, of_offset, "pretap-disable");
		channel->posttap_disable =
			cvmx_fdt_get_bool(fdt_addr, of_offset, "posttap-disable");
		channel->vsc7224 = vsc7224;
		/* Read all the tap values */
		for (i = 0; i < num_taps; i++) {
			channel->taps[i].len = fdt32_to_cpu(tap_values[i * 4 + 0]);
			channel->taps[i].main_tap = fdt32_to_cpu(tap_values[i * 4 + 1]);
			channel->taps[i].pre_tap = fdt32_to_cpu(tap_values[i * 4 + 2]);
			channel->taps[i].post_tap = fdt32_to_cpu(tap_values[i * 4 + 3]);
			debug("%s: tap %d: len: %d, main_tap: 0x%x, pre_tap: 0x%x, post_tap: 0x%x\n",
			      __func__, i, channel->taps[i].len, channel->taps[i].main_tap,
			      channel->taps[i].pre_tap, channel->taps[i].post_tap);
		}
		/* Now find out which interface it's mapped to */
		channel->ipd_port = -1;

		/*
		 * NOTE(review): is_qsfp is set here but not used further in
		 * this function — possibly consumed in a later change.
		 */
		mac_str = "sfp-mac";
		if (fdt_getprop(fdt_addr, of_offset, mac_str, NULL)) {
			is_qsfp = false;
		} else if (fdt_getprop(fdt_addr, of_offset, "qsfp-mac", NULL)) {
			is_qsfp = true;
			mac_str = "qsfp-mac";
		} else {
			debug("%s: Error: MAC not found for %s channel %d\n", __func__,
			      vsc7224->name, reg);
			return -1;
		}
		of_mac = cvmx_fdt_lookup_phandle(fdt_addr, of_offset, mac_str);
		if (of_mac < 0) {
			debug("%s: Error %d with MAC %s phandle for %s\n", __func__, of_mac,
			      mac_str, vsc7224->name);
			return -1;
		}

		debug("%s: Found mac at offset %d\n", __func__, of_mac);
		err = cvmx_helper_cfg_get_xiface_index_by_fdt_node_offset(of_mac, &xiface, &index);
		if (!err) {
			channel->xiface = xiface;
			channel->index = index;
			channel->ipd_port = cvmx_helper_get_ipd_port(xiface, index);

			debug("%s: Found MAC, xiface: 0x%x, index: %d, ipd port: %d\n", __func__,
			      xiface, index, channel->ipd_port);
			if (channel->ipd_port >= 0) {
				cvmx_helper_cfg_set_vsc7224_chan_info(xiface, index, channel);
				debug("%s: Storing config channel for xiface 0x%x, index %d\n",
				      __func__, xiface, index);
			}
			sfp_info = cvmx_helper_cfg_get_sfp_info(xiface, index);
			if (!sfp_info) {
				debug("%s: Warning: no (Q)SFP+ slot found for xinterface 0x%x, index %d for channel %d\n",
				      __func__, xiface, index, channel->lane);
				continue;
			}

			/* Link it at the head of the slot's channel list */
			channel->next = sfp_info->vsc7224_chan;
			if (sfp_info->vsc7224_chan)
				sfp_info->vsc7224_chan->prev = channel;
			sfp_info->vsc7224_chan = channel;
			sfp_info->is_vsc7224 = true;
			debug("%s: Registering VSC7224 %s channel %d with SFP %s\n", __func__,
			      vsc7224->name, channel->lane, sfp_info->name);
			if (!sfp_info->mod_abs_changed) {
				debug("%s: Registering cvmx_sfp_vsc7224_mod_abs_changed at %p for xinterface 0x%x, index %d\n",
				      __func__, &cvmx_sfp_vsc7224_mod_abs_changed, xiface, index);
				cvmx_sfp_register_mod_abs_changed(
					sfp_info,
					&cvmx_sfp_vsc7224_mod_abs_changed,
					NULL);
			}
		}
	} while (!err && num_chan < 4);

	return err;
}
/**
 * @INTERNAL
 * Parses all instances of the Vitesse VSC7224 reclocking chip
 *
 * Finds every "vitesse,vsc7224" node, allocates a descriptor, reads its
 * I2C address/bus and optional "reset"/"los" GPIOs, then parses its
 * channel sub-nodes. Parsing is done at most once per boot (static flag).
 *
 * @param[in]	fdt_addr	Address of flat device tree
 *
 * @return 0 for success, error otherwise
 */
int __cvmx_fdt_parse_vsc7224(const void *fdt_addr)
{
	int of_offset = -1;
	struct cvmx_vsc7224 *vsc7224 = NULL;
	struct cvmx_fdt_gpio_info *gpio_info = NULL;
	int err = 0;
	int of_parent;
	static bool parsed;	/* guard: only parse the FDT once */

	debug("%s(%p)\n", __func__, fdt_addr);

	if (parsed) {
		debug("%s: Already parsed\n", __func__);
		return 0;
	}
	do {
		of_offset = fdt_node_offset_by_compatible(fdt_addr, of_offset,
							  "vitesse,vsc7224");
		debug("%s: of_offset: %d\n", __func__, of_offset);
		if (of_offset == -FDT_ERR_NOTFOUND) {
			break;
		} else if (of_offset < 0) {
			err = -1;
			debug("%s: Error %d parsing FDT\n",
			      __func__, of_offset);
			break;
		}

		vsc7224 = __cvmx_fdt_alloc(sizeof(*vsc7224));
		if (!vsc7224) {
			debug("%s: Out of memory!\n", __func__);
			return -1;
		}
		vsc7224->of_offset = of_offset;
		vsc7224->i2c_addr = cvmx_fdt_get_int(fdt_addr, of_offset,
						     "reg", -1);
		/* The I2C bus is described by the chip's parent node */
		of_parent = fdt_parent_offset(fdt_addr, of_offset);
		vsc7224->i2c_bus = cvmx_fdt_get_i2c_bus(fdt_addr, of_parent);
		if (vsc7224->i2c_addr < 0) {
			debug("%s: Error: reg field missing\n", __func__);
			err = -1;
			break;
		}
		if (!vsc7224->i2c_bus) {
			debug("%s: Error getting i2c bus\n", __func__);
			err = -1;
			break;
		}
		vsc7224->name = fdt_get_name(fdt_addr, of_offset, NULL);
		debug("%s: Adding %s\n", __func__, vsc7224->name);

		/* Optional reset and loss-of-signal GPIOs */
		if (fdt_getprop(fdt_addr, of_offset, "reset", NULL)) {
			gpio_info = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "reset");
			vsc7224->reset_gpio = gpio_info;
		}
		if (fdt_getprop(fdt_addr, of_offset, "los", NULL)) {
			gpio_info = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "los");
			vsc7224->los_gpio = gpio_info;
		}
		debug("%s: Parsing channels\n", __func__);
		err = cvmx_fdt_parse_vsc7224_channels(fdt_addr, of_offset, vsc7224);
		if (err) {
			debug("%s: Error parsing VSC7224 channels\n", __func__);
			break;
		}
	} while (of_offset > 0);

	if (err) {
		/* Unwind the allocations made for the failing instance */
		debug("%s(): Error\n", __func__);
		if (vsc7224) {
			if (vsc7224->reset_gpio)
				__cvmx_fdt_free(vsc7224->reset_gpio, sizeof(*vsc7224->reset_gpio));
			if (vsc7224->los_gpio)
				__cvmx_fdt_free(vsc7224->los_gpio, sizeof(*vsc7224->los_gpio));
			if (vsc7224->i2c_bus)
				cvmx_fdt_free_i2c_bus(vsc7224->i2c_bus);
			__cvmx_fdt_free(vsc7224, sizeof(*vsc7224));
		}
	}
	if (!err)
		parsed = true;

	return err;
}
/**
 * @INTERNAL
 * Parses all instances of the Avago AVSP5410 gearbox phy
 *
 * Walks every "avago,avsp-5410" node in the flat device tree, allocates a
 * cvmx_avsp5410 descriptor for each, resolves its i2c bus/address and the
 * MAC ((q)sfp-mac phandle) it is wired to, then registers the phy with the
 * helper configuration layer and, when a slot exists, with the SFP layer.
 * Only the first call does any work; later calls return 0 immediately.
 *
 * @param[in] fdt_addr Address of flat device tree
 *
 * @return 0 for success, error otherwise
 */
int __cvmx_fdt_parse_avsp5410(const void *fdt_addr)
{
	int of_offset = -1;
	struct cvmx_avsp5410 *avsp5410 = NULL;
	struct cvmx_fdt_sfp_info *sfp_info;
	int err = 0;
	int of_parent;
	static bool parsed;	/* set once the FDT has been walked */
	int of_mac;
	int xiface, index;
	bool is_qsfp;
	const char *mac_str;

	debug("%s(%p)\n", __func__, fdt_addr);
	if (parsed) {
		debug("%s: Already parsed\n", __func__);
		return 0;
	}
	do {
		/* Next matching node; of_offset == -1 starts from the top */
		of_offset = fdt_node_offset_by_compatible(fdt_addr, of_offset,
							  "avago,avsp-5410");
		debug("%s: of_offset: %d\n", __func__, of_offset);
		if (of_offset == -FDT_ERR_NOTFOUND) {
			/* No more instances - normal loop exit */
			break;
		} else if (of_offset < 0) {
			err = -1;
			debug("%s: Error %d parsing FDT\n", __func__, of_offset);
			break;
		}
		avsp5410 = __cvmx_fdt_alloc(sizeof(*avsp5410));
		if (!avsp5410) {
			debug("%s: Out of memory!\n", __func__);
			return -1;
		}
		avsp5410->of_offset = of_offset;
		/* i2c address comes from "reg"; the bus from the parent node */
		avsp5410->i2c_addr = cvmx_fdt_get_int(fdt_addr, of_offset,
						      "reg", -1);
		of_parent = fdt_parent_offset(fdt_addr, of_offset);
		avsp5410->i2c_bus = cvmx_fdt_get_i2c_bus(fdt_addr, of_parent);
		if (avsp5410->i2c_addr < 0) {
			debug("%s: Error: reg field missing\n", __func__);
			err = -1;
			break;
		}
		if (!avsp5410->i2c_bus) {
			debug("%s: Error getting i2c bus\n", __func__);
			err = -1;
			break;
		}
		avsp5410->name = fdt_get_name(fdt_addr, of_offset, NULL);
		debug("%s: Adding %s\n", __func__, avsp5410->name);
		/* Now find out which interface it's mapped to */
		avsp5410->ipd_port = -1;
		mac_str = "sfp-mac";
		if (fdt_getprop(fdt_addr, of_offset, mac_str, NULL)) {
			is_qsfp = false;
		} else if (fdt_getprop(fdt_addr, of_offset, "qsfp-mac", NULL)) {
			is_qsfp = true;
			mac_str = "qsfp-mac";
		} else {
			debug("%s: Error: MAC not found for %s\n", __func__, avsp5410->name);
			return -1;
		}
		of_mac = cvmx_fdt_lookup_phandle(fdt_addr, of_offset, mac_str);
		if (of_mac < 0) {
			debug("%s: Error %d with MAC %s phandle for %s\n", __func__, of_mac,
			      mac_str, avsp5410->name);
			return -1;
		}
		debug("%s: Found mac at offset %d\n", __func__, of_mac);
		err = cvmx_helper_cfg_get_xiface_index_by_fdt_node_offset(of_mac, &xiface, &index);
		if (!err) {
			avsp5410->xiface = xiface;
			avsp5410->index = index;
			avsp5410->ipd_port = cvmx_helper_get_ipd_port(xiface, index);
			debug("%s: Found MAC, xiface: 0x%x, index: %d, ipd port: %d\n", __func__,
			      xiface, index, avsp5410->ipd_port);
			if (avsp5410->ipd_port >= 0) {
				cvmx_helper_cfg_set_avsp5410_info(xiface, index, avsp5410);
				debug("%s: Storing config phy for xiface 0x%x, index %d\n",
				      __func__, xiface, index);
			}
			sfp_info = cvmx_helper_cfg_get_sfp_info(xiface, index);
			if (!sfp_info) {
				debug("%s: Warning: no (Q)SFP+ slot found for xinterface 0x%x, index %d\n",
				      __func__, xiface, index);
				/* Not fatal: move on to the next phy node */
				continue;
			}
			sfp_info->is_avsp5410 = true;
			sfp_info->avsp5410 = avsp5410;
			debug("%s: Registering AVSP5410 %s with SFP %s\n", __func__, avsp5410->name,
			      sfp_info->name);
			/* Install the mod_abs handler only if none is set yet */
			if (!sfp_info->mod_abs_changed) {
				debug("%s: Registering cvmx_sfp_avsp5410_mod_abs_changed at %p for xinterface 0x%x, index %d\n",
				      __func__, &cvmx_sfp_avsp5410_mod_abs_changed, xiface, index);
				cvmx_sfp_register_mod_abs_changed(
					sfp_info,
					&cvmx_sfp_avsp5410_mod_abs_changed,
					NULL);
			}
		}
	} while (of_offset > 0);
	if (err) {
		debug("%s(): Error\n", __func__);
		/* Free only the instance whose parsing failed */
		if (avsp5410) {
			if (avsp5410->i2c_bus)
				cvmx_fdt_free_i2c_bus(avsp5410->i2c_bus);
			__cvmx_fdt_free(avsp5410, sizeof(*avsp5410));
		}
	}
	if (!err)
		parsed = true;
	return err;
}
/**
 * Parse QSFP GPIOs for SFP
 *
 * Resolves the GPIO phandle properties of a QSFP slot node ("select",
 * "mod_prs", "reset", "interrupt", "lp_mode") and stores the resulting
 * GPIO descriptors in @sfp_info.
 *
 * @param[in] fdt_addr Pointer to flat device tree
 * @param of_offset Offset of QSFP node
 * @param[out] sfp_info Pointer to sfp info to fill in
 *
 * @return 0 for success
 */
static int cvmx_parse_qsfp(const void *fdt_addr, int of_offset, struct cvmx_fdt_sfp_info *sfp_info)
{
	/* Each property below names a GPIO phandle on the slot node */
	sfp_info->lp_mode = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "lp_mode");
	sfp_info->interrupt = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "interrupt");
	sfp_info->reset = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "reset");
	sfp_info->mod_abs = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "mod_prs");
	sfp_info->select = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "select");
	return 0;
}
/**
 * Parse SFP GPIOs for SFP
 *
 * Resolves the GPIO phandle properties of an SFP slot node ("mod_abs",
 * "rx_los", "tx_disable", "tx_error") and stores the resulting GPIO
 * descriptors in @sfp_info.
 *
 * @param[in] fdt_addr Pointer to flat device tree
 * @param of_offset Offset of SFP node
 * @param[out] sfp_info Pointer to sfp info to fill in
 *
 * @return 0 for success
 */
static int cvmx_parse_sfp(const void *fdt_addr, int of_offset, struct cvmx_fdt_sfp_info *sfp_info)
{
	/* Each property below names a GPIO phandle on the slot node */
	sfp_info->tx_error = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "tx_error");
	sfp_info->tx_disable = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "tx_disable");
	sfp_info->rx_los = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "rx_los");
	sfp_info->mod_abs = cvmx_fdt_gpio_get_info_phandle(fdt_addr, of_offset, "mod_abs");
	return 0;
}
/**
 * Parse SFP/QSFP EEPROM and diag
 *
 * Follows the "eeprom" phandle of the slot node to find the module EEPROM's
 * i2c bus and address, and the optional "diag" phandle for the diagnostics
 * device address.
 *
 * @param[in] fdt_addr Pointer to flat device tree
 * @param of_offset Offset of SFP node
 * @param[out] sfp_info Pointer to sfp info to fill in
 *
 * @return 0 for success, -1 on error
 */
static int cvmx_parse_sfp_eeprom(const void *fdt_addr, int of_offset,
				 struct cvmx_fdt_sfp_info *sfp_info)
{
	int eeprom_node;
	int diag_node;

	debug("%s(%p, %d, %s)\n", __func__, fdt_addr, of_offset, sfp_info->name);
	/* The "eeprom" phandle points at the module's i2c EEPROM node */
	eeprom_node = cvmx_fdt_lookup_phandle(fdt_addr, of_offset, "eeprom");
	if (eeprom_node < 0) {
		debug("%s: Missing \"eeprom\" from device tree for %s\n", __func__, sfp_info->name);
		return -1;
	}
	/* Bus comes from the EEPROM's parent node, the address from "reg" */
	sfp_info->i2c_bus = cvmx_fdt_get_i2c_bus(fdt_addr, fdt_parent_offset(fdt_addr, eeprom_node));
	sfp_info->i2c_eeprom_addr = cvmx_fdt_get_int(fdt_addr, eeprom_node, "reg", 0x50);
	debug("%s(%p, %d, %s, %d)\n", __func__, fdt_addr, of_offset, sfp_info->name,
	      sfp_info->i2c_eeprom_addr);
	if (!sfp_info->i2c_bus) {
		debug("%s: Error: could not determine i2c bus for eeprom for %s\n", __func__,
		      sfp_info->name);
		return -1;
	}
	/* The diagnostics device is optional; fall back to address 0x51 */
	diag_node = cvmx_fdt_lookup_phandle(fdt_addr, of_offset, "diag");
	sfp_info->i2c_diag_addr = diag_node >= 0 ?
		cvmx_fdt_get_int(fdt_addr, diag_node, "reg", 0x51) : 0x51;
	return 0;
}
/**
 * Parse SFP information from device tree
 *
 * Builds a cvmx_fdt_sfp_info structure for an "ethernet,sfp-slot" or
 * "ethernet,qsfp-slot" node: GPIO descriptors, EEPROM/diagnostics i2c
 * info and the default mod_abs-changed check handler.
 *
 * @param[in] fdt_addr Address of flat device tree
 * @param of_offset node offset of the (Q)SFP slot
 *
 * @return pointer to sfp info or NULL if error
 */
struct cvmx_fdt_sfp_info *cvmx_helper_fdt_parse_sfp_info(const void *fdt_addr, int of_offset)
{
	struct cvmx_fdt_sfp_info *sfp_info = NULL;
	int err = -1;
	bool is_qsfp;
	/* fdt_node_check_compatible() returns 0 on a match */
	if (!fdt_node_check_compatible(fdt_addr, of_offset, "ethernet,sfp-slot")) {
		is_qsfp = false;
	} else if (!fdt_node_check_compatible(fdt_addr, of_offset, "ethernet,qsfp-slot")) {
		is_qsfp = true;
	} else {
		debug("%s: Error: incompatible sfp/qsfp slot, compatible=%s\n", __func__,
		      (char *)fdt_getprop(fdt_addr, of_offset, "compatible", NULL));
		goto error_exit;
	}
	debug("%s: %ssfp module found at offset %d\n", __func__, is_qsfp ? "q" : "", of_offset);
	sfp_info = __cvmx_fdt_alloc(sizeof(*sfp_info));
	if (!sfp_info) {
		debug("%s: Error: out of memory\n", __func__);
		goto error_exit;
	}
	sfp_info->name = fdt_get_name(fdt_addr, of_offset, NULL);
	sfp_info->of_offset = of_offset;
	sfp_info->is_qsfp = is_qsfp;
	/* -1 marks "signal state not yet sampled" */
	sfp_info->last_mod_abs = -1;
	sfp_info->last_rx_los = -1;
	if (is_qsfp)
		err = cvmx_parse_qsfp(fdt_addr, of_offset, sfp_info);
	else
		err = cvmx_parse_sfp(fdt_addr, of_offset, sfp_info);
	if (err) {
		debug("%s: Error in %s parsing %ssfp GPIO info\n", __func__, sfp_info->name,
		      is_qsfp ? "q" : "");
		goto error_exit;
	}
	debug("%s: Parsing %ssfp module eeprom\n", __func__, is_qsfp ? "q" : "");
	err = cvmx_parse_sfp_eeprom(fdt_addr, of_offset, sfp_info);
	if (err) {
		debug("%s: Error parsing eeprom info for %s\n", __func__, sfp_info->name);
		goto error_exit;
	}
	/* Register default check for mod_abs changed */
	if (!err)
		cvmx_sfp_register_check_mod_abs(sfp_info, cvmx_sfp_check_mod_abs, NULL);
error_exit:
	/* Note: we don't free any data structures on error since it gets
	 * rather complicated with i2c buses and whatnot.
	 */
	return err ? NULL : sfp_info;
}
/**
 * @INTERNAL
 * Parse a slice of the Inphi/Cortina CS4343 in the device tree
 *
 * Reads the slice number ("reg"), its register offset ("slice_offset"),
 * the SR/CX/1000Base-X transmit equalizer settings and the LED/LOS GPIO
 * assignments into phy_info->cs4343_info->slice[reg].
 *
 * @param[in] fdt_addr Address of flat device tree
 * @param of_offset fdt offset of slice
 * @param phy_info phy_info data structure
 *
 * @return slice number if non-negative, otherwise error
 */
static int cvmx_fdt_parse_cs4343_slice(const void *fdt_addr, int of_offset,
				       struct cvmx_phy_info *phy_info)
{
	struct cvmx_cs4343_slice_info *slice;
	int reg;
	int reg_offset;
	/* "reg" selects one of the four slices, "slice_offset" its CSR base */
	reg = cvmx_fdt_get_int(fdt_addr, of_offset, "reg", -1);
	reg_offset = cvmx_fdt_get_int(fdt_addr, of_offset, "slice_offset", -1);
	if (reg < 0 || reg >= 4) {
		debug("%s(%p, %d, %p): Error: reg %d undefined or out of range\n", __func__,
		      fdt_addr, of_offset, phy_info, reg);
		return -1;
	}
	/* Only 4K-aligned offsets in 0x0000-0x3000 are accepted */
	if (reg_offset % 0x1000 || reg_offset > 0x3000 || reg_offset < 0) {
		debug("%s(%p, %d, %p): Error: reg_offset 0x%x undefined or out of range\n",
		      __func__, fdt_addr, of_offset, phy_info, reg_offset);
		return -1;
	}
	if (!phy_info->cs4343_info) {
		debug("%s: Error: phy info cs4343 datastructure is NULL\n", __func__);
		return -1;
	}
	debug("%s(%p, %d, %p): %s, reg: %d, slice offset: 0x%x\n", __func__, fdt_addr, of_offset,
	      phy_info, fdt_get_name(fdt_addr, of_offset, NULL), reg, reg_offset);
	slice = &phy_info->cs4343_info->slice[reg];
	slice->name = fdt_get_name(fdt_addr, of_offset, NULL);
	slice->mphy = phy_info->cs4343_info;
	slice->phy_info = phy_info;
	slice->of_offset = of_offset;
	slice->slice_no = reg;
	slice->reg_offset = reg_offset;
	/* SR settings */
	slice->sr_stx_cmode_res = cvmx_fdt_get_int(fdt_addr, of_offset, "sr-stx-cmode-res", 3);
	slice->sr_stx_drv_lower_cm =
		cvmx_fdt_get_int(fdt_addr, of_offset, "sr-stx-drv-lower-cm", 8);
	slice->sr_stx_level = cvmx_fdt_get_int(fdt_addr, of_offset, "sr-stx-level", 0x1c);
	slice->sr_stx_pre_peak = cvmx_fdt_get_int(fdt_addr, of_offset, "sr-stx-pre-peak", 1);
	slice->sr_stx_muxsubrate_sel =
		cvmx_fdt_get_int(fdt_addr, of_offset, "sr-stx-muxsubrate-sel", 0);
	slice->sr_stx_post_peak = cvmx_fdt_get_int(fdt_addr, of_offset, "sr-stx-post-peak", 8);
	/* CX settings */
	slice->cx_stx_cmode_res = cvmx_fdt_get_int(fdt_addr, of_offset, "cx-stx-cmode-res", 3);
	slice->cx_stx_drv_lower_cm =
		cvmx_fdt_get_int(fdt_addr, of_offset, "cx-stx-drv-lower-cm", 8);
	slice->cx_stx_level = cvmx_fdt_get_int(fdt_addr, of_offset, "cx-stx-level", 0x1c);
	slice->cx_stx_pre_peak = cvmx_fdt_get_int(fdt_addr, of_offset, "cx-stx-pre-peak", 1);
	slice->cx_stx_muxsubrate_sel =
		cvmx_fdt_get_int(fdt_addr, of_offset, "cx-stx-muxsubrate-sel", 0);
	slice->cx_stx_post_peak = cvmx_fdt_get_int(fdt_addr, of_offset, "cx-stx-post-peak", 0xC);
	/* 1000Base-X settings */
	slice->basex_stx_cmode_res =
		cvmx_fdt_get_int(fdt_addr, of_offset, "basex-stx-cmode-res", 3);
	slice->basex_stx_drv_lower_cm =
		cvmx_fdt_get_int(fdt_addr, of_offset, "basex-stx-drv-lower-cm", 8);
	slice->basex_stx_level = cvmx_fdt_get_int(fdt_addr, of_offset,
						  "basex-stx-level", 0x1c);
	slice->basex_stx_pre_peak = cvmx_fdt_get_int(fdt_addr, of_offset,
						     "basex-stx-pre-peak", 1);
	slice->basex_stx_muxsubrate_sel =
		cvmx_fdt_get_int(fdt_addr, of_offset,
				 "basex-stx-muxsubrate-sel", 0);
	slice->basex_stx_post_peak =
		cvmx_fdt_get_int(fdt_addr, of_offset, "basex-stx-post-peak", 8);
	/* Get the link LED gpio pin; -1 means "not wired" */
	slice->link_gpio = cvmx_fdt_get_int(fdt_addr, of_offset,
					    "link-led-gpio", -1);
	slice->error_gpio = cvmx_fdt_get_int(fdt_addr, of_offset,
					     "error-led-gpio", -1);
	slice->los_gpio = cvmx_fdt_get_int(fdt_addr, of_offset,
					   "los-input-gpio", -1);
	slice->link_inverted = cvmx_fdt_get_bool(fdt_addr, of_offset,
						 "link-led-gpio-inverted");
	slice->error_inverted = cvmx_fdt_get_bool(fdt_addr, of_offset,
						  "error-led-gpio-inverted");
	slice->los_inverted = cvmx_fdt_get_bool(fdt_addr, of_offset,
						"los-input-gpio-inverted");
	/* Convert GPIOs to be die based if they're not already */
	if (slice->link_gpio > 4 && slice->link_gpio <= 8)
		slice->link_gpio -= 4;
	if (slice->error_gpio > 4 && slice->error_gpio <= 8)
		slice->error_gpio -= 4;
	if (slice->los_gpio > 4 && slice->los_gpio <= 8)
		slice->los_gpio -= 4;
	return reg;
}
/**
 * @INTERNAL
 * Parses either a CS4343 phy or a slice of the phy from the device tree
 *
 * When given a "cortina,cs4343-slice" node only that slice is parsed and
 * phy_info->cs4343_slice_info is set; when given a "cortina,cs4343" node
 * all of its child slices are parsed.
 *
 * @param[in] fdt_addr Address of FDT
 * @param of_offset offset of slice or phy in device tree
 * @param phy_info phy_info data structure to fill in
 *
 * @return 0 for success, -1 on error
 */
int cvmx_fdt_parse_cs4343(const void *fdt_addr, int of_offset, struct cvmx_phy_info *phy_info)
{
	int of_slice = -1;
	struct cvmx_cs4343_info *cs4343;
	int err = -1;
	int reg;
	debug("%s(%p, %d, %p): %s (%s)\n", __func__,
	      fdt_addr, of_offset, phy_info,
	      fdt_get_name(fdt_addr, of_offset, NULL),
	      (const char *)fdt_getprop(fdt_addr, of_offset, "compatible", NULL));
	/* Allocate the shared cs4343 structure on first use */
	if (!phy_info->cs4343_info)
		phy_info->cs4343_info = __cvmx_fdt_alloc(sizeof(struct cvmx_cs4343_info));
	if (!phy_info->cs4343_info) {
		debug("%s: Error: out of memory!\n", __func__);
		return -1;
	}
	cs4343 = phy_info->cs4343_info;
	/* If we're passed to a slice then process only that slice */
	if (!fdt_node_check_compatible(fdt_addr, of_offset, "cortina,cs4343-slice")) {
		err = 0;
		of_slice = of_offset;
		/* of_offset now refers to the parent phy node below */
		of_offset = fdt_parent_offset(fdt_addr, of_offset);
		reg = cvmx_fdt_parse_cs4343_slice(fdt_addr, of_slice, phy_info);
		if (reg >= 0)
			phy_info->cs4343_slice_info = &cs4343->slice[reg];
		else
			err = reg;
	} else if (!fdt_node_check_compatible(fdt_addr, of_offset,
					      "cortina,cs4343")) {
		/* Walk through and process all of the slices */
		of_slice =
			fdt_node_offset_by_compatible(fdt_addr, of_offset, "cortina,cs4343-slice");
		/* Only direct children of this phy node are its slices */
		while (of_slice > 0 && fdt_parent_offset(fdt_addr, of_slice) ==
		       of_offset) {
			debug("%s: Parsing slice %s\n", __func__,
			      fdt_get_name(fdt_addr, of_slice, NULL));
			err = cvmx_fdt_parse_cs4343_slice(fdt_addr, of_slice,
							  phy_info);
			if (err < 0)
				break;
			of_slice = fdt_node_offset_by_compatible(fdt_addr,
								 of_slice,
								 "cortina,cs4343-slice");
		}
	} else {
		debug("%s: Error: unknown compatible string %s for %s\n", __func__,
		      (const char *)fdt_getprop(fdt_addr, of_offset,
						"compatible", NULL),
		      fdt_get_name(fdt_addr, of_offset, NULL));
	}
	if (err >= 0) {
		cs4343->name = fdt_get_name(fdt_addr, of_offset, NULL);
		cs4343->phy_info = phy_info;
		cs4343->of_offset = of_offset;
	}
	return err < 0 ? -1 : 0;
}

View File

@ -0,0 +1,172 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Helper utilities for qlm_jtag.
*/
#include <log.h>
#include <asm/global_data.h>
#include <linux/delay.h>
#include <mach/cvmx-regs.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-ciu-defs.h>
DECLARE_GLOBAL_DATA_PTR;
/**
 * Initialize the internal QLM JTAG logic to allow programming
 * of the JTAG chain by the cvmx_helper_qlm_jtag_*() functions.
 * These functions should only be used at the direction of Cavium
 * Networks. Programming incorrect values into the JTAG chain
 * can cause chip damage.
 */
void cvmx_helper_qlm_jtag_init(void)
{
	union cvmx_ciu_qlm_jtgc jtgc;
	int clk_shift = 0;
	int div;

	/*
	 * Derive a divisor from the bus clock (target appears to be
	 * ~10 MHz on CN68XX, ~25 MHz elsewhere), then convert it into
	 * the power-of-2 shift the CLK_DIV field encodes.
	 */
	div = gd->bus_clk / (1000000 * (OCTEON_IS_MODEL(OCTEON_CN68XX) ? 10 : 25));
	div = (div - 1) >> 2;
	while (div) {
		clk_shift++;
		div >>= 1;
	}
	/*
	 * Clock divider for QLM JTAG operations. sclk is divided by
	 * 2^(CLK_DIV + 2)
	 */
	jtgc.u64 = 0;
	jtgc.s.clk_div = clk_shift;
	jtgc.s.mux_sel = 0;
	jtgc.s.bypass = (OCTEON_IS_MODEL(OCTEON_CN63XX) ||
			 OCTEON_IS_MODEL(OCTEON_CN66XX)) ? 0x7 : 0xf;
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		jtgc.s.bypass_ext = 1;
	csr_wr(CVMX_CIU_QLM_JTGC, jtgc.u64);
	csr_rd(CVMX_CIU_QLM_JTGC);	/* read back to flush the write */
}
/**
 * Write up to 32bits into the QLM jtag chain. Bits are shifted
 * into the MSB and out the LSB, so you should shift in the low
 * order bits followed by the high order bits. The JTAG chain for
 * CN52XX and CN56XX is 4 * 268 bits long, or 1072. The JTAG chain
 * for CN63XX is 4 * 300 bits long, or 1200.
 *
 * @param qlm QLM to shift value into
 * @param bits Number of bits to shift in (1-32).
 * @param data Data to shift in. Bit 0 enters the chain first, followed by
 * bit 1, etc.
 *
 * @return The low order bits of the JTAG chain that shifted out of the
 * circle.
 */
uint32_t cvmx_helper_qlm_jtag_shift(int qlm, int bits, uint32_t data)
{
	union cvmx_ciu_qlm_jtgc ctl;
	union cvmx_ciu_qlm_jtgd cmd;

	/* Point the JTAG logic at the requested QLM */
	ctl.u64 = csr_rd(CVMX_CIU_QLM_JTGC);
	ctl.s.mux_sel = qlm;
	csr_wr(CVMX_CIU_QLM_JTGC, ctl.u64);
	csr_rd(CVMX_CIU_QLM_JTGC);
	/* Start the shift and poll until the hardware clears the bit */
	cmd.u64 = 0;
	cmd.s.shift = 1;
	cmd.s.shft_cnt = bits - 1;
	cmd.s.shft_reg = data;
	cmd.s.select = 1 << qlm;
	csr_wr(CVMX_CIU_QLM_JTGD, cmd.u64);
	do {
		cmd.u64 = csr_rd(CVMX_CIU_QLM_JTGD);
	} while (cmd.s.shift);
	/* Shifted-out bits land in the top of SHFT_REG; right-justify them */
	return cmd.s.shft_reg >> (32 - bits);
}
/**
 * Shift long sequences of zeros into the QLM JTAG chain. It is
 * common to need to shift more than 32 bits of zeros into the
 * chain. This function is a convience wrapper around
 * cvmx_helper_qlm_jtag_shift() to shift more than 32 bits of
 * zeros at a time.
 *
 * @param qlm QLM to shift zeros into
 * @param bits Number of zero bits to shift in
 */
void cvmx_helper_qlm_jtag_shift_zeros(int qlm, int bits)
{
	int chunk;

	/* The shift primitive handles at most 32 bits per call */
	for (; bits > 0; bits -= chunk) {
		chunk = bits > 32 ? 32 : bits;
		cvmx_helper_qlm_jtag_shift(qlm, chunk, 0);
	}
}
/**
* Program the QLM JTAG chain into all lanes of the QLM. You must
* have already shifted in the proper number of bits into the
* JTAG chain. Updating invalid values can possibly cause chip damage.
*
* @param qlm QLM to program
*/
void cvmx_helper_qlm_jtag_update(int qlm)
{
union cvmx_ciu_qlm_jtgc jtgc;
union cvmx_ciu_qlm_jtgd jtgd;
jtgc.u64 = csr_rd(CVMX_CIU_QLM_JTGC);
jtgc.s.mux_sel = qlm;
csr_wr(CVMX_CIU_QLM_JTGC, jtgc.u64);
csr_rd(CVMX_CIU_QLM_JTGC);
/* Update the new data */
jtgd.u64 = 0;
jtgd.s.update = 1;
jtgd.s.select = 1 << qlm;
csr_wr(CVMX_CIU_QLM_JTGD, jtgd.u64);
do {
jtgd.u64 = csr_rd(CVMX_CIU_QLM_JTGD);
} while (jtgd.s.update);
}
/**
* Load the QLM JTAG chain with data from all lanes of the QLM.
*
* @param qlm QLM to program
*/
void cvmx_helper_qlm_jtag_capture(int qlm)
{
union cvmx_ciu_qlm_jtgc jtgc;
union cvmx_ciu_qlm_jtgd jtgd;
jtgc.u64 = csr_rd(CVMX_CIU_QLM_JTGC);
jtgc.s.mux_sel = qlm;
csr_wr(CVMX_CIU_QLM_JTGC, jtgc.u64);
csr_rd(CVMX_CIU_QLM_JTGC);
jtgd.u64 = 0;
jtgd.s.capture = 1;
jtgd.s.select = 1 << qlm;
csr_wr(CVMX_CIU_QLM_JTGD, jtgd.u64);
do {
jtgd.u64 = csr_rd(CVMX_CIU_QLM_JTGD);
} while (jtgd.s.capture);
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,209 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Typedefs and defines for working with Octeon physical addresses.
*/
#ifndef __CVMX_ADDRESS_H__
#define __CVMX_ADDRESS_H__
/* Top two bits (<63:62>) of a MIPS64 virtual address select the segment */
typedef enum {
	CVMX_MIPS_SPACE_XKSEG = 3LL,
	CVMX_MIPS_SPACE_XKPHYS = 2LL,
	CVMX_MIPS_SPACE_XSSEG = 1LL,
	CVMX_MIPS_SPACE_XUSEG = 0LL
} cvmx_mips_space_t;
/* Sub-space selector within XKSEG */
typedef enum {
	CVMX_MIPS_XKSEG_SPACE_KSEG0 = 0LL,
	CVMX_MIPS_XKSEG_SPACE_KSEG1 = 1LL,
	CVMX_MIPS_XKSEG_SPACE_SSEG = 2LL,
	CVMX_MIPS_XKSEG_SPACE_KSEG3 = 3LL
} cvmx_mips_xkseg_space_t;
/* decodes <14:13> of a kseg3 window address */
typedef enum {
	CVMX_ADD_WIN_SCR = 0L,
	CVMX_ADD_WIN_DMA = 1L,
	CVMX_ADD_WIN_UNUSED = 2L,
	CVMX_ADD_WIN_UNUSED2 = 3L
} cvmx_add_win_dec_t;
/* decode within DMA space (each comment refers to the enumerator above it) */
typedef enum {
	CVMX_ADD_WIN_DMA_ADD = 0L,
	CVMX_ADD_WIN_DMA_SENDMEM = 1L,
	/* store data must be normal DRAM memory space address in this case */
	CVMX_ADD_WIN_DMA_SENDDMA = 2L,
	/* see CVMX_ADD_WIN_DMA_SEND_DEC for data contents */
	CVMX_ADD_WIN_DMA_SENDIO = 3L,
	/* store data must be normal IO space address in this case */
	CVMX_ADD_WIN_DMA_SENDSINGLE = 4L,
	/* no write buffer data needed/used */
} cvmx_add_win_dma_dec_t;
/**
 * Physical Address Decode
 *
 * Octeon-I HW never interprets this X (<39:36> reserved
 * for future expansion), software should set to 0.
 *
 * - 0x0 XXX0 0000 0000 to DRAM Cached
 * - 0x0 XXX0 0FFF FFFF
 *
 * - 0x0 XXX0 1000 0000 to Boot Bus Uncached (Converted to 0x1 00X0 1000 0000
 * - 0x0 XXX0 1FFF FFFF + EJTAG to 0x1 00X0 1FFF FFFF)
 *
 * - 0x0 XXX0 2000 0000 to DRAM Cached
 * - 0x0 XXXF FFFF FFFF
 *
 * - 0x1 00X0 0000 0000 to Boot Bus Uncached
 * - 0x1 00XF FFFF FFFF
 *
 * - 0x1 01X0 0000 0000 to Other NCB Uncached
 * - 0x1 FFXF FFFF FFFF devices
 *
 * Decode of all Octeon addresses
 */
typedef union {
	u64 u64;
	/* Generic virtual address: R (top 2 bits) selects the segment */
	struct {
		cvmx_mips_space_t R : 2;
		u64 offset : 62;
	} sva;
	/* XUSEG (user) virtual address */
	struct {
		u64 zeroes : 33;
		u64 offset : 31;
	} suseg;
	/* XKSEG virtual address: sp selects kseg0/kseg1/sseg/kseg3 */
	struct {
		u64 ones : 33;
		cvmx_mips_xkseg_space_t sp : 2;
		u64 offset : 29;
	} sxkseg;
	/* XKPHYS address: cca is the cache attribute, pa the 49-bit
	 * physical address
	 */
	struct {
		cvmx_mips_space_t R : 2;
		u64 cca : 3;
		u64 mbz : 10;
		u64 pa : 49;
	} sxkphys;
	/* Physical address: is_io (bit 48) separates IO from DRAM space */
	struct {
		u64 mbz : 15;
		u64 is_io : 1;
		u64 did : 8;
		u64 unaddr : 4;
		u64 offset : 36;
	} sphys;
	/* Physical DRAM address */
	struct {
		u64 zeroes : 24;
		u64 unaddr : 4;
		u64 offset : 36;
	} smem;
	/* Physical IO address, split by memory region and device ID */
	struct {
		u64 mem_region : 2;
		u64 mbz : 13;
		u64 is_io : 1;
		u64 did : 8;
		u64 unaddr : 4;
		u64 offset : 36;
	} sio;
	/* Scratchpad (CVMX_ADD_WIN_SCR) kseg3 window address */
	struct {
		u64 ones : 49;
		cvmx_add_win_dec_t csrdec : 2;
		u64 addr : 13;
	} sscr;
	/* there should only be stores to IOBDMA space, no loads */
	struct {
		u64 ones : 49;
		cvmx_add_win_dec_t csrdec : 2;
		u64 unused2 : 3;
		cvmx_add_win_dma_dec_t type : 3;
		u64 addr : 7;
	} sdma;
	struct {
		u64 didspace : 24;
		u64 unused : 40;
	} sfilldidspace;
} cvmx_addr_t;
/* These macros are used by 32 bit applications */
#define CVMX_MIPS32_SPACE_KSEG0 1l
#define CVMX_ADD_SEG32(segment, add) (((s32)segment << 31) | (s32)(add))
/*
 * Currently all IOs are performed using XKPHYS addressing. Linux uses the
 * CvmMemCtl register to enable XKPHYS addressing to IO space from user mode.
 * Future OSes may need to change the upper bits of IO addresses. The
 * following define controls the upper two bits for all IO addresses generated
 * by the simple executive library
 */
#define CVMX_IO_SEG CVMX_MIPS_SPACE_XKPHYS
/* These macros simplify the process of creating common IO addresses */
#define CVMX_ADD_SEG(segment, add) ((((u64)segment) << 62) | (add))
/* Identity here: IO addresses are used as-is, no segment bits are added */
#define CVMX_ADD_IO_SEG(add) (add)
#define CVMX_ADDR_DIDSPACE(did) (((CVMX_IO_SEG) << 22) | ((1ULL) << 8) | (did))
#define CVMX_ADDR_DID(did) (CVMX_ADDR_DIDSPACE(did) << 40)
/* Combine a device ID with a 3-bit sub-DID */
#define CVMX_FULL_DID(did, subdid) (((did) << 3) | (subdid))
/* from include/ncb_rsl_id.v */
#define CVMX_OCT_DID_MIS 0ULL /* misc stuff */
#define CVMX_OCT_DID_GMX0 1ULL
#define CVMX_OCT_DID_GMX1 2ULL
#define CVMX_OCT_DID_PCI 3ULL
#define CVMX_OCT_DID_KEY 4ULL
#define CVMX_OCT_DID_FPA 5ULL
#define CVMX_OCT_DID_DFA 6ULL
#define CVMX_OCT_DID_ZIP 7ULL
#define CVMX_OCT_DID_RNG 8ULL
#define CVMX_OCT_DID_IPD 9ULL
#define CVMX_OCT_DID_PKT 10ULL
#define CVMX_OCT_DID_TIM 11ULL
#define CVMX_OCT_DID_TAG 12ULL
/* the rest are not on the IO bus */
#define CVMX_OCT_DID_L2C 16ULL
#define CVMX_OCT_DID_LMC 17ULL
#define CVMX_OCT_DID_SPX0 18ULL
#define CVMX_OCT_DID_SPX1 19ULL
#define CVMX_OCT_DID_PIP 20ULL
#define CVMX_OCT_DID_ASX0 22ULL
#define CVMX_OCT_DID_ASX1 23ULL
#define CVMX_OCT_DID_IOB 30ULL
/* Fully-decoded DID values (device ID + sub-DID) */
#define CVMX_OCT_DID_PKT_SEND CVMX_FULL_DID(CVMX_OCT_DID_PKT, 2ULL)
#define CVMX_OCT_DID_TAG_SWTAG CVMX_FULL_DID(CVMX_OCT_DID_TAG, 0ULL)
#define CVMX_OCT_DID_TAG_TAG1 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 1ULL)
#define CVMX_OCT_DID_TAG_TAG2 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 2ULL)
#define CVMX_OCT_DID_TAG_TAG3 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 3ULL)
#define CVMX_OCT_DID_TAG_NULL_RD CVMX_FULL_DID(CVMX_OCT_DID_TAG, 4ULL)
#define CVMX_OCT_DID_TAG_TAG5 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 5ULL)
#define CVMX_OCT_DID_TAG_CSR CVMX_FULL_DID(CVMX_OCT_DID_TAG, 7ULL)
#define CVMX_OCT_DID_FAU_FAI CVMX_FULL_DID(CVMX_OCT_DID_IOB, 0ULL)
#define CVMX_OCT_DID_TIM_CSR CVMX_FULL_DID(CVMX_OCT_DID_TIM, 0ULL)
#define CVMX_OCT_DID_KEY_RW CVMX_FULL_DID(CVMX_OCT_DID_KEY, 0ULL)
#define CVMX_OCT_DID_PCI_6 CVMX_FULL_DID(CVMX_OCT_DID_PCI, 6ULL)
#define CVMX_OCT_DID_MIS_BOO CVMX_FULL_DID(CVMX_OCT_DID_MIS, 0ULL)
#define CVMX_OCT_DID_PCI_RML CVMX_FULL_DID(CVMX_OCT_DID_PCI, 0ULL)
#define CVMX_OCT_DID_IPD_CSR CVMX_FULL_DID(CVMX_OCT_DID_IPD, 7ULL)
#define CVMX_OCT_DID_DFA_CSR CVMX_FULL_DID(CVMX_OCT_DID_DFA, 7ULL)
#define CVMX_OCT_DID_MIS_CSR CVMX_FULL_DID(CVMX_OCT_DID_MIS, 7ULL)
#define CVMX_OCT_DID_ZIP_CSR CVMX_FULL_DID(CVMX_OCT_DID_ZIP, 0ULL)
/* Cast to unsigned long long, mainly for use in printfs. */
#define CAST_ULL(v) ((unsigned long long)(v))
#define UNMAPPED_PTR(x) ((1ULL << 63) | (x))
#endif /* __CVMX_ADDRESS_H__ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,709 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Configuration and status register (CSR) type definitions for
* Octeon asxx.
*/
#ifndef __CVMX_ASXX_DEFS_H__
#define __CVMX_ASXX_DEFS_H__
/*
 * CSR addresses for the ASX blocks.  (offset)/(block_id) select the ASX
 * unit (0 or 1), spaced 0x8000000 apart; a few single-instance registers
 * ignore the argument.
 */
#define CVMX_ASXX_GMII_RX_CLK_SET(offset) (0x00011800B0000180ull)
#define CVMX_ASXX_GMII_RX_DAT_SET(offset) (0x00011800B0000188ull)
#define CVMX_ASXX_INT_EN(offset) (0x00011800B0000018ull + ((offset) & 1) * 0x8000000ull)
#define CVMX_ASXX_INT_REG(offset) (0x00011800B0000010ull + ((offset) & 1) * 0x8000000ull)
#define CVMX_ASXX_MII_RX_DAT_SET(offset) (0x00011800B0000190ull)
#define CVMX_ASXX_PRT_LOOP(offset) (0x00011800B0000040ull + ((offset) & 1) * 0x8000000ull)
#define CVMX_ASXX_RLD_BYPASS(offset) (0x00011800B0000248ull + ((offset) & 1) * 0x8000000ull)
#define CVMX_ASXX_RLD_BYPASS_SETTING(offset) (0x00011800B0000250ull + ((offset) & 1) * 0x8000000ull)
#define CVMX_ASXX_RLD_COMP(offset) (0x00011800B0000220ull + ((offset) & 1) * 0x8000000ull)
#define CVMX_ASXX_RLD_DATA_DRV(offset) (0x00011800B0000218ull + ((offset) & 1) * 0x8000000ull)
#define CVMX_ASXX_RLD_FCRAM_MODE(offset) (0x00011800B0000210ull + ((offset) & 1) * 0x8000000ull)
#define CVMX_ASXX_RLD_NCTL_STRONG(offset) (0x00011800B0000230ull + ((offset) & 1) * 0x8000000ull)
#define CVMX_ASXX_RLD_NCTL_WEAK(offset) (0x00011800B0000240ull + ((offset) & 1) * 0x8000000ull)
#define CVMX_ASXX_RLD_PCTL_STRONG(offset) (0x00011800B0000228ull + ((offset) & 1) * 0x8000000ull)
#define CVMX_ASXX_RLD_PCTL_WEAK(offset) (0x00011800B0000238ull + ((offset) & 1) * 0x8000000ull)
#define CVMX_ASXX_RLD_SETTING(offset) (0x00011800B0000258ull + ((offset) & 1) * 0x8000000ull)
#define CVMX_ASXX_RX_CLK_SETX(offset, block_id) \
	(0x00011800B0000020ull + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8)
#define CVMX_ASXX_RX_PRT_EN(offset) (0x00011800B0000000ull + ((offset) & 1) * 0x8000000ull)
#define CVMX_ASXX_RX_WOL(offset) (0x00011800B0000100ull + ((offset) & 1) * 0x8000000ull)
#define CVMX_ASXX_RX_WOL_MSK(offset) (0x00011800B0000108ull + ((offset) & 1) * 0x8000000ull)
#define CVMX_ASXX_RX_WOL_POWOK(offset) (0x00011800B0000118ull + ((offset) & 1) * 0x8000000ull)
#define CVMX_ASXX_RX_WOL_SIG(offset) (0x00011800B0000110ull + ((offset) & 1) * 0x8000000ull)
#define CVMX_ASXX_TX_CLK_SETX(offset, block_id) \
	(0x00011800B0000048ull + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8)
#define CVMX_ASXX_TX_COMP_BYP(offset) (0x00011800B0000068ull + ((offset) & 1) * 0x8000000ull)
#define CVMX_ASXX_TX_HI_WATERX(offset, block_id) \
	(0x00011800B0000080ull + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8)
#define CVMX_ASXX_TX_PRT_EN(offset) (0x00011800B0000008ull + ((offset) & 1) * 0x8000000ull)
/*
 * NOTE: the CSR unions below follow the usual cvmx convention: 'u64' is
 * the raw register value, 's' the common bitfield layout (highest bits
 * declared first, see the reserved_*_63 fields), and the per-model structs
 * (cn30xx, cn38xx, ...) cover chips whose field widths differ.
 */
/**
 * cvmx_asx#_gmii_rx_clk_set
 *
 * ASX_GMII_RX_CLK_SET = GMII Clock delay setting
 *
 */
union cvmx_asxx_gmii_rx_clk_set {
	u64 u64;
	struct cvmx_asxx_gmii_rx_clk_set_s {
		u64 reserved_5_63 : 59;
		u64 setting : 5;
	} s;
	struct cvmx_asxx_gmii_rx_clk_set_s cn30xx;
	struct cvmx_asxx_gmii_rx_clk_set_s cn31xx;
	struct cvmx_asxx_gmii_rx_clk_set_s cn50xx;
};
typedef union cvmx_asxx_gmii_rx_clk_set cvmx_asxx_gmii_rx_clk_set_t;
/**
 * cvmx_asx#_gmii_rx_dat_set
 *
 * ASX_GMII_RX_DAT_SET = GMII Clock delay setting
 *
 */
union cvmx_asxx_gmii_rx_dat_set {
	u64 u64;
	struct cvmx_asxx_gmii_rx_dat_set_s {
		u64 reserved_5_63 : 59;
		u64 setting : 5;
	} s;
	struct cvmx_asxx_gmii_rx_dat_set_s cn30xx;
	struct cvmx_asxx_gmii_rx_dat_set_s cn31xx;
	struct cvmx_asxx_gmii_rx_dat_set_s cn50xx;
};
typedef union cvmx_asxx_gmii_rx_dat_set cvmx_asxx_gmii_rx_dat_set_t;
/**
 * cvmx_asx#_int_en
 *
 * ASX_INT_EN = Interrupt Enable
 *
 */
union cvmx_asxx_int_en {
	u64 u64;
	struct cvmx_asxx_int_en_s {
		u64 reserved_12_63 : 52;
		u64 txpsh : 4;
		u64 txpop : 4;
		u64 ovrflw : 4;
	} s;
	/* 3-port models use 3-bit wide per-port fields */
	struct cvmx_asxx_int_en_cn30xx {
		u64 reserved_11_63 : 53;
		u64 txpsh : 3;
		u64 reserved_7_7 : 1;
		u64 txpop : 3;
		u64 reserved_3_3 : 1;
		u64 ovrflw : 3;
	} cn30xx;
	struct cvmx_asxx_int_en_cn30xx cn31xx;
	struct cvmx_asxx_int_en_s cn38xx;
	struct cvmx_asxx_int_en_s cn38xxp2;
	struct cvmx_asxx_int_en_cn30xx cn50xx;
	struct cvmx_asxx_int_en_s cn58xx;
	struct cvmx_asxx_int_en_s cn58xxp1;
};
typedef union cvmx_asxx_int_en cvmx_asxx_int_en_t;
/**
 * cvmx_asx#_int_reg
 *
 * ASX_INT_REG = Interrupt Register
 *
 */
union cvmx_asxx_int_reg {
	u64 u64;
	struct cvmx_asxx_int_reg_s {
		u64 reserved_12_63 : 52;
		u64 txpsh : 4;
		u64 txpop : 4;
		u64 ovrflw : 4;
	} s;
	/* 3-port models use 3-bit wide per-port fields */
	struct cvmx_asxx_int_reg_cn30xx {
		u64 reserved_11_63 : 53;
		u64 txpsh : 3;
		u64 reserved_7_7 : 1;
		u64 txpop : 3;
		u64 reserved_3_3 : 1;
		u64 ovrflw : 3;
	} cn30xx;
	struct cvmx_asxx_int_reg_cn30xx cn31xx;
	struct cvmx_asxx_int_reg_s cn38xx;
	struct cvmx_asxx_int_reg_s cn38xxp2;
	struct cvmx_asxx_int_reg_cn30xx cn50xx;
	struct cvmx_asxx_int_reg_s cn58xx;
	struct cvmx_asxx_int_reg_s cn58xxp1;
};
typedef union cvmx_asxx_int_reg cvmx_asxx_int_reg_t;
/**
 * cvmx_asx#_mii_rx_dat_set
 *
 * ASX_MII_RX_DAT_SET = GMII Clock delay setting
 *
 */
union cvmx_asxx_mii_rx_dat_set {
	u64 u64;
	struct cvmx_asxx_mii_rx_dat_set_s {
		u64 reserved_5_63 : 59;
		u64 setting : 5;
	} s;
	struct cvmx_asxx_mii_rx_dat_set_s cn30xx;
	struct cvmx_asxx_mii_rx_dat_set_s cn50xx;
};
typedef union cvmx_asxx_mii_rx_dat_set cvmx_asxx_mii_rx_dat_set_t;
/**
 * cvmx_asx#_prt_loop
 *
 * ASX_PRT_LOOP = Internal Loopback mode - TX FIFO output goes into RX FIFO (and maybe pins)
 *
 */
union cvmx_asxx_prt_loop {
	u64 u64;
	struct cvmx_asxx_prt_loop_s {
		u64 reserved_8_63 : 56;
		u64 ext_loop : 4;
		u64 int_loop : 4;
	} s;
	/* 3-port models use 3-bit wide per-port fields */
	struct cvmx_asxx_prt_loop_cn30xx {
		u64 reserved_7_63 : 57;
		u64 ext_loop : 3;
		u64 reserved_3_3 : 1;
		u64 int_loop : 3;
	} cn30xx;
	struct cvmx_asxx_prt_loop_cn30xx cn31xx;
	struct cvmx_asxx_prt_loop_s cn38xx;
	struct cvmx_asxx_prt_loop_s cn38xxp2;
	struct cvmx_asxx_prt_loop_cn30xx cn50xx;
	struct cvmx_asxx_prt_loop_s cn58xx;
	struct cvmx_asxx_prt_loop_s cn58xxp1;
};
typedef union cvmx_asxx_prt_loop cvmx_asxx_prt_loop_t;
/**
 * cvmx_asx#_rld_bypass
 *
 * ASX_RLD_BYPASS
 *
 */
union cvmx_asxx_rld_bypass {
	u64 u64;
	struct cvmx_asxx_rld_bypass_s {
		u64 reserved_1_63 : 63;
		u64 bypass : 1;
	} s;
	struct cvmx_asxx_rld_bypass_s cn38xx;
	struct cvmx_asxx_rld_bypass_s cn38xxp2;
	struct cvmx_asxx_rld_bypass_s cn58xx;
	struct cvmx_asxx_rld_bypass_s cn58xxp1;
};
typedef union cvmx_asxx_rld_bypass cvmx_asxx_rld_bypass_t;
/**
* cvmx_asx#_rld_bypass_setting
*
* ASX_RLD_BYPASS_SETTING
*
*/
union cvmx_asxx_rld_bypass_setting {
u64 u64;
struct cvmx_asxx_rld_bypass_setting_s {
u64 reserved_5_63 : 59;
u64 setting : 5;
} s;
struct cvmx_asxx_rld_bypass_setting_s cn38xx;
struct cvmx_asxx_rld_bypass_setting_s cn38xxp2;
struct cvmx_asxx_rld_bypass_setting_s cn58xx;
struct cvmx_asxx_rld_bypass_setting_s cn58xxp1;
};
typedef union cvmx_asxx_rld_bypass_setting cvmx_asxx_rld_bypass_setting_t;
/**
 * cvmx_asx#_rld_comp
 *
 * ASX_RLD_COMP
 *
 */
union cvmx_asxx_rld_comp {
	u64 u64;	/* raw 64-bit register value */
	struct cvmx_asxx_rld_comp_s {
		u64 reserved_9_63 : 55;
		u64 pctl : 5;
		u64 nctl : 4;
	} s;
	/* cn38xx parts use a narrower 4-bit PCTL field */
	struct cvmx_asxx_rld_comp_cn38xx {
		u64 reserved_8_63 : 56;
		u64 pctl : 4;
		u64 nctl : 4;
	} cn38xx;
	struct cvmx_asxx_rld_comp_cn38xx cn38xxp2;
	struct cvmx_asxx_rld_comp_s cn58xx;
	struct cvmx_asxx_rld_comp_s cn58xxp1;
};
typedef union cvmx_asxx_rld_comp cvmx_asxx_rld_comp_t;
/**
 * cvmx_asx#_rld_data_drv
 *
 * ASX_RLD_DATA_DRV
 *
 */
union cvmx_asxx_rld_data_drv {
	u64 u64;	/* raw 64-bit register value */
	struct cvmx_asxx_rld_data_drv_s {
		u64 reserved_8_63 : 56;
		u64 pctl : 4;
		u64 nctl : 4;
	} s;
	struct cvmx_asxx_rld_data_drv_s cn38xx;
	struct cvmx_asxx_rld_data_drv_s cn38xxp2;
	struct cvmx_asxx_rld_data_drv_s cn58xx;
	struct cvmx_asxx_rld_data_drv_s cn58xxp1;
};
typedef union cvmx_asxx_rld_data_drv cvmx_asxx_rld_data_drv_t;
/**
 * cvmx_asx#_rld_fcram_mode
 *
 * ASX_RLD_FCRAM_MODE
 *
 */
union cvmx_asxx_rld_fcram_mode {
	u64 u64;	/* raw 64-bit register value */
	struct cvmx_asxx_rld_fcram_mode_s {
		u64 reserved_1_63 : 63;
		u64 mode : 1;
	} s;
	/* only cn38xx-family variants are defined for this register */
	struct cvmx_asxx_rld_fcram_mode_s cn38xx;
	struct cvmx_asxx_rld_fcram_mode_s cn38xxp2;
};
typedef union cvmx_asxx_rld_fcram_mode cvmx_asxx_rld_fcram_mode_t;
/**
 * cvmx_asx#_rld_nctl_strong
 *
 * ASX_RLD_NCTL_STRONG
 *
 */
union cvmx_asxx_rld_nctl_strong {
	u64 u64;	/* raw 64-bit register value */
	struct cvmx_asxx_rld_nctl_strong_s {
		u64 reserved_5_63 : 59;
		u64 nctl : 5;
	} s;
	struct cvmx_asxx_rld_nctl_strong_s cn38xx;
	struct cvmx_asxx_rld_nctl_strong_s cn38xxp2;
	struct cvmx_asxx_rld_nctl_strong_s cn58xx;
	struct cvmx_asxx_rld_nctl_strong_s cn58xxp1;
};
typedef union cvmx_asxx_rld_nctl_strong cvmx_asxx_rld_nctl_strong_t;
/**
 * cvmx_asx#_rld_nctl_weak
 *
 * ASX_RLD_NCTL_WEAK
 *
 */
union cvmx_asxx_rld_nctl_weak {
	u64 u64;	/* raw 64-bit register value */
	struct cvmx_asxx_rld_nctl_weak_s {
		u64 reserved_5_63 : 59;
		u64 nctl : 5;
	} s;
	struct cvmx_asxx_rld_nctl_weak_s cn38xx;
	struct cvmx_asxx_rld_nctl_weak_s cn38xxp2;
	struct cvmx_asxx_rld_nctl_weak_s cn58xx;
	struct cvmx_asxx_rld_nctl_weak_s cn58xxp1;
};
typedef union cvmx_asxx_rld_nctl_weak cvmx_asxx_rld_nctl_weak_t;
/**
 * cvmx_asx#_rld_pctl_strong
 *
 * ASX_RLD_PCTL_STRONG
 *
 */
union cvmx_asxx_rld_pctl_strong {
	u64 u64;	/* raw 64-bit register value */
	struct cvmx_asxx_rld_pctl_strong_s {
		u64 reserved_5_63 : 59;
		u64 pctl : 5;
	} s;
	struct cvmx_asxx_rld_pctl_strong_s cn38xx;
	struct cvmx_asxx_rld_pctl_strong_s cn38xxp2;
	struct cvmx_asxx_rld_pctl_strong_s cn58xx;
	struct cvmx_asxx_rld_pctl_strong_s cn58xxp1;
};
typedef union cvmx_asxx_rld_pctl_strong cvmx_asxx_rld_pctl_strong_t;
/**
 * cvmx_asx#_rld_pctl_weak
 *
 * ASX_RLD_PCTL_WEAK
 *
 */
union cvmx_asxx_rld_pctl_weak {
	u64 u64;	/* raw 64-bit register value */
	struct cvmx_asxx_rld_pctl_weak_s {
		u64 reserved_5_63 : 59;
		u64 pctl : 5;
	} s;
	struct cvmx_asxx_rld_pctl_weak_s cn38xx;
	struct cvmx_asxx_rld_pctl_weak_s cn38xxp2;
	struct cvmx_asxx_rld_pctl_weak_s cn58xx;
	struct cvmx_asxx_rld_pctl_weak_s cn58xxp1;
};
typedef union cvmx_asxx_rld_pctl_weak cvmx_asxx_rld_pctl_weak_t;
/**
 * cvmx_asx#_rld_setting
 *
 * ASX_RLD_SETTING
 *
 */
union cvmx_asxx_rld_setting {
	u64 u64;	/* raw 64-bit register value */
	struct cvmx_asxx_rld_setting_s {
		u64 reserved_13_63 : 51;
		u64 dfaset : 5;
		u64 dfalag : 1;
		u64 dfalead : 1;
		u64 dfalock : 1;
		u64 setting : 5;
	} s;
	/* cn38xx parts expose only the 5-bit SETTING field (no DFA fields) */
	struct cvmx_asxx_rld_setting_cn38xx {
		u64 reserved_5_63 : 59;
		u64 setting : 5;
	} cn38xx;
	struct cvmx_asxx_rld_setting_cn38xx cn38xxp2;
	struct cvmx_asxx_rld_setting_s cn58xx;
	struct cvmx_asxx_rld_setting_s cn58xxp1;
};
typedef union cvmx_asxx_rld_setting cvmx_asxx_rld_setting_t;
/**
 * cvmx_asx#_rx_clk_set#
 *
 * ASX_RX_CLK_SET = RGMII Clock delay setting
 *
 *
 * Notes:
 * Setting to place on the open-loop RXC (RGMII receive clk)
 * delay line, which can delay the received clock. This
 * can be used if the board and/or transmitting device
 * has not otherwise delayed the clock.
 *
 * A value of SETTING=0 disables the delay line. The delay
 * line should be disabled unless the transmitter or board
 * does not delay the clock.
 *
 * Note that this delay line provides only a coarse control
 * over the delay. Generally, it can only reliably provide
 * a delay in the range 1.25-2.5ns, which may not be adequate
 * for some system applications.
 *
 * The open loop delay line selects
 * from among a series of tap positions. Each incremental
 * tap position adds a delay of 50ps to 135ps per tap, depending
 * on the chip, its temperature, and the voltage.
 * To achieve from 1.25-2.5ns of delay on the received
 * clock, a fixed value of SETTING=24 may work.
 * For more precision, we recommend the following settings
 * based on the chip voltage:
 *
 * VDD SETTING
 * -----------------------------
 * 1.0 18
 * 1.05 19
 * 1.1 21
 * 1.15 22
 * 1.2 23
 * 1.25 24
 * 1.3 25
 */
union cvmx_asxx_rx_clk_setx {
	u64 u64;	/* raw 64-bit register value */
	struct cvmx_asxx_rx_clk_setx_s {
		u64 reserved_5_63 : 59;
		u64 setting : 5;	/* delay-line tap selection, 0 = disabled */
	} s;
	struct cvmx_asxx_rx_clk_setx_s cn30xx;
	struct cvmx_asxx_rx_clk_setx_s cn31xx;
	struct cvmx_asxx_rx_clk_setx_s cn38xx;
	struct cvmx_asxx_rx_clk_setx_s cn38xxp2;
	struct cvmx_asxx_rx_clk_setx_s cn50xx;
	struct cvmx_asxx_rx_clk_setx_s cn58xx;
	struct cvmx_asxx_rx_clk_setx_s cn58xxp1;
};
typedef union cvmx_asxx_rx_clk_setx cvmx_asxx_rx_clk_setx_t;
/**
 * cvmx_asx#_rx_prt_en
 *
 * ASX_RX_PRT_EN = RGMII Port Enable
 *
 */
union cvmx_asxx_rx_prt_en {
	u64 u64;	/* raw 64-bit register value */
	struct cvmx_asxx_rx_prt_en_s {
		u64 reserved_4_63 : 60;
		u64 prt_en : 4;	/* per-port enable bits */
	} s;
	/* cn30xx-family parts have only 3 ports */
	struct cvmx_asxx_rx_prt_en_cn30xx {
		u64 reserved_3_63 : 61;
		u64 prt_en : 3;
	} cn30xx;
	struct cvmx_asxx_rx_prt_en_cn30xx cn31xx;
	struct cvmx_asxx_rx_prt_en_s cn38xx;
	struct cvmx_asxx_rx_prt_en_s cn38xxp2;
	struct cvmx_asxx_rx_prt_en_cn30xx cn50xx;
	struct cvmx_asxx_rx_prt_en_s cn58xx;
	struct cvmx_asxx_rx_prt_en_s cn58xxp1;
};
typedef union cvmx_asxx_rx_prt_en cvmx_asxx_rx_prt_en_t;
/**
 * cvmx_asx#_rx_wol
 *
 * ASX_RX_WOL = RGMII RX Wake on LAN status register
 *
 */
union cvmx_asxx_rx_wol {
	u64 u64;	/* raw 64-bit register value */
	struct cvmx_asxx_rx_wol_s {
		u64 reserved_2_63 : 62;
		u64 status : 1;
		u64 enable : 1;
	} s;
	struct cvmx_asxx_rx_wol_s cn38xx;
	struct cvmx_asxx_rx_wol_s cn38xxp2;
};
typedef union cvmx_asxx_rx_wol cvmx_asxx_rx_wol_t;
/**
 * cvmx_asx#_rx_wol_msk
 *
 * ASX_RX_WOL_MSK = RGMII RX Wake on LAN byte mask
 *
 */
union cvmx_asxx_rx_wol_msk {
	u64 u64;	/* raw 64-bit register value */
	struct cvmx_asxx_rx_wol_msk_s {
		u64 msk : 64;	/* full-width byte mask */
	} s;
	struct cvmx_asxx_rx_wol_msk_s cn38xx;
	struct cvmx_asxx_rx_wol_msk_s cn38xxp2;
};
typedef union cvmx_asxx_rx_wol_msk cvmx_asxx_rx_wol_msk_t;
/**
 * cvmx_asx#_rx_wol_powok
 *
 * ASX_RX_WOL_POWOK = RGMII RX Wake on LAN Power OK
 *
 */
union cvmx_asxx_rx_wol_powok {
	u64 u64;	/* raw 64-bit register value */
	struct cvmx_asxx_rx_wol_powok_s {
		u64 reserved_1_63 : 63;
		u64 powerok : 1;
	} s;
	struct cvmx_asxx_rx_wol_powok_s cn38xx;
	struct cvmx_asxx_rx_wol_powok_s cn38xxp2;
};
typedef union cvmx_asxx_rx_wol_powok cvmx_asxx_rx_wol_powok_t;
/**
 * cvmx_asx#_rx_wol_sig
 *
 * ASX_RX_WOL_SIG = RGMII RX Wake on LAN CRC signature
 *
 */
union cvmx_asxx_rx_wol_sig {
	u64 u64;	/* raw 64-bit register value */
	struct cvmx_asxx_rx_wol_sig_s {
		u64 reserved_32_63 : 32;
		u64 sig : 32;	/* 32-bit CRC signature */
	} s;
	struct cvmx_asxx_rx_wol_sig_s cn38xx;
	struct cvmx_asxx_rx_wol_sig_s cn38xxp2;
};
typedef union cvmx_asxx_rx_wol_sig cvmx_asxx_rx_wol_sig_t;
/**
 * cvmx_asx#_tx_clk_set#
 *
 * ASX_TX_CLK_SET = RGMII Clock delay setting
 *
 *
 * Notes:
 * Setting to place on the open-loop TXC (RGMII transmit clk)
 * delay line, which can delay the transmitted clock. This
 * can be used if the board and/or transmitting device
 * has not otherwise delayed the clock.
 *
 * A value of SETTING=0 disables the delay line. The delay
 * line should be disabled unless the transmitter or board
 * does not delay the clock.
 *
 * Note that this delay line provides only a coarse control
 * over the delay. Generally, it can only reliably provide
 * a delay in the range 1.25-2.5ns, which may not be adequate
 * for some system applications.
 *
 * The open loop delay line selects
 * from among a series of tap positions. Each incremental
 * tap position adds a delay of 50ps to 135ps per tap, depending
 * on the chip, its temperature, and the voltage.
 * To achieve from 1.25-2.5ns of delay on the received
 * clock, a fixed value of SETTING=24 may work.
 * For more precision, we recommend the following settings
 * based on the chip voltage:
 *
 * VDD SETTING
 * -----------------------------
 * 1.0 18
 * 1.05 19
 * 1.1 21
 * 1.15 22
 * 1.2 23
 * 1.25 24
 * 1.3 25
 */
union cvmx_asxx_tx_clk_setx {
	u64 u64;	/* raw 64-bit register value */
	struct cvmx_asxx_tx_clk_setx_s {
		u64 reserved_5_63 : 59;
		u64 setting : 5;	/* delay-line tap selection, 0 = disabled */
	} s;
	struct cvmx_asxx_tx_clk_setx_s cn30xx;
	struct cvmx_asxx_tx_clk_setx_s cn31xx;
	struct cvmx_asxx_tx_clk_setx_s cn38xx;
	struct cvmx_asxx_tx_clk_setx_s cn38xxp2;
	struct cvmx_asxx_tx_clk_setx_s cn50xx;
	struct cvmx_asxx_tx_clk_setx_s cn58xx;
	struct cvmx_asxx_tx_clk_setx_s cn58xxp1;
};
typedef union cvmx_asxx_tx_clk_setx cvmx_asxx_tx_clk_setx_t;
/**
 * cvmx_asx#_tx_comp_byp
 *
 * ASX_TX_COMP_BYP = RGMII Clock delay setting
 *
 */
union cvmx_asxx_tx_comp_byp {
	u64 u64;	/* raw 64-bit register value */
	/* no fields are common across all chips; use the chip variants below */
	struct cvmx_asxx_tx_comp_byp_s {
		u64 reserved_0_63 : 64;
	} s;
	struct cvmx_asxx_tx_comp_byp_cn30xx {
		u64 reserved_9_63 : 55;
		u64 bypass : 1;
		u64 pctl : 4;
		u64 nctl : 4;
	} cn30xx;
	struct cvmx_asxx_tx_comp_byp_cn30xx cn31xx;
	/* cn38xx lacks the BYPASS bit */
	struct cvmx_asxx_tx_comp_byp_cn38xx {
		u64 reserved_8_63 : 56;
		u64 pctl : 4;
		u64 nctl : 4;
	} cn38xx;
	struct cvmx_asxx_tx_comp_byp_cn38xx cn38xxp2;
	/* cn50xx widens PCTL/NCTL to 5 bits and keeps BYPASS */
	struct cvmx_asxx_tx_comp_byp_cn50xx {
		u64 reserved_17_63 : 47;
		u64 bypass : 1;
		u64 reserved_13_15 : 3;
		u64 pctl : 5;
		u64 reserved_5_7 : 3;
		u64 nctl : 5;
	} cn50xx;
	/* cn58xx: 5-bit PCTL/NCTL, no BYPASS bit */
	struct cvmx_asxx_tx_comp_byp_cn58xx {
		u64 reserved_13_63 : 51;
		u64 pctl : 5;
		u64 reserved_5_7 : 3;
		u64 nctl : 5;
	} cn58xx;
	struct cvmx_asxx_tx_comp_byp_cn58xx cn58xxp1;
};
typedef union cvmx_asxx_tx_comp_byp cvmx_asxx_tx_comp_byp_t;
/**
 * cvmx_asx#_tx_hi_water#
 *
 * ASX_TX_HI_WATER = RGMII TX FIFO Hi WaterMark
 *
 */
union cvmx_asxx_tx_hi_waterx {
	u64 u64;	/* raw 64-bit register value */
	struct cvmx_asxx_tx_hi_waterx_s {
		u64 reserved_4_63 : 60;
		u64 mark : 4;
	} s;
	/* cn30xx-family parts use a 3-bit watermark */
	struct cvmx_asxx_tx_hi_waterx_cn30xx {
		u64 reserved_3_63 : 61;
		u64 mark : 3;
	} cn30xx;
	struct cvmx_asxx_tx_hi_waterx_cn30xx cn31xx;
	struct cvmx_asxx_tx_hi_waterx_s cn38xx;
	struct cvmx_asxx_tx_hi_waterx_s cn38xxp2;
	struct cvmx_asxx_tx_hi_waterx_cn30xx cn50xx;
	struct cvmx_asxx_tx_hi_waterx_s cn58xx;
	struct cvmx_asxx_tx_hi_waterx_s cn58xxp1;
};
typedef union cvmx_asxx_tx_hi_waterx cvmx_asxx_tx_hi_waterx_t;
/**
 * cvmx_asx#_tx_prt_en
 *
 * ASX_TX_PRT_EN = RGMII Port Enable
 *
 */
union cvmx_asxx_tx_prt_en {
	u64 u64;	/* raw 64-bit register value */
	struct cvmx_asxx_tx_prt_en_s {
		u64 reserved_4_63 : 60;
		u64 prt_en : 4;	/* per-port enable bits */
	} s;
	/* cn30xx-family parts have only 3 ports */
	struct cvmx_asxx_tx_prt_en_cn30xx {
		u64 reserved_3_63 : 61;
		u64 prt_en : 3;
	} cn30xx;
	struct cvmx_asxx_tx_prt_en_cn30xx cn31xx;
	struct cvmx_asxx_tx_prt_en_s cn38xx;
	struct cvmx_asxx_tx_prt_en_s cn38xxp2;
	struct cvmx_asxx_tx_prt_en_cn30xx cn50xx;
	struct cvmx_asxx_tx_prt_en_s cn58xx;
	struct cvmx_asxx_tx_prt_en_s cn58xxp1;
};
typedef union cvmx_asxx_tx_prt_en cvmx_asxx_tx_prt_en_t;
#endif

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,441 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Support functions for managing command queues used for
* various hardware blocks.
*
* The common command queue infrastructure abstracts out the
* software necessary for adding to Octeon's chained queue
* structures. These structures are used for commands to the
* PKO, ZIP, DFA, RAID, HNA, and DMA engine blocks. Although each
* hardware unit takes commands and CSRs of different types,
* they all use basic linked command buffers to store the
* pending request. In general, users of the CVMX API don't
* call cvmx-cmd-queue functions directly. Instead the hardware
* unit specific wrapper should be used. The wrappers perform
* unit specific validation and CSR writes to submit the
* commands.
*
* Even though most software will never directly interact with
* cvmx-cmd-queue, knowledge of its internal workings can help
* in diagnosing performance problems and help with debugging.
*
* Command queue pointers are stored in a global named block
* called "cvmx_cmd_queues". Except for the PKO queues, each
* hardware queue is stored in its own cache line to reduce SMP
* contention on spin locks. The PKO queues are stored such that
* every 16th queue is next to each other in memory. This scheme
* allows for queues being in separate cache lines when there
* are low number of queues per port. With 16 queues per port,
* the first queue for each port is in the same cache area. The
* second queues for each port are in another area, etc. This
* allows software to implement very efficient lockless PKO with
* 16 queues per port using a minimum of cache lines per core.
* All queues for a given core will be isolated in the same
* cache area.
*
* In addition to the memory pointer layout, cvmx-cmd-queue
* provides an optimized fair ll/sc locking mechanism for the
* queues. The lock uses a "ticket / now serving" model to
* maintain fair order on contended locks. In addition, it uses
* predicted locking time to limit cache contention. When a core
 * knows it must wait in line for a lock, it spins on the
* internal cycle counter to completely eliminate any causes of
* bus traffic.
*/
#ifndef __CVMX_CMD_QUEUE_H__
#define __CVMX_CMD_QUEUE_H__
/**
* By default we disable the max depth support. Most programs
* don't use it and it slows down the command queue processing
* significantly.
*/
#ifndef CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH
#define CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH 0
#endif
/**
* Enumeration representing all hardware blocks that use command
* queues. Each hardware block has up to 65536 sub identifiers for
* multiple command queues. Not all chips support all hardware
* units.
*/
typedef enum {
	/* Upper bits select the hardware unit; low 16 bits the sub-queue */
	CVMX_CMD_QUEUE_PKO_BASE = 0x00000,
#define CVMX_CMD_QUEUE_PKO(queue) \
	((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_PKO_BASE + (0xffff & (queue))))
	CVMX_CMD_QUEUE_ZIP = 0x10000,
#define CVMX_CMD_QUEUE_ZIP_QUE(queue) \
	((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_ZIP + (0xffff & (queue))))
	CVMX_CMD_QUEUE_DFA = 0x20000,
	CVMX_CMD_QUEUE_RAID = 0x30000,
	CVMX_CMD_QUEUE_DMA_BASE = 0x40000,
#define CVMX_CMD_QUEUE_DMA(queue) \
	((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_DMA_BASE + (0xffff & (queue))))
	CVMX_CMD_QUEUE_BCH = 0x50000,
#define CVMX_CMD_QUEUE_BCH(queue) ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_BCH + (0xffff & (queue))))
	CVMX_CMD_QUEUE_HNA = 0x60000,
	CVMX_CMD_QUEUE_END = 0x70000,	/* sentinel: one past the last unit */
} cvmx_cmd_queue_id_t;
#define CVMX_CMD_QUEUE_ZIP3_QUE(node, queue) \
((cvmx_cmd_queue_id_t)((node) << 24 | CVMX_CMD_QUEUE_ZIP | (0xffff & (queue))))
/**
* Command write operations can fail if the command queue needs
* a new buffer and the associated FPA pool is empty. It can also
* fail if the number of queued command words reaches the maximum
* set at initialization.
*/
typedef enum {
	CVMX_CMD_QUEUE_SUCCESS = 0,
	CVMX_CMD_QUEUE_NO_MEMORY = -1,	/* associated FPA pool was empty */
	CVMX_CMD_QUEUE_FULL = -2,	/* reached max_depth set at initialization */
	CVMX_CMD_QUEUE_INVALID_PARAM = -3,
	CVMX_CMD_QUEUE_ALREADY_SETUP = -4,
} cvmx_cmd_queue_result_t;
typedef struct {
	/* First 64-bit word: */
	u64 fpa_pool : 16;	/* FPA pool that new command buffers come from */
	u64 base_paddr : 48;	/* physical address of the current command buffer */
	s32 index;	/* next free 64-bit word offset within the buffer */
	u16 max_depth;	/* max outstanding commands -- see cvmx_cmd_queue_initialize() */
	u16 pool_size_m1;	/* buffer capacity in words, minus one (presumably) -- TODO confirm */
} __cvmx_cmd_queue_state_t;
/**
* command-queue locking uses a fair ticket spinlock algo,
* with 64-bit tickets for endianness-neutrality and
* counter overflow protection.
* Lock is free when both counters are of equal value.
*/
typedef struct {
	u64 ticket;	/* next ticket to hand out to a waiter */
	u64 now_serving;	/* ticket currently allowed to hold the lock */
} __cvmx_cmd_queue_lock_t;
/**
* @INTERNAL
* This structure contains the global state of all command queues.
* It is stored in a bootmem named block and shared by all
* applications running on Octeon. Tickets are stored in a different
 * cache line than queue information to reduce the contention on the
* ll/sc used to get a ticket. If this is not the case, the update
* of queue state causes the ll/sc to fail quite often.
*/
typedef struct {
	/* (CVMX_CMD_QUEUE_END >> 16) units x 256 queue indexes per unit;
	 * locks and state live in separate arrays (see note above)
	 */
	__cvmx_cmd_queue_lock_t lock[(CVMX_CMD_QUEUE_END >> 16) * 256];
	__cvmx_cmd_queue_state_t state[(CVMX_CMD_QUEUE_END >> 16) * 256];
} __cvmx_cmd_queue_all_state_t;
extern __cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptrs[CVMX_MAX_NODES];
/**
* @INTERNAL
* Internal function to handle the corner cases
* of adding command words to a queue when the current
* block is getting full.
*/
cvmx_cmd_queue_result_t __cvmx_cmd_queue_write_raw(cvmx_cmd_queue_id_t queue_id,
__cvmx_cmd_queue_state_t *qptr, int cmd_count,
const u64 *cmds);
/**
* Initialize a command queue for use. The initial FPA buffer is
* allocated and the hardware unit is configured to point to the
* new command queue.
*
* @param queue_id Hardware command queue to initialize.
* @param max_depth Maximum outstanding commands that can be queued.
* @param fpa_pool FPA pool the command queues should come from.
* @param pool_size Size of each buffer in the FPA pool (bytes)
*
* @return CVMX_CMD_QUEUE_SUCCESS or a failure code
*/
cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id, int max_depth,
int fpa_pool, int pool_size);
/**
 * Shut down a queue and free its command buffers to the FPA. The
* hardware connected to the queue must be stopped before this
* function is called.
*
* @param queue_id Queue to shutdown
*
* @return CVMX_CMD_QUEUE_SUCCESS or a failure code
*/
cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id);
/**
* Return the number of command words pending in the queue. This
* function may be relatively slow for some hardware units.
*
* @param queue_id Hardware command queue to query
*
* @return Number of outstanding commands
*/
int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id);
/**
* Return the command buffer to be written to. The purpose of this
* function is to allow CVMX routine access to the low level buffer
* for initial hardware setup. User applications should not call this
* function directly.
*
* @param queue_id Command queue to query
*
* @return Command buffer or NULL on failure
*/
void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id);
/**
* @INTERNAL
* Retrieve or allocate command queue state named block
*/
cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(unsigned int node);
/**
* @INTERNAL
* Get the index into the state arrays for the supplied queue id.
*
* @param queue_id Queue ID to get an index for
*
* @return Index into the state arrays
*/
static inline unsigned int __cvmx_cmd_queue_get_index(cvmx_cmd_queue_id_t queue_id)
{
	/* Warning: This code currently only works with devices that have 256
	 * queues or less. Devices with more than 16 queues are laid out in
	 * memory to allow cores quick access to every 16th queue. This reduces
	 * cache thrashing when you are running 16 queues per port to support
	 * lockless operation
	 */
	unsigned int idx;

	idx = ((queue_id >> 16) & 0xff) << 8;	/* hardware unit */
	idx |= (queue_id & 0xf) << 4;		/* low nibble of queue number */
	idx |= (queue_id >> 4) & 0xf;		/* next nibble of queue number */
	return idx;
}
static inline int __cvmx_cmd_queue_get_node(cvmx_cmd_queue_id_t queue_id)
{
	/* The node number lives in bits 31:24 of the queue id */
	return queue_id >> 24;
}
/**
* @INTERNAL
* Lock the supplied queue so nobody else is updating it at the same
* time as us.
*
* @param queue_id Queue ID to lock
*
*/
static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id)
{
	/*
	 * Intentionally empty here: callers still pair it with
	 * __cvmx_cmd_queue_unlock(), which issues the write barrier.
	 * NOTE(review): presumably safe because this build runs single-core;
	 * confirm before reusing on SMP.
	 */
}
/**
* @INTERNAL
* Unlock the queue, flushing all writes.
*
* @param queue_id Queue ID to lock
*
*/
static inline void __cvmx_cmd_queue_unlock(cvmx_cmd_queue_id_t queue_id)
{
	/* Presumably a store-ordering barrier; pushes queued command
	 * writes out before the queue is considered released.
	 */
	CVMX_SYNCWS; /* nudge out the unlock. */
}
/**
* @INTERNAL
* Initialize a command-queue lock to "unlocked" state.
*/
static inline void __cvmx_cmd_queue_lock_init(cvmx_cmd_queue_id_t queue_id)
{
	unsigned int index = __cvmx_cmd_queue_get_index(queue_id);
	unsigned int node = __cvmx_cmd_queue_get_node(queue_id);

	/* ticket == now_serving means the lock is free */
	__cvmx_cmd_queue_state_ptrs[node]->lock[index] = (__cvmx_cmd_queue_lock_t){ 0, 0 };
	CVMX_SYNCWS;	/* push the cleared lock out (see __cvmx_cmd_queue_unlock) */
}
/**
* @INTERNAL
* Get the queue state structure for the given queue id
*
* @param queue_id Queue id to get
*
* @return Queue structure or NULL on failure
*/
static inline __cvmx_cmd_queue_state_t *__cvmx_cmd_queue_get_state(cvmx_cmd_queue_id_t queue_id)
{
	unsigned int node = __cvmx_cmd_queue_get_node(queue_id);
	unsigned int index = __cvmx_cmd_queue_get_index(queue_id);

	/* Lazily allocate the per-node state block on first use */
	if (cvmx_unlikely(!__cvmx_cmd_queue_state_ptrs[node]))
		__cvmx_cmd_queue_init_state_ptr(node);

	return &__cvmx_cmd_queue_state_ptrs[node]->state[index];
}
/**
* Write an arbitrary number of command words to a command queue.
* This is a generic function; the fixed number of command word
* functions yield higher performance.
*
* @param queue_id Hardware command queue to write to
* @param use_locking
* Use internal locking to ensure exclusive access for queue
* updates. If you don't use this locking you must ensure
* exclusivity some other way. Locking is strongly recommended.
* @param cmd_count Number of command words to write
* @param cmds Array of commands to write
*
* @return CVMX_CMD_QUEUE_SUCCESS or a failure code
*/
static inline cvmx_cmd_queue_result_t
cvmx_cmd_queue_write(cvmx_cmd_queue_id_t queue_id, bool use_locking, int cmd_count, const u64 *cmds)
{
	cvmx_cmd_queue_result_t ret = CVMX_CMD_QUEUE_SUCCESS;
	u64 *cmd_ptr;
	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
	/* Make sure nobody else is updating the same queue */
	if (cvmx_likely(use_locking))
		__cvmx_cmd_queue_lock(queue_id);
	/* Most of the time there is lots of free words in current block */
	if (cvmx_unlikely((qptr->index + cmd_count) >= qptr->pool_size_m1)) {
		/* The rare case when nearing end of block:
		 * __cvmx_cmd_queue_write_raw() handles chaining to a fresh buffer
		 */
		ret = __cvmx_cmd_queue_write_raw(queue_id, qptr, cmd_count, cmds);
	} else {
		cmd_ptr = (u64 *)cvmx_phys_to_ptr((u64)qptr->base_paddr);
		/* Loop easy for compiler to unroll for the likely case */
		while (cmd_count > 0) {
			cmd_ptr[qptr->index++] = *cmds++;
			cmd_count--;
		}
	}
	/* All updates are complete. Release the lock and return */
	if (cvmx_likely(use_locking))
		__cvmx_cmd_queue_unlock(queue_id);
	else
		CVMX_SYNCWS;	/* no lock path: presumably still needs the write barrier */
	return ret;
}
/**
* Simple function to write two command words to a command queue.
*
* @param queue_id Hardware command queue to write to
* @param use_locking
* Use internal locking to ensure exclusive access for queue
* updates. If you don't use this locking you must ensure
* exclusivity some other way. Locking is strongly recommended.
* @param cmd1 Command
* @param cmd2 Command
*
* @return CVMX_CMD_QUEUE_SUCCESS or a failure code
*/
static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write2(cvmx_cmd_queue_id_t queue_id,
							    bool use_locking, u64 cmd1, u64 cmd2)
{
	cvmx_cmd_queue_result_t ret = CVMX_CMD_QUEUE_SUCCESS;
	u64 *cmd_ptr;
	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
	/* Make sure nobody else is updating the same queue */
	if (cvmx_likely(use_locking))
		__cvmx_cmd_queue_lock(queue_id);
	if (cvmx_unlikely((qptr->index + 2) >= qptr->pool_size_m1)) {
		/* The rare case when nearing end of block:
		 * fall back to the raw path which handles buffer chaining
		 */
		u64 cmds[2];

		cmds[0] = cmd1;
		cmds[1] = cmd2;
		ret = __cvmx_cmd_queue_write_raw(queue_id, qptr, 2, cmds);
	} else {
		/* Likely case to work fast */
		cmd_ptr = (u64 *)cvmx_phys_to_ptr((u64)qptr->base_paddr);
		cmd_ptr += qptr->index;
		qptr->index += 2;
		cmd_ptr[0] = cmd1;
		cmd_ptr[1] = cmd2;
	}
	/* All updates are complete. Release the lock and return */
	if (cvmx_likely(use_locking))
		__cvmx_cmd_queue_unlock(queue_id);
	else
		CVMX_SYNCWS;	/* no lock path: presumably still needs the write barrier */
	return ret;
}
/**
* Simple function to write three command words to a command queue.
*
* @param queue_id Hardware command queue to write to
* @param use_locking
* Use internal locking to ensure exclusive access for queue
* updates. If you don't use this locking you must ensure
* exclusivity some other way. Locking is strongly recommended.
* @param cmd1 Command
* @param cmd2 Command
* @param cmd3 Command
*
* @return CVMX_CMD_QUEUE_SUCCESS or a failure code
*/
static inline cvmx_cmd_queue_result_t
cvmx_cmd_queue_write3(cvmx_cmd_queue_id_t queue_id, bool use_locking, u64 cmd1, u64 cmd2, u64 cmd3)
{
	cvmx_cmd_queue_result_t ret = CVMX_CMD_QUEUE_SUCCESS;
	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
	u64 *cmd_ptr;
	/* Make sure nobody else is updating the same queue */
	if (cvmx_likely(use_locking))
		__cvmx_cmd_queue_lock(queue_id);
	if (cvmx_unlikely((qptr->index + 3) >= qptr->pool_size_m1)) {
		/* The rare case when nearing end of block (comment previously
		 * said "most of the time" -- that text belongs on the likely
		 * path, matching cvmx_cmd_queue_write()/write2())
		 */
		u64 cmds[3];

		cmds[0] = cmd1;
		cmds[1] = cmd2;
		cmds[2] = cmd3;
		ret = __cvmx_cmd_queue_write_raw(queue_id, qptr, 3, cmds);
	} else {
		/* Likely case to work fast */
		cmd_ptr = (u64 *)cvmx_phys_to_ptr((u64)qptr->base_paddr);
		cmd_ptr += qptr->index;
		qptr->index += 3;
		cmd_ptr[0] = cmd1;
		cmd_ptr[1] = cmd2;
		cmd_ptr[2] = cmd3;
	}
	/* All updates are complete. Release the lock and return */
	if (cvmx_likely(use_locking))
		__cvmx_cmd_queue_unlock(queue_id);
	else
		CVMX_SYNCWS;	/* no lock path: presumably still needs the write barrier */
	return ret;
}
#endif /* __CVMX_CMD_QUEUE_H__ */

View File

@ -741,8 +741,9 @@ void cvmx_coremask_print(const struct cvmx_coremask *pcm);
static inline void cvmx_coremask_dprint(const struct cvmx_coremask *pcm)
{
if (IS_ENABLED(DEBUG))
cvmx_coremask_print(pcm);
#if defined(DEBUG)
cvmx_coremask_print(pcm);
#endif
}
struct cvmx_coremask *octeon_get_available_coremask(struct cvmx_coremask *pcm);

View File

@ -0,0 +1,87 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Definitions for enumerations used with Octeon CSRs.
*/
#ifndef __CVMX_CSR_ENUMS_H__
#define __CVMX_CSR_ENUMS_H__
/* Encodings for the IPD OPC mode (see the Octeon HRM for exact semantics) */
typedef enum {
	CVMX_IPD_OPC_MODE_STT = 0LL,
	CVMX_IPD_OPC_MODE_STF = 1LL,
	CVMX_IPD_OPC_MODE_STF1_STT = 2LL,
	CVMX_IPD_OPC_MODE_STF2_STT = 3LL
} cvmx_ipd_mode_t;
/**
 * Enumeration representing the amount of packet processing
 * and validation performed by the input hardware.
 */
typedef enum {
	CVMX_PIP_PORT_CFG_MODE_NONE = 0ull,
	CVMX_PIP_PORT_CFG_MODE_SKIPL2 = 1ull,	/* skip L2 parsing */
	CVMX_PIP_PORT_CFG_MODE_SKIPIP = 2ull	/* skip IP parsing */
} cvmx_pip_port_parse_mode_t;
/**
 * This enumeration controls how a QoS watcher matches a packet.
 *
 * @deprecated This enumeration was used with cvmx_pip_config_watcher which has
 * been deprecated.
 */
typedef enum {
	CVMX_PIP_QOS_WATCH_DISABLE = 0ull,
	CVMX_PIP_QOS_WATCH_PROTNH = 1ull,
	CVMX_PIP_QOS_WATCH_TCP = 2ull,
	CVMX_PIP_QOS_WATCH_UDP = 3ull
} cvmx_pip_qos_watch_types;
/**
 * This enumeration is used in PIP tag config to control how
 * POW tags are generated by the hardware.
 */
typedef enum {
	CVMX_PIP_TAG_MODE_TUPLE = 0ull,
	CVMX_PIP_TAG_MODE_MASK = 1ull,
	CVMX_PIP_TAG_MODE_IP_OR_MASK = 2ull,
	CVMX_PIP_TAG_MODE_TUPLE_XOR_MASK = 3ull
} cvmx_pip_tag_mode_t;
/**
 * Tag type definitions
 */
typedef enum {
	CVMX_POW_TAG_TYPE_ORDERED = 0L,
	CVMX_POW_TAG_TYPE_ATOMIC = 1L,
	CVMX_POW_TAG_TYPE_NULL = 2L,
	CVMX_POW_TAG_TYPE_NULL_NULL = 3L
} cvmx_pow_tag_type_t;
/**
 * LCR bits 0 and 1 control the number of bits per character. See the following table for encodings:
 *
 * - 00 = 5 bits (bits 0-4 sent)
 * - 01 = 6 bits (bits 0-5 sent)
 * - 10 = 7 bits (bits 0-6 sent)
 * - 11 = 8 bits (all bits sent)
 */
typedef enum {
	CVMX_UART_BITS5 = 0,
	CVMX_UART_BITS6 = 1,
	CVMX_UART_BITS7 = 2,
	CVMX_UART_BITS8 = 3
} cvmx_uart_bits_t;
/* UART interrupt identification values */
typedef enum {
	CVMX_UART_IID_NONE = 1,
	CVMX_UART_IID_RX_ERROR = 6,
	CVMX_UART_IID_RX_DATA = 4,
	CVMX_UART_IID_RX_TIMEOUT = 12,
	CVMX_UART_IID_TX_EMPTY = 2,
	CVMX_UART_IID_MODEM = 0,
	CVMX_UART_IID_BUSY = 7
} cvmx_uart_iid_t;
#endif /* __CVMX_CSR_ENUMS_H__ */

View File

@ -0,0 +1,78 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Configuration and status register (CSR) address and type definitions for
 * Octeon.
*/
#ifndef __CVMX_CSR_H__
#define __CVMX_CSR_H__
#include "cvmx-csr-enums.h"
#include "cvmx-pip-defs.h"
typedef cvmx_pip_prt_cfgx_t cvmx_pip_port_cfg_t;
/* The CSRs for bootbus region zero used to be independent of the
other 1-7. As of SDK 1.7.0 these were combined. These macros
   are for backwards compatibility */
#define CVMX_MIO_BOOT_REG_CFG0 CVMX_MIO_BOOT_REG_CFGX(0)
#define CVMX_MIO_BOOT_REG_TIM0 CVMX_MIO_BOOT_REG_TIMX(0)
/* The CN3XXX and CN58XX chips used to not have a LMC number
passed to the address macros. These are here to supply backwards
compatibility with old code. Code should really use the new addresses
with bus arguments for support on other chips */
#define CVMX_LMC_BIST_CTL CVMX_LMCX_BIST_CTL(0)
#define CVMX_LMC_BIST_RESULT CVMX_LMCX_BIST_RESULT(0)
#define CVMX_LMC_COMP_CTL CVMX_LMCX_COMP_CTL(0)
#define CVMX_LMC_CTL CVMX_LMCX_CTL(0)
#define CVMX_LMC_CTL1 CVMX_LMCX_CTL1(0)
#define CVMX_LMC_DCLK_CNT_HI CVMX_LMCX_DCLK_CNT_HI(0)
#define CVMX_LMC_DCLK_CNT_LO CVMX_LMCX_DCLK_CNT_LO(0)
#define CVMX_LMC_DCLK_CTL CVMX_LMCX_DCLK_CTL(0)
#define CVMX_LMC_DDR2_CTL CVMX_LMCX_DDR2_CTL(0)
#define CVMX_LMC_DELAY_CFG CVMX_LMCX_DELAY_CFG(0)
#define CVMX_LMC_DLL_CTL CVMX_LMCX_DLL_CTL(0)
#define CVMX_LMC_DUAL_MEMCFG CVMX_LMCX_DUAL_MEMCFG(0)
#define CVMX_LMC_ECC_SYND CVMX_LMCX_ECC_SYND(0)
#define CVMX_LMC_FADR CVMX_LMCX_FADR(0)
#define CVMX_LMC_IFB_CNT_HI CVMX_LMCX_IFB_CNT_HI(0)
#define CVMX_LMC_IFB_CNT_LO CVMX_LMCX_IFB_CNT_LO(0)
#define CVMX_LMC_MEM_CFG0 CVMX_LMCX_MEM_CFG0(0)
#define CVMX_LMC_MEM_CFG1 CVMX_LMCX_MEM_CFG1(0)
#define CVMX_LMC_OPS_CNT_HI CVMX_LMCX_OPS_CNT_HI(0)
#define CVMX_LMC_OPS_CNT_LO CVMX_LMCX_OPS_CNT_LO(0)
#define CVMX_LMC_PLL_BWCTL CVMX_LMCX_PLL_BWCTL(0)
#define CVMX_LMC_PLL_CTL CVMX_LMCX_PLL_CTL(0)
#define CVMX_LMC_PLL_STATUS CVMX_LMCX_PLL_STATUS(0)
#define CVMX_LMC_READ_LEVEL_CTL CVMX_LMCX_READ_LEVEL_CTL(0)
#define CVMX_LMC_READ_LEVEL_DBG CVMX_LMCX_READ_LEVEL_DBG(0)
#define CVMX_LMC_READ_LEVEL_RANKX CVMX_LMCX_READ_LEVEL_RANKX(0)
#define CVMX_LMC_RODT_COMP_CTL CVMX_LMCX_RODT_COMP_CTL(0)
#define CVMX_LMC_RODT_CTL CVMX_LMCX_RODT_CTL(0)
#define CVMX_LMC_WODT_CTL CVMX_LMCX_WODT_CTL0(0)
#define CVMX_LMC_WODT_CTL0 CVMX_LMCX_WODT_CTL0(0)
#define CVMX_LMC_WODT_CTL1 CVMX_LMCX_WODT_CTL1(0)
/* The CN3XXX and CN58XX chips used to not have a TWSI bus number
passed to the address macros. These are here to supply backwards
compatibility with old code. Code should really use the new addresses
with bus arguments for support on other chips */
#define CVMX_MIO_TWS_INT CVMX_MIO_TWSX_INT(0)
#define CVMX_MIO_TWS_SW_TWSI CVMX_MIO_TWSX_SW_TWSI(0)
#define CVMX_MIO_TWS_SW_TWSI_EXT CVMX_MIO_TWSX_SW_TWSI_EXT(0)
#define CVMX_MIO_TWS_TWSI_SW CVMX_MIO_TWSX_TWSI_SW(0)
/* The CN3XXX and CN58XX chips used to not have a SMI/MDIO bus number
passed to the address macros. These are here to supply backwards
compatibility with old code. Code should really use the new addresses
with bus arguments for support on other chips */
#define CVMX_SMI_CLK CVMX_SMIX_CLK(0)
#define CVMX_SMI_CMD CVMX_SMIX_CMD(0)
#define CVMX_SMI_EN CVMX_SMIX_EN(0)
#define CVMX_SMI_RD_DAT CVMX_SMIX_RD_DAT(0)
#define CVMX_SMI_WR_DAT CVMX_SMIX_WR_DAT(0)
#endif /* __CVMX_CSR_H__ */

View File

@ -0,0 +1,33 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Configuration and status register (CSR) type definitions for
* Octeon dbg.
*/
#ifndef __CVMX_DBG_DEFS_H__
#define __CVMX_DBG_DEFS_H__
#define CVMX_DBG_DATA (0x00011F00000001E8ull)
/**
* cvmx_dbg_data
*
* DBG_DATA = Debug Data Register
*
* Value returned on the debug-data lines from the RSLs
*/
union cvmx_dbg_data {
	u64 u64;	/* raw 64-bit register value */
	struct cvmx_dbg_data_s {
		u64 reserved_23_63 : 41;
		u64 c_mul : 5;
		u64 dsel_ext : 1;
		u64 data : 17;	/* value seen on the debug-data lines */
	} s;
};
typedef union cvmx_dbg_data cvmx_dbg_data_t;
#endif

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,456 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Interface to the Octeon extended error status.
*/
#ifndef __CVMX_ERROR_H__
#define __CVMX_ERROR_H__
/**
* There are generally many error status bits associated with a
* single logical group. The enumeration below is used to
 * communicate high level groups to the error infrastructure so
* error status bits can be enable or disabled in large groups.
*/
/* Logical groups used to enable/disable many error status bits at once */
typedef enum {
	CVMX_ERROR_GROUP_INTERNAL,
	CVMX_ERROR_GROUP_L2C,
	CVMX_ERROR_GROUP_ETHERNET,
	CVMX_ERROR_GROUP_MGMT_PORT,
	CVMX_ERROR_GROUP_PCI,
	CVMX_ERROR_GROUP_SRIO,
	CVMX_ERROR_GROUP_USB,
	CVMX_ERROR_GROUP_LMC,
	CVMX_ERROR_GROUP_ILK,
	CVMX_ERROR_GROUP_DFM,
	CVMX_ERROR_GROUP_ILA,
} cvmx_error_group_t;
/**
* Flags representing special handling for some error registers.
* These flags are passed to cvmx_error_initialize() to control
* the handling of bits where the same flags were passed to the
* added cvmx_error_info_t.
*/
/* Bitmask flags; SBE/DBE presumably single-/double-bit (ECC) errors */
typedef enum {
	CVMX_ERROR_TYPE_NONE = 0,
	CVMX_ERROR_TYPE_SBE = 1 << 0,
	CVMX_ERROR_TYPE_DBE = 1 << 1,
} cvmx_error_type_t;
/**
* When registering for interest in an error status register, the
* type of the register needs to be known by cvmx-error. Most
* registers are either IO64 or IO32, but some blocks contain
* registers that can't be directly accessed. A good example of
* would be PCIe extended error state stored in config space.
*/
typedef enum {
	__CVMX_ERROR_REGISTER_NONE,
	CVMX_ERROR_REGISTER_IO64,	/* 64-bit memory-mapped CSR */
	CVMX_ERROR_REGISTER_IO32,	/* 32-bit memory-mapped CSR */
	CVMX_ERROR_REGISTER_PCICONFIG,	/* accessed through PCI(e) config space */
	CVMX_ERROR_REGISTER_SRIOMAINT,
} cvmx_error_register_t;
struct cvmx_error_info;
/**
 * Error handling functions must have the following prototype.
 */
typedef int (*cvmx_error_func_t)(const struct cvmx_error_info *info);
/**
 * This structure is passed to all error handling functions.
 */
typedef struct cvmx_error_info {
	cvmx_error_register_t reg_type;	/* how status_addr must be accessed */
	u64 status_addr;	/* address of the error status register */
	u64 status_mask;	/* bit(s) within the status register for this error */
	u64 enable_addr;	/* address of the matching enable register */
	u64 enable_mask;	/* bit(s) that enable reporting of this error */
	cvmx_error_type_t flags;	/* special-handling flags (see cvmx_error_type_t) */
	cvmx_error_group_t group;	/* logical group for bulk enable/disable */
	int group_index;	/* instance within the group -- TODO confirm meaning */
	cvmx_error_func_t func;	/* handler called when the error fires */
	u64 user_info;	/* opaque value, presumably passed through to func */
	struct {
		/* chained parent status register, if any */
		cvmx_error_register_t reg_type;
		u64 status_addr;
		u64 status_mask;
	} parent;
} cvmx_error_info_t;
/**
* Initialize the error status system. This should be called once
* before any other functions are called. This function adds default
* handlers for most all error events but does not enable them. Later
* calls to cvmx_error_enable() are needed.
*
* Note: takes no arguments; older documentation mentioning an optional
* "flags" parameter is out of date.
*
* @return Zero on success, negative on failure.
*/
int cvmx_error_initialize(void);
/**
* Poll the error status registers and call the appropriate error
* handlers. This should be called in the RSL interrupt handler
* for your application or operating system.
*
* @return Number of error handlers called. Zero means this call
* found no errors and was spurious.
*/
int cvmx_error_poll(void);
/**
* Register to be called when an error status bit is set. Most users
* will not need to call this function as cvmx_error_initialize()
* registers default handlers for most error conditions. This function
* is normally used to add more handlers without changing the existing
* handlers.
*
* @param new_info Information about the handler for a error register. The
* structure passed is copied and can be destroyed after the
* call. All members of the structure must be populated, even the
* parent information.
*
* @return Zero on success, negative on failure.
*/
int cvmx_error_add(const cvmx_error_info_t *new_info);
/**
* Remove all handlers for a status register and mask. Normally
* this function should not be called. Instead a new handler should be
installed to replace the existing handler. In the event that all
reporting of an error bit should be removed, then use this
* function.
*
* @param reg_type Type of the status register to remove
* @param status_addr
* Status register to remove.
* @param status_mask
* All handlers for this status register with this mask will be
* removed.
* @param old_info If not NULL, this is filled with information about the handler
* that was removed.
*
* @return Zero on success, negative on failure (not found).
*/
int cvmx_error_remove(cvmx_error_register_t reg_type, u64 status_addr, u64 status_mask,
cvmx_error_info_t *old_info);
/**
* Change the function and user_info for an existing error status
* register. This function should be used to replace the default
* handler with an application specific version as needed.
*
* @param reg_type Type of the status register to change
* @param status_addr
* Status register to change.
* @param status_mask
* All handlers for this status register with this mask will be
* changed.
* @param new_func New function to use to handle the error status
* @param new_user_info
* New user info parameter for the function
* @param old_func If not NULL, the old function is returned. Useful for restoring
* the old handler.
* @param old_user_info
* If not NULL, the old user info parameter.
*
* @return Zero on success, negative on failure
*/
int cvmx_error_change_handler(cvmx_error_register_t reg_type, u64 status_addr, u64 status_mask,
cvmx_error_func_t new_func, u64 new_user_info,
cvmx_error_func_t *old_func, u64 *old_user_info);
/**
* Enable all error registers for a logical group. This should be
* called whenever a logical group is brought online.
*
* @param group Logical group to enable
* @param group_index
* Index for the group as defined in the cvmx_error_group_t
* comments.
*
* @return Zero on success, negative on failure.
*/
/*
 * Stub: U-Boot never enables error interrupts, so instead of
 * conditionalizing every call site in the executive, this is a no-op
 * that always reports success.
 */
static inline int cvmx_error_enable_group(cvmx_error_group_t group, int group_index)
{
	(void)group;		/* unused in U-Boot */
	(void)group_index;	/* unused in U-Boot */
	return 0;
}
/**
* Disable all error registers for a logical group. This should be
* called whenever a logical group is brought offline. Many blocks
* will report spurious errors when offline unless this function
* is called.
*
* @param group Logical group to disable
* @param group_index
* Index for the group as defined in the cvmx_error_group_t
* comments.
*
* @return Zero on success, negative on failure.
*/
/*
 * Stub: U-Boot never enables error interrupts in the first place, so
 * disabling is likewise a no-op that always reports success.
 */
static inline int cvmx_error_disable_group(cvmx_error_group_t group, int group_index)
{
	(void)group;		/* unused in U-Boot */
	(void)group_index;	/* unused in U-Boot */
	return 0;
}
/**
* Enable all handlers for a specific status register mask.
*
* @param reg_type Type of the status register
* @param status_addr
* Status register address
* @param status_mask
* All handlers for this status register with this mask will be
* enabled.
*
* @return Zero on success, negative on failure.
*/
int cvmx_error_enable(cvmx_error_register_t reg_type, u64 status_addr, u64 status_mask);
/**
* Disable all handlers for a specific status register and mask.
*
* @param reg_type Type of the status register
* @param status_addr
* Status register address
* @param status_mask
* All handlers for this status register with this mask will be
* disabled.
*
* @return Zero on success, negative on failure.
*/
int cvmx_error_disable(cvmx_error_register_t reg_type, u64 status_addr, u64 status_mask);
/**
* @INTERNAL
* Function for processing non leaf error status registers. This function
* calls all handlers for this passed register and all children linked
* to it.
*
* @param info Error register to check
*
* @return Number of error status bits found or zero if no bits were set.
*/
int __cvmx_error_decode(const cvmx_error_info_t *info);
/**
* @INTERNAL
* This error bit handler simply prints a message and clears the status bit
*
* @param info Error register to check
*
* @return
*/
int __cvmx_error_display(const cvmx_error_info_t *info);
/**
* Find the handler for a specific status register and mask
*
* @param status_addr
* Status register address
*
* @return Return the handler on success or null on failure.
*/
cvmx_error_info_t *cvmx_error_get_index(u64 status_addr);
void __cvmx_install_gmx_error_handler_for_xaui(void);
/**
* 78xx related
*/
/**
* Compare two INTSN values.
*
* @param key INTSN value to search for
* @param data current entry from the searched array
*
* @return Negative, 0 or positive when respectively key is less than,
* equal or greater than data.
*/
int cvmx_error_intsn_cmp(const void *key, const void *data);
/**
* @INTERNAL
*
* @param intsn Interrupt source number to display
*
* @param node Node number
*
* @return Zero on success, -1 on error
*/
int cvmx_error_intsn_display_v3(int node, u32 intsn);
/**
* Initialize the error status system for cn78xx. This should be called once
* before any other functions are called. This function enables the interrupts
* described in the array.
*
* @param node Node number
*
* @return Zero on success, negative on failure.
*/
int cvmx_error_initialize_cn78xx(int node);
/**
* Enable interrupt for a specific INTSN.
*
* @param node Node number
* @param intsn Interrupt source number
*
* @return Zero on success, negative on failure.
*/
int cvmx_error_intsn_enable_v3(int node, u32 intsn);
/**
* Disable interrupt for a specific INTSN.
*
* @param node Node number
* @param intsn Interrupt source number
*
* @return Zero on success, negative on failure.
*/
int cvmx_error_intsn_disable_v3(int node, u32 intsn);
/**
* Clear interrupt for a specific INTSN.
*
* @param intsn Interrupt source number
*
* @return Zero on success, negative on failure.
*/
int cvmx_error_intsn_clear_v3(int node, u32 intsn);
/**
* Enable interrupts for a specific CSR(all the bits/intsn in the csr).
*
* @param node Node number
* @param csr_address CSR address
*
* @return Zero on success, negative on failure.
*/
int cvmx_error_csr_enable_v3(int node, u64 csr_address);
/**
* Disable interrupts for a specific CSR (all the bits/intsn in the csr).
*
* @param node Node number
* @param csr_address CSR address
*
* @return Zero
*/
int cvmx_error_csr_disable_v3(int node, u64 csr_address);
/**
* Enable all error registers for a logical group. This should be
* called whenever a logical group is brought online.
*
* @param group Logical group to enable
* @param xipd_port The IPD port value
*
* @return Zero.
*/
int cvmx_error_enable_group_v3(cvmx_error_group_t group, int xipd_port);
/**
* Disable all error registers for a logical group.
*
* @param group Logical group to enable
* @param xipd_port The IPD port value
*
* @return Zero.
*/
int cvmx_error_disable_group_v3(cvmx_error_group_t group, int xipd_port);
/**
* Enable all error registers for a specific category in a logical group.
* This should be called whenever a logical group is brought online.
*
* @param group Logical group to enable
* @param type Category in a logical group to enable
* @param xipd_port The IPD port value
*
* @return Zero.
*/
int cvmx_error_enable_group_type_v3(cvmx_error_group_t group, cvmx_error_type_t type,
int xipd_port);
/**
* Disable all error registers for a specific category in a logical group.
* This should be called whenever a logical group is brought online.
*
* @param group Logical group to disable
* @param type Category in a logical group to disable
* @param xipd_port The IPD port value
*
* @return Zero.
*/
int cvmx_error_disable_group_type_v3(cvmx_error_group_t group, cvmx_error_type_t type,
int xipd_port);
/**
* Clear all error registers for a logical group.
*
* @param group Logical group to disable
* @param xipd_port The IPD port value
*
* @return Zero.
*/
int cvmx_error_clear_group_v3(cvmx_error_group_t group, int xipd_port);
/**
* Enable all error registers for a particular category.
*
* @param node CCPI node
* @param type category to enable
*
*@return Zero.
*/
int cvmx_error_enable_type_v3(int node, cvmx_error_type_t type);
/**
* Disable all error registers for a particular category.
*
* @param node CCPI node
* @param type category to disable
*
*@return Zero.
*/
int cvmx_error_disable_type_v3(int node, cvmx_error_type_t type);
void cvmx_octeon_hang(void) __attribute__((__noreturn__));
/**
* @INTERNAL
*
* Process L2C single and multi-bit ECC errors
*
*/
int __cvmx_cn7xxx_l2c_l2d_ecc_error_display(int node, int intsn);
/**
* Handle L2 cache TAG ECC errors and noway errors
*
* @param node CCPI node
* @param intsn intsn from error array.
* @param remote true for remote node (cn78xx only)
*
* @return 1 if handled, 0 if not handled
*/
int __cvmx_cn7xxx_l2c_tag_error_display(int node, int intsn, bool remote);
#endif

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,217 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Interface to the hardware Free Pool Allocator.
*/
#ifndef __CVMX_FPA_H__
#define __CVMX_FPA_H__
#include "cvmx-scratch.h"
#include "cvmx-fpa-defs.h"
#include "cvmx-fpa1.h"
#include "cvmx-fpa3.h"
#define CVMX_FPA_MIN_BLOCK_SIZE 128
#define CVMX_FPA_ALIGNMENT 128
#define CVMX_FPA_POOL_NAME_LEN 16
/* On CN78XX in backward-compatible mode, pool is mapped to AURA */
#define CVMX_FPA_NUM_POOLS \
(octeon_has_feature(OCTEON_FEATURE_FPA3) ? cvmx_fpa3_num_auras() : CVMX_FPA1_NUM_POOLS)
/**
 * Structure to store FPA pool configuration parameters.
 */
struct cvmx_fpa_pool_config {
	s64 pool_num;		/* pool number; signed -- negative presumably means "not assigned", TODO confirm */
	u64 buffer_size;	/* size of each buffer in the pool */
	u64 buffer_count;	/* number of buffers in the pool */
};

typedef struct cvmx_fpa_pool_config cvmx_fpa_pool_config_t;
/**
* Return the name of the pool
*
* @param pool_num Pool to get the name of
* @return The name
*/
const char *cvmx_fpa_get_name(int pool_num);
/**
* Initialize FPA per node
*/
int cvmx_fpa_global_init_node(int node);
/**
 * Enable the FPA hardware for use.
 *
 * FPA3-capable chips are brought up through the per-node global init,
 * while legacy (FPA1) chips use the dedicated enable routine.
 */
static inline void cvmx_fpa_enable(void)
{
	if (octeon_has_feature(OCTEON_FEATURE_FPA3))
		cvmx_fpa_global_init_node(cvmx_get_node_num());
	else
		cvmx_fpa1_enable();
}
/**
 * Disable the FPA hardware.
 *
 * Only meaningful on legacy (FPA1) chips; FPA3 provides no disable
 * operation, so this is a no-op there.
 */
static inline void cvmx_fpa_disable(void)
{
	if (octeon_has_feature(OCTEON_FEATURE_FPA3))
		return;		/* FPA3 has no disable function */

	cvmx_fpa1_disable();
}
/**
 * @INTERNAL
 * @deprecated OBSOLETE
 *
 * Kept for transition assistance only
 */
static inline void cvmx_fpa_global_initialize(void)
{
	/* Simply forwards to the per-node init for the current node */
	cvmx_fpa_global_init_node(cvmx_get_node_num());
}
/**
 * @INTERNAL
 *
 * Map an FPA1-style POOL number onto the FPA3 AURA used for it in
 * backward-compatibility mode.
 *
 * @param pool Legacy pool number
 * @return The corresponding AURA handle, or CVMX_FPA3_INVALID_GAURA
 *         on chips without FPA3.
 */
static inline cvmx_fpa3_gaura_t cvmx_fpa1_pool_to_fpa3_aura(cvmx_fpa1_pool_t pool)
{
	if (!octeon_has_feature(OCTEON_FEATURE_FPA3))
		return CVMX_FPA3_INVALID_GAURA;

	/* In compat mode pool N maps 1:1 onto aura N of the local node */
	return __cvmx_fpa3_gaura(cvmx_get_node_num(), pool);
}
/**
 * Get a new block from the FPA.
 *
 * @param pool Pool to get the block from
 * @return Pointer to the block, or NULL on failure
 */
static inline void *cvmx_fpa_alloc(u64 pool)
{
	/* Legacy chips allocate straight from the pool ... */
	if (!octeon_has_feature(OCTEON_FEATURE_FPA3))
		return cvmx_fpa1_alloc(pool);

	/* ... FPA3 chips go through the backward-compat aura mapping */
	return cvmx_fpa3_alloc(cvmx_fpa1_pool_to_fpa3_aura(pool));
}
/**
 * Asynchronously get a new block from the FPA.
 *
 * The result of cvmx_fpa_async_alloc() may be retrieved using
 * cvmx_fpa_async_alloc_finish().
 *
 * @param scr_addr Local scratch address to put response in. This is a byte
 *                 address but must be 8 byte aligned.
 * @param pool     Pool to get the block from
 */
static inline void cvmx_fpa_async_alloc(u64 scr_addr, u64 pool)
{
	/*
	 * Fix: the original used "return <void expression>;" here, which is
	 * a constraint violation in ISO C (C11 6.8.6.4) for a function
	 * returning void -- plain calls are the portable form.
	 */
	if (octeon_has_feature(OCTEON_FEATURE_FPA3))
		cvmx_fpa3_async_alloc(scr_addr, cvmx_fpa1_pool_to_fpa3_aura(pool));
	else
		cvmx_fpa1_async_alloc(scr_addr, pool);
}
/**
 * Retrieve the result of cvmx_fpa_async_alloc().
 *
 * @param scr_addr The Local scratch address. Must be the same value
 *                 passed to cvmx_fpa_async_alloc().
 * @param pool     Pool the block came from. Must be the same value
 *                 passed to cvmx_fpa_async_alloc().
 *
 * @return Pointer to the block or NULL on failure
 */
static inline void *cvmx_fpa_async_alloc_finish(u64 scr_addr, u64 pool)
{
	if (!octeon_has_feature(OCTEON_FEATURE_FPA3))
		return cvmx_fpa1_async_alloc_finish(scr_addr, pool);

	return cvmx_fpa3_async_alloc_finish(scr_addr, cvmx_fpa1_pool_to_fpa3_aura(pool));
}
/**
 * Free a block allocated with an FPA pool.
 * Does NOT provide memory ordering in cases where the memory block was
 * modified by the core.
 *
 * @param ptr  Block to free
 * @param pool Pool to put it in
 * @param num_cache_lines
 *             Cache lines to invalidate
 */
static inline void cvmx_fpa_free_nosync(void *ptr, u64 pool, u64 num_cache_lines)
{
	if (!octeon_has_feature(OCTEON_FEATURE_FPA3)) {
		cvmx_fpa1_free_nosync(ptr, pool, num_cache_lines);
		return;
	}

	/* FPA3 frees go through the backward-compat aura mapping */
	cvmx_fpa3_free_nosync(ptr, cvmx_fpa1_pool_to_fpa3_aura(pool), num_cache_lines);
}
/**
 * Free a block allocated with an FPA pool. Provides required memory
 * ordering in cases where the memory block was modified by the core.
 *
 * @param ptr  Block to free
 * @param pool Pool to put it in
 * @param num_cache_lines
 *             Cache lines to invalidate
 */
static inline void cvmx_fpa_free(void *ptr, u64 pool, u64 num_cache_lines)
{
	if (!octeon_has_feature(OCTEON_FEATURE_FPA3)) {
		cvmx_fpa1_free(ptr, pool, num_cache_lines);
		return;
	}

	/* FPA3 frees go through the backward-compat aura mapping */
	cvmx_fpa3_free(ptr, cvmx_fpa1_pool_to_fpa3_aura(pool), num_cache_lines);
}
/**
* Setup a FPA pool to control a new block of memory.
* This can only be called once per pool. Make sure proper
* locking enforces this.
*
* @param pool Pool to initialize
* @param name Constant character string to name this pool.
* String is not copied.
* @param buffer Pointer to the block of memory to use. This must be
* accessible by all processors and external hardware.
* @param block_size Size for each block controlled by the FPA
* @param num_blocks Number of blocks
*
* @return the pool number on Success,
* -1 on failure
*/
int cvmx_fpa_setup_pool(int pool, const char *name, void *buffer, u64 block_size, u64 num_blocks);
int cvmx_fpa_shutdown_pool(int pool);
/**
* Gets the block size of buffer in specified pool
* @param pool Pool to get the block size from
* @return Size of buffer in specified pool
*/
unsigned int cvmx_fpa_get_block_size(int pool);
int cvmx_fpa_is_pool_available(int pool_num);
u64 cvmx_fpa_get_pool_owner(int pool_num);
int cvmx_fpa_get_max_pools(void);
int cvmx_fpa_get_current_count(int pool_num);
int cvmx_fpa_validate_pool(int pool);
#endif /* __CVM_FPA_H__ */

View File

@ -0,0 +1,196 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Interface to the hardware Free Pool Allocator on Octeon chips.
* These are the legacy models, i.e. prior to CN78XX/CN76XX.
*/
#ifndef __CVMX_FPA1_HW_H__
#define __CVMX_FPA1_HW_H__
#include "cvmx-scratch.h"
#include "cvmx-fpa-defs.h"
#include "cvmx-fpa3.h"
/* Legacy pool range is 0..7 and 8 on CN68XX */
typedef int cvmx_fpa1_pool_t;
#define CVMX_FPA1_NUM_POOLS 8
#define CVMX_FPA1_INVALID_POOL ((cvmx_fpa1_pool_t)-1)
#define CVMX_FPA1_NAME_SIZE 16
/**
 * Structure describing the data format used for stores to the FPA.
 */
typedef union {
	u64 u64;
	struct {
		u64 scraddr : 8;	/* scratch response slot, in 64-bit words */
		u64 len : 8;		/* transfer length (set to 1 by callers here) */
		u64 did : 8;		/* device ID selecting the FPA pool */
		u64 addr : 40;		/* address field (set to 0 by callers here) */
	} s;
} cvmx_fpa1_iobdma_data_t;
/*
* Allocate or reserve the specified fpa pool.
*
* @param pool FPA pool to allocate/reserve. If -1 it
* finds an empty pool to allocate.
* @return Allocated pool number or CVMX_FPA1_INVALID_POOL
* if it fails to allocate the pool
*/
cvmx_fpa1_pool_t cvmx_fpa1_reserve_pool(cvmx_fpa1_pool_t pool);
/**
* Free the specified fpa pool.
* @param pool Pool to free
* @return 0 for success -1 failure
*/
int cvmx_fpa1_release_pool(cvmx_fpa1_pool_t pool);
/*
 * Return a buffer to a legacy FPA pool, ordering prior stores first.
 * The value stored is the number of cache lines NOT to write back.
 */
static inline void cvmx_fpa1_free(void *ptr, cvmx_fpa1_pool_t pool, u64 num_cache_lines)
{
	cvmx_addr_t io_addr;

	io_addr.u64 = cvmx_ptr_to_phys(ptr);
	io_addr.sfilldidspace.didspace =
		CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));

	/*
	 * Flush any previous writes to memory before handing the buffer
	 * back; this also keeps GCC from reordering work past the free.
	 */
	CVMX_SYNCWS;

	cvmx_write_io(io_addr.u64, num_cache_lines);
}
/*
 * Return a buffer to a legacy FPA pool WITHOUT flushing prior stores.
 * The value stored is the number of cache lines NOT to write back.
 */
static inline void cvmx_fpa1_free_nosync(void *ptr, cvmx_fpa1_pool_t pool,
					 unsigned int num_cache_lines)
{
	cvmx_addr_t io_addr;

	io_addr.u64 = cvmx_ptr_to_phys(ptr);
	io_addr.sfilldidspace.didspace =
		CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));

	/* Compiler barrier only: keep GCC from reordering around the free */
	asm volatile("" : : : "memory");

	cvmx_write_io(io_addr.u64, num_cache_lines);
}
/**
 * Enable the FPA for use. Must be performed after any CSR
 * configuration but before any other FPA functions.
 */
static inline void cvmx_fpa1_enable(void)
{
	cvmx_fpa_ctl_status_t ctl;

	ctl.u64 = csr_rd(CVMX_FPA_CTL_STATUS);
	if (ctl.s.enb) {
		/*
		 * Already enabled. CN68XXP1 should not reset the FPA
		 * (doing so may break the SSO), so we may legitimately be
		 * enabled more than once -- return quietly.
		 */
		return;
	}

	ctl.u64 = 0;
	ctl.s.enb = 1;
	csr_wr(CVMX_FPA_CTL_STATUS, ctl.u64);
}
/**
 * Reset FPA to disable. Make sure buffers from all FPA pools are freed
 * before disabling FPA.
 */
static inline void cvmx_fpa1_disable(void)
{
	cvmx_fpa_ctl_status_t ctl;

	/* Resetting the FPA is not safe on CN68XX pass 1 */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS1))
		return;

	ctl.u64 = csr_rd(CVMX_FPA_CTL_STATUS);
	ctl.s.reset = 1;
	csr_wr(CVMX_FPA_CTL_STATUS, ctl.u64);
}
/*
 * Synchronously allocate a buffer from a legacy FPA pool.
 * Retries while the pool still reports buffers available; returns NULL
 * only once the pool is empty.
 */
static inline void *cvmx_fpa1_alloc(cvmx_fpa1_pool_t pool)
{
	u64 paddr;

	for (;;) {
		paddr = csr_rd(CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool)));
		if (cvmx_likely(paddr))
			return cvmx_phys_to_ptr(paddr);

		/* Nothing returned: give up if the pool is truly empty */
		if (csr_rd(CVMX_FPA_QUEX_AVAILABLE(pool)) == 0)
			return NULL;

		udelay(50);	/* buffers exist but race lost; back off and retry */
	}
}
/**
 * Asynchronously get a new block from the FPA
 * @INTERNAL
 *
 * Issues an IOBDMA command; the response lands in local scratch memory
 * and is collected by cvmx_fpa1_async_alloc_finish().
 *
 * @param scr_addr Local scratch address to put response in. This is a byte
 *                 address but must be 8 byte aligned.
 * @param pool     Pool to get the block from
 */
static inline void cvmx_fpa1_async_alloc(u64 scr_addr, cvmx_fpa1_pool_t pool)
{
	cvmx_fpa1_iobdma_data_t cmd;

	cmd.u64 = 0ull;
	/* Hardware addresses scratch as 64-bit words, so scale the byte address */
	cmd.s.scraddr = scr_addr >> 3;
	cmd.s.len = 1;
	cmd.s.did = CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool);
	cmd.s.addr = 0;

	/* Clear the response slot, fence, then post the command */
	cvmx_scratch_write64(scr_addr, 0ull);
	CVMX_SYNCW;
	cvmx_send_single(cmd.u64);
}
/**
 * Retrieve the result of cvmx_fpa_async_alloc
 * @INTERNAL
 *
 * @param scr_addr The Local scratch address. Must be the same value
 *                 passed to cvmx_fpa_async_alloc().
 * @param pool     Pool the block came from. Must be the same value
 *                 passed to cvmx_fpa_async_alloc.
 *
 * @return Pointer to the block or NULL on failure
 */
static inline void *cvmx_fpa1_async_alloc_finish(u64 scr_addr, cvmx_fpa1_pool_t pool)
{
	u64 paddr;

	CVMX_SYNCIOBDMA;	/* wait for the IOBDMA response to land */

	paddr = cvmx_scratch_read64(scr_addr);
	if (!paddr)
		return cvmx_fpa1_alloc(pool);	/* async miss: fall back to sync alloc */

	return cvmx_phys_to_ptr(paddr);
}
/* Return the number of buffers currently available in a legacy FPA pool. */
static inline u64 cvmx_fpa1_get_available(cvmx_fpa1_pool_t pool)
{
	return csr_rd(CVMX_FPA_QUEX_AVAILABLE(pool));
}
#endif /* __CVMX_FPA1_HW_H__ */

View File

@ -0,0 +1,566 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Interface to the CN78XX Free Pool Allocator, a.k.a. FPA3
*/
#include "cvmx-address.h"
#include "cvmx-fpa-defs.h"
#include "cvmx-scratch.h"
#ifndef __CVMX_FPA3_H__
#define __CVMX_FPA3_H__
/* Handle for an FPA3 POOL; valid only when valid_magic matches. */
typedef struct {
	unsigned res0 : 6;		/* reserved */
	unsigned node : 2;		/* node the pool lives on */
	unsigned res1 : 2;		/* reserved */
	unsigned lpool : 6;		/* local pool number on that node */
	unsigned valid_magic : 16;	/* CVMX_FPA3_VALID_MAGIC when handle is valid */
} cvmx_fpa3_pool_t;

/* Handle for an FPA3 global AURA; valid only when valid_magic matches. */
typedef struct {
	unsigned res0 : 6;		/* reserved */
	unsigned node : 2;		/* node the aura lives on */
	unsigned res1 : 6;		/* reserved */
	unsigned laura : 10;		/* local aura number on that node */
	unsigned valid_magic : 16;	/* CVMX_FPA3_VALID_MAGIC when handle is valid */
} cvmx_fpa3_gaura_t;

#define CVMX_FPA3_VALID_MAGIC	0xf9a3
/* All-zero handles: valid_magic of 0 never matches, so these test invalid */
#define CVMX_FPA3_INVALID_GAURA ((cvmx_fpa3_gaura_t){ 0, 0, 0, 0, 0 })
#define CVMX_FPA3_INVALID_POOL	((cvmx_fpa3_pool_t){ 0, 0, 0, 0, 0 })
/* Check whether an AURA handle carries the validity magic. */
static inline bool __cvmx_fpa3_aura_valid(cvmx_fpa3_gaura_t aura)
{
	return aura.valid_magic == CVMX_FPA3_VALID_MAGIC;
}
/* Check whether a POOL handle carries the validity magic. */
static inline bool __cvmx_fpa3_pool_valid(cvmx_fpa3_pool_t pool)
{
	return pool.valid_magic == CVMX_FPA3_VALID_MAGIC;
}
/*
 * Build an AURA handle for (node, laura). A negative node selects the
 * current node; a negative laura yields the invalid handle.
 */
static inline cvmx_fpa3_gaura_t __cvmx_fpa3_gaura(int node, int laura)
{
	cvmx_fpa3_gaura_t aura;

	if (laura < 0)
		return CVMX_FPA3_INVALID_GAURA;

	if (node < 0)
		node = cvmx_get_node_num();

	aura.node = node;
	aura.laura = laura;
	aura.valid_magic = CVMX_FPA3_VALID_MAGIC;
	return aura;
}
/*
 * Build a POOL handle for (node, lpool). A negative node selects the
 * current node; a negative lpool yields the invalid handle.
 */
static inline cvmx_fpa3_pool_t __cvmx_fpa3_pool(int node, int lpool)
{
	cvmx_fpa3_pool_t pool;

	if (lpool < 0)
		return CVMX_FPA3_INVALID_POOL;

	if (node < 0)
		node = cvmx_get_node_num();

	pool.node = node;
	pool.lpool = lpool;
	pool.valid_magic = CVMX_FPA3_VALID_MAGIC;
	return pool;
}
#undef CVMX_FPA3_VALID_MAGIC
/**
 * Structure describing the data format used for stores to the FPA.
 */
typedef union {
	u64 u64;
	/* Legacy layout */
	struct {
		u64 scraddr : 8;
		u64 len : 8;
		u64 did : 8;
		u64 addr : 40;
	} s;
	/* CN78XX-class (FPA3) layout */
	struct {
		u64 scraddr : 8;	/* scratch response slot, in 64-bit words */
		u64 len : 8;		/* transfer length */
		u64 did : 8;		/* device ID (0x29 selects the FPA) */
		u64 node : 4;		/* destination node */
		u64 red : 1;		/* request RED processing */
		u64 reserved2 : 9;
		u64 aura : 10;		/* aura to allocate from */
		u64 reserved3 : 16;
	} cn78xx;
} cvmx_fpa3_iobdma_data_t;

/**
 * Struct describing load allocate operation addresses for FPA pool.
 */
union cvmx_fpa3_load_data {
	u64 u64;
	struct {
		u64 seg : 2;		/* address segment (XKPHYS) */
		u64 reserved1 : 13;
		u64 io : 1;		/* I/O space select */
		u64 did : 8;		/* device ID (0x29 selects the FPA) */
		u64 node : 4;		/* node to allocate from */
		u64 red : 1;		/* request RED processing */
		u64 reserved2 : 9;
		u64 aura : 10;		/* aura to allocate from */
		u64 reserved3 : 16;
	};
};

typedef union cvmx_fpa3_load_data cvmx_fpa3_load_data_t;

/**
 * Struct describing store free operation addresses from FPA pool.
 */
union cvmx_fpa3_store_addr {
	u64 u64;
	struct {
		u64 seg : 2;		/* address segment (XKPHYS) */
		u64 reserved1 : 13;
		u64 io : 1;		/* I/O space select */
		u64 did : 8;		/* device ID (0x29 selects the FPA) */
		u64 node : 4;		/* node to free to */
		u64 reserved2 : 10;
		u64 aura : 10;		/* aura to free into */
		u64 fabs : 1;		/* "free absolute" flag */
		u64 reserved3 : 3;
		u64 dwb_count : 9;	/* cache lines not to write back on free */
		u64 reserved4 : 3;
	};
};

typedef union cvmx_fpa3_store_addr cvmx_fpa3_store_addr_t;

/* Buffer alignment policies for FPA3 pools */
enum cvmx_fpa3_pool_alignment_e {
	FPA_NATURAL_ALIGNMENT,
	FPA_OFFSET_ALIGNMENT,
	FPA_OPAQUE_ALIGNMENT
};

/* AURA count limit is a 40-bit hardware field */
#define CVMX_FPA3_AURAX_LIMIT_MAX ((1ull << 40) - 1)
/**
* @INTERNAL
* Accessor functions to return number of POOLS in an FPA3
* depending on SoC model.
* The number is per-node for models supporting multi-node configurations.
*/
static inline int cvmx_fpa3_num_pools(void)
{
if (OCTEON_IS_MODEL(OCTEON_CN78XX))
return 64;
if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
return 32;
if (OCTEON_IS_MODEL(OCTEON_CN73XX))
return 32;
printf("ERROR: %s: Unknowm model\n", __func__);
return -1;
}
/**
* @INTERNAL
* Accessor functions to return number of AURAS in an FPA3
* depending on SoC model.
* The number is per-node for models supporting multi-node configurations.
*/
static inline int cvmx_fpa3_num_auras(void)
{
if (OCTEON_IS_MODEL(OCTEON_CN78XX))
return 1024;
if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
return 512;
if (OCTEON_IS_MODEL(OCTEON_CN73XX))
return 512;
printf("ERROR: %s: Unknowm model\n", __func__);
return -1;
}
/**
 * Get the FPA3 POOL underneath an FPA3 AURA, i.e. the pool that
 * contains all of the aura's buffers.
 */
static inline cvmx_fpa3_pool_t cvmx_fpa3_aura_to_pool(cvmx_fpa3_gaura_t aura)
{
	cvmx_fpa_aurax_pool_t map;

	/* The AURAX_POOL CSR records which local pool backs this aura */
	map.u64 = cvmx_read_csr_node(aura.node, CVMX_FPA_AURAX_POOL(aura.laura));

	return __cvmx_fpa3_pool(aura.node, map.s.pool);
}
/**
 * Get a new block from an FPA3 aura.
 *
 * @param aura aura handle to allocate from
 * @return pointer to the block or NULL on failure
 */
static inline void *cvmx_fpa3_alloc(cvmx_fpa3_gaura_t aura)
{
	cvmx_fpa3_load_data_t ld;
	u64 paddr;

	/* Build the magic I/O load address that triggers an allocation */
	ld.u64 = 0;
	ld.seg = CVMX_MIPS_SPACE_XKPHYS;
	ld.io = 1;
	ld.did = 0x29;		/* Device ID: indicates FPA */
	ld.node = aura.node;
	ld.red = 0;		/* no RED on allocation; FIXME: use config option */
	ld.aura = aura.laura;

	paddr = cvmx_read64_uint64(ld.u64);
	return paddr ? cvmx_phys_to_ptr(paddr) : NULL;
}
/**
 * Asynchronously get a new block from the FPA
 *
 * The result of cvmx_fpa_async_alloc() may be retrieved using
 * cvmx_fpa_async_alloc_finish().
 *
 * @param scr_addr Local scratch address to put response in. This is a byte
 *                 address but must be 8 byte aligned.
 * @param aura     Global aura to get the block from
 */
static inline void cvmx_fpa3_async_alloc(u64 scr_addr, cvmx_fpa3_gaura_t aura)
{
	cvmx_fpa3_iobdma_data_t cmd;

	cmd.u64 = 0ull;
	/* Hardware addresses scratch as 64-bit words, so scale the byte address */
	cmd.cn78xx.scraddr = scr_addr >> 3;
	cmd.cn78xx.len = 1;
	cmd.cn78xx.did = 0x29;		/* Device ID: indicates FPA */
	cmd.cn78xx.node = aura.node;
	cmd.cn78xx.aura = aura.laura;

	/* Clear the response slot, fence, then post the command */
	cvmx_scratch_write64(scr_addr, 0ull);
	CVMX_SYNCW;
	cvmx_send_single(cmd.u64);
}
/**
 * Retrieve the result of cvmx_fpa3_async_alloc
 *
 * @param scr_addr The Local scratch address. Must be the same value
 *                 passed to cvmx_fpa_async_alloc().
 * @param aura     Global aura the block came from. Must be the same value
 *                 passed to cvmx_fpa_async_alloc.
 *
 * @return Pointer to the block or NULL on failure
 */
static inline void *cvmx_fpa3_async_alloc_finish(u64 scr_addr, cvmx_fpa3_gaura_t aura)
{
	u64 paddr;

	CVMX_SYNCIOBDMA;	/* wait for the IOBDMA response to land */

	paddr = cvmx_scratch_read64(scr_addr);
	if (!paddr)
		return cvmx_fpa3_alloc(aura);	/* async miss: try a regular alloc */

	return cvmx_phys_to_ptr(paddr);
}
/**
 * Free a pointer back to the pool.
 *
 * @param ptr             physical address of block to free.
 * @param aura            global aura number
 * @param num_cache_lines Cache lines to invalidate
 */
static inline void cvmx_fpa3_free(void *ptr, cvmx_fpa3_gaura_t aura, unsigned int num_cache_lines)
{
	cvmx_fpa3_store_addr_t io_addr;
	cvmx_addr_t phys;

	phys.u64 = cvmx_ptr_to_phys(ptr);

	/*
	 * Flush any previous writes to memory before handing the buffer
	 * back; this also keeps GCC from reordering work past the free.
	 */
	CVMX_SYNCWS;

	/* Build the magic I/O store address that triggers the free */
	io_addr.u64 = 0;
	io_addr.seg = CVMX_MIPS_SPACE_XKPHYS;
	io_addr.io = 1;
	io_addr.did = 0x29;	/* Device ID: indicates FPA */
	io_addr.node = aura.node;
	io_addr.aura = aura.laura;
	io_addr.fabs = 0;	/* free absolute; FIXME: use config option */
	io_addr.dwb_count = num_cache_lines;

	cvmx_write_io(io_addr.u64, phys.u64);
}
/**
 * Free a pointer back to the pool without flushing the write buffer.
 *
 * @param ptr             physical address of block to free.
 * @param aura            global aura number
 * @param num_cache_lines Cache lines to invalidate
 */
static inline void cvmx_fpa3_free_nosync(void *ptr, cvmx_fpa3_gaura_t aura,
					 unsigned int num_cache_lines)
{
	cvmx_fpa3_store_addr_t io_addr;
	cvmx_addr_t phys;

	phys.u64 = cvmx_ptr_to_phys(ptr);

	/* Compiler barrier only: keep GCC from reordering writes to (*ptr) */
	asm volatile("" : : : "memory");

	/* Build the magic I/O store address that triggers the free */
	io_addr.u64 = 0;
	io_addr.seg = CVMX_MIPS_SPACE_XKPHYS;
	io_addr.io = 1;
	io_addr.did = 0x29;	/* Device ID: indicates FPA */
	io_addr.node = aura.node;
	io_addr.aura = aura.laura;
	io_addr.fabs = 0;	/* free absolute; FIXME: use config option */
	io_addr.dwb_count = num_cache_lines;

	cvmx_write_io(io_addr.u64, phys.u64);
}
/*
 * Report whether an FPA3 pool is enabled.
 * Returns the pool's ENA bit, or -1 for an invalid pool handle.
 */
static inline int cvmx_fpa3_pool_is_enabled(cvmx_fpa3_pool_t pool)
{
	cvmx_fpa_poolx_cfg_t cfg;

	if (!__cvmx_fpa3_pool_valid(pool))
		return -1;

	cfg.u64 = cvmx_read_csr_node(pool.node, CVMX_FPA_POOLX_CFG(pool.lpool));
	return cfg.cn78xx.ena;
}
/*
 * Configure global FPA3 RED (random early discard) parameters on a node.
 *
 * @param node        node whose FPA to configure
 * @param qos_avg_en  enable QOS averaging (GEN_CFG.AVG_EN)
 * @param red_lvl_dly RED level delay (GEN_CFG.LVL_DLY)
 * @param avg_dly     averaging delay (RED_DELAY.AVG_DLY)
 *
 * @return always 0
 */
static inline int cvmx_fpa3_config_red_params(unsigned int node, int qos_avg_en, int red_lvl_dly,
					      int avg_dly)
{
	cvmx_fpa_gen_cfg_t fpa_cfg;
	cvmx_fpa_red_delay_t red_delay;

	/* Read-modify-write the general config with the averaging settings */
	fpa_cfg.u64 = cvmx_read_csr_node(node, CVMX_FPA_GEN_CFG);
	fpa_cfg.s.avg_en = qos_avg_en;
	fpa_cfg.s.lvl_dly = red_lvl_dly;
	cvmx_write_csr_node(node, CVMX_FPA_GEN_CFG, fpa_cfg.u64);

	/* Read-modify-write the RED delay register */
	red_delay.u64 = cvmx_read_csr_node(node, CVMX_FPA_RED_DELAY);
	red_delay.s.avg_dly = avg_dly;
	cvmx_write_csr_node(node, CVMX_FPA_RED_DELAY, red_delay.u64);
	return 0;
}
/**
 * Gets the buffer size of the specified aura's backing pool.
 *
 * @param aura Global aura number
 * @return Returns size of the buffers in the specified pool.
 */
static inline int cvmx_fpa3_get_aura_buf_size(cvmx_fpa3_gaura_t aura)
{
	cvmx_fpa3_pool_t pool = cvmx_fpa3_aura_to_pool(aura);
	cvmx_fpa_poolx_cfg_t cfg;

	cfg.u64 = cvmx_read_csr_node(pool.node, CVMX_FPA_POOLX_CFG(pool.lpool));

	/* The CSR stores buf_size in units of 128 bytes */
	return cfg.cn78xx.buf_size << 7;
}
/**
 * Return the number of available buffers in an AURA
 *
 * @param aura to receive count for
 * @return available buffer count
 */
static inline long long cvmx_fpa3_get_available(cvmx_fpa3_gaura_t aura)
{
	cvmx_fpa3_pool_t pool;
	cvmx_fpa_poolx_available_t avail_reg;
	cvmx_fpa_aurax_cnt_t cnt_reg;
	cvmx_fpa_aurax_cnt_limit_t limit_reg;
	long long ret;

	pool = cvmx_fpa3_aura_to_pool(aura);

	/* Get POOL available buffer count */
	avail_reg.u64 = cvmx_read_csr_node(pool.node, CVMX_FPA_POOLX_AVAILABLE(pool.lpool));

	/* Get AURA current available count */
	cnt_reg.u64 = cvmx_read_csr_node(aura.node, CVMX_FPA_AURAX_CNT(aura.laura));
	limit_reg.u64 = cvmx_read_csr_node(aura.node, CVMX_FPA_AURAX_CNT_LIMIT(aura.laura));

	/* Aura already at or past its limit: nothing more may be taken */
	if (limit_reg.cn78xx.limit < cnt_reg.cn78xx.cnt)
		return 0;

	/* Calculate AURA-based buffer allowance */
	ret = limit_reg.cn78xx.limit - cnt_reg.cn78xx.cnt;

	/* Use POOL real buffer availability when less than the allowance */
	if (ret > (long long)avail_reg.cn78xx.count)
		ret = avail_reg.cn78xx.count;

	return ret;
}
/**
 * Configure the QoS parameters of an FPA3 AURA
 *
 * @param aura        is the FPA3 AURA handle
 * @param ena_red     enables random early discard when outstanding count exceeds 'pass_thresh'
 * @param pass_thresh is the maximum count to invoke flow control
 * @param drop_thresh is the count threshold to begin dropping packets
 * @param ena_bp      enables backpressure when outstanding count exceeds 'bp_thresh'
 * @param bp_thresh   is the back-pressure threshold
 */
static inline void cvmx_fpa3_setup_aura_qos(cvmx_fpa3_gaura_t aura, bool ena_red, u64 pass_thresh,
					    u64 drop_thresh, bool ena_bp, u64 bp_thresh)
{
	unsigned int shift = 0;
	u64 shift_thresh;
	cvmx_fpa_aurax_cnt_limit_t limit_reg;
	cvmx_fpa_aurax_cnt_levels_t aura_level;

	if (!__cvmx_fpa3_aura_valid(aura))
		return;

	/* Get AURAX count limit for validation */
	limit_reg.u64 = cvmx_read_csr_node(aura.node, CVMX_FPA_AURAX_CNT_LIMIT(aura.laura));

	/* Clamp each threshold to a sane range relative to the aura limit */
	if (pass_thresh < 256)
		pass_thresh = 255;

	if (drop_thresh <= pass_thresh || drop_thresh > limit_reg.cn78xx.limit)
		drop_thresh = limit_reg.cn78xx.limit;

	if (bp_thresh < 256 || bp_thresh > limit_reg.cn78xx.limit)
		bp_thresh = limit_reg.cn78xx.limit >> 1;

	/* The hardware scales all three thresholds by one common shift,
	 * so size the shift for the largest of them.
	 */
	shift_thresh = (bp_thresh > drop_thresh) ? bp_thresh : drop_thresh;

	/* Calculate shift so that the largest threshold fits in 8 bits */
	for (shift = 0; shift < (1 << 6); shift++) {
		if (0 == ((shift_thresh >> shift) & ~0xffull))
			break;
	};

	/* Program the shifted pass/drop/bp levels and enables */
	aura_level.u64 = cvmx_read_csr_node(aura.node, CVMX_FPA_AURAX_CNT_LEVELS(aura.laura));
	aura_level.s.pass = pass_thresh >> shift;
	aura_level.s.drop = drop_thresh >> shift;
	aura_level.s.bp = bp_thresh >> shift;
	aura_level.s.shift = shift;
	aura_level.s.red_ena = ena_red;
	aura_level.s.bp_ena = ena_bp;
	cvmx_write_csr_node(aura.node, CVMX_FPA_AURAX_CNT_LEVELS(aura.laura), aura_level.u64);
}
cvmx_fpa3_gaura_t cvmx_fpa3_reserve_aura(int node, int desired_aura_num);
int cvmx_fpa3_release_aura(cvmx_fpa3_gaura_t aura);
cvmx_fpa3_pool_t cvmx_fpa3_reserve_pool(int node, int desired_pool_num);
int cvmx_fpa3_release_pool(cvmx_fpa3_pool_t pool);
int cvmx_fpa3_is_aura_available(int node, int aura_num);
int cvmx_fpa3_is_pool_available(int node, int pool_num);
cvmx_fpa3_pool_t cvmx_fpa3_setup_fill_pool(int node, int desired_pool, const char *name,
unsigned int block_size, unsigned int num_blocks,
void *buffer);
/**
* Function to attach an aura to an existing pool
*
 * @param pool - configured pool to attach the aura to
 * @param desired_aura - aura number to use, set to -1 to allocate dynamically
* @param name - name to register
* @param block_size - size of buffers to use
* @param num_blocks - number of blocks to allocate
*
* @return configured gaura on success, CVMX_FPA3_INVALID_GAURA on failure
*/
cvmx_fpa3_gaura_t cvmx_fpa3_set_aura_for_pool(cvmx_fpa3_pool_t pool, int desired_aura,
const char *name, unsigned int block_size,
unsigned int num_blocks);
/**
* Function to setup and initialize a pool.
*
 * @param node - configure fpa on this node
 * @param desired_aura - aura to use, -1 for dynamic allocation
 * @param name - name to register
 * @param buffer - memory backing the pool's buffers
 * @param block_size - size of buffers in pool
 * @param num_blocks - max number of buffers allowed
*/
cvmx_fpa3_gaura_t cvmx_fpa3_setup_aura_and_pool(int node, int desired_aura, const char *name,
void *buffer, unsigned int block_size,
unsigned int num_blocks);
int cvmx_fpa3_shutdown_aura_and_pool(cvmx_fpa3_gaura_t aura);
int cvmx_fpa3_shutdown_aura(cvmx_fpa3_gaura_t aura);
int cvmx_fpa3_shutdown_pool(cvmx_fpa3_pool_t pool);
const char *cvmx_fpa3_get_pool_name(cvmx_fpa3_pool_t pool);
int cvmx_fpa3_get_pool_buf_size(cvmx_fpa3_pool_t pool);
const char *cvmx_fpa3_get_aura_name(cvmx_fpa3_gaura_t aura);
/* FIXME: Need a different macro for stage2 of u-boot */
/**
 * Early (stage-2) bring-up of an FPA3 AURA/POOL pair on node 0.
 *
 * Programs the pool stack region and buffer size, resets then enables the
 * pool, and finally attaches the AURA to the pool with an initial buffer
 * count.
 *
 * @param aura        AURA number to configure (node 0)
 * @param pool        POOL number to configure (node 0)
 * @param stack_paddr physical base address of the pool stack memory
 * @param stacklen    length of the pool stack region in bytes
 * @param buffer_sz   buffer size in bytes; programmed in 128-byte units
 *                    (assumed to be a multiple of 128 -- TODO confirm)
 * @param buf_cnt     number of buffers credited to the AURA
 */
static inline void cvmx_fpa3_stage2_init(int aura, int pool, u64 stack_paddr, int stacklen,
					 int buffer_sz, int buf_cnt)
{
	cvmx_fpa_poolx_cfg_t pool_cfg;
	/* Configure pool stack */
	cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_BASE(pool), stack_paddr);
	cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_ADDR(pool), stack_paddr);
	cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_END(pool), stack_paddr + stacklen);
	/* Configure pool with buffer size (buf_size is in 128-byte units) */
	pool_cfg.u64 = 0;
	pool_cfg.cn78xx.nat_align = 1;
	pool_cfg.cn78xx.buf_size = buffer_sz >> 7;
	pool_cfg.cn78xx.l_type = 0x2;
	pool_cfg.cn78xx.ena = 0;
	cvmx_write_csr_node(0, CVMX_FPA_POOLX_CFG(pool), pool_cfg.u64);
	/* The config is written twice: first with ena=0 to reset the pool,
	 * then again with ena=1 to start it.
	 */
	pool_cfg.cn78xx.ena = 1;
	cvmx_write_csr_node(0, CVMX_FPA_POOLX_CFG(pool), pool_cfg.u64);
	/* Clear AURA config, credit it with buf_cnt buffers, attach to pool */
	cvmx_write_csr_node(0, CVMX_FPA_AURAX_CFG(aura), 0);
	cvmx_write_csr_node(0, CVMX_FPA_AURAX_CNT_ADD(aura), buf_cnt);
	cvmx_write_csr_node(0, CVMX_FPA_AURAX_POOL(aura), (u64)pool);
}
/**
 * Stage-2 teardown counterpart of cvmx_fpa3_stage2_init(): detaches the
 * AURA from its pool, then clears the pool configuration and stack
 * registers on node 0.
 *
 * @param aura AURA number to tear down (node 0)
 * @param pool POOL number to tear down (node 0)
 */
static inline void cvmx_fpa3_stage2_disable(int aura, int pool)
{
	/* Detach the AURA first, then disable and clear the pool */
	cvmx_write_csr_node(0, CVMX_FPA_AURAX_POOL(aura), 0);
	cvmx_write_csr_node(0, CVMX_FPA_POOLX_CFG(pool), 0);
	cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_BASE(pool), 0);
	cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_ADDR(pool), 0);
	cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_END(pool), 0);
}
#endif /* __CVMX_FPA3_H__ */

View File

@ -0,0 +1,213 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef _CVMX_GLOBAL_RESOURCES_T_
#define _CVMX_GLOBAL_RESOURCES_T_
#define CVMX_GLOBAL_RESOURCES_DATA_NAME "cvmx-global-resources"

/* In the macros below the abbreviation GR stands for global resources. */
#define CVMX_GR_TAG_INVALID \
	cvmx_get_gr_tag('i', 'n', 'v', 'a', 'l', 'i', 'd', '.', '.', '.', '.', '.', '.', '.', '.', \
			'.')
/* Tag for the PKO queue table range.
 * (The historic spelling 'queus' in the tag characters is kept on purpose:
 * changing it would change the tag value.)
 */
#define CVMX_GR_TAG_PKO_QUEUES \
	cvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'k', 'o', '_', 'q', 'u', 'e', 'u', 's', '.', '.', \
			'.')
/* Tag for a PKO internal ports range */
#define CVMX_GR_TAG_PKO_IPORTS \
	cvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'k', 'o', '_', 'i', 'p', 'o', 'r', 't', '.', '.', \
			'.')
#define CVMX_GR_TAG_FPA \
	cvmx_get_gr_tag('c', 'v', 'm', '_', 'f', 'p', 'a', '.', '.', '.', '.', '.', '.', '.', '.', \
			'.')
#define CVMX_GR_TAG_FAU \
	cvmx_get_gr_tag('c', 'v', 'm', '_', 'f', 'a', 'u', '.', '.', '.', '.', '.', '.', '.', '.', \
			'.')
/* No trailing semicolon: this macro must be usable inside expressions,
 * like every other tag macro here.
 */
#define CVMX_GR_TAG_SSO_GRP(n) \
	cvmx_get_gr_tag('c', 'v', 'm', '_', 's', 's', 'o', '_', '0', (n) + '0', '.', '.', '.', \
			'.', '.', '.')
#define CVMX_GR_TAG_TIM(n) \
	cvmx_get_gr_tag('c', 'v', 'm', '_', 't', 'i', 'm', '_', (n) + '0', '.', '.', '.', '.', \
			'.', '.', '.')
#define CVMX_GR_TAG_CLUSTERS(x) \
	cvmx_get_gr_tag('c', 'v', 'm', '_', 'c', 'l', 'u', 's', 't', 'e', 'r', '_', (x + '0'), \
			'.', '.', '.')
#define CVMX_GR_TAG_CLUSTER_GRP(x) \
	cvmx_get_gr_tag('c', 'v', 'm', '_', 'c', 'l', 'g', 'r', 'p', '_', (x + '0'), '.', '.', \
			'.', '.', '.')
#define CVMX_GR_TAG_STYLE(x) \
	cvmx_get_gr_tag('c', 'v', 'm', '_', 's', 't', 'y', 'l', 'e', '_', (x + '0'), '.', '.', \
			'.', '.', '.')
#define CVMX_GR_TAG_QPG_ENTRY(x) \
	cvmx_get_gr_tag('c', 'v', 'm', '_', 'q', 'p', 'g', 'e', 't', '_', (x + '0'), '.', '.', \
			'.', '.', '.')
#define CVMX_GR_TAG_BPID(x) \
	cvmx_get_gr_tag('c', 'v', 'm', '_', 'b', 'p', 'i', 'd', 's', '_', (x + '0'), '.', '.', \
			'.', '.', '.')
#define CVMX_GR_TAG_MTAG_IDX(x) \
	cvmx_get_gr_tag('c', 'v', 'm', '_', 'm', 't', 'a', 'g', 'x', '_', (x + '0'), '.', '.', \
			'.', '.', '.')
#define CVMX_GR_TAG_PCAM(x, y, z) \
	cvmx_get_gr_tag('c', 'v', 'm', '_', 'p', 'c', 'a', 'm', '_', (x + '0'), (y + '0'), \
			(z + '0'), '.', '.', '.', '.')
#define CVMX_GR_TAG_CIU3_IDT(_n) \
	cvmx_get_gr_tag('c', 'v', 'm', '_', 'c', 'i', 'u', '3', '_', ((_n) + '0'), '_', 'i', 'd', \
			't', '.', '.')
/* Allocation of the 512 SW INTSTs (in the 12 bit SW INTSN space) */
#define CVMX_GR_TAG_CIU3_SWINTSN(_n) \
	cvmx_get_gr_tag('c', 'v', 'm', '_', 'c', 'i', 'u', '3', '_', ((_n) + '0'), '_', 's', 'w', \
			'i', 's', 'n')
/* Pack eight characters into one u64, first character in the top byte. */
#define TAG_INIT_PART(A, B, C, D, E, F, G, H) \
	((((u64)(A) & 0xff) << 56) | (((u64)(B) & 0xff) << 48) | (((u64)(C) & 0xff) << 40) | \
	 (((u64)(D) & 0xff) << 32) | (((u64)(E) & 0xff) << 24) | (((u64)(F) & 0xff) << 16) | \
	 (((u64)(G) & 0xff) << 8) | (((u64)(H) & 0xff)))
/** 128-bit global-resource tag built from 16 characters (see cvmx_get_gr_tag) */
struct global_resource_tag {
	u64 lo; /* first 8 characters, packed by TAG_INIT_PART */
	u64 hi; /* last 8 characters */
};
/** Error returns used by the global resource allocation routines */
enum cvmx_resource_err { CVMX_RESOURCE_ALLOC_FAILED = -1, CVMX_RESOURCE_ALREADY_RESERVED = -2 };
/*
* @INTERNAL
* Creates a tag from the specified characters.
*/
static inline struct global_resource_tag cvmx_get_gr_tag(char a, char b, char c, char d, char e,
char f, char g, char h, char i, char j,
char k, char l, char m, char n, char o,
char p)
{
struct global_resource_tag tag;
tag.lo = TAG_INIT_PART(a, b, c, d, e, f, g, h);
tag.hi = TAG_INIT_PART(i, j, k, l, m, n, o, p);
return tag;
}
/*
 * @INTERNAL
 * Compare two global-resource tags; non-zero when both 64-bit halves match.
 */
static inline int cvmx_gr_same_tag(struct global_resource_tag gr1, struct global_resource_tag gr2)
{
	if (gr1.lo != gr2.lo)
		return 0;
	return gr1.hi == gr2.hi;
}
/*
* @INTERNAL
* Creates a global resource range that can hold the specified number of
* elements
 * @param tag is the tag of the range. The tag is created using the method
 * cvmx_get_gr_tag()
* @param nelements is the number of elements to be held in the resource range.
*/
int cvmx_create_global_resource_range(struct global_resource_tag tag, int nelements);
/*
* @INTERNAL
* Allocate nelements in the global resource range with the specified tag. It
* is assumed that prior
* to calling this the global resource range has already been created using
* cvmx_create_global_resource_range().
* @param tag is the tag of the global resource range.
* @param nelements is the number of elements to be allocated.
 * @param owner is a 64 bit number that identifies the owner of this range.
 * @param alignment specifies the required alignment of the returned base number.
* @return returns the base of the allocated range. -1 return value indicates
* failure.
*/
int cvmx_allocate_global_resource_range(struct global_resource_tag tag, u64 owner, int nelements,
int alignment);
/*
* @INTERNAL
* Allocate nelements in the global resource range with the specified tag.
* The elements allocated need not be contiguous. It is assumed that prior to
* calling this the global resource range has already
* been created using cvmx_create_global_resource_range().
* @param tag is the tag of the global resource range.
* @param nelements is the number of elements to be allocated.
 * @param owner is a 64 bit number that identifies the owner of the allocated
 * elements.
 * @param allocated_elements returns indexes of the allocated entries.
* @return returns 0 on success and -1 on failure.
*/
int cvmx_resource_alloc_many(struct global_resource_tag tag, u64 owner, int nelements,
int allocated_elements[]);
int cvmx_resource_alloc_reverse(struct global_resource_tag, u64 owner);
/*
* @INTERNAL
* Reserve nelements starting from base in the global resource range with the
* specified tag.
* It is assumed that prior to calling this the global resource range has
* already been created using cvmx_create_global_resource_range().
* @param tag is the tag of the global resource range.
* @param nelements is the number of elements to be allocated.
 * @param owner is a 64 bit number that identifies the owner of this range.
* @base specifies the base start of nelements.
* @return returns the base of the allocated range. -1 return value indicates
* failure.
*/
int cvmx_reserve_global_resource_range(struct global_resource_tag tag, u64 owner, int base,
int nelements);
/*
* @INTERNAL
* Free nelements starting at base in the global resource range with the
* specified tag.
* @param tag is the tag of the global resource range.
* @param base is the base number
* @param nelements is the number of elements that are to be freed.
* @return returns 0 if successful and -1 on failure.
*/
int cvmx_free_global_resource_range_with_base(struct global_resource_tag tag, int base,
int nelements);
/*
* @INTERNAL
* Free nelements with the bases specified in bases[] with the
* specified tag.
* @param tag is the tag of the global resource range.
* @param bases is an array containing the bases to be freed.
* @param nelements is the number of elements that are to be freed.
* @return returns 0 if successful and -1 on failure.
*/
int cvmx_free_global_resource_range_multiple(struct global_resource_tag tag, int bases[],
int nelements);
/*
* @INTERNAL
* Free elements from the specified owner in the global resource range with the
* specified tag.
* @param tag is the tag of the global resource range.
* @param owner is the owner of resources that are to be freed.
* @return returns 0 if successful and -1 on failure.
*/
int cvmx_free_global_resource_range_with_owner(struct global_resource_tag tag, int owner);
/*
* @INTERNAL
* Frees all the global resources that have been created.
* For use only from the bootloader, when it shutdown and boots up the
* application or kernel.
*/
int free_global_resources(void);
u64 cvmx_get_global_resource_owner(struct global_resource_tag tag, int base);
/*
* @INTERNAL
* Shows the global resource range with the specified tag. Use mainly for debug.
*/
void cvmx_show_global_resource_range(struct global_resource_tag tag);
/*
* @INTERNAL
* Shows all the global resources. Used mainly for debug.
*/
void cvmx_global_resources_show(void);
u64 cvmx_allocate_app_id(void);
u64 cvmx_get_app_id(void);
#endif

View File

@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Interface to the GMX hardware.
*/
#ifndef __CVMX_GMX_H__
#define __CVMX_GMX_H__
/* CSR typedefs have been moved to cvmx-gmx-defs.h */
/* Override backpressure for the ports selected by port_mask on a GMX
 * interface -- see the implementation for the exact mask semantics.
 */
int cvmx_gmx_set_backpressure_override(u32 interface, u32 port_mask);
/* Same as above, but for the AGL (RGMII) interface */
int cvmx_agl_set_backpressure_override(u32 interface, u32 port_mask);
#endif

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,68 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Functions for AGL (RGMII) initialization, configuration,
* and monitoring.
*/
#ifndef __CVMX_HELPER_AGL_H__
#define __CVMX_HELPER_AGL_H__
/* @INTERNAL Enumerate the AGL (RGMII) ports on the given interface */
int __cvmx_helper_agl_enumerate(int interface);
/* Return the AGL port for the given xinterface -- see implementation for
 * the failure return value.
 */
int cvmx_helper_agl_get_port(int xiface);
/**
* @INTERNAL
* Probe a RGMII interface and determine the number of ports
* connected to it. The RGMII interface should still be down
* after this call.
*
* @param interface Interface to probe
*
* @return Number of ports on the interface. Zero to disable.
*/
int __cvmx_helper_agl_probe(int interface);
/**
* @INTERNAL
* Bringup and enable a RGMII interface. After this call packet
* I/O should be fully functional. This is called with IPD
* enabled but PKO disabled.
*
* @param interface Interface to bring up
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_agl_enable(int interface);
/**
* @INTERNAL
* Return the link state of an IPD/PKO port as returned by
* auto negotiation. The result of this function may not match
* Octeon's link config if auto negotiation has changed since
* the last call to cvmx_helper_link_set().
*
* @param ipd_port IPD/PKO port to query
*
* @return Link state
*/
cvmx_helper_link_info_t __cvmx_helper_agl_link_get(int ipd_port);
/**
* @INTERNAL
* Configure an IPD/PKO port for the specified link state. This
* function does not influence auto negotiation at the PHY level.
* The passed link state must always match the link state returned
* by cvmx_helper_link_get(). It is normally best to use
* cvmx_helper_link_autoconf() instead.
*
* @param ipd_port IPD/PKO port to configure
* @param link_info The new link state
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_agl_link_set(int ipd_port, cvmx_helper_link_info_t link_info);
#endif /* __CVMX_HELPER_AGL_H__ */

View File

@ -0,0 +1,335 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Functions to configure the BGX MAC.
*/
#ifndef __CVMX_HELPER_BGX_H__
#define __CVMX_HELPER_BGX_H__
#define CVMX_BGX_RX_FIFO_SIZE (64 * 1024)
#define CVMX_BGX_TX_FIFO_SIZE (32 * 1024)
int __cvmx_helper_bgx_enumerate(int xiface);
/**
* @INTERNAL
* Disable the BGX port
*
* @param xipd_port IPD port of the BGX interface to disable
*/
void cvmx_helper_bgx_disable(int xipd_port);
/**
* @INTERNAL
* Probe a SGMII interface and determine the number of ports
* connected to it. The SGMII/XAUI interface should still be down after
* this call. This is used by interfaces using the bgx mac.
*
* @param xiface Interface to probe
*
* @return Number of ports on the interface. Zero to disable.
*/
int __cvmx_helper_bgx_probe(int xiface);
/**
* @INTERNAL
* Bringup and enable a SGMII interface. After this call packet
* I/O should be fully functional. This is called with IPD
* enabled but PKO disabled. This is used by interfaces using the
* bgx mac.
*
* @param xiface Interface to bring up
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_bgx_sgmii_enable(int xiface);
/**
* @INTERNAL
* Return the link state of an IPD/PKO port as returned by
* auto negotiation. The result of this function may not match
* Octeon's link config if auto negotiation has changed since
* the last call to cvmx_helper_link_set(). This is used by
* interfaces using the bgx mac.
*
* @param xipd_port IPD/PKO port to query
*
* @return Link state
*/
cvmx_helper_link_info_t __cvmx_helper_bgx_sgmii_link_get(int xipd_port);
/**
* @INTERNAL
* Configure an IPD/PKO port for the specified link state. This
* function does not influence auto negotiation at the PHY level.
* The passed link state must always match the link state returned
* by cvmx_helper_link_get(). It is normally best to use
* cvmx_helper_link_autoconf() instead. This is used by interfaces
* using the bgx mac.
*
* @param xipd_port IPD/PKO port to configure
* @param link_info The new link state
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_bgx_sgmii_link_set(int xipd_port, cvmx_helper_link_info_t link_info);
/**
* @INTERNAL
* Configure a port for internal and/or external loopback. Internal loopback
* causes packets sent by the port to be received by Octeon. External loopback
 * causes packets received from the wire to be sent out again. This is used by
* interfaces using the bgx mac.
*
* @param xipd_port IPD/PKO port to loopback.
* @param enable_internal
* Non zero if you want internal loopback
* @param enable_external
* Non zero if you want external loopback
*
* @return Zero on success, negative on failure.
*/
int __cvmx_helper_bgx_sgmii_configure_loopback(int xipd_port, int enable_internal,
int enable_external);
/**
* @INTERNAL
* Bringup and enable a XAUI interface. After this call packet
* I/O should be fully functional. This is called with IPD
* enabled but PKO disabled. This is used by interfaces using the
* bgx mac.
*
* @param xiface Interface to bring up
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_bgx_xaui_enable(int xiface);
/**
* @INTERNAL
* Return the link state of an IPD/PKO port as returned by
* auto negotiation. The result of this function may not match
* Octeon's link config if auto negotiation has changed since
* the last call to cvmx_helper_link_set(). This is used by
* interfaces using the bgx mac.
*
* @param xipd_port IPD/PKO port to query
*
* @return Link state
*/
cvmx_helper_link_info_t __cvmx_helper_bgx_xaui_link_get(int xipd_port);
/**
* @INTERNAL
* Configure an IPD/PKO port for the specified link state. This
* function does not influence auto negotiation at the PHY level.
* The passed link state must always match the link state returned
* by cvmx_helper_link_get(). It is normally best to use
* cvmx_helper_link_autoconf() instead. This is used by interfaces
* using the bgx mac.
*
* @param xipd_port IPD/PKO port to configure
* @param link_info The new link state
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_bgx_xaui_link_set(int xipd_port, cvmx_helper_link_info_t link_info);
/**
* @INTERNAL
* Configure a port for internal and/or external loopback. Internal loopback
* causes packets sent by the port to be received by Octeon. External loopback
 * causes packets received from the wire to be sent out again. This is used by
* interfaces using the bgx mac.
*
* @param xipd_port IPD/PKO port to loopback.
* @param enable_internal
* Non zero if you want internal loopback
* @param enable_external
* Non zero if you want external loopback
*
* @return Zero on success, negative on failure.
*/
int __cvmx_helper_bgx_xaui_configure_loopback(int xipd_port, int enable_internal,
int enable_external);
int __cvmx_helper_bgx_mixed_enable(int xiface);
cvmx_helper_link_info_t __cvmx_helper_bgx_mixed_link_get(int xipd_port);
int __cvmx_helper_bgx_mixed_link_set(int xipd_port, cvmx_helper_link_info_t link_info);
int __cvmx_helper_bgx_mixed_configure_loopback(int xipd_port, int enable_internal,
int enable_external);
cvmx_helper_interface_mode_t cvmx_helper_bgx_get_mode(int xiface, int index);
/**
* @INTERNAL
* Configure Priority-Based Flow Control (a.k.a. PFC/CBFC)
* on a specific BGX interface/port.
*/
void __cvmx_helper_bgx_xaui_config_pfc(unsigned int node, unsigned int interface, unsigned int port,
bool pfc_enable);
/**
* This function control how the hardware handles incoming PAUSE
* packets. The most common modes of operation:
* ctl_bck = 1, ctl_drp = 1: hardware handles everything
* ctl_bck = 0, ctl_drp = 0: software sees all PAUSE frames
* ctl_bck = 0, ctl_drp = 1: all PAUSE frames are completely ignored
* @param node node number.
* @param interface interface number
* @param port port number
* @param ctl_bck 1: Forward PAUSE information to TX block
* @param ctl_drp 1: Drop control PAUSE frames.
*/
void cvmx_helper_bgx_rx_pause_ctl(unsigned int node, unsigned int interface, unsigned int port,
unsigned int ctl_bck, unsigned int ctl_drp);
/**
* This function configures the receive action taken for multicast, broadcast
* and dmac filter match packets.
* @param node node number.
* @param interface interface number
* @param port port number
* @param cam_accept 0: reject packets on dmac filter match
* 1: accept packet on dmac filter match
* @param mcast_mode 0x0 = Force reject all multicast packets
* 0x1 = Force accept all multicast packets
* 0x2 = Use the address filter CAM
* @param bcast_accept 0 = Reject all broadcast packets
* 1 = Accept all broadcast packets
*/
void cvmx_helper_bgx_rx_adr_ctl(unsigned int node, unsigned int interface, unsigned int port,
unsigned int cam_accept, unsigned int mcast_mode,
unsigned int bcast_accept);
/**
* Function to control the generation of FCS, padding by the BGX
*
*/
void cvmx_helper_bgx_tx_options(unsigned int node, unsigned int interface, unsigned int index,
bool fcs_enable, bool pad_enable);
/**
* Set mac for the ipd_port
*
* @param xipd_port ipd_port to set the mac
* @param bcst If set, accept all broadcast packets
* @param mcst Multicast mode
* 0 = Force reject all multicast packets
* 1 = Force accept all multicast packets
* 2 = use the address filter CAM.
* @param mac mac address for the ipd_port
*/
void cvmx_helper_bgx_set_mac(int xipd_port, int bcst, int mcst, u64 mac);
int __cvmx_helper_bgx_port_init(int xipd_port, int phy_pres);
void cvmx_helper_bgx_set_jabber(int xiface, unsigned int index, unsigned int size);
int cvmx_helper_bgx_shutdown_port(int xiface, int index);
int cvmx_bgx_set_backpressure_override(int xiface, unsigned int port_mask);
int __cvmx_helper_bgx_fifo_size(int xiface, unsigned int lmac);
/**
 * Returns if an interface is RGMII or not
 *
 * Only CN73XX models can report RGMII, and only on port index 0.
 *
 * @param xiface xinterface to check
 * @param index port index (must be 0 for rgmii)
 *
 * @return true if RGMII, false otherwise
 */
static inline bool cvmx_helper_bgx_is_rgmii(int xiface, int index)
{
	union cvmx_bgxx_cmrx_config cmr_config;
	if (!OCTEON_IS_MODEL(OCTEON_CN73XX) || index != 0)
		return false;
	/* LMAC type 5 in the CMR config identifies RGMII on this port */
	cmr_config.u64 = csr_rd(CVMX_BGXX_CMRX_CONFIG(index, xiface));
	return cmr_config.s.lmac_type == 5;
}
/**
* Probes the BGX Super Path (SMU/SPU) mode
*
* @param xiface global interface number
* @param index interface index
*
* @return true, if Super-MAC/PCS mode, false -- otherwise
*/
bool cvmx_helper_bgx_is_smu(int xiface, int index);
/**
* @INTERNAL
* Configure parameters of PAUSE packet.
*
* @param xipd_port Global IPD port (node + IPD port).
* @param smac Source MAC address.
* @param dmac Destination MAC address.
* @param type PAUSE packet type.
* @param time Pause time for PAUSE packets (number of 512 bit-times).
* @param interval Interval between PAUSE packets (number of 512 bit-times).
* @return Zero on success, negative on failure.
*/
int cvmx_bgx_set_pause_pkt_param(int xipd_port, u64 smac, u64 dmac, unsigned int type,
unsigned int time, unsigned int interval);
/**
* @INTERNAL
* Setup the BGX flow-control mode.
*
* @param xipd_port Global IPD port (node + IPD port).
* @param type Flow-control type/protocol.
* @param mode Flow-control mode.
* @return Zero on success, negative on failure.
*/
int cvmx_bgx_set_flowctl_mode(int xipd_port, cvmx_qos_proto_t qos, cvmx_qos_pkt_mode_t mode);
/**
* Enables or disables autonegotiation for an interface.
*
* @param xiface interface to set autonegotiation
* @param index port index
* @param enable true to enable autonegotiation, false to disable it
*
* @return 0 for success, -1 on error.
*/
int cvmx_helper_set_autonegotiation(int xiface, int index, bool enable);
/**
* Enables or disables forward error correction
*
* @param xiface interface
* @param index port index
* @param enable set to true to enable FEC, false to disable
*
* @return 0 for success, -1 on error
*
* @NOTE: If autonegotiation is enabled then autonegotiation will be
* restarted for negotiating FEC.
*/
int cvmx_helper_set_fec(int xiface, int index, bool enable);
#ifdef CVMX_DUMP_BGX
/**
* Dump BGX configuration for node 0
*/
int cvmx_dump_bgx_config(unsigned int bgx);
/**
* Dump BGX status for node 0
*/
int cvmx_dump_bgx_status(unsigned int bgx);
/**
* Dump BGX configuration
*/
int cvmx_dump_bgx_config_node(unsigned int node, unsigned int bgx);
/**
* Dump BGX status
*/
int cvmx_dump_bgx_status_node(unsigned int node, unsigned int bgx);
#endif
#endif

View File

@ -0,0 +1,558 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Helper functions to abstract board specific data about
* network ports from the rest of the cvmx-helper files.
*/
#ifndef __CVMX_HELPER_BOARD_H__
#define __CVMX_HELPER_BOARD_H__
#define CVMX_VSC7224_NAME_LEN 16
/** Board USB reference-clock wiring; numbers are MHz (implied by the names) */
typedef enum {
	USB_CLOCK_TYPE_REF_12,
	USB_CLOCK_TYPE_REF_24,
	USB_CLOCK_TYPE_REF_48,
	USB_CLOCK_TYPE_CRYSTAL_12,
} cvmx_helper_board_usb_clock_types_t;
/** Kinds of Ethernet PHY (or PHY-less setups) a board port may use */
typedef enum cvmx_phy_type {
	BROADCOM_GENERIC_PHY,
	MARVELL_GENERIC_PHY,
	CORTINA_PHY, /** Now Inphi */
	AQUANTIA_PHY,
	GENERIC_8023_C22_PHY, /** IEEE 802.3 clause-22 MDIO access */
	GENERIC_8023_C45_PHY, /** IEEE 802.3 clause-45 MDIO access */
	INBAND_PHY,
	QUALCOMM_S17,	     /** Qualcomm QCA833X switch */
	VITESSE_VSC8490_PHY, /** Vitesse VSC8490 is non-standard for SGMII */
	FAKE_PHY,	     /** Unsupported or no PHY, use GPIOs for LEDs */
} cvmx_phy_type_t;
/** Used to record the host mode used by the Cortina CS4321 PHY */
typedef enum {
	CVMX_PHY_HOST_MODE_UNKNOWN,
	CVMX_PHY_HOST_MODE_SGMII,
	CVMX_PHY_HOST_MODE_QSGMII,
	CVMX_PHY_HOST_MODE_XAUI,
	CVMX_PHY_HOST_MODE_RXAUI,
} cvmx_phy_host_mode_t;
/** Flag bits for PHY link setup; flow control is a 2-bit field at bit 1 */
typedef enum {
	set_phy_link_flags_autoneg = 0x1,
	set_phy_link_flags_flow_control_dont_touch = 0x0 << 1,
	set_phy_link_flags_flow_control_enable = 0x1 << 1,
	set_phy_link_flags_flow_control_disable = 0x2 << 1,
	set_phy_link_flags_flow_control_mask = 0x3 << 1,
} cvmx_helper_board_set_phy_link_flags_types_t;
/**
* The EBB6600 board uses a MDIO mux device to select between the two QLM
* modules since both QLM modules share the same PHY addresses. The
* MDIO mux is controlled via GPIO by a GPIO device that is also on
 * the TWSI bus rather than native OCTEON GPIO lines.
*
* To further complicate matters, the TWSI GPIO device sits behind
* a TWSI mux device as well, making accessing the MDIO devices on
* this board a very complex operation involving writing to the TWSI mux,
* followed by the MDIO mux device.
*/
/** Maximum number of GPIO devices used to control the MDIO mux */
#define CVMX_PHY_MUX_MAX_GPIO 2
/** Type of MDIO mux device, currently OTHER isn't supported */
typedef enum {
SN74CBTLV3253, /** SN74CBTLV3253 I2C device */
OTHER /** Unknown/other */
} cvmx_phy_mux_type_t;
/** Type of GPIO line controlling MDIO mux */
typedef enum {
GPIO_OCTEON, /** Native OCTEON */
GPIO_PCA8574 /** TWSI mux device */
} cvmx_phy_gpio_type_t;
/* Forward declarations */
struct cvmx_fdt_sfp_info; /** Defined in cvmx-helper-fdt.h */
struct cvmx_vsc7224;
struct cvmx_fdt_gpio_info; /** Defined in cvmx-helper-fdt.h */
struct cvmx_fdt_i2c_bus_info; /** Defined in cvmx-helper-fdt.h */
struct cvmx_phy_info;
struct cvmx_fdt_i2c_bus_info;
struct cvmx_fdt_gpio_info;
struct cvmx_fdt_gpio_led;
/**
 * @INTERNAL
 * This data structure is used when the port LEDs are wired up to Octeon's GPIO
 * lines instead of to a traditional PHY.
 */
struct cvmx_phy_gpio_leds {
	struct cvmx_phy_gpio_leds *next; /** For when ports are grouped together */
	u64 last_rx_count; /** Counters used to check for activity */
	u64 last_tx_count; /** Counters used to check for activity */
	u64 last_activity_poll_time; /** Last time activity was polled */
	u64 last_link_poll_time; /** Last time link was polled */
	int of_offset; /** Offset of this node in the device tree */
	int link_poll_interval_ms; /** Link polling interval in ms */
	int activity_poll_interval_ms; /** Activity polling interval in ms */
	struct cvmx_fdt_gpio_led *link_status;
	struct cvmx_fdt_gpio_led *error;
	struct cvmx_fdt_gpio_led *rx_activity;
	struct cvmx_fdt_gpio_led *tx_activity;
	struct cvmx_fdt_gpio_led *identify;
	struct cvmx_fdt_gpio_info *link_status_gpio;
	struct cvmx_fdt_gpio_info *error_gpio;
	/* NOTE(review): the two original comments here ("Type of GPIO for
	 * error LED" / "If GPIO expander, describe the bus to the expander")
	 * described fields that are not present in this struct -- confirm
	 * against cvmx_fdt_gpio_info and drop if stale.
	 */
	struct cvmx_fdt_gpio_info *rx_activity_gpio;
	struct cvmx_fdt_gpio_info *tx_activity_gpio;
	u16 rx_activity_hz; /** RX activity blink time in hz */
	u16 tx_activity_hz; /** TX activity blink time in hz */
	/* NOTE(review): the original comment here ("Set if activity and/or
	 * link is using an Inphi/Cortina CS4343 or compatible phy that
	 * requires software assistance. NULL if not used.") described a
	 * pointer field that is not present here -- confirm and drop.
	 */
	bool link_status_active_low; /** True if active link is active low */
	bool error_status_active_low; /** True if error LED is active low */
	bool error_active_low; /** True if error is active low */
	bool rx_activity_active_low; /** True if rx activity is active low */
	bool tx_activity_active_low; /** True if tx activity is active low */
	/** Set true if LEDs are shared on an interface by all ports */
	bool interface_leds;
	int8_t rx_gpio_timer; /** GPIO clock generator timer [0-3] */
	int8_t tx_gpio_timer; /** GPIO clock generator timer [0-3] */
	/** True if LOS signal activates error LED */
	bool los_generate_error;
	/** True if the error LED is hooked up to a GPIO expander */
	bool error_gpio_expander;
	/** True if the link and RX activity LEDs are shared */
	bool link_and_rx_activity_shared;
	/** True if the link and TX activity LEDs are shared */
	bool link_and_tx_activity_shared;
	/** True if the RX activity and TX activity LEDs are shared */
	bool rx_and_tx_activity_shared;
	/** True if link is driven directly by the hardware */
	bool link_led_hw_link;
	bool error_lit; /** True if ERROR LED is lit */
	bool quad_sfp_mode; /** True if using four SFPs for XLAUI */
	/** User-defined function to update the link LED */
	void (*update_link_led)(int xiface, int index, cvmx_helper_link_info_t result);
	/** User-defined function to update the rx activity LED */
	void (*update_rx_activity_led)(struct cvmx_phy_gpio_leds *led, int xiface, int index,
				       bool check_time);
};
/** This structure contains the tap values to use for various cable lengths.
 * Used as the trailing variable-length array in struct cvmx_vsc7224_chan.
 */
struct cvmx_vsc7224_tap {
	u16 len; /** Starting cable length for tap values */
	u16 main_tap; /** Main tap value to use */
	u16 pre_tap; /** Pre-tap value to use */
	u16 post_tap; /** Post-tap value to use */
};
/** Data structure for Microsemi VSC7224 channel */
struct cvmx_vsc7224_chan {
	struct cvmx_vsc7224_chan *next, *prev; /** Used for linking */
	int ipd_port; /** IPD port this channel belongs to */
	int xiface; /** xinterface of SFP */
	int index; /** Port index of SFP */
	int lane; /** Lane on port */
	int of_offset; /** Offset of channel info in dt */
	bool is_tx; /** True if is transmit channel */
	bool maintap_disable; /** True to disable the main tap */
	bool pretap_disable; /** True to disable pre-tap */
	bool posttap_disable; /** True to disable post-tap */
	int num_taps; /** Number of tap values */
	/** (Q)SFP attached to this channel */
	struct cvmx_fdt_sfp_info *sfp_info;
	struct cvmx_vsc7224 *vsc7224; /** Pointer to parent */
	/** Tap values for various lengths, must be at the end.
	 * C99 flexible array member (was the GNU zero-length array taps[0]);
	 * sizeof the struct is unchanged.
	 */
	struct cvmx_vsc7224_tap taps[];
};
/** Data structure for Microsemi VSC7224 reclocking chip */
struct cvmx_vsc7224 {
	const char *name; /** Name */
	/** Pointer to per-channel data (up to 4 channels) */
	struct cvmx_vsc7224_chan *channel[4];
	/** I2C bus device is connected to */
	struct cvmx_fdt_i2c_bus_info *i2c_bus;
	/** Address of VSC7224 on i2c bus */
	int i2c_addr;
	struct cvmx_fdt_gpio_info *los_gpio; /** LoS GPIO pin */
	struct cvmx_fdt_gpio_info *reset_gpio; /** Reset GPIO pin */
	int of_offset; /** Offset in device tree */
};
/** Data structure for Avago AVSP5410 gearbox phy */
struct cvmx_avsp5410 {
	const char *name; /** Name */
	/** I2C bus device is connected to */
	struct cvmx_fdt_i2c_bus_info *i2c_bus;
	/** Address of AVSP5410 on i2c bus */
	int i2c_addr;
	int of_offset; /** Offset in device tree */
	int ipd_port; /** IPD port this phy belongs to */
	int xiface; /** xinterface of SFP */
	int index; /** Port index of SFP */
	u64 prev_temp; /** Previous temperature recorded on the PHY core */
	u64 prev_temp_mins; /** Minutes at which the previous temp check was done */
	/** (Q)SFP attached to this phy */
	struct cvmx_fdt_sfp_info *sfp_info;
};
struct cvmx_cs4343_info;
/**
 * @INTERNAL
 *
 * Data structure containing Inphi CS4343 slice information
 */
struct cvmx_cs4343_slice_info {
	const char *name; /** Name of this slice in device tree */
	struct cvmx_cs4343_info *mphy; /** Pointer to mphy cs4343 */
	struct cvmx_phy_info *phy_info; /** Associated PHY information */
	int of_offset; /** Offset in device tree */
	int slice_no; /** Slice number */
	int reg_offset; /** Offset for this slice */
	u16 sr_stx_cmode_res; /** See Rainier device tree */
	u16 sr_stx_drv_lower_cm; /** See Rainier device tree */
	u16 sr_stx_level; /** See Rainier device tree */
	u16 sr_stx_pre_peak; /** See Rainier device tree */
	u16 sr_stx_muxsubrate_sel; /** See Rainier device tree */
	u16 sr_stx_post_peak; /** See Rainier device tree */
	u16 cx_stx_cmode_res; /** See Rainier device tree */
	u16 cx_stx_drv_lower_cm; /** See Rainier device tree */
	u16 cx_stx_level; /** See Rainier device tree */
	u16 cx_stx_pre_peak; /** See Rainier device tree */
	u16 cx_stx_muxsubrate_sel; /** See Rainier device tree */
	u16 cx_stx_post_peak; /** See Rainier device tree */
	u16 basex_stx_cmode_res; /** See Rainier device tree */
	u16 basex_stx_drv_lower_cm; /** See Rainier device tree */
	u16 basex_stx_level; /** See Rainier device tree */
	u16 basex_stx_pre_peak; /** See Rainier device tree */
	u16 basex_stx_muxsubrate_sel; /** See Rainier device tree */
	u16 basex_stx_post_peak; /** See Rainier device tree */
	int link_gpio; /** Link LED gpio pin number, -1 if none */
	int error_gpio; /** Error LED GPIO pin or -1 if none */
	int los_gpio; /** LoS input GPIO or -1 if none */
	bool los_inverted; /** True if LoS input is inverted */
	bool link_inverted; /** True if link output is inverted */
	bool error_inverted; /** True if error output is inverted */
};
/**
 * @INTERNAL
 *
 * Data structure for Cortina/Inphi CS4343 device
 */
struct cvmx_cs4343_info {
	const char *name; /** Name of Inphi/Cortina CS4343 in DT */
	struct cvmx_phy_info *phy_info; /** Associated PHY information */
	struct cvmx_cs4343_slice_info slice[4]; /** Slice information */
	int of_offset; /** Offset in device tree */
};
/**
 * @INTERNAL
 * This data structure is used to hold PHY information and is subject to change.
 * Please do not use this data structure directly.
 *
 * NOTE: The U-Boot OCTEON Ethernet drivers depend on this data structure for
 * the mux support.
 */
typedef struct cvmx_phy_info {
	int phy_addr; /** MDIO address of PHY */
	int phy_sub_addr; /** Sub-address (i.e. slice), used by Cortina */
	int ipd_port; /** IPD port number for the PHY */
	/** MDIO bus PHY connected to (even if behind mux) */
	int mdio_unit;
	int direct_connect; /** 1 if PHY is directly connected */
	int gpio[CVMX_PHY_MUX_MAX_GPIO]; /** GPIOs used to control mux, -1 if not used */
	/** Type of GPIO. It can be a local OCTEON GPIO or a TWSI GPIO */
	cvmx_phy_gpio_type_t gpio_type[CVMX_PHY_MUX_MAX_GPIO];
	/** Address of TWSI GPIO */
	int cvmx_gpio_twsi[CVMX_PHY_MUX_MAX_GPIO];
	/** Value to put into the GPIO lines to select MDIO bus */
	int gpio_value;
	int gpio_parent_mux_twsi; /** -1 if not used, parent TWSI mux for ebb6600 */
	int gpio_parent_mux_select; /** selector to use on parent TWSI mux */
	cvmx_phy_type_t phy_type; /** Type of PHY */
	cvmx_phy_mux_type_t mux_type; /** Type of MDIO mux */
	int mux_twsi_addr; /** Address of the MDIO mux */
	cvmx_phy_host_mode_t host_mode; /** Used by Cortina PHY */
	void *phydev; /** Pointer to parent phy device */
	int fdt_offset; /** Node in flat device tree */
	int phy_i2c_bus; /** I2C bus for reclocking chips */
	int phy_i2c_addr; /** I2C address of reclocking chip */
	int num_vsc7224; /** Number of Microsemi VSC7224 devices */
	struct cvmx_vsc7224 *vsc7224; /** Info for VSC7224 devices */
	/** SFP/QSFP descriptor */
	struct cvmx_fdt_sfp_info *sfp_info;
	/** CS4343 slice information for SGMII/XFI. This is NULL in XLAUI mode */
	struct cvmx_cs4343_slice_info *cs4343_slice_info;
	/** CS4343 mphy information for XLAUI */
	struct cvmx_cs4343_info *cs4343_info;
	/** Pointer to function to return link information */
	cvmx_helper_link_info_t (*link_function)(struct cvmx_phy_info *phy_info);
	/**
	 * If there are LEDs driven by GPIO lines instead of by a PHY device
	 * then they are described here, otherwise gpio_leds should be NULL.
	 */
	struct cvmx_phy_gpio_leds *gpio_leds;
} cvmx_phy_info_t;
/* Fake IPD port, the RGMII/MII interface may use different PHY, use this
macro to return appropriate MIX address to read the PHY. */
#define CVMX_HELPER_BOARD_MGMT_IPD_PORT -10
/**
* Return the MII PHY address associated with the given IPD
* port. A result of -1 means there isn't a MII capable PHY
* connected to this port. On chips supporting multiple MII
* busses the bus number is encoded in bits <15:8>.
*
* This function must be modified for every new Octeon board.
* Internally it uses switch statements based on the cvmx_sysinfo
* data to determine board types and revisions. It relies on the
* fact that every Octeon board receives a unique board type
* enumeration from the bootloader.
*
* @param ipd_port Octeon IPD port to get the MII address for.
*
* @return MII PHY address and bus number or -1.
*/
int cvmx_helper_board_get_mii_address(int ipd_port);
/**
* This function as a board specific method of changing the PHY
* speed, duplex, and autonegotiation. This programs the PHY and
* not Octeon. This can be used to force Octeon's links to
* specific settings.
*
* @param phy_addr The address of the PHY to program
* @param link_flags
* Flags to control autonegotiation. Bit 0 is autonegotiation
* enable/disable to maintain backward compatibility.
* @param link_info Link speed to program. If the speed is zero and autonegotiation
* is enabled, all possible negotiation speeds are advertised.
*
* @return Zero on success, negative on failure
*/
int cvmx_helper_board_link_set_phy(int phy_addr,
cvmx_helper_board_set_phy_link_flags_types_t link_flags,
cvmx_helper_link_info_t link_info);
/**
* @INTERNAL
* This function is the board specific method of determining an
* ethernet ports link speed. Most Octeon boards have Marvell PHYs
* and are handled by the fall through case. This function must be
* updated for boards that don't have the normal Marvell PHYs.
*
* This function must be modified for every new Octeon board.
* Internally it uses switch statements based on the cvmx_sysinfo
* data to determine board types and revisions. It relies on the
* fact that every Octeon board receives a unique board type
* enumeration from the bootloader.
*
* @param ipd_port IPD input port associated with the port we want to get link
* status for.
*
* @return The ports link status. If the link isn't fully resolved, this must
* return zero.
*/
cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port);
/**
* @INTERNAL
* This function is called by cvmx_helper_interface_probe() after it
* determines the number of ports Octeon can support on a specific
* interface. This function is the per board location to override
* this value. It is called with the number of ports Octeon might
* support and should return the number of actual ports on the
* board.
*
* This function must be modified for every new Octeon board.
* Internally it uses switch statements based on the cvmx_sysinfo
* data to determine board types and revisions. It relies on the
* fact that every Octeon board receives a unique board type
* enumeration from the bootloader.
*
* @param interface Interface to probe
* @param supported_ports
* Number of ports Octeon supports.
*
* @return Number of ports the actual board supports. Many times this will
* simple be "support_ports".
*/
int __cvmx_helper_board_interface_probe(int interface, int supported_ports);
/**
* @INTERNAL
* Enable packet input/output from the hardware. This function is
* called after by cvmx_helper_packet_hardware_enable() to
* perform board specific initialization. For most boards
* nothing is needed.
*
* @param interface Interface to enable
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_board_hardware_enable(int interface);
/**
* @INTERNAL
* Gets the clock type used for the USB block based on board type.
* Used by the USB code for auto configuration of clock type.
*
* @return USB clock type enumeration
*/
cvmx_helper_board_usb_clock_types_t __cvmx_helper_board_usb_get_clock_type(void);
/**
* @INTERNAL
* Adjusts the number of available USB ports on Octeon based on board
* specifics.
*
* @param supported_ports expected number of ports based on chip type;
*
*
* @return number of available usb ports, based on board specifics.
* Return value is supported_ports if function does not
* override.
*/
int __cvmx_helper_board_usb_get_num_ports(int supported_ports);
/**
* @INTERNAL
* Returns if a port is present on an interface
*
 * @param fdt_addr - address of flat device tree
* @param ipd_port - IPD port number
*
* @return 1 if port is present, 0 if not present, -1 if error
*/
int __cvmx_helper_board_get_port_from_dt(void *fdt_addr, int ipd_port);
/**
* Return the host mode for the PHY. Currently only the Cortina CS4321 PHY
* needs this.
*
* @param ipd_port - ipd port number to get the host mode for
*
* @return host mode for phy
*/
cvmx_phy_host_mode_t cvmx_helper_board_get_phy_host_mode(int ipd_port);
/**
* @INTERNAL
* This function outputs the cvmx_phy_info_t data structure for the specified
* port.
*
 * @param[out] phy_info phy info data structure
* @param ipd_port - port to get phy info for
*
* @return 0 for success, -1 if info not available
*
* NOTE: The phy_info data structure is subject to change.
*/
int cvmx_helper_board_get_phy_info(cvmx_phy_info_t *phy_info, int ipd_port);
/**
* @INTERNAL
* Parse the device tree and set whether a port is valid or not.
*
* @param fdt_addr Pointer to device tree
*
* @return 0 for success, -1 on error.
*/
int __cvmx_helper_parse_bgx_dt(const void *fdt_addr);
/**
* @INTERNAL
* Parse the device tree and set whether a port is valid or not.
*
* @param fdt_addr Pointer to device tree
*
* @return 0 for success, -1 on error.
*/
int __cvmx_helper_parse_bgx_rgmii_dt(const void *fdt_addr);
/**
* @INTERNAL
* Updates any GPIO link LEDs if present
*
* @param xiface Interface number
* @param index Port index
* @param result Link status result
*/
void cvmx_helper_update_link_led(int xiface, int index, cvmx_helper_link_info_t result);
/**
* Update the RX activity LED for the specified interface and port index
*
* @param xiface Interface number
* @param index Port index
 * @param check_time True if we should bail out before the polling interval
*/
void cvmx_update_rx_activity_led(int xiface, int index, bool check_time);
/**
* @INTERNAL
* Figure out which mod_abs changed function to use based on the phy type
*
* @param xiface xinterface number
* @param index port index on interface
*
* @return 0 for success, -1 on error
*
* This function figures out the proper mod_abs_changed function to use and
* registers the appropriate function. This should be called after the device
* tree has been fully parsed for the given port as well as after all SFP
* slots and any Microsemi VSC7224 devices have been parsed in the device tree.
*/
int cvmx_helper_phy_register_mod_abs_changed(int xiface, int index);
/**
* @INTERNAL
* Return loss of signal
*
* @param xiface xinterface number
* @param index port index on interface
*
* @return 0 if signal present, 1 if loss of signal.
*
* @NOTE: A result of 0 is possible in some cases where the signal is
* not present.
*
* This is for use with __cvmx_qlm_rx_equilization
*/
int __cvmx_helper_get_los(int xiface, int index);
/**
* Given the address of the MDIO registers, output the CPU node and MDIO bus
*
* @param addr 64-bit address of MDIO registers (from device tree)
* @param[out] node CPU node number (78xx)
* @param[out] bus MDIO bus number
*/
void __cvmx_mdio_addr_to_node_bus(u64 addr, int *node, int *bus);
/**
* Turn on the error LED
*
* @param leds LEDs belonging to port
* @param error true to turn on LED, false to turn off
*/
void cvmx_helper_leds_show_error(struct cvmx_phy_gpio_leds *leds, bool error);
#endif /* __CVMX_HELPER_BOARD_H__ */

/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Helper Functions for the Configuration Framework
*
* OCTEON_CN68XX introduces a flexible hw interface configuration
* scheme. To cope with this change and the requirements of
* configurability for other system resources, e.g., IPD/PIP pknd and
* PKO ports and queues, a configuration framework for the SDK is
* designed. It has two goals: first to recognize and establish the
* default configuration and, second, to allow the user to define key
* parameters in a high-level language.
*
* The helper functions query the QLM setup to help achieving the
* first goal.
*
* The second goal is accomplished by generating
 * cvmx_helper_cfg_init() from a high-level language.
*/
#ifndef __CVMX_HELPER_CFG_H__
#define __CVMX_HELPER_CFG_H__
#include "cvmx-helper-util.h"
#define CVMX_HELPER_CFG_MAX_PKO_PORT 128
#define CVMX_HELPER_CFG_MAX_PIP_BPID 64
#define CVMX_HELPER_CFG_MAX_PIP_PKND 64
#define CVMX_HELPER_CFG_MAX_PKO_QUEUES 256
#define CVMX_HELPER_CFG_MAX_PORT_PER_IFACE 256
#define CVMX_HELPER_CFG_INVALID_VALUE -1
/**
 * Soft assertion for the configuration framework: when @cond evaluates
 * to false, log the failing condition and its source location via
 * debug(). Execution continues — this does not abort.
 */
#define cvmx_helper_cfg_assert(cond)                                                               \
	do {                                                                                       \
		if (!(cond)) {                                                                     \
			debug("cvmx_helper_cfg_assert (%s) at %s:%d\n", #cond, __FILE__,           \
			      __LINE__);                                                           \
		}                                                                                  \
	} while (0)
extern int cvmx_npi_max_pknds;
/*
* Config Options
*
* These options have to be set via cvmx_helper_cfg_opt_set() before calling the
* routines that set up the hw. These routines process the options and set them
* correctly to take effect at runtime.
*/
enum cvmx_helper_cfg_option {
	CVMX_HELPER_CFG_OPT_USE_DWB, /*
				      * Global option to control if
				      * the SDK configures units (DMA,
				      * SSO, and PKO) to send don't
				      * write back (DWB) requests for
				      * freed buffers. Set to 1/0 to
				      * enable/disable DWB.
				      *
				      * For programs that fit inside
				      * L2, sending DWB just causes
				      * more L2 operations without
				      * benefit.
				      */
	CVMX_HELPER_CFG_OPT_MAX /* Number of options; must remain last */
};
typedef enum cvmx_helper_cfg_option cvmx_helper_cfg_option_t;
struct cvmx_phy_info;
struct cvmx_fdt_sfp_info;
struct cvmx_vsc7224_chan;
struct phy_device;
/** Per-port SRIO SerDes tuning parameters (bitfields sized to the hw fields) */
struct cvmx_srio_port_param {
	/** True to override SRIO CTLE zero setting */
	bool srio_rx_ctle_zero_override : 1;
	/** Equalization peaking control (default 6) */
	u8 srio_rx_ctle_zero : 4;
	/** Set true to override CTLE taps */
	bool srio_rx_ctle_agc_override : 1;
	u8 srio_rx_agc_pre_ctle : 4; /** AGC pre-CTLE gain */
	u8 srio_rx_agc_post_ctle : 4; /** AGC post-CTLE gain */
	bool srio_tx_swing_override : 1; /** True to override TX Swing */
	u8 srio_tx_swing : 5; /** TX Swing */
	bool srio_tx_gain_override : 1; /** True to override TX gain */
	u8 srio_tx_gain : 3; /** TX gain */
	bool srio_tx_premptap_override : 1; /** True to override premptap values */
	u8 srio_tx_premptap_pre : 4; /** Pre premptap value */
	u8 srio_tx_premptap_post : 5; /** Post premptap value */
	bool srio_tx_vboost_override : 1; /** True to override TX vboost setting */
	bool srio_tx_vboost : 1; /** vboost setting (default 1) */
};
/*
* Per physical port
* Note: This struct is passed between linux and SE apps.
*/
/*
 * Per physical port
 * Note: This struct is passed between linux and SE apps.
 */
struct cvmx_cfg_port_param {
	int port_fdt_node; /** Node offset in FDT of node */
	int phy_fdt_node; /** Node offset in FDT of PHY */
	struct cvmx_phy_info *phy_info; /** Data structure with PHY information */
	int8_t ccpp_pknd; /** Configured port kind (pknd) for the port */
	int8_t ccpp_bpid; /** Configured backpressure ID (bpid) for the port */
	int8_t ccpp_pko_port_base; /** Configured pko_port base for the port */
	int8_t ccpp_pko_num_ports; /** Configured number of pko_ports for the port */
	u8 agl_rx_clk_skew; /** AGL rx clock skew setting (default 0) */
	u8 rgmii_tx_clk_delay; /** RGMII TX clock delay value if not bypassed */
	bool valid : 1; /** 1 = port valid, 0 = invalid */
	bool sgmii_phy_mode : 1; /** 1 = port in PHY mode, 0 = MAC mode */
	bool sgmii_1000x_mode : 1; /** 1 = 1000Base-X mode, 0 = SGMII mode */
	bool agl_rx_clk_delay_bypass : 1; /** 1 = use rx clock delay bypass for AGL mode */
	bool force_link_up : 1; /** Ignore PHY and always report link up */
	bool disable_an : 1; /** true to disable autonegotiation */
	bool link_down_pwr_dn : 1; /** Power PCS off when link is down */
	bool phy_present : 1; /** true if PHY is present */
	bool tx_clk_delay_bypass : 1; /** True to bypass the TX clock delay */
	bool enable_fec : 1; /** True to enable FEC for 10/40G links */
	/** Settings for short-run SRIO host */
	struct cvmx_srio_port_param srio_short;
	/** Settings for long-run SRIO host */
	struct cvmx_srio_port_param srio_long;
	u8 agl_refclk_sel; /** RGMII refclk select to use */
	/** Set if local (non-PHY) LEDs are used */
	struct cvmx_phy_gpio_leds *gpio_leds;
	struct cvmx_fdt_sfp_info *sfp_info; /** SFP+/QSFP info for port */
	/** Offset of SFP/SFP+/QSFP slot in device tree */
	int sfp_of_offset;
	/** Microsemi VSC7224 channel info data structure */
	struct cvmx_vsc7224_chan *vsc7224_chan;
	/** Avago AVSP-5410 Phy */
	struct cvmx_avsp5410 *avsp5410;
	struct phy_device *phydev;
};
/*
* Per pko_port
*/
struct cvmx_cfg_pko_port_param {
	s16 ccppp_queue_base; /** Configured pko_queue base for this pko_port */
	s16 ccppp_num_queues; /** Configured number of pko_queues for this pko_port */
};
/*
* A map from pko_port to
* interface,
* index, and
* pko engine id
*/
struct cvmx_cfg_pko_port_map {
	s16 ccppl_interface; /** Interface the pko_port is configured for */
	s16 ccppl_index; /** Index of the port within the interface */
	s16 ccppl_eid; /** PKO engine id (pko_eid) of the pko_port */
};
/*
* This is for looking up pko_base_port and pko_nport for ipd_port
*/
struct cvmx_cfg_pko_port_pair {
	int8_t ccppp_base_port; /** pko_base_port for the ipd_port */
	int8_t ccppp_nports; /** pko_nport (number of pko ports) for the ipd_port */
};
/** User-supplied static PKO queue configuration, in pknd or non-pknd layout */
typedef union cvmx_user_static_pko_queue_config {
	struct {
		/** Per-interface PKO queue settings */
		struct pko_queues_cfg {
			unsigned queues_per_port : 11, qos_enable : 1, pfc_enable : 1;
		} pko_cfg_iface[6];
		struct pko_queues_cfg pko_cfg_loop; /** Loopback interface settings */
		struct pko_queues_cfg pko_cfg_npi; /** NPI interface settings */
	} pknd;
	struct {
		u8 pko_ports_per_interface[5];
		u8 pko_queues_per_port_interface[5];
		u8 pko_queues_per_port_loop;
		u8 pko_queues_per_port_pci;
		u8 pko_queues_per_port_srio[4];
	} non_pknd;
} cvmx_user_static_pko_queue_config_t;
extern cvmx_user_static_pko_queue_config_t __cvmx_pko_queue_static_config[CVMX_MAX_NODES];
extern struct cvmx_cfg_pko_port_map cvmx_cfg_pko_port_map[CVMX_HELPER_CFG_MAX_PKO_PORT];
extern struct cvmx_cfg_port_param cvmx_cfg_port[CVMX_MAX_NODES][CVMX_HELPER_MAX_IFACE]
[CVMX_HELPER_CFG_MAX_PORT_PER_IFACE];
extern struct cvmx_cfg_pko_port_param cvmx_pko_queue_table[];
extern int cvmx_enable_helper_flag;
/*
* @INTERNAL
* Return configured pknd for the port
*
* @param interface the interface number
* @param index the port's index number
* @return the pknd
*/
int __cvmx_helper_cfg_pknd(int interface, int index);
/*
* @INTERNAL
* Return the configured bpid for the port
*
* @param interface the interface number
* @param index the port's index number
* @return the bpid
*/
int __cvmx_helper_cfg_bpid(int interface, int index);
/**
* @INTERNAL
* Return the configured pko_port base for the port
*
* @param interface the interface number
* @param index the port's index number
* @return the pko_port base
*/
int __cvmx_helper_cfg_pko_port_base(int interface, int index);
/*
* @INTERNAL
* Return the configured number of pko_ports for the port
*
* @param interface the interface number
* @param index the port's index number
* @return the number of pko_ports
*/
int __cvmx_helper_cfg_pko_port_num(int interface, int index);
/*
* @INTERNAL
* Return the configured pko_queue base for the pko_port
*
* @param pko_port
* @return the pko_queue base
*/
int __cvmx_helper_cfg_pko_queue_base(int pko_port);
/*
* @INTERNAL
* Return the configured number of pko_queues for the pko_port
*
* @param pko_port
* @return the number of pko_queues
*/
int __cvmx_helper_cfg_pko_queue_num(int pko_port);
/*
* @INTERNAL
* Return the interface the pko_port is configured for
*
* @param pko_port
* @return the interface for the pko_port
*/
int __cvmx_helper_cfg_pko_port_interface(int pko_port);
/*
* @INTERNAL
* Return the index of the port the pko_port is configured for
*
* @param pko_port
* @return the index of the port
*/
int __cvmx_helper_cfg_pko_port_index(int pko_port);
/*
* @INTERNAL
* Return the pko_eid of the pko_port
*
* @param pko_port
* @return the pko_eid
*/
int __cvmx_helper_cfg_pko_port_eid(int pko_port);
/*
* @INTERNAL
* Return the max# of pko queues allocated.
*
* @return the max# of pko queues
*
* Note: there might be holes in the queue space depending on user
* configuration. The function returns the highest queue's index in
* use.
*/
int __cvmx_helper_cfg_pko_max_queue(void);
/*
* @INTERNAL
* Return the max# of PKO DMA engines allocated.
*
* @return the max# of DMA engines
*
* NOTE: the DMA engines are allocated contiguously and starting from
* 0.
*/
int __cvmx_helper_cfg_pko_max_engine(void);
/*
* Get the value set for the config option ``opt''.
*
* @param opt is the config option.
* @return the value set for the option
*
* LR: only used for DWB in NPI, POW, PKO1
*/
u64 cvmx_helper_cfg_opt_get(cvmx_helper_cfg_option_t opt);
/*
* Set the value for a config option.
*
* @param opt is the config option.
* @param val is the value to set for the opt.
* @return 0 for success and -1 on error
*
* Note an option here is a config-time parameter and this means that
* it has to be set before calling the corresponding setup functions
* that actually sets the option in hw.
*
* LR: Not used.
*/
int cvmx_helper_cfg_opt_set(cvmx_helper_cfg_option_t opt, u64 val);
/*
* Retrieve the pko_port base given ipd_port.
*
* @param ipd_port is the IPD eport
* @return the corresponding PKO port base for the physical port
* represented by the IPD eport or CVMX_HELPER_CFG_INVALID_VALUE.
*/
int cvmx_helper_cfg_ipd2pko_port_base(int ipd_port);
/*
* Retrieve the number of pko_ports given ipd_port.
*
* @param ipd_port is the IPD eport
* @return the corresponding number of PKO ports for the physical port
* represented by IPD eport or CVMX_HELPER_CFG_INVALID_VALUE.
*/
int cvmx_helper_cfg_ipd2pko_port_num(int ipd_port);
/*
* @INTERNAL
* The init function
*
* @param node
* @return 0 for success.
*
* Note: this function is meant to be called to set the ``configured
* parameters,'' e.g., pknd, bpid, etc. and therefore should be before
* any of the corresponding cvmx_helper_cfg_xxxx() functions are
* called.
*/
int __cvmx_helper_init_port_config_data(int node);
/*
* @INTERNAL
* The local init function
*
* @param none
* @return 0 for success.
*
* Note: this function is meant to be called to set the ``configured
* parameters locally,'' e.g., pknd, bpid, etc. and therefore should be before
* any of the corresponding cvmx_helper_cfg_xxxx() functions are
* called.
*/
int __cvmx_helper_init_port_config_data_local(void);
/*
* Set the frame max size and jabber size to 65535.
*
*/
void cvmx_helper_cfg_set_jabber_and_frame_max(void);
/*
* Enable storing short packets only in the WQE.
*/
void cvmx_helper_cfg_store_short_packets_in_wqe(void);
/*
* Allocated a block of internal ports and queues for the specified
* interface/port
*
* @param interface the interface for which the internal ports and queues
* are requested
* @param port the index of the port within in the interface for which
the internal ports and queues are requested.
* @param pot_count the number of internal ports requested
* @param queue_cnt the number of queues requested for each of the internal
* port. This call will allocate a total of
* (port_cnt * queue_cnt) queues
*
* @return 0 on success
* -1 on failure
*
* LR: Called ONLY from comfig-parse!
*/
int cvmx_pko_alloc_iport_and_queues(int interface, int port, int port_cnt, int queue_cnt);
/*
* Free the queues that are associated with the specified port
*
* @param port the internal port for which the queues are freed.
*
* @return 0 on success
* -1 on failure
*/
int cvmx_pko_queue_free(u64 port);
/*
* Initializes the pko queue range data structure.
* @return 0 on success
* -1 on failure
*/
int init_cvmx_pko_que_range(void);
/*
 * Frees up all the allocated queues.
*/
void cvmx_pko_queue_free_all(void);
/**
* Returns if port is valid for a given interface
*
* @param xiface interface to check
* @param index port index in the interface
*
* @return status of the port present or not.
*/
int cvmx_helper_is_port_valid(int xiface, int index);
/**
* Set whether or not a port is valid
*
* @param interface interface to set
* @param index port index to set
* @param valid set 0 to make port invalid, 1 for valid
*/
void cvmx_helper_set_port_valid(int interface, int index, bool valid);
/**
* @INTERNAL
* Return if port is in PHY mode
*
* @param interface the interface number
* @param index the port's index number
*
* @return 1 if port is in PHY mode, 0 if port is in MAC mode
*/
bool cvmx_helper_get_mac_phy_mode(int interface, int index);
void cvmx_helper_set_mac_phy_mode(int interface, int index, bool valid);
/**
* @INTERNAL
* Return if port is in 1000Base X mode
*
* @param interface the interface number
* @param index the port's index number
*
* @return 1 if port is in 1000Base X mode, 0 if port is in SGMII mode
*/
bool cvmx_helper_get_1000x_mode(int interface, int index);
void cvmx_helper_set_1000x_mode(int interface, int index, bool valid);
/**
* @INTERNAL
* Return if an AGL port should bypass the RX clock delay
*
* @param interface the interface number
* @param index the port's index number
*/
bool cvmx_helper_get_agl_rx_clock_delay_bypass(int interface, int index);
void cvmx_helper_set_agl_rx_clock_delay_bypass(int interface, int index, bool valid);
/**
* @INTERNAL
* Forces a link to always return that it is up ignoring the PHY (if present)
*
* @param interface the interface number
* @param index the port's index
*/
bool cvmx_helper_get_port_force_link_up(int interface, int index);
void cvmx_helper_set_port_force_link_up(int interface, int index, bool value);
/**
* @INTERNAL
* Return true if PHY is present to the passed xiface
*
* @param xiface the interface number
* @param index the port's index
*/
bool cvmx_helper_get_port_phy_present(int xiface, int index);
void cvmx_helper_set_port_phy_present(int xiface, int index, bool value);
/**
* @INTERNAL
* Return the AGL port rx clock skew, only used
* if agl_rx_clock_delay_bypass is set.
*
* @param interface the interface number
* @param index the port's index number
*/
u8 cvmx_helper_get_agl_rx_clock_skew(int interface, int index);
void cvmx_helper_set_agl_rx_clock_skew(int interface, int index, u8 value);
u8 cvmx_helper_get_agl_refclk_sel(int interface, int index);
void cvmx_helper_set_agl_refclk_sel(int interface, int index, u8 value);
/**
* @INTERNAL
* Store the FDT node offset in the device tree of a port
*
* @param xiface node and interface
* @param index port index
* @param node_offset node offset to store
*/
void cvmx_helper_set_port_fdt_node_offset(int xiface, int index, int node_offset);
/**
* @INTERNAL
* Return the FDT node offset in the device tree of a port
*
* @param xiface node and interface
* @param index port index
* @return node offset of port or -1 if invalid
*/
int cvmx_helper_get_port_fdt_node_offset(int xiface, int index);
/**
* @INTERNAL
* Store the FDT node offset in the device tree of a phy
*
* @param xiface node and interface
* @param index port index
* @param node_offset node offset to store
*/
void cvmx_helper_set_phy_fdt_node_offset(int xiface, int index, int node_offset);
/**
* @INTERNAL
* Return the FDT node offset in the device tree of a phy
*
* @param xiface node and interface
* @param index port index
* @return node offset of phy or -1 if invalid
*/
int cvmx_helper_get_phy_fdt_node_offset(int xiface, int index);
/**
* @INTERNAL
* Override default autonegotiation for a port
*
* @param xiface node and interface
* @param index port index
* @param enable true to enable autonegotiation, false to force full
* duplex, full speed.
*/
void cvmx_helper_set_port_autonegotiation(int xiface, int index, bool enable);
/**
* @INTERNAL
* Returns if autonegotiation is enabled or not.
*
* @param xiface node and interface
* @param index port index
*
* @return 0 if autonegotiation is disabled, 1 if enabled.
*/
bool cvmx_helper_get_port_autonegotiation(int xiface, int index);
/**
* @INTERNAL
* Returns if forward error correction is enabled or not.
*
* @param xiface node and interface
* @param index port index
*
* @return 0 if fec is disabled, 1 if enabled.
*/
bool cvmx_helper_get_port_fec(int xiface, int index);
/**
* @INTERNAL
* Override default forward error correction for a port
*
* @param xiface node and interface
* @param index port index
* @param enable true to enable fec, false to disable.
*/
void cvmx_helper_set_port_fec(int xiface, int index, bool enable);
/**
* @INTERNAL
* Configure the SRIO RX interface AGC settings in host mode
*
* @param xiface node and interface
* @param index lane
* @param long_run true for long run, false for short run
* @param agc_override true to put AGC in manual mode
* @param ctle_zero RX equalizer peaking control (default 0x6)
* @param agc_pre_ctle AGC pre-CTLE gain (default 0x5)
* @param agc_post_ctle AGC post-CTLE gain (default 0x4)
*
* NOTE: This must be called before SRIO is initialized to take effect
*/
void cvmx_helper_set_srio_rx(int xiface, int index, bool long_run, bool ctle_zero_override,
u8 ctle_zero, bool agc_override, u8 agc_pre_ctle, u8 agc_post_ctle);
/**
* @INTERNAL
* Get the SRIO RX interface AGC settings for host mode
*
* @param xiface node and interface
* @param index lane
* @param long_run true for long run, false for short run
* @param[out] ctle_zero_override true if overridden
* @param[out] ctle_zero RX equalizer peaking control (default 0x6)
* @param[out] agc_override true to put AGC in manual mode
* @param[out] agc_pre_ctle AGC pre-CTLE gain (default 0x5)
* @param[out] agc_post_ctle AGC post-CTLE gain (default 0x4)
*/
void cvmx_helper_get_srio_rx(int xiface, int index, bool long_run, bool *ctle_zero_override,
u8 *ctle_zero, bool *agc_override, u8 *agc_pre_ctle,
u8 *agc_post_ctle);
/**
* @INTERNAL
* Configure the SRIO TX interface for host mode
*
* @param xiface node and interface
* @param index lane
* @param long_run true for long run, false for short run
* @param tx_swing tx swing value to use (default 0x7), -1 to not
* override.
* @param tx_gain PCS SDS TX gain (default 0x3), -1 to not
* override
* @param tx_premptap_override true to override preemphasis control
* @param tx_premptap_pre preemphasis pre tap value (default 0x0)
* @param tx_premptap_post preemphasis post tap value (default 0xF)
* @param tx_vboost vboost enable (1 = enable, -1 = don't override)
* hardware default is 1.
*
* NOTE: This must be called before SRIO is initialized to take effect
*/
void cvmx_helper_set_srio_tx(int xiface, int index, bool long_run, int tx_swing, int tx_gain,
bool tx_premptap_override, u8 tx_premptap_pre, u8 tx_premptap_post,
int tx_vboost);
/**
* @INTERNAL
* Get the SRIO TX interface settings for host mode
*
* @param xiface node and interface
* @param index lane
* @param long_run true for long run, false for short run
* @param[out] tx_swing_override true to override pcs_sds_txX_swing
* @param[out] tx_swing tx swing value to use (default 0x7)
* @param[out] tx_gain_override true to override default gain
* @param[out] tx_gain PCS SDS TX gain (default 0x3)
* @param[out] tx_premptap_override true to override preemphasis control
* @param[out] tx_premptap_pre preemphasis pre tap value (default 0x0)
* @param[out] tx_premptap_post preemphasis post tap value (default 0xF)
* @param[out] tx_vboost_override override vboost setting
* @param[out] tx_vboost vboost enable (default true)
*/
void cvmx_helper_get_srio_tx(int xiface, int index, bool long_run, bool *tx_swing_override,
u8 *tx_swing, bool *tx_gain_override, u8 *tx_gain,
bool *tx_premptap_override, u8 *tx_premptap_pre, u8 *tx_premptap_post,
bool *tx_vboost_override, bool *tx_vboost);
/**
* @INTERNAL
* Sets the PHY info data structure
*
* @param xiface node and interface
* @param index port index
* @param[in] phy_info phy information data structure pointer
*/
void cvmx_helper_set_port_phy_info(int xiface, int index, struct cvmx_phy_info *phy_info);
/**
* @INTERNAL
* Returns the PHY information data structure for a port
*
* @param xiface node and interface
* @param index port index
*
* @return pointer to PHY information data structure or NULL if not set
*/
struct cvmx_phy_info *cvmx_helper_get_port_phy_info(int xiface, int index);
/**
* @INTERNAL
* Returns a pointer to the PHY LED configuration (if local GPIOs drive them)
*
* @param xiface node and interface
* @param index portindex
*
* @return pointer to the PHY LED information data structure or NULL if not
* present
*/
struct cvmx_phy_gpio_leds *cvmx_helper_get_port_phy_leds(int xiface, int index);
/**
* @INTERNAL
* Sets a pointer to the PHY LED configuration (if local GPIOs drive them)
*
* @param xiface node and interface
* @param index portindex
* @param leds pointer to led data structure
*/
void cvmx_helper_set_port_phy_leds(int xiface, int index, struct cvmx_phy_gpio_leds *leds);
/**
 * @INTERNAL
 * Disables RGMII TX clock bypass and sets delay value
 *
 * @param xiface	node and interface
 * @param index		port index
 * @param bypass	Set true to enable the clock bypass and false
 *			to sync clock and data synchronously.
 *			Default is false.
 * @param clk_delay	Delay value to skew TXC from TXD
 */
void cvmx_helper_cfg_set_rgmii_tx_clk_delay(int xiface, int index, bool bypass, int clk_delay);
/**
 * @INTERNAL
 * Gets RGMII TX clock bypass and delay value
 *
 * @param xiface	node and interface
 * @param index		port index
 * @param[out] bypass	Returns true if the clock bypass is enabled, false
 *			if TXC and TXD are synchronized.
 *			Default is false.
 * @param[out] clk_delay	Delay value to skew TXC from TXD, default is 0.
 */
void cvmx_helper_cfg_get_rgmii_tx_clk_delay(int xiface, int index, bool *bypass, int *clk_delay);
/**
 * @INTERNAL
 * Retrieve node-specific PKO Queue configuration.
 *
 * @param node		OCTEON3 node.
 * @param cfg		PKO Queue static configuration.
 */
int cvmx_helper_pko_queue_config_get(int node, cvmx_user_static_pko_queue_config_t *cfg);
/**
 * @INTERNAL
 * Update node-specific PKO Queue configuration.
 *
 * @param node		OCTEON3 node.
 * @param cfg		PKO Queue static configuration.
 */
int cvmx_helper_pko_queue_config_set(int node, cvmx_user_static_pko_queue_config_t *cfg);
/**
* @INTERNAL
* Retrieve the SFP node offset in the device tree
*
* @param xiface node and interface
* @param index port index
*
* @return offset in device tree or -1 if error or not defined.
*/
int cvmx_helper_cfg_get_sfp_fdt_offset(int xiface, int index);
/**
* Search for a port based on its FDT node offset
*
* @param of_offset Node offset of port to search for
*
* @return ipd_port or -1 if not found
*/
int cvmx_helper_cfg_get_ipd_port_by_fdt_node_offset(int of_offset);
/**
* @INTERNAL
* Sets the SFP node offset
*
* @param xiface node and interface
* @param index port index
* @param sfp_of_offset Offset of SFP node in device tree
*/
void cvmx_helper_cfg_set_sfp_fdt_offset(int xiface, int index, int sfp_of_offset);
/**
* Search for a port based on its FDT node offset
*
* @param of_offset Node offset of port to search for
* @param[out] xiface xinterface of match
* @param[out] index port index of match
*
* @return 0 if found, -1 if not found
*/
int cvmx_helper_cfg_get_xiface_index_by_fdt_node_offset(int of_offset, int *xiface, int *index);
/**
* Get data structure defining the Microsemi VSC7224 channel info
* or NULL if not present
*
* @param xiface node and interface
* @param index port index
*
* @return pointer to vsc7224 data structure or NULL if not present
*/
struct cvmx_vsc7224_chan *cvmx_helper_cfg_get_vsc7224_chan_info(int xiface, int index);
/**
* Sets the Microsemi VSC7224 channel data structure
*
* @param xiface node and interface
* @param index port index
* @param[in] vsc7224_info Microsemi VSC7224 data structure
*/
void cvmx_helper_cfg_set_vsc7224_chan_info(int xiface, int index,
struct cvmx_vsc7224_chan *vsc7224_chan_info);
/**
* Get data structure defining the Avago AVSP5410 phy info
* or NULL if not present
*
* @param xiface node and interface
* @param index port index
*
* @return pointer to avsp5410 data structure or NULL if not present
*/
struct cvmx_avsp5410 *cvmx_helper_cfg_get_avsp5410_info(int xiface, int index);
/**
* Sets the Avago AVSP5410 phy info data structure
*
* @param xiface node and interface
* @param index port index
* @param[in] avsp5410_info Avago AVSP5410 data structure
*/
void cvmx_helper_cfg_set_avsp5410_info(int xiface, int index, struct cvmx_avsp5410 *avsp5410_info);
/**
* Gets the SFP data associated with a port
*
* @param xiface node and interface
* @param index port index
*
* @return pointer to SFP data structure or NULL if none
*/
struct cvmx_fdt_sfp_info *cvmx_helper_cfg_get_sfp_info(int xiface, int index);
/**
* Sets the SFP data associated with a port
*
* @param xiface node and interface
* @param index port index
* @param[in] sfp_info port SFP data or NULL for none
*/
void cvmx_helper_cfg_set_sfp_info(int xiface, int index, struct cvmx_fdt_sfp_info *sfp_info);
/*
* Initializes cvmx with user specified config info.
*/
int cvmx_user_static_config(void);
void cvmx_pko_queue_show(void);
int cvmx_fpa_pool_init_from_cvmx_config(void);
int __cvmx_helper_init_port_valid(void);
/**
* Returns a pointer to the phy device associated with a port
*
* @param xiface node and interface
* @param index port index
*
* return pointer to phy device or NULL if none
*/
struct phy_device *cvmx_helper_cfg_get_phy_device(int xiface, int index);
/**
* Sets the phy device associated with a port
*
* @param xiface node and interface
* @param index port index
* @param[in] phydev phy device to assiciate
*/
void cvmx_helper_cfg_set_phy_device(int xiface, int index, struct phy_device *phydev);
#endif /* __CVMX_HELPER_CFG_H__ */

View File

@ -0,0 +1,50 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Fixes and workaround for Octeon chip errata. This file
* contains functions called by cvmx-helper to workaround known
* chip errata. For the most part, code doesn't need to call
* these functions directly.
*/
#ifndef __CVMX_HELPER_ERRATA_H__
#define __CVMX_HELPER_ERRATA_H__
#include "cvmx-wqe.h"
/**
* @INTERNAL
* Function to adjust internal IPD pointer alignments
*
* @return 0 on success
* !0 on failure
*/
int __cvmx_helper_errata_fix_ipd_ptr_alignment(void);
/**
* This function needs to be called on all Octeon chips with
* errata PKI-100.
*
* The Size field is 8 too large in WQE and next pointers
*
* The Size field generated by IPD is 8 larger than it should
* be. The Size field is <55:40> of both:
* - WORD3 in the work queue entry, and
* - the next buffer pointer (which precedes the packet data
* in each buffer).
*
* @param work Work queue entry to fix
* @return Zero on success. Negative on failure
*/
int cvmx_helper_fix_ipd_packet_chain(cvmx_wqe_t *work);
/**
* Due to errata G-720, the 2nd order CDR circuit on CN52XX pass
* 1 doesn't work properly. The following code disables 2nd order
* CDR for the specified QLM.
*
* @param qlm QLM to disable 2nd order CDR for.
*/
void __cvmx_helper_errata_qlm_disable_2nd_order_cdr(int qlm);
#endif

View File

@ -0,0 +1,568 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* FDT Helper functions similar to those provided to U-Boot.
* If compiled for U-Boot, just provide wrappers to the equivalent U-Boot
* functions.
*/
#ifndef __CVMX_HELPER_FDT_H__
#define __CVMX_HELPER_FDT_H__
#include <fdt_support.h>
#include <fdtdec.h>
#include <time.h>
#include <asm/global_data.h>
#include <linux/libfdt.h>
#include <mach/cvmx-helper-sfp.h>
enum cvmx_i2c_bus_type {
CVMX_I2C_BUS_OCTEON,
CVMX_I2C_MUX_PCA9540,
CVMX_I2C_MUX_PCA9542,
CVMX_I2C_MUX_PCA9543,
CVMX_I2C_MUX_PCA9544,
CVMX_I2C_MUX_PCA9545,
CVMX_I2C_MUX_PCA9546,
CVMX_I2C_MUX_PCA9547,
CVMX_I2C_MUX_PCA9548,
CVMX_I2C_MUX_OTHER
};
struct cvmx_sfp_mod_info; /** Defined in cvmx-helper-sfp.h */
struct cvmx_phy_info; /** Defined in cvmx-helper-board.h */
/**
* This data structure holds information about various I2C muxes and switches
* that may be between a device and the Octeon chip.
*/
struct cvmx_fdt_i2c_bus_info {
/** Parent I2C bus, NULL if root */
struct cvmx_fdt_i2c_bus_info *parent;
/** Child I2C bus or NULL if last entry in the chain */
struct cvmx_fdt_i2c_bus_info *child;
/** Offset in device tree */
int of_offset;
/** Type of i2c bus or mux */
enum cvmx_i2c_bus_type type;
/** I2C address of mux */
u8 i2c_addr;
/** Mux channel number */
u8 channel;
/** For muxes, the bit(s) to set to enable them */
u8 enable_bit;
/** True if mux, false if switch */
bool is_mux;
};
/**
 * Data structure containing information about SFP/QSFP slots
 */
struct cvmx_fdt_sfp_info {
	/** Used for a linked list of slots */
	struct cvmx_fdt_sfp_info *next, *prev;
	/** Used when multiple SFP ports share the same IPD port */
	struct cvmx_fdt_sfp_info *next_iface_sfp;
	/** Name from device tree of slot */
	const char *name;
	/** I2C bus for slot EEPROM */
	struct cvmx_fdt_i2c_bus_info *i2c_bus;
	/** Data from SFP or QSFP EEPROM */
	struct cvmx_sfp_mod_info sfp_info;
	/** Data structure with PHY information */
	struct cvmx_phy_info *phy_info;
	/** IPD port(s) slot is connected to */
	int ipd_port[4];
	/** Offset in device tree of slot */
	int of_offset;
	/** EEPROM address of SFP module (usually 0x50) */
	u8 i2c_eeprom_addr;
	/** Diagnostic address of SFP module (usually 0x51) */
	u8 i2c_diag_addr;
	/** True if QSFP module */
	bool is_qsfp;
	/** True if EEPROM data is valid */
	bool valid;
	/** SFP tx_disable GPIO descriptor */
	struct cvmx_fdt_gpio_info *tx_disable;
	/** SFP mod_abs/QSFP mod_prs GPIO descriptor */
	struct cvmx_fdt_gpio_info *mod_abs;
	/** SFP tx_error GPIO descriptor */
	struct cvmx_fdt_gpio_info *tx_error;
	/** SFP rx_los GPIO descriptor */
	struct cvmx_fdt_gpio_info *rx_los;
	/** QSFP select GPIO descriptor */
	struct cvmx_fdt_gpio_info *select;
	/** QSFP reset GPIO descriptor */
	struct cvmx_fdt_gpio_info *reset;
	/** QSFP interrupt GPIO descriptor */
	struct cvmx_fdt_gpio_info *interrupt;
	/** QSFP lp_mode GPIO descriptor */
	struct cvmx_fdt_gpio_info *lp_mode;
	/** Last mod_abs value */
	int last_mod_abs;
	/** Last rx_los value */
	int last_rx_los;
	/** Function to call to check mod_abs */
	int (*check_mod_abs)(struct cvmx_fdt_sfp_info *sfp_info, void *data);
	/** User-defined data to pass to check_mod_abs */
	void *mod_abs_data;
	/** Function to call when mod_abs changes */
	int (*mod_abs_changed)(struct cvmx_fdt_sfp_info *sfp_info, int val, void *data);
	/** User-defined data to pass to mod_abs_changed */
	void *mod_abs_changed_data;
	/** Function to call when rx_los changes */
	int (*rx_los_changed)(struct cvmx_fdt_sfp_info *sfp_info, int val, void *data);
	/** User-defined data to pass to rx_los_changed */
	void *rx_los_changed_data;
	/** True if we're connected to a Microsemi VSC7224 reclocking chip */
	bool is_vsc7224;
	/** Data structure for first vsc7224 channel we're attached to */
	struct cvmx_vsc7224_chan *vsc7224_chan;
	/** True if we're connected to an Avago AVSP5410 phy */
	bool is_avsp5410;
	/** Data structure for avsp5410 phy we're attached to */
	struct cvmx_avsp5410 *avsp5410;
	/** xinterface we're on */
	int xiface;
	/** port index */
	int index;
};
/**
 * Look up a property containing a list of phandles, follow each phandle to
 * its node and return the node offsets in an array.
 *
 * @param[in]	  fdt_addr   pointer to FDT blob
 * @param	  node	     node to read phandles from
 * @param[in]	  prop_name  name of property to find
 * @param[in,out] lenp	     Number of phandles, input max number
 * @param[out]	  nodes	     Array of phandle node offsets
 *
 * @return	-ve error code on error or 0 for success
 */
int cvmx_fdt_lookup_phandles(const void *fdt_addr, int node, const char *prop_name, int *lenp,
			     int *nodes);
/**
* Helper to return the address property
*
* @param[in] fdt_addr pointer to FDT blob
* @param node node to read address from
* @param prop_name property name to read
*
* @return address of property or FDT_ADDR_T_NONE if not found
*/
static inline fdt_addr_t cvmx_fdt_get_addr(const void *fdt_addr, int node, const char *prop_name)
{
return fdtdec_get_addr(fdt_addr, node, prop_name);
}
/**
* Helper function to return an integer property
*
* @param[in] fdt_addr pointer to FDT blob
* @param node node to read integer from
* @param[in] prop_name property name to read
* @param default_val default value to return if property doesn't exist
*
* @return integer value of property or default_val if it doesn't exist.
*/
static inline int cvmx_fdt_get_int(const void *fdt_addr, int node, const char *prop_name,
int default_val)
{
return fdtdec_get_int(fdt_addr, node, prop_name, default_val);
}
/**
 * Helper function to return a boolean property
 *
 * @param[in]	fdt_addr   pointer to FDT blob
 * @param	node	   node to read boolean from
 * @param[in]	prop_name  property name to read
 *
 * @return	true if the property is present, false if not
 */
static inline bool cvmx_fdt_get_bool(const void *fdt_addr, int node, const char *prop_name)
{
	return fdtdec_get_bool(fdt_addr, node, prop_name);
}
/**
 * Helper function to return a 64-bit integer property
 *
 * @param[in]	fdt_addr     pointer to FDT blob
 * @param	node	     node to read integer from
 * @param[in]	prop_name    property name to read
 * @param	default_val  default value to return if property doesn't exist
 *
 * @return	64-bit value of property or default_val if it doesn't exist.
 */
static inline u64 cvmx_fdt_get_uint64(const void *fdt_addr, int node, const char *prop_name,
				      u64 default_val)
{
	return fdtdec_get_uint64(fdt_addr, node, prop_name, default_val);
}
/**
* Look up a phandle and follow it to its node then return the offset of that
* node.
*
* @param[in] fdt_addr pointer to FDT blob
* @param node node to read phandle from
* @param[in] prop_name name of property to find
*
* @return node offset if found, -ve error code on error
*/
static inline int cvmx_fdt_lookup_phandle(const void *fdt_addr, int node, const char *prop_name)
{
return fdtdec_lookup_phandle(fdt_addr, node, prop_name);
}
/**
* Translate an address from the device tree into a CPU physical address by
* walking up the device tree and applying bus mappings along the way.
*
* This uses #size-cells and #address-cells.
*
* @param[in] fdt_addr Address of flat device tree
* @param node node to start translating from
* @param[in] in_addr Address to translate
* NOTE: in_addr must be in the native ENDIAN
* format.
*
* @return Translated address or FDT_ADDR_T_NONE if address cannot be
* translated.
*/
static inline u64 cvmx_fdt_translate_address(const void *fdt_addr, int node, const u32 *in_addr)
{
return fdt_translate_address((void *)fdt_addr, node, in_addr);
}
/**
 * Compare compatible strings in the flat device tree.
 *
 * @param[in] s1	First string to compare
 * @param[in] s2	Second string to compare
 *
 * @return	0 if no match
 *		1 if only the part number matches and not the manufacturer
 *		2 if both the part number and manufacturer match
 */
int cvmx_fdt_compat_match(const char *s1, const char *s2);
/**
* Returns whether a list of strings contains the specified string
*
* @param[in] slist String list
* @param llen string list total length
* @param[in] str string to search for
*
* @return 1 if string list contains string, 0 if it does not.
*/
int cvmx_fdt_compat_list_contains(const char *slist, int llen, const char *str);
/**
* Check if a node is compatible with the specified compat string
*
* @param[in] fdt_addr FDT address
* @param node node offset to check
* @param[in] compat compatible string to check
*
* @return 0 if compatible, 1 if not compatible, error if negative
*/
int cvmx_fdt_node_check_compatible(const void *fdt_addr, int node, const char *compat);
/**
* @INTERNAL
* Compares a string to a compatible field.
*
* @param[in] compat compatible string
* @param[in] str string to check
*
* @return 0 if not compatible, 1 if manufacturer compatible, 2 if
* part is compatible, 3 if both part and manufacturer are
* compatible.
*/
int __cvmx_fdt_compat_match(const char *compat, const char *str);
/**
* Given a phandle to a GPIO device return the type of GPIO device it is.
*
* @param[in] fdt_addr Address of flat device tree
* @param phandle phandle to GPIO
* @param[out] size Number of pins (optional, may be NULL)
*
* @return Type of GPIO device or PIN_ERROR if error
*/
enum cvmx_gpio_type cvmx_fdt_get_gpio_type(const void *fdt_addr, int phandle, int *size);
/**
* Given a phandle to a GPIO node output the i2c bus and address
*
* @param[in] fdt_addr Address of FDT
* @param phandle phandle of GPIO device
* @param[out] bus TWSI bus number with node in bits 1-3, can be
* NULL for none.
* @param[out] addr TWSI address number, can be NULL for none
*
* @return 0 for success, error otherwise
*/
int cvmx_fdt_get_twsi_gpio_bus_addr(const void *fdt_addr, int phandle, int *bus, int *addr);
/**
* Given a FDT node return the CPU node number
*
* @param[in] fdt_addr Address of FDT
* @param node FDT node number
*
* @return CPU node number or error if negative
*/
int cvmx_fdt_get_cpu_node(const void *fdt_addr, int node);
/**
* Get the total size of the flat device tree
*
* @param[in] fdt_addr Address of FDT
*
* @return Size of flat device tree in bytes or -1 if error.
*/
int cvmx_fdt_get_fdt_size(const void *fdt_addr);
/**
* Returns if a node is compatible with one of the items in the string list
*
* @param[in] fdt_addr Pointer to flat device tree
* @param node Node offset to check
* @param[in] strlist Array of FDT device compatibility strings,
* must end with NULL or empty string.
*
* @return 0 if at least one item matches, 1 if no matches
*/
int cvmx_fdt_node_check_compatible_list(const void *fdt_addr, int node, const char *const *strlist);
/**
 * Given a FDT node, return the next compatible node.
 *
 * @param[in] fdt_addr	Pointer to flat device tree
 * @param startoffset	Starting node offset or -1 to find the first
 * @param strlist	Array of FDT device compatibility strings, must
 *			end with NULL or empty string.
 *
 * @return	next matching node or -1 if no more matches.
 */
int cvmx_fdt_node_offset_by_compatible_list(const void *fdt_addr, int startoffset,
					    const char *const *strlist);
/**
* Given the parent offset of an i2c device build up a list describing the bus
* which can contain i2c muxes and switches.
*
* @param[in] fdt_addr address of device tree
* @param of_offset Offset of the parent node of a GPIO device in
* the device tree.
*
* @return pointer to list of i2c devices starting from the root which
* can include i2c muxes and switches or NULL if error. Note that
* all entries are allocated on the heap.
*
* @see cvmx_fdt_free_i2c_bus()
*/
struct cvmx_fdt_i2c_bus_info *cvmx_fdt_get_i2c_bus(const void *fdt_addr, int of_offset);
/**
* Return the Octeon bus number for a bus descriptor
*
* @param[in] bus bus descriptor
*
* @return Octeon twsi bus number or -1 on error
*/
int cvmx_fdt_i2c_get_root_bus(const struct cvmx_fdt_i2c_bus_info *bus);
/**
* Frees all entries for an i2c bus descriptor
*
* @param bus bus to free
*
* @return 0
*/
int cvmx_fdt_free_i2c_bus(struct cvmx_fdt_i2c_bus_info *bus);
/**
* Given the bus to a device, enable it.
*
* @param[in] bus i2c bus descriptor to enable or disable
* @param enable set to true to enable, false to disable
*
* @return 0 for success or -1 for invalid bus
*
* This enables the entire bus including muxes and switches in the path.
*/
int cvmx_fdt_enable_i2c_bus(const struct cvmx_fdt_i2c_bus_info *bus, bool enable);
/**
* Return a GPIO handle given a GPIO phandle of the form <&gpio pin flags>
*
* @param[in] fdt_addr Address of flat device tree
* @param of_offset node offset for property
* @param prop_name name of property
*
* @return pointer to GPIO handle or NULL if error
*/
struct cvmx_fdt_gpio_info *cvmx_fdt_gpio_get_info_phandle(const void *fdt_addr, int of_offset,
const char *prop_name);
/**
* Sets a GPIO pin given the GPIO descriptor
*
* @param pin GPIO pin descriptor
* @param value value to set it to, 0 or 1
*
* @return 0 on success, -1 on error.
*
* NOTE: If the CVMX_GPIO_ACTIVE_LOW flag is set then the output value will be
* inverted.
*/
int cvmx_fdt_gpio_set(struct cvmx_fdt_gpio_info *pin, int value);
/**
* Given a GPIO pin descriptor, input the value of that pin
*
* @param pin GPIO pin descriptor
*
* @return 0 if low, 1 if high, -1 on error. Note that the input will be
* inverted if the CVMX_GPIO_ACTIVE_LOW flag bit is set.
*/
int cvmx_fdt_gpio_get(struct cvmx_fdt_gpio_info *pin);
/**
* Assigns an IPD port to a SFP slot
*
* @param sfp Handle to SFP data structure
* @param ipd_port Port to assign it to
*
* @return 0 for success, -1 on error
*/
int cvmx_sfp_set_ipd_port(struct cvmx_fdt_sfp_info *sfp, int ipd_port);
/**
* Get the IPD port of a SFP slot
*
* @param[in] sfp Handle to SFP data structure
*
* @return IPD port number for SFP slot
*/
static inline int cvmx_sfp_get_ipd_port(const struct cvmx_fdt_sfp_info *sfp)
{
return sfp->ipd_port[0];
}
/**
* Get the IPD ports for a QSFP port
*
* @param[in] sfp Handle to SFP data structure
* @param[out] ipd_ports IPD ports for each lane, if running as 40G then
* only ipd_ports[0] is valid and the others will
* be set to -1.
*/
static inline void cvmx_qsfp_get_ipd_ports(const struct cvmx_fdt_sfp_info *sfp, int ipd_ports[4])
{
int i;
for (i = 0; i < 4; i++)
ipd_ports[i] = sfp->ipd_port[i];
}
/**
* Attaches a PHY to a SFP or QSFP.
*
* @param sfp sfp to attach PHY to
* @param phy_info phy descriptor to attach or NULL to detach
*/
void cvmx_sfp_attach_phy(struct cvmx_fdt_sfp_info *sfp, struct cvmx_phy_info *phy_info);
/**
* Returns a phy descriptor for a SFP slot
*
* @param[in] sfp SFP to get phy info from
*
* @return phy descriptor or NULL if none.
*/
static inline struct cvmx_phy_info *cvmx_sfp_get_phy_info(const struct cvmx_fdt_sfp_info *sfp)
{
return sfp->phy_info;
}
/**
* @INTERNAL
* Parses all instances of the Vitesse VSC7224 reclocking chip
*
* @param[in] fdt_addr Address of flat device tree
*
* @return 0 for success, error otherwise
*/
int __cvmx_fdt_parse_vsc7224(const void *fdt_addr);
/**
* @INTERNAL
* Parses all instances of the Avago AVSP5410 gearbox phy
*
* @param[in] fdt_addr Address of flat device tree
*
* @return 0 for success, error otherwise
*/
int __cvmx_fdt_parse_avsp5410(const void *fdt_addr);
/**
* Parse SFP information from device tree
*
* @param[in] fdt_addr Address of flat device tree
*
* @return pointer to sfp info or NULL if error
*/
struct cvmx_fdt_sfp_info *cvmx_helper_fdt_parse_sfp_info(const void *fdt_addr, int of_offset);
/**
* @INTERNAL
* Parses either a CS4343 phy or a slice of the phy from the device tree
* @param[in] fdt_addr Address of FDT
* @param of_offset offset of slice or phy in device tree
* @param phy_info phy_info data structure to fill in
*
* @return 0 for success, -1 on error
*/
int cvmx_fdt_parse_cs4343(const void *fdt_addr, int of_offset, struct cvmx_phy_info *phy_info);
/**
* Given an i2c bus and device address, write an 8 bit value
*
* @param bus i2c bus number
* @param addr i2c device address (7 bits)
* @param val 8-bit value to write
*
* This is just an abstraction to ease support in both U-Boot and SE.
*/
void cvmx_fdt_i2c_reg_write(int bus, int addr, u8 val);
/**
* Read an 8-bit value from an i2c bus and device address
*
* @param bus i2c bus number
* @param addr i2c device address (7 bits)
*
* @return 8-bit value or error if negative
*/
int cvmx_fdt_i2c_reg_read(int bus, int addr);
/**
* Write an 8-bit value to a register indexed i2c device
*
* @param bus i2c bus number to write to
* @param addr i2c device address (7 bits)
* @param reg i2c 8-bit register address
* @param val 8-bit value to write
*
* @return 0 for success, otherwise error
*/
int cvmx_fdt_i2c_write8(int bus, int addr, int reg, u8 val);
/**
* Read an 8-bit value from a register indexed i2c device
*
* @param bus i2c bus number to write to
* @param addr i2c device address (7 bits)
* @param reg i2c 8-bit register address
*
* @return value or error if negative
*/
int cvmx_fdt_i2c_read8(int bus, int addr, int reg);
int cvmx_sfp_vsc7224_mod_abs_changed(struct cvmx_fdt_sfp_info *sfp_info,
				     int val, void *data);
int cvmx_sfp_avsp5410_mod_abs_changed(struct cvmx_fdt_sfp_info *sfp_info,
				      int val, void *data);
#endif /* __CVMX_HELPER_FDT_H__ */

View File

@ -0,0 +1,43 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Helper functions for FPA setup.
*/
#ifndef __CVMX_HELPER_H_FPA__
#define __CVMX_HELPER_H_FPA__
/**
 * Allocate memory and initialize the FPA pools using memory
 * from cvmx-bootmem. Sizes of each element in the pools is
 * controlled by the cvmx-config.h header file. Specifying
 * zero for any parameter will cause that FPA pool to not be
 * setup. This is useful if you aren't using some of the
 * hardware and want to save memory.
 *
 * @param packet_buffers
 *	  Number of packet buffers to allocate
 * @param work_queue_entries
 *	  Number of work queue entries
 * @param pko_buffers
 *	  PKO Command buffers. You should at minimum have two per
 *	  each PKO queue.
 * @param tim_buffers
 *	  TIM ring buffer command queues. At least two per timer bucket
 *	  is recommended.
 * @param dfa_buffers
 *	  DFA command buffer. A relatively small (32 for example)
 *	  number should work.
 * @return Zero on success, non-zero if out of memory
 */
int cvmx_helper_initialize_fpa(int packet_buffers, int work_queue_entries, int pko_buffers,
			       int tim_buffers, int dfa_buffers);
int __cvmx_helper_initialize_fpa_pool(int pool, u64 buffer_size, u64 buffers, const char *name);
int cvmx_helper_shutdown_fpa_pools(int node);
void cvmx_helper_fpa_dump(int node);
#endif /* __CVMX_HELPER_H_FPA__ */

View File

@ -0,0 +1,427 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Defines some GPIO information used in multiple places
*/
#ifndef __CVMX_HELPER_GPIO_H__
#define __CVMX_HELPER_GPIO_H__
#define CVMX_GPIO_NAME_LEN 32 /** Length of name */
enum cvmx_gpio_type {
CVMX_GPIO_PIN_OCTEON, /** GPIO pin is directly connected to OCTEON */
CVMX_GPIO_PIN_PCA953X, /** GPIO pin is NXP PCA953X compat chip */
CVMX_GPIO_PIN_PCA957X,
CVMX_GPIO_PIN_PCF857X, /** GPIO pin is NXP PCF857X compat chip */
CVMX_GPIO_PIN_PCA9698, /** GPIO pin is NXP PCA9698 compat chip */
CVMX_GPIO_PIN_CS4343, /** Inphi/Cortina CS4343 GPIO pins */
CVMX_GPIO_PIN_OTHER, /** GPIO pin is something else */
};
enum cvmx_gpio_operation {
CVMX_GPIO_OP_CONFIG, /** Initial configuration of the GPIO pin */
CVMX_GPIO_OP_SET, /** Set pin */
CVMX_GPIO_OP_CLEAR, /** Clear pin */
CVMX_GPIO_OP_READ, /** Read pin */
CVMX_GPIO_OP_TOGGLE, /** Toggle pin */
CVMX_GPIO_OP_BLINK_START, /** Put in blink mode (if supported) */
CVMX_GPIO_OP_BLINK_STOP, /** Takes the pin out of blink mode */
CVMX_GPIO_OP_SET_LINK, /** Put in link monitoring mode */
CVMX_GPIO_OP_SET_ACT, /** Put in RX activity mode */
};
/**
* Inphi CS4343 output source select values for the GPIO_GPIOX output_src_sel.
*/
enum cvmx_inphi_cs4343_gpio_gpio_output_src_sel {
GPIO_SEL_DRIVE = 0, /** Value of GPIOX_DRIVE */
GPIO_SEL_DELAY = 1, /** Drive delayed */
GPIO_SEL_TOGGLE = 2, /** Used for blinking */
GPIO_SEL_EXT = 3, /** External function */
GPIO_SEL_EXT_DELAY = 4, /** External function delayed */
};
/** Inphi GPIO_GPIOX configuration register */
union cvmx_inphi_cs4343_gpio_cfg_reg {
	u16 u;
	struct {
		u16: 4;
		/** Data source for the GPIO output */
		u16 output_src_sel : 3;
		/** 1 = GPIO output is inverted before being output */
		u16 invert_output : 1;
		/** 1 = GPIO input is inverted before being processed */
		u16 invert_input : 1;
		/** 0 = 2.5v/1.8v signalling, 1 = 1.2v signalling */
		u16 iovddsel_1v2 : 1;
		/**
		 * 0 = output selected by outen bit
		 * 1 = output controlled by selected GPIO output source
		 */
		u16 outen_ovr : 1;
		/** 0 = GPIO is input only, 1 = GPIO output driver enabled */
		u16 outen : 1;
		u16: 2;
		/*
		 * Fix: pullup_1k/pullup_10k were declared without a bit
		 * width, making each a full 16-bit member. That inflates
		 * the bitfield struct past 16 bits so it no longer
		 * overlays the u16 register image. Each pullup enable is
		 * a single-bit flag, bringing the total to exactly 16.
		 */
		u16 pullup_1k : 1;  /** 1 = enable 1K pad pullup */
		u16 pullup_10k : 1; /** 1 = enable 10K pad pullup */
	} s;
};
#define CVMX_INPHI_CS4343_GPIO_CFG_OFFSET 0x0
/**
* This selects which port the GPIO gets its signals from when configured
* as an output.
*/
enum cvmx_inphi_cs4343_gpio_output_cfg_port {
PORT_0_HOST_RX = 0, /** Port pair 0 host RX */
PORT_0_LINE_RX = 1, /** Port pair 0 line RX */
PORT_1_HOST_RX = 2, /** Port pair 1 host RX */
PORT_1_LINE_RX = 3, /** Port pair 1 line RX */
PORT_3_HOST_RX = 4, /** Port pair 3 host RX */
PORT_3_LINE_RX = 5, /** Port pair 3 line RX */
PORT_2_HOST_RX = 6, /** Port pair 2 host RX */
PORT_2_LINE_RX = 7, /** Port pair 2 line RX */
COMMON = 8, /** Common */
};
enum cvmx_inphi_cs4343_gpio_output_cfg_function {
RX_LOS = 0, /** Port - 1 = Receive LOS (from DSP) */
RX_LOL = 1, /** Port - 1 = Receive LOL (inverted from MSEQ) */
EDC_CONVERGED = 2, /** Port - 1 = EDC converged (from DSP) */
/** Port - 1 = PRBS checker in sync (inverted from SDS) */
RX_PRBS_SYNC = 3,
COMMON_LOGIC_0 = 0, /** Common - Logic 0 */
COMMON_GPIO1_INPUT = 1, /** Common - GPIO 1 input */
COMMON_GPIO2_INPUT = 2, /** Common - GPIO 2 input */
COMMON_GPIO3_INPUT = 3, /** Common - GPIO 3 input */
COMMON_GPIO4_INPUT = 4, /** Common - GPIO 4 input */
COMMON_INTERR_INPUT = 5, /** Common - INTERR input */
/** Common - Interrupt output from GLOBAL_INT register */
COMMON_GLOBAL_INT = 6,
/** Common - Interrupt output from GPIO_INT register */
COMMON_GPIO_INT = 7,
/** Common - Temp/voltage monitor interrupt */
COMMON_MONITOR_INT = 8,
/** Common - Selected clock output of global clock monitor */
COMMON_GBL_CLKMON_CLK = 9,
};
union cvmx_inphi_cs4343_gpio_output_cfg {
u16 u;
struct {
u16: 8;
u16 port : 4; /** port */
u16 function : 4; /** function */
} s;
};
#define CVMX_INPHI_CS4343_GPIO_OUTPUT_CFG_OFFSET 0x1
union cvmx_inphi_cs4343_gpio_drive {
u16 u;
struct {
u16: 15;
u16 value : 1; /** output value */
} s;
};
#define CVMX_INPHI_CS4343_GPIO_DRIVE_OFFSET 0x2
union cvmx_inphi_cs4343_gpio_value {
u16 u;
struct {
u16: 15;
u16 value : 1; /** input value (read-only) */
} s;
};
#define CVMX_INPHI_CS4343_GPIO_VALUE_OFFSET 0x3
union cvmx_inphi_cs4343_gpio_toggle {
u16 u;
struct {
/** Toggle rate in ms, multiply by 2 to get period in ms */
u16 rate : 16;
} s;
};
#define CVMX_INPHI_CS4343_GPIO_TOGGLE_OFFSET 0x4
union cvmx_inphi_cs4343_gpio_delay {
u16 u;
struct {
/** On delay for GPIO output in ms when enabled */
u16 on_delay : 16;
} s;
};
#define CVMX_INPHI_CS4343_GPIO_DELAY_OFFSET 0x5
/**
* GPIO flags associated with a GPIO pin (can be combined)
*/
enum cvmx_gpio_flags {
CVMX_GPIO_ACTIVE_HIGH = 0, /** Active high (default) */
CVMX_GPIO_ACTIVE_LOW = 1, /** Active low (inverted) */
CVMX_GPIO_OPEN_COLLECTOR = 2, /** Output is open-collector */
};
/** Default timer number to use for outputting a frequency [0..3] */
#define CVMX_GPIO_DEFAULT_TIMER 3
/** Configuration data for native Octeon GPIO pins */
struct cvmx_octeon_gpio_data {
int cpu_node; /** CPU node for GPIO pin */
int timer; /** Timer number used when in toggle mode, 0-3 */
};
struct cvmx_pcf857x_gpio_data {
unsigned int latch_out;
};
#define CVMX_INPHI_CS4343_EFUSE_PDF_SKU_REG 0x19f
#define CVMX_INPHI_CS4343_SKU_CS4223 0x10
#define CVMX_INPHI_CS4343_SKU_CS4224 0x11
#define CVMX_INPHI_CS4343_SKU_CS4343 0x12
#define CVMX_INPHI_CS4343_SKU_CS4221 0x13
#define CVMX_INPHI_CS4343_SKU_CS4227 0x14
#define CVMX_INPHI_CS4343_SKU_CS4341 0x16
struct cvmx_cs4343_gpio_data {
int reg_offset; /** Base register address for GPIO */
enum cvmx_gpio_operation last_op;
u8 link_port; /** Link port number for link status */
u16 sku; /** Value from CS4224_EFUSE_PDF_SKU register */
u8 out_src_sel;
u8 field_func;
bool out_en;
bool is_cs4343; /** True if dual package */
struct phy_device *phydev;
};
struct cvmx_fdt_gpio_info;
/** Function called for GPIO operations */
typedef int (*cvmx_fdt_gpio_op_func_t)(struct cvmx_fdt_gpio_info *, enum cvmx_gpio_operation);
/**
* GPIO descriptor
*/
struct cvmx_fdt_gpio_info {
struct cvmx_fdt_gpio_info *next; /** For list of GPIOs */
char name[CVMX_GPIO_NAME_LEN]; /** Name of GPIO */
int pin; /** GPIO pin number */
enum cvmx_gpio_type gpio_type; /** Type of GPIO controller */
int of_offset; /** Offset in device tree */
int phandle;
struct cvmx_fdt_i2c_bus_info *i2c_bus; /** I2C bus descriptor */
int i2c_addr; /** Address on i2c bus */
enum cvmx_gpio_flags flags; /** Flags associated with pin */
int num_pins; /** Total number of pins */
unsigned int latch_out; /** Latched output for 857x */
/** Rate in ms between toggle states */
int toggle_rate;
/** Pointer to user data for user-defined functions */
void *data;
/** Function to set, clear, toggle, etc. */
cvmx_fdt_gpio_op_func_t op_func;
/* Two values are used to detect the initial case where nothing has
* been configured. Initially, all of the following will be false
* which will force the initial state to be properly set.
*/
/** True if the GPIO pin is currently set, useful for toggle */
bool is_set;
/** Set if configured to invert */
bool invert_set;
/** Set if input is to be inverted */
bool invert_input;
/** Set if direction is configured as output */
bool dir_out;
/** Set if direction is configured as input */
bool dir_in;
/** Pin is set to toggle periodically */
bool toggle;
/** True if LED is used to indicate link status */
bool link_led;
/** True if LED is used to indicate rx activity */
bool rx_act_led;
/** True if LED is used to indicate tx activity */
bool tx_act_led;
/** True if LED is used to indicate networking errors */
bool error_led;
/** True if LED can automatically show link */
bool hw_link;
};
/** LED data structure, linking an LED device-tree node to its GPIO */
struct cvmx_fdt_gpio_led {
	struct cvmx_fdt_gpio_led *next, *prev; /** List of LEDs */
	char name[CVMX_GPIO_NAME_LEN]; /** Name */
	struct cvmx_fdt_gpio_info *gpio; /** GPIO for LED */
	int of_offset; /** Device tree node */
	/** True if active low, note that GPIO contains this info */
	bool active_low;
};
/**
* Returns the operation function for the GPIO phandle
*
* @param[in] fdt_addr Pointer to FDT
* @param phandle phandle of GPIO entry
*
* @return Pointer to op function or NULL if not found.
*/
cvmx_fdt_gpio_op_func_t cvmx_fdt_gpio_get_op_func(const void *fdt_addr, int phandle);
/**
* Given a phandle to a GPIO device return the type of GPIO device it is.
*
* @param[in] fdt_addr Address of flat device tree
* @param phandle phandle to GPIO
* @param[out] size Number of pins (optional, may be NULL)
*
* @return Type of GPIO device or PIN_ERROR if error
*/
enum cvmx_gpio_type cvmx_fdt_get_gpio_type(const void *fdt_addr, int phandle, int *size);
/**
* Return a GPIO handle given a GPIO phandle of the form <&gpio pin flags>
*
* @param[in] fdt_addr Address of flat device tree
* @param of_offset node offset of GPIO device
* @param prop_name name of property
*
* @return pointer to GPIO handle or NULL if error
*/
struct cvmx_fdt_gpio_info *cvmx_fdt_gpio_get_info(const void *fdt_addr, int of_offset,
const char *prop_name);
/**
* Return a GPIO handle given a GPIO phandle of the form <&gpio pin flags>
*
* @param[in] fdt_addr Address of flat device tree
* @param of_offset node offset for property
* @param prop_name name of property
*
* @return pointer to GPIO handle or NULL if error
*/
struct cvmx_fdt_gpio_info *cvmx_fdt_gpio_get_info_phandle(const void *fdt_addr, int of_offset,
const char *prop_name);
/**
* Parses a GPIO entry and fills in the gpio info data structure
*
* @param[in] fdt_addr Address of FDT
* @param phandle phandle for GPIO
* @param pin pin number
* @param flags flags set (1 = invert)
* @param[out] gpio GPIO info data structure
*
* @return 0 for success, -1 on error
*/
int cvmx_fdt_parse_gpio(const void *fdt_addr, int phandle, int pin, u32 flags,
struct cvmx_fdt_gpio_info *gpio);
/**
* @param gpio GPIO descriptor to assign timer to
* @param timer Octeon hardware timer number [0..3]
*/
void cvmx_fdt_gpio_set_timer(struct cvmx_fdt_gpio_info *gpio, int timer);
/**
* Given a GPIO pin descriptor, input the value of that pin
*
* @param pin GPIO pin descriptor
*
* @return 0 if low, 1 if high, -1 on error. Note that the input will be
* inverted if the CVMX_GPIO_ACTIVE_LOW flag bit is set.
*/
int cvmx_fdt_gpio_get(struct cvmx_fdt_gpio_info *pin);
/**
* Sets a GPIO pin given the GPIO descriptor
*
* @param gpio GPIO pin descriptor
* @param value value to set it to, 0 or 1
*
* @return 0 on success, -1 on error.
*
* NOTE: If the CVMX_GPIO_ACTIVE_LOW flag is set then the output value will be
* inverted.
*/
int cvmx_fdt_gpio_set(struct cvmx_fdt_gpio_info *gpio, int value);
/**
* Sets the blink frequency for a GPIO pin
*
* @param gpio GPIO handle
* @param freq Frequency in hz [0..500]
*/
void cvmx_fdt_gpio_set_freq(struct cvmx_fdt_gpio_info *gpio, int freq);
/**
* Enables or disables blinking a GPIO pin
*
* @param gpio GPIO handle
* @param blink True to start blinking, false to stop
*
* @return 0 for success, -1 on error
* NOTE: Not all GPIO types support blinking.
*/
int cvmx_fdt_gpio_set_blink(struct cvmx_fdt_gpio_info *gpio, bool blink);
/**
* Alternates between link and blink mode
*
* @param gpio GPIO handle
* @param blink True to start blinking, false to use link status
*
* @return 0 for success, -1 on error
* NOTE: Not all GPIO types support this.
*/
int cvmx_fdt_gpio_set_link_blink(struct cvmx_fdt_gpio_info *gpio, bool blink);
/**
 * Reports whether the GPIO can indicate link status automatically in
 * hardware (i.e. without software driving the pin).
 *
 * @param gpio GPIO descriptor to query
 *
 * @return true if hardware link indication is supported, false otherwise
 */
static inline bool cvmx_fdt_gpio_hw_link_supported(const struct cvmx_fdt_gpio_info *gpio)
{
	const bool supported = gpio->hw_link;

	return supported;
}
/**
* Configures a GPIO pin as input or output
*
* @param gpio GPIO pin to configure
* @param output Set to true to make output, false for input
*/
void cvmx_fdt_gpio_set_output(struct cvmx_fdt_gpio_info *gpio, bool output);
/**
* Allocates an LED data structure
* @param[in] name name to assign LED
* @param of_offset Device tree offset
* @param gpio GPIO assigned to LED (can be NULL)
* @param last Previous LED to build a list
*
* @return pointer to LED data structure or NULL if out of memory
*/
struct cvmx_fdt_gpio_led *cvmx_alloc_led(const char *name, int of_offset,
struct cvmx_fdt_gpio_info *gpio,
struct cvmx_fdt_gpio_led *last);
/**
* Parses an LED in the device tree
*
* @param[in] fdt_addr Pointer to flat device tree
* @param led_of_offset Device tree offset of LED
* @param gpio GPIO data structure to use (can be NULL)
* @param last Previous LED if this is a group of LEDs
*
* @return Pointer to LED data structure or NULL if error
*/
struct cvmx_fdt_gpio_led *cvmx_fdt_parse_led(const void *fdt_addr, int led_of_offset,
struct cvmx_fdt_gpio_info *gpio,
struct cvmx_fdt_gpio_led *last);
#endif /* __CVMX_HELPER_GPIO_H__ */

View File

@ -0,0 +1,93 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Functions for ILK initialization, configuration,
* and monitoring.
*/
#ifndef __CVMX_HELPER_ILK_H__
#define __CVMX_HELPER_ILK_H__
int __cvmx_helper_ilk_enumerate(int interface);
/**
* @INTERNAL
* Clear all calendar entries to the xoff state. This
* means no data is sent or received.
*
* @param interface Interface whose calendar are to be initialized.
*/
void __cvmx_ilk_clear_cal(int interface);
/**
* @INTERNAL
* Setup the channel's tx calendar entry.
*
* @param interface Interface channel belongs to
* @param channel Channel whose calendar entry is to be updated
* @param bpid Bpid assigned to the channel
*/
void __cvmx_ilk_write_tx_cal_entry(int interface, int channel, unsigned char bpid);
/**
* @INTERNAL
* Setup the channel's rx calendar entry.
*
* @param interface Interface channel belongs to
* @param channel Channel whose calendar entry is to be updated
* @param pipe PKO assigned to the channel
*/
void __cvmx_ilk_write_rx_cal_entry(int interface, int channel, unsigned char pipe);
/**
* @INTERNAL
* Probe a ILK interface and determine the number of ports
* connected to it. The ILK interface should still be down after
* this call.
*
* @param xiface Interface to probe
*
* @return Number of ports on the interface. Zero to disable.
*/
int __cvmx_helper_ilk_probe(int xiface);
/**
* @INTERNAL
* Bringup and enable a ILK interface. After this call packet
* I/O should be fully functional. This is called with IPD
* enabled but PKO disabled.
*
* @param xiface Interface to bring up
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_ilk_enable(int xiface);
/**
* @INTERNAL
* Return the link state of an IPD/PKO port as returned by ILK link status.
*
* @param ipd_port IPD/PKO port to query
*
* @return Link state
*/
cvmx_helper_link_info_t __cvmx_helper_ilk_link_get(int ipd_port);
/**
* @INTERNAL
* Configure an IPD/PKO port for the specified link state. This
* function does not influence auto negotiation at the PHY level.
* The passed link state must always match the link state returned
* by cvmx_helper_link_get(). It is normally best to use
* cvmx_helper_link_autoconf() instead.
*
* @param ipd_port IPD/PKO port to configure
* @param link_info The new link state
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_ilk_link_set(int ipd_port, cvmx_helper_link_info_t link_info);
void __cvmx_helper_ilk_show_stats(void);
#endif

View File

@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Helper functions for IPD
*/
#ifndef __CVMX_HELPER_IPD_H__
#define __CVMX_HELPER_IPD_H__
void cvmx_helper_ipd_set_wqe_no_ptr_mode(bool mode);
void cvmx_helper_ipd_pkt_wqe_le_mode(bool mode);
int __cvmx_helper_ipd_global_setup(void);
int __cvmx_helper_ipd_setup_interface(int interface);
#endif /* __CVMX_HELPER_PKI_H__ */

View File

@ -0,0 +1,84 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Helper utilities for qlm_jtag.
*/
#ifndef __CVMX_HELPER_JTAG_H__
#define __CVMX_HELPER_JTAG_H__
/**
* The JTAG chain for CN52XX and CN56XX is 4 * 268 bits long, or 1072.
* CN5XXX full chain shift is:
* new data => lane 3 => lane 2 => lane 1 => lane 0 => data out
* The JTAG chain for CN63XX is 4 * 300 bits long, or 1200.
* The JTAG chain for CN68XX is 4 * 304 bits long, or 1216.
* The JTAG chain for CN66XX/CN61XX/CNF71XX is 4 * 304 bits long, or 1216.
* CN6XXX full chain shift is:
* new data => lane 0 => lane 1 => lane 2 => lane 3 => data out
* Shift LSB first, get LSB out
*/
extern const __cvmx_qlm_jtag_field_t __cvmx_qlm_jtag_field_cn63xx[];
extern const __cvmx_qlm_jtag_field_t __cvmx_qlm_jtag_field_cn66xx[];
extern const __cvmx_qlm_jtag_field_t __cvmx_qlm_jtag_field_cn68xx[];
#define CVMX_QLM_JTAG_UINT32 40
typedef u32 qlm_jtag_uint32_t[CVMX_QLM_JTAG_UINT32 * 8];
/**
* Initialize the internal QLM JTAG logic to allow programming
* of the JTAG chain by the cvmx_helper_qlm_jtag_*() functions.
* These functions should only be used at the direction of Cavium
* Networks. Programming incorrect values into the JTAG chain
* can cause chip damage.
*/
void cvmx_helper_qlm_jtag_init(void);
/**
* Write up to 32bits into the QLM jtag chain. Bits are shifted
* into the MSB and out the LSB, so you should shift in the low
* order bits followed by the high order bits. The JTAG chain for
* CN52XX and CN56XX is 4 * 268 bits long, or 1072. The JTAG chain
* for CN63XX is 4 * 300 bits long, or 1200.
*
* @param qlm QLM to shift value into
* @param bits Number of bits to shift in (1-32).
* @param data Data to shift in. Bit 0 enters the chain first, followed by
* bit 1, etc.
*
* @return The low order bits of the JTAG chain that shifted out of the
* circle.
*/
u32 cvmx_helper_qlm_jtag_shift(int qlm, int bits, u32 data);
/**
* Shift long sequences of zeros into the QLM JTAG chain. It is
* common to need to shift more than 32 bits of zeros into the
* chain. This function is a convenience wrapper around
* cvmx_helper_qlm_jtag_shift() to shift more than 32 bits of
* zeros at a time.
*
* @param qlm QLM to shift zeros into
* @param bits
*/
void cvmx_helper_qlm_jtag_shift_zeros(int qlm, int bits);
/**
* Program the QLM JTAG chain into all lanes of the QLM. You must
* have already shifted in the proper number of bits into the
* JTAG chain. Updating invalid values can possibly cause chip damage.
*
* @param qlm QLM to program
*/
void cvmx_helper_qlm_jtag_update(int qlm);
/**
* Load the QLM JTAG chain with data from all lanes of the QLM.
*
* @param qlm QLM to program
*/
void cvmx_helper_qlm_jtag_capture(int qlm);
#endif /* __CVMX_HELPER_JTAG_H__ */

View File

@ -0,0 +1,37 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Functions for LOOP initialization, configuration,
* and monitoring.
*/
#ifndef __CVMX_HELPER_LOOP_H__
#define __CVMX_HELPER_LOOP_H__
/**
* @INTERNAL
* Probe a LOOP interface and determine the number of ports
* connected to it. The LOOP interface should still be down after
* this call.
*
* @param xiface Interface to probe
*
* @return Number of ports on the interface. Zero to disable.
*/
int __cvmx_helper_loop_probe(int xiface);
int __cvmx_helper_loop_enumerate(int xiface);
/**
* @INTERNAL
* Bringup and enable a LOOP interface. After this call packet
* I/O should be fully functional. This is called with IPD
* enabled but PKO disabled.
*
* @param xiface Interface to bring up
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_loop_enable(int xiface);
#endif

View File

@ -0,0 +1,42 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Functions for NPI initialization, configuration,
* and monitoring.
*/
#ifndef __CVMX_HELPER_NPI_H__
#define __CVMX_HELPER_NPI_H__
/**
* @INTERNAL
* Probe a NPI interface and determine the number of ports
* connected to it. The NPI interface should still be down after
* this call.
*
* @param interface Interface to probe
*
* @return Number of ports on the interface. Zero to disable.
*/
int __cvmx_helper_npi_probe(int interface);
/**
* @INTERNAL
* Bringup and enable a NPI interface. After this call packet
* I/O should be fully functional. This is called with IPD
* enabled but PKO disabled.
*
* @param xiface Interface to bring up
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_npi_enable(int xiface);
/**
* Sets the number of pipe used by SLI packet output in the variable,
* which then later used for setting it up in HW
*/
void cvmx_npi_config_set_num_pipes(int num_pipes);
#endif

View File

@ -0,0 +1,319 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Helper functions for PKI
*/
#ifndef __CVMX_HELPER_PKI_H__
#define __CVMX_HELPER_PKI_H__
#include "cvmx-pki.h"
/* Modify this if more than 8 ilk channels need to be supported */
#define CVMX_MAX_PORT_PER_INTERFACE 64
#define CVMX_MAX_QOS_PRIORITY 64
#define CVMX_PKI_FIND_AVAILABLE_RSRC (-1)
/** Per-QOS-priority PKI scheduling resources (pool, aura, SSO group) */
struct cvmx_pki_qos_schd {
	cvmx_fpa3_gaura_t _aura; /* Resolved aura handle */
	cvmx_fpa3_pool_t _pool; /* Resolved pool handle */
	bool pool_per_qos; /* Use a private pool for this QOS level */
	int pool_num;
	char *pool_name;
	u64 pool_buff_size;
	u64 pool_max_buff;
	bool aura_per_qos; /* Use a private aura for this QOS level */
	int aura_num;
	char *aura_name;
	u64 aura_buff_cnt;
	bool sso_grp_per_qos; /* Use a dedicated SSO group for this QOS level */
	int sso_grp;
	u16 port_add;
	int qpg_base; /* Base QPG table entry */
};
/** Per-port PKI scheduling parameters (style, pool, aura, SSO group) */
struct cvmx_pki_prt_schd {
	cvmx_fpa3_pool_t _pool; /* Resolved pool handle */
	cvmx_fpa3_gaura_t _aura; /* Resolved aura handle */
	bool cfg_port; /* True if this port should be configured */
	int style;
	bool pool_per_prt; /* Use a private pool for this port */
	int pool_num;
	char *pool_name;
	u64 pool_buff_size;
	u64 pool_max_buff;
	bool aura_per_prt; /* Use a private aura for this port */
	int aura_num;
	char *aura_name;
	u64 aura_buff_cnt;
	bool sso_grp_per_prt; /* Use a dedicated SSO group for this port */
	int sso_grp;
	enum cvmx_pki_qpg_qos qpg_qos;
	int qpg_base;
	struct cvmx_pki_qos_schd qos_s[CVMX_MAX_QOS_PRIORITY]; /* Per-QOS config */
};
/** Per-interface PKI scheduling parameters covering all of its ports */
struct cvmx_pki_intf_schd {
	cvmx_fpa3_pool_t _pool; /* Resolved pool handle */
	cvmx_fpa3_gaura_t _aura; /* Resolved aura handle */
	bool style_per_intf; /* Share one style across the whole interface */
	int style;
	bool pool_per_intf; /* Share one pool across the whole interface */
	int pool_num;
	char *pool_name;
	u64 pool_buff_size;
	u64 pool_max_buff;
	bool aura_per_intf; /* Share one aura across the whole interface */
	int aura_num;
	char *aura_name;
	u64 aura_buff_cnt;
	bool sso_grp_per_intf; /* Share one SSO group across the interface */
	int sso_grp;
	bool qos_share_aura; /* QOS levels share the interface aura */
	bool qos_share_grp; /* QOS levels share the interface SSO group */
	int qpg_base;
	struct cvmx_pki_prt_schd prt_s[CVMX_MAX_PORT_PER_INTERFACE]; /* Per-port config */
};
/** Global PKI resources that may be shared between interfaces and ports */
struct cvmx_pki_global_schd {
	bool setup_pool; /* Create the shared pool */
	int pool_num;
	char *pool_name;
	u64 pool_buff_size;
	u64 pool_max_buff;
	bool setup_aura; /* Create the shared aura */
	int aura_num;
	char *aura_name;
	u64 aura_buff_cnt;
	bool setup_sso_grp; /* Configure the shared SSO group */
	int sso_grp;
	cvmx_fpa3_pool_t _pool; /* Resolved pool handle */
	cvmx_fpa3_gaura_t _aura; /* Resolved aura handle */
};
/** Legacy QOS watcher: matches a packet field and steers to an SSO group */
struct cvmx_pki_legacy_qos_watcher {
	bool configured; /* True once this watcher slot is in use */
	enum cvmx_pki_term field; /* Packet field/term to match on */
	u32 data; /* Match value */
	u32 data_mask; /* Mask applied to the match value */
	u8 advance; /* Parse-pointer advance */
	u8 sso_grp; /* SSO group for matching packets */
};
extern bool cvmx_pki_dflt_init[CVMX_MAX_NODES];
extern struct cvmx_pki_pool_config pki_dflt_pool[CVMX_MAX_NODES];
extern struct cvmx_pki_aura_config pki_dflt_aura[CVMX_MAX_NODES];
extern struct cvmx_pki_style_config pki_dflt_style[CVMX_MAX_NODES];
extern struct cvmx_pki_pkind_config pki_dflt_pkind[CVMX_MAX_NODES];
extern u64 pkind_style_map[CVMX_MAX_NODES][CVMX_PKI_NUM_PKIND];
extern struct cvmx_pki_sso_grp_config pki_dflt_sso_grp[CVMX_MAX_NODES];
extern struct cvmx_pki_legacy_qos_watcher qos_watcher[8];
/**
* This function enables the PKI hardware to
* start accepting/processing packets.
* @param node node number
*/
void cvmx_helper_pki_enable(int node);
/**
* This function frees up PKI resources consumed by that port.
* This function should only be called if port resources
* (fpa pools aura, style qpg entry pcam entry etc.) are not shared
* @param xipd_port ipd port number for which resources need to
* be freed.
*/
int cvmx_helper_pki_port_shutdown(int xipd_port);
/**
* This function shuts down complete PKI hardware
* and software resources.
* @param node node number where PKI needs to shutdown.
*/
void cvmx_helper_pki_shutdown(int node);
/**
* This function calculates how many QPG entries will be needed for
* a particular QOS.
* @param qpg_qos qos value for which entries need to be calculated.
*/
int cvmx_helper_pki_get_num_qpg_entry(enum cvmx_pki_qpg_qos qpg_qos);
/**
* This function setups the qos table by allocating qpg entry and writing
* the provided parameters to that entry (offset).
* @param node node number.
* @param qpg_cfg pointer to struct containing qpg configuration
*/
int cvmx_helper_pki_set_qpg_entry(int node, struct cvmx_pki_qpg_config *qpg_cfg);
/**
* This function sets up aura QOS for RED, backpressure and tail-drop.
*
* @param node node number.
* @param aura aura to configure.
* @param ena_red enable RED based on [DROP] and [PASS] levels
* 1: enable 0:disable
* @param pass_thresh pass threshold for RED.
* @param drop_thresh drop threshold for RED
* @param ena_bp enable backpressure based on [BP] level.
* 1:enable 0:disable
* @param bp_thresh backpressure threshold.
* @param ena_drop enable tail drop.
* 1:enable 0:disable
* @return Zero on success. Negative on failure
*/
int cvmx_helper_setup_aura_qos(int node, int aura, bool ena_red, bool ena_drop, u64 pass_thresh,
u64 drop_thresh, bool ena_bp, u64 bp_thresh);
/**
* This function maps specified bpid to all the auras from which it can receive bp and
* then maps that bpid to all the channels that bpid can assert bp on.
*
* @param node node number.
* @param aura aura number which will back pressure specified bpid.
* @param bpid bpid to map.
* @param chl_map array of channels to map to that bpid.
* @param chl_cnt number of channel/ports to map to that bpid.
* @return Zero on success. Negative on failure
*/
int cvmx_helper_pki_map_aura_chl_bpid(int node, u16 aura, u16 bpid, u16 chl_map[], u16 chl_cnt);
/**
* This function sets up the global pool, aura and sso group
* resources which application can use between any interfaces
* and ports.
* @param node node number
* @param gblsch pointer to struct containing global
* scheduling parameters.
*/
int cvmx_helper_pki_set_gbl_schd(int node, struct cvmx_pki_global_schd *gblsch);
/**
* This function sets up scheduling parameters (pool, aura, sso group etc)
* of an ipd port.
* @param xipd_port ipd port number
* @param prtsch pointer to struct containing port's
* scheduling parameters.
*/
int cvmx_helper_pki_init_port(int xipd_port, struct cvmx_pki_prt_schd *prtsch);
/**
* This function sets up scheduling parameters (pool, aura, sso group etc)
* of an interface (all ports/channels on that interface).
* @param xiface interface number with node.
* @param intfsch pointer to struct containing interface
* scheduling parameters.
* @param gblsch pointer to struct containing global scheduling parameters
* (can be NULL if not used)
*/
int cvmx_helper_pki_init_interface(const int xiface, struct cvmx_pki_intf_schd *intfsch,
struct cvmx_pki_global_schd *gblsch);
/**
* This function gets all the PKI parameters related to that
* particular port from hardware.
* @param xipd_port ipd port number to get parameter of
* @param port_cfg pointer to structure where to store read parameters
*/
void cvmx_pki_get_port_config(int xipd_port, struct cvmx_pki_port_config *port_cfg);
/**
* This function sets all the PKI parameters related to that
* particular port in hardware.
* @param xipd_port ipd port number to get parameter of
* @param port_cfg pointer to structure containing port parameters
*/
void cvmx_pki_set_port_config(int xipd_port, struct cvmx_pki_port_config *port_cfg);
/**
* This function displays all the PKI parameters related to that
* particular port.
* @param xipd_port ipd port number to display parameter of
*/
void cvmx_pki_show_port_config(int xipd_port);
/**
* Modifies maximum frame length to check.
* It modifies the global frame length set used by this port, any other
* port using the same set will get affected too.
* @param xipd_port ipd port for which to modify max len.
* @param max_size maximum frame length
*/
void cvmx_pki_set_max_frm_len(int xipd_port, u32 max_size);
/**
* This function sets up all the ports of particular interface
* for chosen fcs mode. (only use for backward compatibility).
* New application can control it via init_interface calls.
* @param node node number.
* @param interface interface number.
* @param nports number of ports
* @param has_fcs 1 -- enable fcs check and fcs strip.
* 0 -- disable fcs check.
*/
void cvmx_helper_pki_set_fcs_op(int node, int interface, int nports, int has_fcs);
/**
* This function sets the wqe buffer mode of all ports. First packet data buffer can reside
* either in same buffer as wqe OR it can go in separate buffer. If used the later mode,
* make sure software allocates enough buffers so the WQE is kept separate from the packet data.
* @param node node number.
* @param pkt_outside_wqe 0 = The packet link pointer will be at word [FIRST_SKIP]
* immediately followed by packet data, in the same buffer
* as the work queue entry.
* 1 = The packet link pointer will be at word [FIRST_SKIP] in a new
* buffer separate from the work queue entry. Words following the
* WQE in the same cache line will be zeroed, other lines in the
* buffer will not be modified and will retain stale data (from the
* buffers previous use). This setting may decrease the peak PKI
* performance by up to half on small packets.
*/
void cvmx_helper_pki_set_wqe_mode(int node, bool pkt_outside_wqe);
/**
* This function sets the Packet mode of all ports and styles to little-endian.
* It Changes write operations of packet data to L2C to
* be in little-endian. Does not change the WQE header format, which is
* properly endian neutral.
* @param node node number.
*/
void cvmx_helper_pki_set_little_endian(int node);
void cvmx_helper_pki_set_dflt_pool(int node, int pool, int buffer_size, int buffer_count);
void cvmx_helper_pki_set_dflt_aura(int node, int aura, int pool, int buffer_count);
void cvmx_helper_pki_set_dflt_pool_buffer(int node, int buffer_count);
void cvmx_helper_pki_set_dflt_aura_buffer(int node, int buffer_count);
void cvmx_helper_pki_set_dflt_pkind_map(int node, int pkind, int style);
void cvmx_helper_pki_get_dflt_style(int node, struct cvmx_pki_style_config *style_cfg);
void cvmx_helper_pki_set_dflt_style(int node, struct cvmx_pki_style_config *style_cfg);
void cvmx_helper_pki_get_dflt_qpg(int node, struct cvmx_pki_qpg_config *qpg_cfg);
void cvmx_helper_pki_set_dflt_qpg(int node, struct cvmx_pki_qpg_config *qpg_cfg);
void cvmx_helper_pki_no_dflt_init(int node);
void cvmx_helper_pki_set_dflt_bp_en(int node, bool bp_en);
void cvmx_pki_dump_wqe(const cvmx_wqe_78xx_t *wqp);
int __cvmx_helper_pki_port_setup(int node, int xipd_port);
int __cvmx_helper_pki_global_setup(int node);
void cvmx_helper_pki_show_port_config(int xipd_port);
int __cvmx_helper_pki_install_dflt_vlan(int node);
void __cvmx_helper_pki_set_dflt_ltype_map(int node);
int cvmx_helper_pki_route_dmac(int node, int style, u64 mac_addr, u64 mac_addr_mask,
int final_style);
int cvmx_pki_clone_style(int node, int style, u64 cluster_mask);
void cvmx_helper_pki_modify_prtgrp(int xipd_port, int grp_ok, int grp_bad);
int cvmx_helper_pki_route_prt_dmac(int xipd_port, u64 mac_addr, u64 mac_addr_mask, int grp);
void cvmx_helper_pki_errata(int node);
#endif /* __CVMX_HELPER_PKI_H__ */

View File

@ -0,0 +1,51 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* PKO helper, configuration API
*/
#ifndef __CVMX_HELPER_PKO_H__
#define __CVMX_HELPER_PKO_H__
/* CSR typedefs have been moved to cvmx-pko-defs.h */
/**
 * cvmx_override_pko_queue_priority(int ipd_port, u8 *priorities) is a
 * function pointer. It is meant to allow
 * customization of the PKO queue priorities based on the port
 * number. Users should set this pointer to a function before
 * calling any cvmx-helper operations.
 *
 * NOTE(review): this is a tentative definition in a header; if this header
 * is included from more than one translation unit, linkers running with
 * -fno-common (the GCC >= 10 default) will report duplicate symbols.
 * Presumably this should be declared extern here with a single definition
 * in a .c file — TODO confirm against the rest of the build.
 */
void (*cvmx_override_pko_queue_priority)(int ipd_port, u8 *priorities);
/**
* Gets the fpa pool number of pko pool
*/
s64 cvmx_fpa_get_pko_pool(void);
/**
* Gets the buffer size of pko pool
*/
u64 cvmx_fpa_get_pko_pool_block_size(void);
/**
* Gets the buffer size of pko pool
*/
u64 cvmx_fpa_get_pko_pool_buffer_count(void);
int cvmx_helper_pko_init(void);
/*
 * Backwards-compatibility stub: local PKO initialization is a no-op,
 * so success is reported unconditionally.
 */
static inline int cvmx_pko_initialize_local(void)
{
	return 0;
}
int __cvmx_helper_pko_drain(void);
int __cvmx_helper_interface_setup_pko(int interface);
#endif /* __CVMX_HELPER_PKO_H__ */

View File

@ -0,0 +1,76 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef __CVMX_HELPER_PKO3_H__
#define __CVMX_HELPER_PKO3_H__
/*
* Initialize PKO3 unit on the current node.
*
* Covers the common hardware, memory and global configuration.
* Per-interface initialization is performed separately.
*
* @return 0 on success.
*
*/
int cvmx_helper_pko3_init_global(unsigned int node);
int __cvmx_helper_pko3_init_global(unsigned int node, u16 gaura);
/**
* Initialize a simple interface with a given number of
* fair or prioritized queues.
* This function will assign one channel per sub-interface.
*/
int __cvmx_pko3_config_gen_interface(int xiface, u8 subif, u8 num_queues, bool prioritized);
/*
* Configure and initialize PKO3 for an interface
*
* @param interface is the interface number to configure
* @return 0 on success.
*
*/
int cvmx_helper_pko3_init_interface(int xiface);
int __cvmx_pko3_helper_dqs_activate(int xiface, int index, bool min_pad);
/**
* Uninitialize PKO3 interface
*
* Release all resources held by PKO for an interface.
* The shutdown code is the same for all supported interfaces.
*/
int cvmx_helper_pko3_shut_interface(int xiface);
/**
* Shutdown PKO3
*
* Should be called after all interfaces have been shut down on the PKO3.
*
* Disables the PKO, frees all its buffers.
*/
int cvmx_helper_pko3_shutdown(unsigned int node);
/**
* Show integrated PKO configuration.
*
* @param node node number
*/
int cvmx_helper_pko3_config_dump(unsigned int node);
/**
* Show integrated PKO statistics.
*
* @param node node number
*/
int cvmx_helper_pko3_stats_dump(unsigned int node);
/**
* Clear PKO statistics.
*
* @param node node number
*/
void cvmx_helper_pko3_stats_clear(unsigned int node);
#endif /* __CVMX_HELPER_PKO3_H__ */

View File

@ -0,0 +1,99 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Functions for RGMII/GMII/MII initialization, configuration,
* and monitoring.
*/
#ifndef __CVMX_HELPER_RGMII_H__
#define __CVMX_HELPER_RGMII_H__
/**
* @INTERNAL
* Probe RGMII ports and determine the number present
*
* @param xiface Interface to probe
*
* @return Number of RGMII/GMII/MII ports (0-4).
*/
int __cvmx_helper_rgmii_probe(int xiface);
/**
* Put an RGMII interface in loopback mode. Internal packets sent
* out will be received back again on the same port. Externally
* received packets will echo back out.
*
* @param port IPD port number to loop.
*/
void cvmx_helper_rgmii_internal_loopback(int port);
/**
* @INTERNAL
* Configure all of the ASX, GMX, and PKO registers required
* to get RGMII to function on the supplied interface.
*
* @param xiface PKO Interface to configure (0 or 1)
*
* @return Zero on success
*/
int __cvmx_helper_rgmii_enable(int xiface);
/**
* @INTERNAL
* Return the link state of an IPD/PKO port as returned by
* auto negotiation. The result of this function may not match
* Octeon's link config if auto negotiation has changed since
* the last call to cvmx_helper_link_set().
*
* @param ipd_port IPD/PKO port to query
*
* @return Link state
*/
cvmx_helper_link_info_t __cvmx_helper_gmii_link_get(int ipd_port);
/**
* @INTERNAL
* Return the link state of an IPD/PKO port as returned by
* auto negotiation. The result of this function may not match
* Octeon's link config if auto negotiation has changed since
* the last call to cvmx_helper_link_set().
*
* @param ipd_port IPD/PKO port to query
*
* @return Link state
*/
cvmx_helper_link_info_t __cvmx_helper_rgmii_link_get(int ipd_port);
/**
* @INTERNAL
* Configure an IPD/PKO port for the specified link state. This
* function does not influence auto negotiation at the PHY level.
* The passed link state must always match the link state returned
* by cvmx_helper_link_get(). It is normally best to use
* cvmx_helper_link_autoconf() instead.
*
* @param ipd_port IPD/PKO port to configure
* @param link_info The new link state
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_rgmii_link_set(int ipd_port, cvmx_helper_link_info_t link_info);
/**
* @INTERNAL
* Configure a port for internal and/or external loopback. Internal loopback
* causes packets sent by the port to be received by Octeon. External loopback
* causes packets received from the wire to sent out again.
*
* @param ipd_port IPD/PKO port to loopback.
* @param enable_internal
* Non zero if you want internal loopback
* @param enable_external
* Non zero if you want external loopback
*
* @return Zero on success, negative on failure.
*/
int __cvmx_helper_rgmii_configure_loopback(int ipd_port, int enable_internal, int enable_external);
#endif

View File

@ -0,0 +1,437 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Helper functions to abstract SFP and QSFP connectors
*/
#ifndef __CVMX_HELPER_SFP_H__
#define __CVMX_HELPER_SFP_H__
/**
* Maximum size for the SFP EEPROM. Currently only 96 bytes are used.
*/
#define CVMX_SFP_MAX_EEPROM_SIZE 0x100
/**
* Default address of sfp EEPROM
*/
#define CVMX_SFP_DEFAULT_I2C_ADDR 0x50
/**
* Default address of SFP diagnostics chip
*/
#define CVMX_SFP_DEFAULT_DIAG_I2C_ADDR 0x51
struct cvmx_fdt_sfp_info;
struct cvmx_fdt_gpio_info;
/**
 * Connector type for module, usually we only see SFP and QSFPP
 *
 * Values match the SFF-8024 identifier codes read from the module EEPROM.
 */
enum cvmx_phy_sfp_conn_type {
	CVMX_SFP_CONN_GBIC = 0x01, /** GBIC */
	CVMX_SFP_CONN_SFP = 0x03, /** SFP/SFP+/SFP28 */
	CVMX_SFP_CONN_QSFP = 0x0C, /** 1G QSFP (obsolete) */
	CVMX_SFP_CONN_QSFPP = 0x0D, /** QSFP+ or later */
	CVMX_SFP_CONN_QSFP28 = 0x11, /** QSFP28 (100Gbps) */
	CVMX_SFP_CONN_MICRO_QSFP = 0x17, /** Micro QSFP */
	CVMX_SFP_CONN_QSFP_DD = 0x18, /** QSFP-DD Double Density 8X */
	CVMX_SFP_CONN_SFP_DD = 0x1A, /** SFP-DD Double Density 2X */
};
/**
 * Module type plugged into a SFP/SFP+/QSFP+ port.
 * Note that the numbering is sparse; only values this driver recognizes
 * are listed, everything else maps to CVMX_SFP_MOD_OTHER.
 */
enum cvmx_phy_sfp_mod_type {
	CVMX_SFP_MOD_UNKNOWN = 0, /** Unknown or unspecified */
	/** Fiber optic module (LC connector) */
	CVMX_SFP_MOD_OPTICAL_LC = 0x7,
	/** Multiple optical */
	CVMX_SFP_MOD_MULTIPLE_OPTICAL = 0x9,
	/** Fiber optic module (pigtail, no connector) */
	CVMX_SFP_MOD_OPTICAL_PIGTAIL = 0xB,
	CVMX_SFP_MOD_COPPER_PIGTAIL = 0x21, /** copper module */
	CVMX_SFP_MOD_RJ45 = 0x22, /** RJ45 (i.e. 10GBase-T) */
	/** No separable connector (SFP28/copper) */
	CVMX_SFP_MOD_NO_SEP_CONN = 0x23,
	/** MXC 2X16 */
	CVMX_SFP_MOD_MXC_2X16 = 0x24,
	/** CS optical connector */
	CVMX_SFP_MOD_CS_OPTICAL = 0x25,
	/** Mini CS optical connector */
	CVMX_SFP_MOD_MINI_CS_OPTICAL = 0x26,
	/** Unknown/other module type */
	CVMX_SFP_MOD_OTHER
};
/** Peak rate supported by SFP cable (per-lane nominal signaling rate) */
enum cvmx_phy_sfp_rate {
	CVMX_SFP_RATE_UNKNOWN, /** Unknown rate */
	CVMX_SFP_RATE_1G, /** 1Gbps */
	CVMX_SFP_RATE_10G, /** 10Gbps */
	CVMX_SFP_RATE_25G, /** 25Gbps */
	CVMX_SFP_RATE_40G, /** 40Gbps */
	CVMX_SFP_RATE_100G /** 100Gbps */
};
/**
 * Cable compliance specification
 * See table 4-4 from SFF-8024 for the extended specification compliance
 * codes. Gaps in the numbering are codes this driver does not use.
 */
enum cvmx_phy_sfp_cable_ext_compliance {
	CVMX_SFP_CABLE_UNSPEC = 0,
	CVMX_SFP_CABLE_100G_25GAUI_C2M_AOC_HIGH_BER = 0x01, /** Active optical cable */
	CVMX_SFP_CABLE_100G_SR4_25G_SR = 0x2,
	CVMX_SFP_CABLE_100G_LR4_25G_LR = 0x3,
	CVMX_SFP_CABLE_100G_ER4_25G_ER = 0x4,
	CVMX_SFP_CABLE_100G_SR10 = 0x5,
	CVMX_SFP_CABLE_100G_CWDM4_MSA = 0x6,
	CVMX_SFP_CABLE_100G_PSM4 = 0x7,
	CVMX_SFP_CABLE_100G_25GAUI_C2M_ACC_HIGH_BER = 0x8,
	CVMX_SFP_CABLE_100G_CWDM4 = 0x9,
	CVMX_SFP_CABLE_100G_CR4_25G_CR_CA_L = 0xB,
	CVMX_SFP_CABLE_25G_CR_CA_S = 0xC,
	CVMX_SFP_CABLE_25G_CR_CA_N = 0xD,
	CVMX_SFP_CABLE_40G_ER4 = 0x10,
	CVMX_SFP_CABLE_4X10G_SR = 0x11,
	CVMX_SFP_CABLE_40G_PSM4 = 0x12,
	CVMX_SFP_CABLE_G959_1_P1I1_2D1 = 0x13,
	CVMX_SFP_CABLE_G959_1_P1S1_2D2 = 0x14,
	CVMX_SFP_CABLE_G959_1_P1L1_2D2 = 0x15,
	CVMX_SFP_CABLE_10GBASE_T = 0x16,
	CVMX_SFP_CABLE_100G_CLR4 = 0x17,
	CVMX_SFP_CABLE_100G_25GAUI_C2M_AOC_LOW_BER = 0x18,
	CVMX_SFP_CABLE_100G_25GAUI_C2M_ACC_LOW_BER = 0x19,
	CVMX_SFP_CABLE_100G_2_LAMBDA_DWDM = 0x1a,
	CVMX_SFP_CABLE_100G_1550NM_WDM = 0x1b,
	CVMX_SFP_CABLE_10GBASE_T_SR = 0x1c,
	CVMX_SFP_CABLE_5GBASE_T = 0x1d,
	CVMX_SFP_CABLE_2_5GBASE_T = 0x1e,
	CVMX_SFP_CABLE_40G_SWDM4 = 0x1f,
	CVMX_SFP_CABLE_100G_SWDM4 = 0x20,
	CVMX_SFP_CABLE_100G_PAM4_BIDI = 0x21,
	CVMX_SFP_CABLE_100G_4WDM_10_FEC_HOST = 0x22,
	CVMX_SFP_CABLE_100G_4WDM_20_FEC_HOST = 0x23,
	CVMX_SFP_CABLE_100G_4WDM_40_FEC_HOST = 0x24,
	CVMX_SFP_CABLE_100GBASE_DR_CAUI4_NO_FEC = 0x25,
	CVMX_SFP_CABLE_100G_FR_CAUI4_NO_FEC = 0x26,
	CVMX_SFP_CABLE_100G_LR_CAUI4_NO_FEC = 0x27,
	CVMX_SFP_CABLE_ACTIVE_COPPER_50_100_200GAUI_LOW_BER = 0x30,
	CVMX_SFP_CABLE_ACTIVE_OPTICAL_50_100_200GAUI_LOW_BER = 0x31,
	CVMX_SFP_CABLE_ACTIVE_COPPER_50_100_200GAUI_HI_BER = 0x32,
	CVMX_SFP_CABLE_ACTIVE_OPTICAL_50_100_200GAUI_HI_BER = 0x33,
	CVMX_SFP_CABLE_50_100_200G_CR = 0x40,
	CVMX_SFP_CABLE_50_100_200G_SR = 0x41,
	CVMX_SFP_CABLE_50GBASE_FR_200GBASE_DR4 = 0x42,
	CVMX_SFP_CABLE_200GBASE_FR4 = 0x43,
	CVMX_SFP_CABLE_200G_1550NM_PSM4 = 0x44,
	CVMX_SFP_CABLE_50GBASE_LR = 0x45,
	CVMX_SFP_CABLE_200GBASE_LR4 = 0x46,
	CVMX_SFP_CABLE_64GFC_EA = 0x50,
	CVMX_SFP_CABLE_64GFC_SW = 0x51,
	CVMX_SFP_CABLE_64GFC_LW = 0x52,
	CVMX_SFP_CABLE_128GFC_EA = 0x53,
	CVMX_SFP_CABLE_128GFC_SW = 0x54,
	CVMX_SFP_CABLE_128GFC_LW = 0x55,
};
/**
 * Optical modes module is compliant with.
 * These are single-bit values and may be OR'd together (see the
 * "(logical OR)" usage in struct cvmx_sfp_mod_info).
 */
enum cvmx_phy_sfp_10g_eth_compliance {
	CVMX_SFP_CABLE_10GBASE_ER = 0x80, /** 10G ER */
	CVMX_SFP_CABLE_10GBASE_LRM = 0x40, /** 10G LRM */
	CVMX_SFP_CABLE_10GBASE_LR = 0x20, /** 10G LR */
	CVMX_SFP_CABLE_10GBASE_SR = 0x10 /** 10G SR */
};
/**
 * Diagnostic ASIC compatibility: which revision of the SFF-8472 diagnostic
 * monitoring interface the module implements.
 */
enum cvmx_phy_sfp_sff_8472_diag_rev {
	CVMX_SFP_SFF_8472_NO_DIAG = 0x00,	  /**< Diagnostics not supported */
	CVMX_SFP_SFF_8472_REV_9_3 = 0x01,	  /**< SFF-8472 rev 9.3 */
	CVMX_SFP_SFF_8472_REV_9_5 = 0x02,	  /**< SFF-8472 rev 9.5 */
	CVMX_SFP_SFF_8472_REV_10_2 = 0x03,	  /**< SFF-8472 rev 10.2 */
	CVMX_SFP_SFF_8472_REV_10_4 = 0x04,	  /**< SFF-8472 rev 10.4 */
	CVMX_SFP_SFF_8472_REV_11_0 = 0x05,	  /**< SFF-8472 rev 11.0 */
	CVMX_SFP_SFF_8472_REV_11_3 = 0x06,	  /**< SFF-8472 rev 11.3 */
	CVMX_SFP_SFF_8472_REV_11_4 = 0x07,	  /**< SFF-8472 rev 11.4 */
	CVMX_SFP_SFF_8472_REV_12_0 = 0x08,	  /**< SFF-8472 rev 12.0 */
	CVMX_SFP_SFF_8472_REV_UNALLOCATED = 0xff  /**< Unallocated/unknown revision */
};
/**
 * Data structure describing the current SFP or QSFP EEPROM,
 * filled in by cvmx_phy_sfp_parse_eeprom().
 */
struct cvmx_sfp_mod_info {
	enum cvmx_phy_sfp_conn_type conn_type; /**< Connector type */
	enum cvmx_phy_sfp_mod_type mod_type;   /**< Module type */
	enum cvmx_phy_sfp_rate rate;	       /**< Rate of module */
	/** 10G Ethernet Compliance codes (logical OR) */
	enum cvmx_phy_sfp_10g_eth_compliance eth_comp;
	/** Extended Cable compliance */
	enum cvmx_phy_sfp_cable_ext_compliance cable_comp;
	u8 vendor_name[17]; /**< Module vendor name (NUL terminated) */
	u8 vendor_oui[3];   /**< Vendor OUI */
	u8 vendor_pn[17];   /**< Vendor part number (NUL terminated) */
	u8 vendor_rev[5];   /**< Vendor revision (NUL terminated) */
	u8 vendor_sn[17];   /**< Vendor serial number (NUL terminated) */
	u8 date_code[9];    /**< Date code (NUL terminated) */
	bool valid;	    /**< True if module is valid */
	bool active_cable;  /**< False for passive copper */
	bool copper_cable;  /**< True if cable is copper */
	/** True if module is limiting (i.e. not passive copper) */
	bool limiting;
	/** Maximum length of copper cable in meters */
	int max_copper_cable_len;
	/** Max single mode cable length in meters */
	int max_single_mode_cable_length;
	/** Max 50um OM2 cable length */
	int max_50um_om2_cable_length;
	/** Max 62.5um OM1 cable length */
	int max_62_5um_om1_cable_length;
	/** Max 50um OM4 cable length */
	int max_50um_om4_cable_length;
	/** Max 50um OM3 cable length */
	int max_50um_om3_cable_length;
	/** Minimum bitrate in Mbps */
	int bitrate_min;
	/** Maximum bitrate in Mbps */
	int bitrate_max;
	/**
	 * Set to true if forward error correction is required,
	 * for example, a 25GBase-CR CA-S cable.
	 *
	 * FEC should only be disabled at 25G with CA-N cables. FEC is required
	 * with 5M and longer cables.
	 */
	bool fec_required;
	/** True if RX output is linear */
	bool linear_rx_output;
	/** Power level, can be 1, 2 or 3 */
	int power_level;
	/** False if conventional cooling is used, true for active cooling */
	bool cooled_laser;
	/** True if internal retimer or clock and data recovery circuit */
	bool internal_cdr;
	/** True if LoS is implemented */
	bool los_implemented;
	/** True if LoS is inverted from the standard */
	bool los_inverted;
	/** True if TX_FAULT is implemented */
	bool tx_fault_implemented;
	/** True if TX_DISABLE is implemented */
	bool tx_disable_implemented;
	/** True if RATE_SELECT is implemented */
	bool rate_select_implemented;
	/** True if tuneable transmitter technology is used */
	bool tuneable_transmitter;
	/** True if receiver decision threshold is implemented */
	bool rx_decision_threshold_implemented;
	/** True if diagnostic monitoring present */
	bool diag_monitoring;
	/** True if diagnostic address 0x7f is used for selecting the page */
	bool diag_paging;
	/** Diagnostic feature revision */
	enum cvmx_phy_sfp_sff_8472_diag_rev diag_rev;
	/** True if an address change sequence is required for diagnostics */
	bool diag_addr_change_required;
	/** True if RX power is averaged, false if OMA */
	bool diag_rx_power_averaged;
	/** True if diagnostics are externally calibrated */
	bool diag_externally_calibrated;
	/** True if diagnostics are internally calibrated */
	bool diag_internally_calibrated;
	/** True if soft rate select control implemented per SFF-8431 */
	bool diag_soft_rate_select_control;
	/** True if application select control implemented per SFF-8079 */
	bool diag_app_select_control;
	/** True if soft RATE_SELECT control and monitoring implemented */
	bool diag_soft_rate_select_implemented;
	/** True if soft RX_LOS monitoring implemented */
	bool diag_soft_rx_los_implemented;
	/** True if soft TX_FAULT monitoring implemented */
	bool diag_soft_tx_fault_implemented;
	/** True if soft TX_DISABLE control and monitoring implemented */
	bool diag_soft_tx_disable_implemented;
	/** True if alarm/warning flags implemented */
	bool diag_alarm_warning_flags_implemented;
};
/**
* Reads the SFP EEPROM using the i2c bus
*
* @param[out] buffer Buffer to store SFP EEPROM data in
* The buffer should be SFP_MAX_EEPROM_SIZE bytes.
* @param i2c_bus i2c bus number to read from for SFP port
* @param i2c_addr i2c address to use, 0 for default
*
* @return -1 if invalid bus or i2c read error, 0 for success
*/
int cvmx_phy_sfp_read_i2c_eeprom(u8 *buffer, int i2c_bus, int i2c_addr);
/**
* Reads the SFP/SFP+/QSFP EEPROM and outputs the type of module or cable
* plugged in
*
* @param[out] sfp_info Info about SFP module
* @param[in] buffer SFP EEPROM buffer to parse
*
* @return 0 on success, -1 if error reading EEPROM or if EEPROM corrupt
*/
int cvmx_phy_sfp_parse_eeprom(struct cvmx_sfp_mod_info *sfp_info, const u8 *buffer);
/**
* Prints out information about a SFP/QSFP device
*
* @param[in] sfp_info data structure to print
*/
void cvmx_phy_sfp_print_info(const struct cvmx_sfp_mod_info *sfp_info);
/**
* Reads and parses SFP/QSFP EEPROM
*
* @param sfp sfp handle to read
*
* @return 0 for success, -1 on error.
*/
int cvmx_sfp_read_i2c_eeprom(struct cvmx_fdt_sfp_info *sfp);
/**
* Returns the information about a SFP/QSFP device
*
* @param sfp sfp handle
*
* @return sfp_info Pointer sfp mod info data structure
*/
const struct cvmx_sfp_mod_info *cvmx_phy_get_sfp_mod_info(const struct cvmx_fdt_sfp_info *sfp);
/**
* Function called to check and return the status of the mod_abs pin or
* mod_pres pin for QSFPs.
*
* @param sfp Handle to SFP information.
* @param data User-defined data passed to the function
*
* @return 0 if absent, 1 if present, -1 on error
*/
int cvmx_sfp_check_mod_abs(struct cvmx_fdt_sfp_info *sfp, void *data);
/**
* Registers a function to be called to check mod_abs/mod_pres for a SFP/QSFP
* slot.
*
* @param sfp Handle to SFP data structure
* @param check_mod_abs Function to be called or NULL to remove
* @param mod_abs_data User-defined data to be passed to check_mod_abs
*
* @return 0 for success
*/
int cvmx_sfp_register_check_mod_abs(struct cvmx_fdt_sfp_info *sfp,
int (*check_mod_abs)(struct cvmx_fdt_sfp_info *sfp, void *data),
void *mod_abs_data);
/**
* Registers a function to be called whenever the mod_abs/mod_pres signal
* changes.
*
* @param sfp Handle to SFP data structure
* @param mod_abs_changed Function called whenever mod_abs is changed
* or NULL to remove.
* @param mod_abs_changed_data User-defined data passed to
* mod_abs_changed
*
* @return 0 for success
*/
int cvmx_sfp_register_mod_abs_changed(struct cvmx_fdt_sfp_info *sfp,
int (*mod_abs_changed)(struct cvmx_fdt_sfp_info *sfp, int val,
void *data),
void *mod_abs_changed_data);
/**
* Function called to check and return the status of the tx_fault pin
*
* @param sfp Handle to SFP information.
* @param data User-defined data passed to the function
*
* @return 0 if signal present, 1 if signal absent, -1 on error
*/
int cvmx_sfp_check_tx_fault(struct cvmx_fdt_sfp_info *sfp, void *data);
/**
* Function called to check and return the status of the rx_los pin
*
* @param sfp Handle to SFP information.
* @param data User-defined data passed to the function
*
* @return 0 if signal present, 1 if signal absent, -1 on error
*/
int cvmx_sfp_check_rx_los(struct cvmx_fdt_sfp_info *sfp, void *data);
/**
* Registers a function to be called whenever rx_los changes
*
* @param sfp Handle to SFP data structure
* @param rx_los_changed Function to be called when rx_los changes
* or NULL to remove the function
* @param rx_los_changed_data User-defined data passed to
* rx_los_changed
*
* @return 0 for success
*/
int cvmx_sfp_register_rx_los_changed(struct cvmx_fdt_sfp_info *sfp,
int (*rx_los_changed)(struct cvmx_fdt_sfp_info *sfp, int val,
void *data),
void *rx_los_changed_data);
/**
* Parses the device tree for SFP and QSFP slots
*
* @param fdt_addr Address of flat device-tree
*
* @return 0 for success, -1 on error
*/
int cvmx_sfp_parse_device_tree(const void *fdt_addr);
/**
* Given an IPD port number find the corresponding SFP or QSFP slot
*
* @param ipd_port IPD port number to search for
*
* @return pointer to SFP data structure or NULL if not found
*/
struct cvmx_fdt_sfp_info *cvmx_sfp_find_slot_by_port(int ipd_port);
/**
* Given a fdt node offset find the corresponding SFP or QSFP slot
*
* @param of_offset flat device tree node offset
*
* @return pointer to SFP data structure or NULL if not found
*/
struct cvmx_fdt_sfp_info *cvmx_sfp_find_slot_by_fdt_node(int of_offset);
/**
* Reads the EEPROMs of all SFP modules.
*
* @return 0 for success
*/
int cvmx_sfp_read_all_modules(void);
/**
* Validates if the module is correct for the specified port
*
* @param[in] sfp SFP port to check
* @param mode interface mode
*
* @return true if module is valid, false if invalid
* NOTE: This will also toggle the error LED, if present
*/
bool cvmx_sfp_validate_module(struct cvmx_fdt_sfp_info *sfp, int mode);
/**
* Prints information about the SFP module
*
* @param[in] sfp sfp data structure
*/
void cvmx_sfp_print_info(const struct cvmx_fdt_sfp_info *sfp);
#endif /* __CVMX_HELPER_SFP_H__ */

View File

@ -0,0 +1,81 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Functions for SGMII initialization, configuration,
* and monitoring.
*/
#ifndef __CVMX_HELPER_SGMII_H__
#define __CVMX_HELPER_SGMII_H__
/**
* @INTERNAL
* Probe a SGMII interface and determine the number of ports
* connected to it. The SGMII interface should still be down after
* this call.
*
* @param xiface Interface to probe
*
* @return Number of ports on the interface. Zero to disable.
*/
int __cvmx_helper_sgmii_probe(int xiface);
int __cvmx_helper_sgmii_enumerate(int xiface);
/**
* @INTERNAL
* Bringup and enable a SGMII interface. After this call packet
* I/O should be fully functional. This is called with IPD
* enabled but PKO disabled.
*
* @param xiface Interface to bring up
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_sgmii_enable(int xiface);
/**
* @INTERNAL
* Return the link state of an IPD/PKO port as returned by
* auto negotiation. The result of this function may not match
* Octeon's link config if auto negotiation has changed since
* the last call to cvmx_helper_link_set().
*
* @param ipd_port IPD/PKO port to query
*
* @return Link state
*/
cvmx_helper_link_info_t __cvmx_helper_sgmii_link_get(int ipd_port);
/**
* @INTERNAL
* Configure an IPD/PKO port for the specified link state. This
* function does not influence auto negotiation at the PHY level.
* The passed link state must always match the link state returned
* by cvmx_helper_link_get(). It is normally best to use
* cvmx_helper_link_autoconf() instead.
*
* @param ipd_port IPD/PKO port to configure
* @param link_info The new link state
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_sgmii_link_set(int ipd_port, cvmx_helper_link_info_t link_info);
/**
* @INTERNAL
* Configure a port for internal and/or external loopback. Internal loopback
* causes packets sent by the port to be received by Octeon. External loopback
* causes packets received from the wire to sent out again.
*
* @param ipd_port IPD/PKO port to loopback.
* @param enable_internal
* Non zero if you want internal loopback
* @param enable_external
* Non zero if you want external loopback
*
* @return Zero on success, negative on failure.
*/
int __cvmx_helper_sgmii_configure_loopback(int ipd_port, int enable_internal, int enable_external);
#endif

View File

@ -0,0 +1,73 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Functions for SPI initialization, configuration,
* and monitoring.
*/
#ifndef __CVMX_HELPER_SPI_H__
#define __CVMX_HELPER_SPI_H__
#include "cvmx-helper.h"
/**
* @INTERNAL
* Probe a SPI interface and determine the number of ports
* connected to it. The SPI interface should still be down after
* this call.
*
* @param interface Interface to probe
*
* @return Number of ports on the interface. Zero to disable.
*/
int __cvmx_helper_spi_probe(int interface);
int __cvmx_helper_spi_enumerate(int interface);
/**
* @INTERNAL
* Bringup and enable a SPI interface. After this call packet I/O
* should be fully functional. This is called with IPD enabled but
* PKO disabled.
*
* @param interface Interface to bring up
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_spi_enable(int interface);
/**
* @INTERNAL
* Return the link state of an IPD/PKO port as returned by
* auto negotiation. The result of this function may not match
* Octeon's link config if auto negotiation has changed since
* the last call to cvmx_helper_link_set().
*
* @param ipd_port IPD/PKO port to query
*
* @return Link state
*/
cvmx_helper_link_info_t __cvmx_helper_spi_link_get(int ipd_port);
/**
* @INTERNAL
* Configure an IPD/PKO port for the specified link state. This
* function does not influence auto negotiation at the PHY level.
* The passed link state must always match the link state returned
* by cvmx_helper_link_get(). It is normally best to use
* cvmx_helper_link_autoconf() instead.
*
* @param ipd_port IPD/PKO port to configure
* @param link_info The new link state
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_spi_link_set(int ipd_port, cvmx_helper_link_info_t link_info);
/**
* Sets the spi timeout in config data
* @param timeout value
*/
void cvmx_spi_config_set_timeout(int timeout);
#endif

View File

@ -0,0 +1,72 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Functions for SRIO initialization, configuration,
* and monitoring.
*/
#ifndef __CVMX_HELPER_SRIO_H__
#define __CVMX_HELPER_SRIO_H__
/**
* @INTERNAL
* Convert interface number to sRIO link number
* per SoC model.
*
* @param xiface Interface to convert
*
* @return Srio link number
*/
int __cvmx_helper_srio_port(int xiface);
/**
* @INTERNAL
* Probe a SRIO interface and determine the number of ports
* connected to it. The SRIO interface should still be down after
* this call.
*
* @param xiface Interface to probe
*
* @return Number of ports on the interface. Zero to disable.
*/
int __cvmx_helper_srio_probe(int xiface);
/**
* @INTERNAL
* Bringup and enable a SRIO interface. After this call packet
* I/O should be fully functional. This is called with IPD
* enabled but PKO disabled.
*
* @param xiface Interface to bring up
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_srio_enable(int xiface);
/**
* @INTERNAL
* Return the link state of an IPD/PKO port as returned by SRIO link status.
*
* @param ipd_port IPD/PKO port to query
*
* @return Link state
*/
cvmx_helper_link_info_t __cvmx_helper_srio_link_get(int ipd_port);
/**
* @INTERNAL
* Configure an IPD/PKO port for the specified link state. This
* function does not influence auto negotiation at the PHY level.
* The passed link state must always match the link state returned
* by cvmx_helper_link_get(). It is normally best to use
* cvmx_helper_link_autoconf() instead.
*
* @param ipd_port IPD/PKO port to configure
* @param link_info The new link state
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_srio_link_set(int ipd_port, cvmx_helper_link_info_t link_info);
#endif

View File

@ -0,0 +1,412 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef __CVMX_HELPER_UTIL_H__
#define __CVMX_HELPER_UTIL_H__
#include "cvmx-mio-defs.h"
#include "cvmx-helper.h"
#include "cvmx-fpa.h"
/* Port-kind identifier; CVMX_INVALID_PKND (-1) marks an invalid value */
typedef char cvmx_pknd_t;
/* Backpressure identifier; CVMX_INVALID_BPID (-1) marks an invalid value */
typedef char cvmx_bpid_t;

#define CVMX_INVALID_PKND ((cvmx_pknd_t)-1)
#define CVMX_INVALID_BPID ((cvmx_bpid_t)-1)
#define CVMX_MAX_PKND	  ((cvmx_pknd_t)64)
#define CVMX_MAX_BPID	  ((cvmx_bpid_t)64)

#define CVMX_HELPER_MAX_IFACE 11
#define CVMX_HELPER_MAX_PORTS 16

/* Maximum range for normalized (a.k.a. IPD) port numbers (12-bit field) */
#define CVMX_PKO3_IPD_NUM_MAX 0x1000 //FIXME- take it from someplace else ?
/* Maximum number of PKO3 descriptor queues per node */
#define CVMX_PKO3_DQ_NUM_MAX 0x400 // 78xx has 1024 queues
/* Reserved IPD port numbers: all-ones is NULL, 0 is the loopback port */
#define CVMX_PKO3_IPD_PORT_NULL (CVMX_PKO3_IPD_NUM_MAX - 1)
#define CVMX_PKO3_IPD_PORT_LOOP 0
struct cvmx_xport {
int node;
int port;
};
typedef struct cvmx_xport cvmx_xport_t;
static inline struct cvmx_xport cvmx_helper_ipd_port_to_xport(int ipd_port)
{
struct cvmx_xport r;
r.port = ipd_port & (CVMX_PKO3_IPD_NUM_MAX - 1);
r.node = (ipd_port >> 12) & CVMX_NODE_MASK;
return r;
}
/**
 * Build a global IPD port number from a node and a per-node port index.
 *
 * @param node  node number
 * @param index per-node IPD port index
 *
 * @return global IPD port number (node placed above the 12-bit port field)
 */
static inline int cvmx_helper_node_to_ipd_port(int node, int index)
{
	/* Each node owns a 4096-entry (12-bit) IPD port space */
	int node_base = node << 12;

	return node_base + index;
}
struct cvmx_xdq {
int node;
int queue;
};
typedef struct cvmx_xdq cvmx_xdq_t;
static inline struct cvmx_xdq cvmx_helper_queue_to_xdq(int queue)
{
struct cvmx_xdq r;
r.queue = queue & (CVMX_PKO3_DQ_NUM_MAX - 1);
r.node = (queue >> 10) & CVMX_NODE_MASK;
return r;
}
/**
 * Build a global descriptor-queue number from a node and a per-node queue.
 *
 * @param node  node number
 * @param queue per-node descriptor queue index
 *
 * @return global DQ number (node placed above the 10-bit queue field)
 */
static inline int cvmx_helper_node_to_dq(int node, int queue)
{
	/* Each node owns a 1024-entry (10-bit) descriptor-queue space */
	int node_base = node << 10;

	return node_base + queue;
}
struct cvmx_xiface {
int node;
int interface;
};
typedef struct cvmx_xiface cvmx_xiface_t;
/**
 * Return node and interface number from XIFACE.
 *
 * @param xiface interface value, optionally carrying node information
 *
 * @return struct that contains node and interface number.
 */
static inline struct cvmx_xiface cvmx_helper_xiface_to_node_interface(int xiface)
{
	cvmx_xiface_t ni;

	/*
	 * The magic byte 0xde in bits [15:8] marks an XIFACE that carries
	 * an explicit node number in the bits above; without it, assume
	 * the interface belongs to the current node.
	 */
	ni.interface = xiface & 0xff;
	if (((xiface >> 0x8) & 0xff) == 0xde)
		ni.node = (xiface >> 16) & CVMX_NODE_MASK;
	else
		ni.node = cvmx_get_node_num();

	return ni;
}
/* Used internally only */
static inline bool __cvmx_helper_xiface_is_null(int xiface)
{
	/* The null interface is flagged by all-ones in the low byte */
	int low_byte = xiface & 0xff;

	return low_byte == 0xff;
}
#define __CVMX_XIFACE_NULL 0xff

/**
 * Return interface with magic number and node information (XIFACE)
 *
 * @param node node of the interface referred to
 * @param interface interface to use.
 *
 * @return encoded XIFACE value
 */
static inline int cvmx_helper_node_interface_to_xiface(int node, int interface)
{
	/* Magic byte 0xde in bits [15:8] marks the encoded form */
	int encoded = (0xde << 8) | (interface & 0xff);

	return encoded | ((node & CVMX_NODE_MASK) << 16);
}
/**
 * Free the pip packet buffers contained in a work queue entry.
 * The work queue entry is not freed.
 *
 * @param work Work queue entry with packet to free
 */
static inline void cvmx_helper_free_pip_pkt_data(cvmx_wqe_t *work)
{
	u64 number_buffers;
	cvmx_buf_ptr_t buffer_ptr;
	cvmx_buf_ptr_t next_buffer_ptr;
	u64 start_of_buffer;

	number_buffers = work->word2.s.bufs;
	/* Zero buffers means a dynamic short packet: nothing to free */
	if (number_buffers == 0)
		return;
	buffer_ptr = work->packet_ptr;

	/*
	 * Since the number of buffers is not zero, we know this is not a
	 * dynamic short packet. We need to check if it is a packet received
	 * with IPD_CTL_STATUS[NO_WPTR]. If this is true, we need to free all
	 * buffers except for the first one. The caller doesn't expect their
	 * WQE pointer to be freed.
	 */
	start_of_buffer = ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
	if (cvmx_ptr_to_phys(work) == start_of_buffer) {
		/* WQE shares the first buffer: skip it in the free loop */
		next_buffer_ptr = *(cvmx_buf_ptr_t *)cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
		buffer_ptr = next_buffer_ptr;
		number_buffers--;
	}

	while (number_buffers--) {
		/* Remember the back pointer is in cache lines, not 64bit words */
		start_of_buffer = ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
		/* Read pointer to next buffer before we free the current buffer. */
		next_buffer_ptr = *(cvmx_buf_ptr_t *)cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
		cvmx_fpa_free(cvmx_phys_to_ptr(start_of_buffer), buffer_ptr.s.pool, 0);
		buffer_ptr = next_buffer_ptr;
	}
}
/**
 * Free the pki packet buffers contained in a work queue entry.
 * If the first packet buffer contains the wqe, the wqe gets freed too, so do
 * not access the wqe after calling this function.
 * This function assumes that the buffers to be freed are from a
 * naturally-aligned pool/aura.
 * It does not use don't-write-back.
 *
 * @param work Work queue entry with packet to free
 */
static inline void cvmx_helper_free_pki_pkt_data(cvmx_wqe_t *work)
{
	u64 number_buffers;
	u64 start_of_buffer;
	cvmx_buf_ptr_pki_t next_buffer_ptr;
	cvmx_buf_ptr_pki_t buffer_ptr;
	cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;

	/* Only applicable to the CN78XX-style (PKI) WQE format */
	if (!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		return;
	}
	/* Make sure errata pki-20776 has been applied */
	cvmx_wqe_pki_errata_20776(work);
	buffer_ptr = wqe->packet_ptr;
	number_buffers = cvmx_wqe_get_bufs(work);

	while (number_buffers--) {
		/* FIXME: change WQE function prototype */
		unsigned int x = cvmx_wqe_get_aura(work);
		cvmx_fpa3_gaura_t aura = __cvmx_fpa3_gaura(x >> 10, x & 0x3ff);
		/* XXX- assumes the buffer is cache-line aligned and naturally aligned mode */
		start_of_buffer = (buffer_ptr.addr >> 7) << 7;
		/* Read pointer to next buffer before we free the current buffer. */
		next_buffer_ptr = *(cvmx_buf_ptr_pki_t *)cvmx_phys_to_ptr(buffer_ptr.addr - 8);
		/* FPA AURA comes from WQE, includes node */
		cvmx_fpa3_free(cvmx_phys_to_ptr(start_of_buffer), aura, 0);
		buffer_ptr = next_buffer_ptr;
	}
}
/**
 * Free the pki wqe entry buffer.
 * If the wqe buffer contains the first packet buffer, the wqe does not get
 * freed here (cvmx_helper_free_pki_pkt_data() frees it in that case).
 * This function assumes that the buffers to be freed are from a
 * naturally-aligned pool/aura.
 * It does not use don't-write-back.
 *
 * @param work Work queue entry to free
 */
static inline void cvmx_wqe_pki_free(cvmx_wqe_t *work)
{
	cvmx_wqe_78xx_t *wqe = (cvmx_wqe_78xx_t *)work;
	unsigned int x;
	cvmx_fpa3_gaura_t aura;

	/* Only applicable to the CN78XX-style (PKI) WQE format */
	if (!octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		return;
	}
	/* Do nothing if the first packet buffer shares WQE buffer */
	if (!wqe->packet_ptr.packet_outside_wqe)
		return;
	/* FIXME change WQE function prototype */
	x = cvmx_wqe_get_aura(work);
	aura = __cvmx_fpa3_gaura(x >> 10, x & 0x3ff);
	cvmx_fpa3_free(work, aura, 0);
}
/**
* Convert a interface mode into a human readable string
*
* @param mode Mode to convert
*
* @return String
*/
const char *cvmx_helper_interface_mode_to_string(cvmx_helper_interface_mode_t mode);
/**
* Debug routine to dump the packet structure to the console
*
* @param work Work queue entry containing the packet to dump
* @return
*/
int cvmx_helper_dump_packet(cvmx_wqe_t *work);
/**
* Get the version of the CVMX libraries.
*
* @return Version string. Note this buffer is allocated statically
* and will be shared by all callers.
*/
const char *cvmx_helper_get_version(void);
/**
* @INTERNAL
* Setup the common GMX settings that determine the number of
* ports. These setting apply to almost all configurations of all
* chips.
*
* @param xiface Interface to configure
* @param num_ports Number of ports on the interface
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_setup_gmx(int xiface, int num_ports);
/**
* @INTERNAL
* Get the number of pko_ports on an interface.
*
* @param interface
*
* @return the number of pko_ports on the interface.
*/
int __cvmx_helper_get_num_pko_ports(int interface);
/**
* Returns the IPD port number for a port on the given
* interface.
*
* @param interface Interface to use
* @param port Port on the interface
*
* @return IPD port number
*/
int cvmx_helper_get_ipd_port(int interface, int port);
/**
* Returns the PKO port number for a port on the given interface,
* This is the base pko_port for o68 and ipd_port for older models.
*
* @param interface Interface to use
* @param port Port on the interface
*
* @return PKO port number and -1 on error.
*/
int cvmx_helper_get_pko_port(int interface, int port);
/**
 * Returns the IPD/PKO port number for the first port on the given
 * interface.
 *
 * @param interface Interface to use
 *
 * @return IPD/PKO port number
 */
static inline int cvmx_helper_get_first_ipd_port(int interface)
{
	/* Port index 0 is always the first port of an interface */
	const int first_index = 0;

	return cvmx_helper_get_ipd_port(interface, first_index);
}
int cvmx_helper_ports_on_interface(int interface);
/**
 * Returns the IPD/PKO port number for the last port on the given
 * interface.
 *
 * @param interface Interface to use
 *
 * @return IPD/PKO port number
 *
 * Note: for o68, the last ipd port on an interface does not always equal to
 * the first plus the number of ports as the ipd ports are not contiguous in
 * some cases, e.g., SGMII.
 *
 * Note: code that makes the assumption of contiguous ipd port numbers needs to
 * be aware of this.
 */
static inline int cvmx_helper_get_last_ipd_port(int interface)
{
	int num_ports = cvmx_helper_ports_on_interface(interface);

	return cvmx_helper_get_ipd_port(interface, num_ports - 1);
}
/**
* Free the packet buffers contained in a work queue entry.
* The work queue entry is not freed.
* Note that this function will not free the work queue entry
* even if it contains a non-redundant data packet, and hence
* it is not really comparable to how the PKO would free a packet
* buffers if requested.
*
* @param work Work queue entry with packet to free
*/
void cvmx_helper_free_packet_data(cvmx_wqe_t *work);
/**
* Returns the interface number for an IPD/PKO port number.
*
* @param ipd_port IPD/PKO port number
*
* @return Interface number
*/
int cvmx_helper_get_interface_num(int ipd_port);
/**
* Returns the interface index number for an IPD/PKO port
* number.
*
* @param ipd_port IPD/PKO port number
*
* @return Interface index number
*/
int cvmx_helper_get_interface_index_num(int ipd_port);
/**
 * Get port kind for a given port in an interface.
 *
 * @param xiface Interface
 * @param index index of the port in the interface
 *
 * @return port kind on success and -1 on failure
 */
int cvmx_helper_get_pknd(int xiface, int index);
/**
 * Get bpid for a given port in an interface.
 *
 * @param interface Interface
 * @param port index of the port in the interface
 *
 * @return bpid on success and -1 on failure
 */
int cvmx_helper_get_bpid(int interface, int port);
/**
* Internal functions.
*/
int __cvmx_helper_post_init_interfaces(void);
int cvmx_helper_setup_red(int pass_thresh, int drop_thresh);
void cvmx_helper_show_stats(int port);
/*
 * Return the number of array elements.
 * Only valid on actual arrays, not on pointers or array parameters.
 */
#define NUM_ELEMENTS(arr) (sizeof(arr) / sizeof((arr)[0]))
/**
 * Prints out a buffer with the address, hex bytes, and ASCII
 *
 * @param addr Start address to print on the left
 * @param[in] buffer array of bytes to print
 * @param count Number of bytes to print
 */
void cvmx_print_buffer_u8(unsigned int addr, const u8 *buffer, size_t count);

#endif /* __CVMX_HELPER_UTIL_H__ */

View File

@ -0,0 +1,108 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Functions for XAUI initialization, configuration,
* and monitoring.
*/
#ifndef __CVMX_HELPER_XAUI_H__
#define __CVMX_HELPER_XAUI_H__
/**
* @INTERNAL
* Probe a XAUI interface and determine the number of ports
* connected to it. The XAUI interface should still be down
* after this call.
*
* @param xiface Interface to probe
*
* @return Number of ports on the interface. Zero to disable.
*/
int __cvmx_helper_xaui_probe(int xiface);
int __cvmx_helper_xaui_enumerate(int xiface);
/**
* @INTERNAL
* Bringup and enable a XAUI interface. After this call packet
* I/O should be fully functional. This is called with IPD
* enabled but PKO disabled.
*
* @param xiface Interface to bring up
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_xaui_enable(int xiface);
/**
 * Retrain XAUI interface.
 *
 * GMX is disabled as part of retraining.
 * While GMX is disabled, newly received packets are dropped.
 * If GMX was in the middle of receiving a packet when disabled,
 * that packet will be received before GMX idles.
 * Transmitted packets are buffered normally, but not sent.
 * If GMX was in the middle of transmitting a packet when disabled,
 * that packet will be transmitted before GMX idles.
 *
 * @param interface Interface to retrain
 *
 * @return Zero on success, negative on failure
 */
int cvmx_helper_xaui_link_retrain(int interface);
/**
* Reinitialize XAUI interface. Does a probe without changing the hardware
* state.
*
* @param interface Interface to reinitialize
*
* @return 0 on success, negative on failure
*/
int cvmx_helper_xaui_link_reinit(int interface);
/**
* @INTERNAL
* Return the link state of an IPD/PKO port as returned by
* auto negotiation. The result of this function may not match
* Octeon's link config if auto negotiation has changed since
* the last call to cvmx_helper_link_set().
*
* @param ipd_port IPD/PKO port to query
*
* @return Link state
*/
cvmx_helper_link_info_t __cvmx_helper_xaui_link_get(int ipd_port);
/**
* @INTERNAL
* Configure an IPD/PKO port for the specified link state. This
* function does not influence auto negotiation at the PHY level.
* The passed link state must always match the link state returned
* by cvmx_helper_link_get(). It is normally best to use
* cvmx_helper_link_autoconf() instead.
*
* @param ipd_port IPD/PKO port to configure
* @param link_info The new link state
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_xaui_link_set(int ipd_port, cvmx_helper_link_info_t link_info);
/**
* @INTERNAL
* Configure a port for internal and/or external loopback. Internal loopback
* causes packets sent by the port to be received by Octeon. External loopback
* causes packets received from the wire to sent out again.
*
* @param ipd_port IPD/PKO port to loopback.
* @param enable_internal
* Non zero if you want internal loopback
* @param enable_external
* Non zero if you want external loopback
*
* @return Zero on success, negative on failure.
*/
int __cvmx_helper_xaui_configure_loopback(int ipd_port, int enable_internal, int enable_external);
#endif

View File

@ -0,0 +1,565 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Helper functions for common, but complicated tasks.
*/
#ifndef __CVMX_HELPER_H__
#define __CVMX_HELPER_H__
#include "cvmx-wqe.h"
/* Max number of GMXX */
#define CVMX_HELPER_MAX_GMX \
(OCTEON_IS_MODEL(OCTEON_CN78XX) ? \
6 : \
(OCTEON_IS_MODEL(OCTEON_CN68XX) ? \
5 : \
(OCTEON_IS_MODEL(OCTEON_CN73XX) ? \
3 : \
(OCTEON_IS_MODEL(OCTEON_CNF75XX) ? 1 : 2))))
#define CVMX_HELPER_CSR_INIT0 \
0 /* Do not change as
CVMX_HELPER_WRITE_CSR()
assumes it */
#define CVMX_HELPER_CSR_INIT_READ -1
/*
* CVMX_HELPER_WRITE_CSR--set a field in a CSR with a value.
*
* @param chcsr_init initial value of the csr (CVMX_HELPER_CSR_INIT_READ
* means to use the existing csr value as the
* initial value.)
* @param chcsr_csr the name of the csr
* @param chcsr_type the type of the csr (see the -defs.h)
* @param chcsr_chip the chip for the csr/field
* @param chcsr_fld the field in the csr
* @param chcsr_val the value for field
*/
#define CVMX_HELPER_WRITE_CSR(chcsr_init, chcsr_csr, chcsr_type, chcsr_chip, chcsr_fld, chcsr_val) \
do { \
chcsr_type csr; \
if ((chcsr_init) == CVMX_HELPER_CSR_INIT_READ) \
csr.u64 = cvmx_read_csr(chcsr_csr); \
else \
csr.u64 = (chcsr_init); \
csr.chcsr_chip.chcsr_fld = (chcsr_val); \
cvmx_write_csr((chcsr_csr), csr.u64); \
} while (0)
/*
* CVMX_HELPER_WRITE_CSR0--set a field in a CSR with the initial value of 0
*/
#define CVMX_HELPER_WRITE_CSR0(chcsr_csr, chcsr_type, chcsr_chip, chcsr_fld, chcsr_val) \
CVMX_HELPER_WRITE_CSR(CVMX_HELPER_CSR_INIT0, chcsr_csr, chcsr_type, chcsr_chip, chcsr_fld, \
chcsr_val)
/*
* CVMX_HELPER_WRITE_CSR1--set a field in a CSR with the initial value of
* the CSR's current value.
*/
#define CVMX_HELPER_WRITE_CSR1(chcsr_csr, chcsr_type, chcsr_chip, chcsr_fld, chcsr_val) \
CVMX_HELPER_WRITE_CSR(CVMX_HELPER_CSR_INIT_READ, chcsr_csr, chcsr_type, chcsr_chip, \
chcsr_fld, chcsr_val)
/*
 * Physical interface modes known to the helper layer.
 * These flags are passed to __cvmx_helper_packet_hardware_enable.
 */
typedef enum {
	CVMX_HELPER_INTERFACE_MODE_DISABLED,
	CVMX_HELPER_INTERFACE_MODE_RGMII,
	CVMX_HELPER_INTERFACE_MODE_GMII,
	CVMX_HELPER_INTERFACE_MODE_SPI,
	CVMX_HELPER_INTERFACE_MODE_PCIE,
	CVMX_HELPER_INTERFACE_MODE_XAUI,
	CVMX_HELPER_INTERFACE_MODE_SGMII,
	CVMX_HELPER_INTERFACE_MODE_PICMG,
	CVMX_HELPER_INTERFACE_MODE_NPI,
	CVMX_HELPER_INTERFACE_MODE_LOOP,
	CVMX_HELPER_INTERFACE_MODE_SRIO,
	CVMX_HELPER_INTERFACE_MODE_ILK,
	CVMX_HELPER_INTERFACE_MODE_RXAUI,
	CVMX_HELPER_INTERFACE_MODE_QSGMII,
	CVMX_HELPER_INTERFACE_MODE_AGL,
	CVMX_HELPER_INTERFACE_MODE_XLAUI,
	CVMX_HELPER_INTERFACE_MODE_XFI,
	CVMX_HELPER_INTERFACE_MODE_10G_KR,
	CVMX_HELPER_INTERFACE_MODE_40G_KR4,
	CVMX_HELPER_INTERFACE_MODE_MIXED,
} cvmx_helper_interface_mode_t;
/** Link state as reported/consumed by the helper link get/set functions */
typedef union cvmx_helper_link_info {
	u64 u64;
	struct {
		u64 reserved_20_63 : 43;
		u64 init_success : 1; /**< Set when link initialization succeeded */
		u64 link_up : 1;      /**< Set when the link is up */
		u64 full_duplex : 1;  /**< Set for full duplex, clear for half */
		u64 speed : 18;	      /**< Link speed -- presumably in Mbps; TODO confirm */
	} s;
} cvmx_helper_link_info_t;
/**
 * Sets the back pressure configuration in internal data structure.
 *
 * @param backpressure_dis disable/enable backpressure
 *                         (per the name, non-zero presumably disables
 *                         backpressure -- TODO confirm against the caller)
 */
void cvmx_rgmii_set_back_pressure(u64 backpressure_dis);
#include "cvmx-helper-fpa.h"
#include "cvmx-helper-agl.h"
#include "cvmx-helper-errata.h"
#include "cvmx-helper-ilk.h"
#include "cvmx-helper-loop.h"
#include "cvmx-helper-npi.h"
#include "cvmx-helper-rgmii.h"
#include "cvmx-helper-sgmii.h"
#include "cvmx-helper-spi.h"
#include "cvmx-helper-srio.h"
#include "cvmx-helper-util.h"
#include "cvmx-helper-xaui.h"
#include "cvmx-fpa3.h"
/* Padding modes PKO can apply to outgoing packets
 * (see __cvmx_helper_get_pko_padding()).
 */
enum cvmx_pko_padding {
	CVMX_PKO_PADDING_NONE = 0,
	CVMX_PKO_PADDING_60 = 1,
};
/**
 * This function enables the IPD and also enables the packet interfaces.
 * The packet interfaces (RGMII and SPI) must be enabled after the
 * IPD. This should be called by the user program after any additional
 * IPD configuration changes are made if CVMX_HELPER_ENABLE_IPD
 * is not set in the executive-config.h file.
 *
 * @param node Node on which to enable packet input (node variant only)
 *
 * @return 0 on success
 *         -1 on failure
 */
int cvmx_helper_ipd_and_packet_input_enable_node(int node);
int cvmx_helper_ipd_and_packet_input_enable(void);

/**
 * Initialize and allocate memory for the SSO.
 *
 * @param wqe_entries The maximum number of work queue entries to be
 *                    supported.
 *
 * @return Zero on success, non-zero on failure.
 */
int cvmx_helper_initialize_sso(int wqe_entries);

/**
 * Initialize and allocate memory for the SSO on a specific node.
 *
 * @param node        Node SSO to initialize
 * @param wqe_entries The maximum number of work queue entries to be
 *                    supported.
 *
 * @return Zero on success, non-zero on failure.
 */
int cvmx_helper_initialize_sso_node(unsigned int node, int wqe_entries);

/**
 * Undo the effect of cvmx_helper_initialize_sso().
 *
 * @return Zero on success, non-zero on failure.
 */
int cvmx_helper_uninitialize_sso(void);

/**
 * Undo the effect of cvmx_helper_initialize_sso_node().
 *
 * @param node Node SSO to uninitialize
 *
 * @return Zero on success, non-zero on failure.
 */
int cvmx_helper_uninitialize_sso_node(unsigned int node);
/**
 * Initialize the PIP, IPD, and PKO hardware to support
 * simple priority based queues for the ethernet ports. Each
 * port is configured with a number of priority queues based
 * on CVMX_PKO_QUEUES_PER_PORT_* where each queue is lower
 * priority than the previous.
 *
 * @return Zero on success, non-zero on failure
 */
int cvmx_helper_initialize_packet_io_global(void);

/**
 * Per-node variant of cvmx_helper_initialize_packet_io_global().
 *
 * @param node Node on which to initialize packet io hardware
 *
 * @return Zero on success, non-zero on failure
 */
int cvmx_helper_initialize_packet_io_node(unsigned int node);

/**
 * Does core local initialization for packet io
 *
 * @return Zero on success, non-zero on failure
 */
int cvmx_helper_initialize_packet_io_local(void);

/**
 * Undo the initialization performed in
 * cvmx_helper_initialize_packet_io_global(). After calling this routine and the
 * local version on each core, packet IO for Octeon will be disabled and placed
 * in the initial reset state. It will then be safe to call the initialize
 * later on. Note that this routine does not empty the FPA pools. It frees all
 * buffers used by the packet IO hardware to the FPA so a function emptying the
 * FPA after shutdown should find all packet buffers in the FPA.
 *
 * @return Zero on success, negative on failure.
 */
int cvmx_helper_shutdown_packet_io_global(void);

/**
 * Helper function for 78xx global packet IO shutdown
 *
 * @param node Node to shut down
 */
int cvmx_helper_shutdown_packet_io_global_cn78xx(int node);

/**
 * Does core local shutdown of packet io
 *
 * @return Zero on success, non-zero on failure
 */
int cvmx_helper_shutdown_packet_io_local(void);

/**
 * Returns the number of ports on the given interface.
 * The interface must be initialized before the port count
 * can be returned.
 *
 * @param interface Which interface to return port count for.
 *
 * @return Port count for interface
 *         -1 for uninitialized interface
 */
int cvmx_helper_ports_on_interface(int interface);

/**
 * Return the number of interfaces the chip has. Each interface
 * may have multiple ports. Most chips support two interfaces,
 * but the CNX0XX and CNX1XX are exceptions. These only support
 * one interface.
 *
 * @return Number of interfaces on chip
 */
int cvmx_helper_get_number_of_interfaces(void);

/**
 * Get the operating mode of an interface. Depending on the Octeon
 * chip and configuration, this function returns an enumeration
 * of the type of packet I/O supported by an interface.
 *
 * @param xiface Interface to probe
 *
 * @return Mode of the interface. Unknown or unsupported interfaces return
 *         DISABLED.
 */
cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int xiface);
/**
 * Auto configure an IPD/PKO port link state and speed. This
 * function basically does the equivalent of:
 * cvmx_helper_link_set(ipd_port, cvmx_helper_link_get(ipd_port));
 *
 * @param ipd_port IPD/PKO port to auto configure
 *
 * @return Link state after configure
 */
cvmx_helper_link_info_t cvmx_helper_link_autoconf(int ipd_port);

/**
 * Return the link state of an IPD/PKO port as returned by
 * auto negotiation. The result of this function may not match
 * Octeon's link config if auto negotiation has changed since
 * the last call to cvmx_helper_link_set().
 *
 * @param ipd_port IPD/PKO port to query
 *
 * @return Link state
 */
cvmx_helper_link_info_t cvmx_helper_link_get(int ipd_port);

/**
 * Configure an IPD/PKO port for the specified link state. This
 * function does not influence auto negotiation at the PHY level.
 * The passed link state must always match the link state returned
 * by cvmx_helper_link_get(). It is normally best to use
 * cvmx_helper_link_autoconf() instead.
 *
 * @param ipd_port  IPD/PKO port to configure
 * @param link_info The new link state
 *
 * @return Zero on success, negative on failure
 */
int cvmx_helper_link_set(int ipd_port, cvmx_helper_link_info_t link_info);

/**
 * This function probes an interface to determine the actual number of
 * hardware ports connected to it. It does some setup of the ports but
 * doesn't enable them. The main goal here is to set the global
 * interface_port_count[interface] correctly. Final hardware setup of
 * the ports will be performed later.
 *
 * @param xiface Interface to probe
 *
 * @return Zero on success, negative on failure
 */
int cvmx_helper_interface_probe(int xiface);

/**
 * Determine the actual number of hardware ports connected to an
 * interface. It doesn't setup the ports or enable them.
 *
 * @param xiface Interface to enumerate
 *
 * @return Zero on success, negative on failure
 */
int cvmx_helper_interface_enumerate(int xiface);

/**
 * Configure a port for internal and/or external loopback. Internal loopback
 * causes packets sent by the port to be received by Octeon. External loopback
 * causes packets received from the wire to be sent out again.
 *
 * @param ipd_port IPD/PKO port to loopback.
 * @param enable_internal
 *                 Non zero if you want internal loopback
 * @param enable_external
 *                 Non zero if you want external loopback
 *
 * @return Zero on success, negative on failure.
 */
int cvmx_helper_configure_loopback(int ipd_port, int enable_internal, int enable_external);

/**
 * Returns the number of ports on the given interface.
 *
 * @param interface Which interface to return port count for.
 *
 * @return Port count for interface
 *         -1 for uninitialized interface
 */
int __cvmx_helper_early_ports_on_interface(int interface);

/* Set the packet/PKO buffer counts used when running on the simulator */
void cvmx_helper_setup_simulator_io_buffer_counts(int node, int num_packet_buffers,
						  int pko_buffers);
/* Enable/disable "no WQE pointer" mode (semantics defined by PKI config -- TODO confirm) */
void cvmx_helper_set_wqe_no_ptr_mode(bool mode);
/* Enable/disable little-endian WQE layout for packets (TODO confirm direction) */
void cvmx_helper_set_pkt_wqe_le_mode(bool mode);
/* Release the FPA pools set up for packet I/O on the given node */
int cvmx_helper_shutdown_fpa_pools(int node);
/**
 * Convert Ethernet QoS/PCP value to system-level priority
 *
 * In OCTEON, highest priority is 0, in Ethernet 802.1p PCP field
 * the highest priority is 7, lowest is 1. Here is the full conversion
 * table between QoS (PCP) and OCTEON priority values, per IEEE 802.1Q-2005:
 *
 * PCP	Priority	Acronym	Traffic Types
 * 1	7 (lowest)	BK	Background
 * 0	6		BE	Best Effort
 * 2	5		EE	Excellent Effort
 * 3	4		CA	Critical Applications
 * 4	3		VI	Video, < 100 ms latency and jitter
 * 5	2		VO	Voice, < 10 ms latency and jitter
 * 6	1		IC	Internetwork Control
 * 7	0 (highest)	NC	Network Control
 */
static inline u8 cvmx_helper_qos2prio(u8 qos)
{
	/* Entry [pcp] holds the OCTEON priority from the table above */
	static const unsigned char pcp_to_prio[8] = { 6, 7, 5, 4, 3, 2, 1, 0 };

	return pcp_to_prio[qos & 0x7];
}
/**
 * Convert system-level priority to Ethernet QoS/PCP value
 *
 * Calculate the reverse of cvmx_helper_qos2prio() per IEEE 802.1Q-2005.
 */
static inline u8 cvmx_helper_prio2qos(u8 prio)
{
	/* Entry [priority] holds the 802.1p PCP value (inverse of qos2prio) */
	static const unsigned char prio_to_pcp[8] = { 7, 6, 5, 4, 3, 2, 0, 1 };

	return prio_to_pcp[prio & 0x7];
}
/**
 * @INTERNAL
 * Get the number of ipd_ports on an interface.
 *
 * @param xiface Interface to query
 *
 * @return the number of ipd_ports on the interface and -1 for error.
 */
int __cvmx_helper_get_num_ipd_ports(int xiface);

/* @INTERNAL Return the PKO padding mode configured for an interface */
enum cvmx_pko_padding __cvmx_helper_get_pko_padding(int xiface);

/**
 * @INTERNAL
 *
 * @param xiface        Interface to initialize
 * @param num_ipd_ports is the number of ipd_ports on the interface
 * @param has_fcs       indicates if PKO does FCS for the ports on this
 *                      interface.
 * @param pad           The padding that PKO should apply.
 *
 * @return 0 for success and -1 for failure
 */
int __cvmx_helper_init_interface(int xiface, int num_ipd_ports, int has_fcs,
				 enum cvmx_pko_padding pad);

/* @INTERNAL Tear down state created by __cvmx_helper_init_interface() */
void __cvmx_helper_shutdown_interfaces(void);

/*
 * @INTERNAL
 * Enable packet input/output from the hardware. This function is
 * called after all internal setup is complete and IPD is enabled.
 * After this function completes, packets will be accepted from the
 * hardware ports. PKO should still be disabled to make sure packets
 * aren't sent out partially setup hardware.
 *
 * @return Zero on success, negative on failure
 */
int __cvmx_helper_packet_hardware_enable(int xiface);

/*
 * @INTERNAL
 * Record the link state for one port of an interface.
 *
 * @return 0 for success and -1 for failure
 */
int __cvmx_helper_set_link_info(int xiface, int index, cvmx_helper_link_info_t link_info);

/**
 * @INTERNAL
 * Retrieve the link state recorded for one port of an interface.
 *
 * @param xiface Interface to query
 * @param port   Port index within the interface
 *
 * @return valid link_info on success or -1 on failure
 */
cvmx_helper_link_info_t __cvmx_helper_get_link_info(int xiface, int port);

/**
 * @INTERNAL
 *
 * @param xiface Interface to query
 *
 * @return 0 if PKO does not do FCS and 1 otherwise.
 */
int __cvmx_helper_get_has_fcs(int xiface);

/* Node-aware helper memory allocation; release with cvmx_helper_mem_free() */
void *cvmx_helper_mem_alloc(int node, u64 alloc_size, u64 align);
void cvmx_helper_mem_free(void *buffer, u64 size);
#define CVMX_QOS_NUM 8 /* Number of QoS priority classes */

/* Flow-control protocol used for QoS */
typedef enum {
	CVMX_QOS_PROTO_NONE,  /* Disable QOS */
	CVMX_QOS_PROTO_PAUSE, /* IEEE 802.3 PAUSE */
	CVMX_QOS_PROTO_PFC    /* IEEE 802.1Qbb-2011 PFC/CBFC */
} cvmx_qos_proto_t;

/* Where received PAUSE packets are handled */
typedef enum {
	CVMX_QOS_PKT_MODE_HWONLY, /* PAUSE packets processed in Hardware only. */
	CVMX_QOS_PKT_MODE_SWONLY, /* PAUSE packets processed in Software only. */
	CVMX_QOS_PKT_MODE_HWSW,	  /* PAUSE packets processed in both HW and SW. */
	CVMX_QOS_PKT_MODE_DROP	  /* Ignore PAUSE packets. */
} cvmx_qos_pkt_mode_t;

/* How FPA pools are assigned for QoS buffering */
typedef enum {
	CVMX_QOS_POOL_PER_PORT, /* Pool per Physical Port */
	CVMX_QOS_POOL_PER_CLASS /* Pool per Priority Class */
} cvmx_qos_pool_mode_t;

/* Aggregate QoS configuration for one port (see cvmx_helper_qos_*()) */
typedef struct cvmx_qos_config {
	cvmx_qos_proto_t qos_proto;	/* QoS protocol.*/
	cvmx_qos_pkt_mode_t pkt_mode;	/* PAUSE processing mode.*/
	cvmx_qos_pool_mode_t pool_mode; /* FPA Pool mode.*/
	int pktbuf_size;		/* Packet buffer size */
	int aura_size;			/* Number of buffers */
	int drop_thresh[CVMX_QOS_NUM];	/* DROP threshold in % */
	int red_thresh[CVMX_QOS_NUM];	/* RED threshold in % */
	int bp_thresh[CVMX_QOS_NUM];	/* BP (backpressure) threshold in % */
	int groups[CVMX_QOS_NUM];	/* Base SSO group for QOS group set. */
	int group_prio[CVMX_QOS_NUM];	/* SSO group priorities.*/
	int pko_pfc_en;			/* Enable PKO PFC layout. */
	int vlan_num;			/* VLAN number: 0 = 1st or 1 = 2nd. */
	int p_time;			/* PAUSE packet send time (in number of 512 bit-times).*/
	int p_interval;			/* PAUSE packet send interval (in number of 512 bit-times).*/
	/* Internal parameters (should not be used by application developer): */
	cvmx_fpa3_pool_t gpools[CVMX_QOS_NUM];	/* Pool to use.*/
	cvmx_fpa3_gaura_t gauras[CVMX_QOS_NUM]; /* Global auras -- one per priority class. */
	int bpids[CVMX_QOS_NUM];		/* PKI BPID.*/
	int qpg_base;				/* QPG Table base index.*/
} cvmx_qos_config_t;
/**
 * Initialize QoS configuration with the SDK defaults.
 *
 * @param qos_proto QoS protocol to configure defaults for.
 * @param qos_cfg   User QOS configuration parameters (filled in).
 * @return Zero on success, negative number otherwise.
 */
int cvmx_helper_qos_config_init(cvmx_qos_proto_t qos_proto, cvmx_qos_config_t *qos_cfg);

/**
 * Update the user static processor configuration.
 * It should be done before any initialization of the DP units is performed.
 *
 * @param xipdport Global IPD port
 * @param qos_cfg  User QOS configuration parameters.
 * @return Zero on success, negative number otherwise.
 */
int cvmx_helper_qos_port_config_update(int xipdport, cvmx_qos_config_t *qos_cfg);

/**
 * Configure the Data Path components for QOS function.
 * This function is called after the global processor initialization is
 * performed.
 *
 * @param xipdport Global IPD port
 * @param qos_cfg  User QOS configuration parameters.
 * @return Zero on success, negative number otherwise.
 */
int cvmx_helper_qos_port_setup(int xipdport, cvmx_qos_config_t *qos_cfg);

/**
 * Configure the SSO for QOS function.
 * This function is called after the global processor initialization is
 * performed.
 *
 * @param node    OCTEON3 node number.
 * @param qos_cfg User QOS configuration parameters.
 * @return Zero on success, negative number otherwise.
 */
int cvmx_helper_qos_sso_setup(int node, cvmx_qos_config_t *qos_cfg);

/**
 * Return PKI_CHAN_E channel name based on the provided index.
 *
 * @param chan    Channel index.
 * @param namebuf Name buffer (output).
 * @param buflen  Name maximum length.
 * @return Length of name (in bytes) on success, negative number otherwise.
 */
int cvmx_helper_get_chan_e_name(int chan, char *namebuf, int buflen);
#ifdef CVMX_DUMP_DIAGNOSTICS
void cvmx_helper_dump_for_diagnostics(int node);
#endif
#endif /* __CVMX_HELPER_H__ */

View File

@ -0,0 +1,606 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Interface to the hardware Fetch and Add Unit.
*/
/**
* @file
*
* Interface to the hardware Fetch and Add Unit.
*
*/
#ifndef __CVMX_HWFAU_H__
#define __CVMX_HWFAU_H__
/*
 * FAU register handles: the handle is the register's byte offset inside
 * the Fetch-and-Add unit's register file (see the "Step by N" notes on
 * the accessors below).
 */
typedef int cvmx_fau_reg64_t;
typedef int cvmx_fau_reg32_t;
typedef int cvmx_fau_reg16_t;
typedef int cvmx_fau_reg8_t;

/*
 * Passed to the cvmx_fau*_alloc() helpers to request any free register.
 * Parenthesized so the negative constant expands safely inside larger
 * expressions.
 */
#define CVMX_FAU_REG_ANY (-1)
/*
 * Octeon Fetch and Add Unit (FAU)
 *
 * The CVMX_FAU_BITS_* macros each expand to a "high, low" bit-position
 * pair consumed by cvmx_build_bits() when composing FAU I/O addresses
 * and IOBDMA command words.
 */
#define CVMX_FAU_LOAD_IO_ADDRESS cvmx_build_io_address(0x1e, 0)
#define CVMX_FAU_BITS_SCRADDR 63, 56	/* Scratchpad destination word (IOBDMA) */
#define CVMX_FAU_BITS_LEN 55, 48	/* IOBDMA transfer length */
#define CVMX_FAU_BITS_INEVAL 35, 14	/* Signed increment value */
#define CVMX_FAU_BITS_TAGWAIT 13, 13	/* Wait for tag switch before the op */
#define CVMX_FAU_BITS_NOADD 13, 13	/* Store: overwrite instead of add */
#define CVMX_FAU_BITS_SIZE 12, 11	/* Operand size (cvmx_fau_op_size_t) */
#define CVMX_FAU_BITS_REGISTER 10, 0	/* FAU register byte offset */
#define CVMX_FAU_MAX_REGISTERS_8 (2048)	/* Number of addressable 8-bit registers */

/* Operand size encodings for FAU operations */
typedef enum {
	CVMX_FAU_OP_SIZE_8 = 0,
	CVMX_FAU_OP_SIZE_16 = 1,
	CVMX_FAU_OP_SIZE_32 = 2,
	CVMX_FAU_OP_SIZE_64 = 3
} cvmx_fau_op_size_t;
/**
 * Tagwait return definition. If a timeout occurs, the error
 * bit will be set. Otherwise the value of the register before
 * the update will be returned.
 */
typedef struct {
	u64 error : 1;	/* 1 = tag-switch wait timed out */
	s64 value : 63;	/* Register value before the update */
} cvmx_fau_tagwait64_t;

/**
 * Tagwait return definition. If a timeout occurs, the error
 * bit will be set. Otherwise the value of the register before
 * the update will be returned.
 */
typedef struct {
	u64 error : 1;	/* 1 = tag-switch wait timed out */
	s32 value : 31;	/* Register value before the update */
} cvmx_fau_tagwait32_t;

/**
 * Tagwait return definition. If a timeout occurs, the error
 * bit will be set. Otherwise the value of the register before
 * the update will be returned.
 */
typedef struct {
	u64 error : 1;	/* 1 = tag-switch wait timed out */
	s16 value : 15;	/* Register value before the update */
} cvmx_fau_tagwait16_t;

/**
 * Tagwait return definition. If a timeout occurs, the error
 * bit will be set. Otherwise the value of the register before
 * the update will be returned.
 */
typedef struct {
	u64 error : 1;		/* 1 = tag-switch wait timed out */
	int8_t value : 7;	/* Register value before the update */
} cvmx_fau_tagwait8_t;

/**
 * Asynchronous tagwait return definition. If a timeout occurs,
 * the error bit will be set. Otherwise the value of the
 * register before the update will be returned.
 */
typedef union {
	u64 u64;
	struct {
		u64 invalid : 1;
		u64 data : 63; /* unpredictable if invalid is set */
	} s;
} cvmx_fau_async_tagwait_result_t;

/*
 * XOR offsets applied to sub-64-bit register numbers before the access;
 * all zero in this configuration (no byte swizzling required).
 */
#define SWIZZLE_8 0
#define SWIZZLE_16 0
#define SWIZZLE_32 0
/**
 * @INTERNAL
 * Builds a store I/O address for writing to the FAU
 *
 * @param noadd 0 = Store value is atomically added to the current value
 *              1 = Store value is atomically written over the current value
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 *              - Step by 2 for 16 bit access.
 *              - Step by 4 for 32 bit access.
 *              - Step by 8 for 64 bit access.
 * @return Address to store to for the atomic update
 */
static inline u64 __cvmx_hwfau_store_address(u64 noadd, u64 reg)
{
	u64 addr = CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS);

	addr |= cvmx_build_bits(CVMX_FAU_BITS_NOADD, noadd);
	addr |= cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
	return addr;
}
/**
 * @INTERNAL
 * Builds an I/O address for accessing the FAU
 *
 * @param tagwait Should the atomic add wait for the current tag switch
 *                operation to complete.
 *                - 0 = Don't wait
 *                - 1 = Wait for tag switch to complete
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 *                - Step by 4 for 32 bit access.
 *                - Step by 8 for 64 bit access.
 * @param value   Signed value to add.
 *                Note: When performing 32 and 64 bit access, only the low
 *                22 bits are available.
 * @return Address to read from for the atomic update
 */
static inline u64 __cvmx_hwfau_atomic_address(u64 tagwait, u64 reg, s64 value)
{
	u64 addr = CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS);

	addr |= cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value);
	addr |= cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait);
	addr |= cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
	return addr;
}
/**
 * Perform an atomic 64 bit add
 *
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 *              - Step by 8 for 64 bit access.
 * @param value Signed value to add.
 *              Note: Only the low 22 bits are available.
 * @return Value of the register before the update
 */
static inline s64 cvmx_hwfau_fetch_and_add64(cvmx_fau_reg64_t reg, s64 value)
{
	u64 addr = __cvmx_hwfau_atomic_address(0, reg, value);

	return cvmx_read64_int64(addr);
}

/**
 * Perform an atomic 32 bit add
 *
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 *              - Step by 4 for 32 bit access.
 * @param value Signed value to add.
 *              Note: Only the low 22 bits are available.
 * @return Value of the register before the update
 */
static inline s32 cvmx_hwfau_fetch_and_add32(cvmx_fau_reg32_t reg, s32 value)
{
	u64 addr = __cvmx_hwfau_atomic_address(0, reg ^ SWIZZLE_32, value);

	return cvmx_read64_int32(addr);
}

/**
 * Perform an atomic 16 bit add
 *
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 *              - Step by 2 for 16 bit access.
 * @param value Signed value to add.
 * @return Value of the register before the update
 */
static inline s16 cvmx_hwfau_fetch_and_add16(cvmx_fau_reg16_t reg, s16 value)
{
	u64 addr = __cvmx_hwfau_atomic_address(0, reg ^ SWIZZLE_16, value);

	return cvmx_read64_int16(addr);
}

/**
 * Perform an atomic 8 bit add
 *
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 * @param value Signed value to add.
 * @return Value of the register before the update
 */
static inline int8_t cvmx_hwfau_fetch_and_add8(cvmx_fau_reg8_t reg, int8_t value)
{
	u64 addr = __cvmx_hwfau_atomic_address(0, reg ^ SWIZZLE_8, value);

	return cvmx_read64_int8(addr);
}
/**
 * Perform an atomic 64 bit add after the current tag switch
 * completes
 *
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 *              - Step by 8 for 64 bit access.
 * @param value Signed value to add.
 *              Note: Only the low 22 bits are available.
 * @return If a timeout occurs, the error bit will be set. Otherwise
 *         the value of the register before the update will be
 *         returned
 */
static inline cvmx_fau_tagwait64_t cvmx_hwfau_tagwait_fetch_and_add64(cvmx_fau_reg64_t reg,
								      s64 value)
{
	union {
		u64 raw;
		cvmx_fau_tagwait64_t tw;
	} rv;

	rv.raw = cvmx_read64_int64(__cvmx_hwfau_atomic_address(1, reg, value));
	return rv.tw;
}

/**
 * Perform an atomic 32 bit add after the current tag switch
 * completes
 *
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 *              - Step by 4 for 32 bit access.
 * @param value Signed value to add.
 *              Note: Only the low 22 bits are available.
 * @return If a timeout occurs, the error bit will be set. Otherwise
 *         the value of the register before the update will be
 *         returned
 */
static inline cvmx_fau_tagwait32_t cvmx_hwfau_tagwait_fetch_and_add32(cvmx_fau_reg32_t reg,
								      s32 value)
{
	union {
		u64 raw;
		cvmx_fau_tagwait32_t tw;
	} rv;

	rv.raw = cvmx_read64_int32(__cvmx_hwfau_atomic_address(1, reg ^ SWIZZLE_32, value));
	return rv.tw;
}

/**
 * Perform an atomic 16 bit add after the current tag switch
 * completes
 *
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 *              - Step by 2 for 16 bit access.
 * @param value Signed value to add.
 * @return If a timeout occurs, the error bit will be set. Otherwise
 *         the value of the register before the update will be
 *         returned
 */
static inline cvmx_fau_tagwait16_t cvmx_hwfau_tagwait_fetch_and_add16(cvmx_fau_reg16_t reg,
								      s16 value)
{
	union {
		u64 raw;
		cvmx_fau_tagwait16_t tw;
	} rv;

	rv.raw = cvmx_read64_int16(__cvmx_hwfau_atomic_address(1, reg ^ SWIZZLE_16, value));
	return rv.tw;
}

/**
 * Perform an atomic 8 bit add after the current tag switch
 * completes
 *
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 * @param value Signed value to add.
 * @return If a timeout occurs, the error bit will be set. Otherwise
 *         the value of the register before the update will be
 *         returned
 */
static inline cvmx_fau_tagwait8_t cvmx_hwfau_tagwait_fetch_and_add8(cvmx_fau_reg8_t reg,
								    int8_t value)
{
	union {
		u64 raw;
		cvmx_fau_tagwait8_t tw;
	} rv;

	rv.raw = cvmx_read64_int8(__cvmx_hwfau_atomic_address(1, reg ^ SWIZZLE_8, value));
	return rv.tw;
}
/**
 * @INTERNAL
 * Builds I/O data for async operations
 *
 * @param scraddr Scratch pad byte address to write to. Must be 8 byte aligned
 * @param value   Signed value to add.
 *                Note: When performing 32 and 64 bit access, only the low
 *                22 bits are available.
 * @param tagwait Should the atomic add wait for the current tag switch
 *                operation to complete.
 *                - 0 = Don't wait
 *                - 1 = Wait for tag switch to complete
 * @param size    The size of the operation:
 *                - CVMX_FAU_OP_SIZE_8  (0) = 8 bits
 *                - CVMX_FAU_OP_SIZE_16 (1) = 16 bits
 *                - CVMX_FAU_OP_SIZE_32 (2) = 32 bits
 *                - CVMX_FAU_OP_SIZE_64 (3) = 64 bits
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 *                - Step by 4 for 32 bit access.
 *                - Step by 8 for 64 bit access.
 * @return Data to write using cvmx_send_single
 */
static inline u64 __cvmx_fau_iobdma_data(u64 scraddr, s64 value, u64 tagwait,
					 cvmx_fau_op_size_t size, u64 reg)
{
	u64 cmd = CVMX_FAU_LOAD_IO_ADDRESS;

	/* Scratchpad destination is a 64-bit word index, hence >> 3 */
	cmd |= cvmx_build_bits(CVMX_FAU_BITS_SCRADDR, scraddr >> 3);
	cmd |= cvmx_build_bits(CVMX_FAU_BITS_LEN, 1);
	cmd |= cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value);
	cmd |= cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait);
	cmd |= cvmx_build_bits(CVMX_FAU_BITS_SIZE, size);
	cmd |= cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
	return cmd;
}
/**
 * Perform an async atomic 64 bit add. The old value is
 * placed in the scratch memory at byte address scraddr.
 *
 * @param scraddr Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 8 for 64 bit access.
 * @param value   Signed value to add.
 *                Note: Only the low 22 bits are available.
 * @return Placed in the scratch pad register
 */
static inline void cvmx_hwfau_async_fetch_and_add64(u64 scraddr, cvmx_fau_reg64_t reg, s64 value)
{
	u64 dma_cmd = __cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_64, reg);

	cvmx_send_single(dma_cmd);
}

/**
 * Perform an async atomic 32 bit add. The old value is
 * placed in the scratch memory at byte address scraddr.
 *
 * @param scraddr Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 4 for 32 bit access.
 * @param value   Signed value to add.
 *                Note: Only the low 22 bits are available.
 * @return Placed in the scratch pad register
 */
static inline void cvmx_hwfau_async_fetch_and_add32(u64 scraddr, cvmx_fau_reg32_t reg, s32 value)
{
	u64 dma_cmd = __cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_32, reg);

	cvmx_send_single(dma_cmd);
}

/**
 * Perform an async atomic 16 bit add. The old value is
 * placed in the scratch memory at byte address scraddr.
 *
 * @param scraddr Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 * @param value   Signed value to add.
 * @return Placed in the scratch pad register
 */
static inline void cvmx_hwfau_async_fetch_and_add16(u64 scraddr, cvmx_fau_reg16_t reg, s16 value)
{
	u64 dma_cmd = __cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_16, reg);

	cvmx_send_single(dma_cmd);
}

/**
 * Perform an async atomic 8 bit add. The old value is
 * placed in the scratch memory at byte address scraddr.
 *
 * @param scraddr Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 * @param value   Signed value to add.
 * @return Placed in the scratch pad register
 */
static inline void cvmx_hwfau_async_fetch_and_add8(u64 scraddr, cvmx_fau_reg8_t reg, int8_t value)
{
	u64 dma_cmd = __cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_8, reg);

	cvmx_send_single(dma_cmd);
}
/**
 * Perform an async atomic 64 bit add after the current tag
 * switch completes.
 *
 * @param scraddr Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 *                If a timeout occurs, the error bit (63) will be set. Otherwise
 *                the value of the register before the update will be
 *                returned
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 8 for 64 bit access.
 * @param value   Signed value to add.
 *                Note: Only the low 22 bits are available.
 * @return Placed in the scratch pad register
 */
static inline void cvmx_hwfau_async_tagwait_fetch_and_add64(u64 scraddr, cvmx_fau_reg64_t reg,
							    s64 value)
{
	u64 dma_cmd = __cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_64, reg);

	cvmx_send_single(dma_cmd);
}

/**
 * Perform an async atomic 32 bit add after the current tag
 * switch completes.
 *
 * @param scraddr Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 *                If a timeout occurs, the error bit (63) will be set. Otherwise
 *                the value of the register before the update will be
 *                returned
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 4 for 32 bit access.
 * @param value   Signed value to add.
 *                Note: Only the low 22 bits are available.
 * @return Placed in the scratch pad register
 */
static inline void cvmx_hwfau_async_tagwait_fetch_and_add32(u64 scraddr, cvmx_fau_reg32_t reg,
							    s32 value)
{
	u64 dma_cmd = __cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_32, reg);

	cvmx_send_single(dma_cmd);
}

/**
 * Perform an async atomic 16 bit add after the current tag
 * switch completes.
 *
 * @param scraddr Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 *                If a timeout occurs, the error bit (63) will be set. Otherwise
 *                the value of the register before the update will be
 *                returned
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 * @param value   Signed value to add.
 * @return Placed in the scratch pad register
 */
static inline void cvmx_hwfau_async_tagwait_fetch_and_add16(u64 scraddr, cvmx_fau_reg16_t reg,
							    s16 value)
{
	u64 dma_cmd = __cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_16, reg);

	cvmx_send_single(dma_cmd);
}

/**
 * Perform an async atomic 8 bit add after the current tag
 * switch completes.
 *
 * @param scraddr Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 *                If a timeout occurs, the error bit (63) will be set. Otherwise
 *                the value of the register before the update will be
 *                returned
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 * @param value   Signed value to add.
 * @return Placed in the scratch pad register
 */
static inline void cvmx_hwfau_async_tagwait_fetch_and_add8(u64 scraddr, cvmx_fau_reg8_t reg,
							   int8_t value)
{
	u64 dma_cmd = __cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_8, reg);

	cvmx_send_single(dma_cmd);
}
/**
 * Perform an atomic 64 bit add
 *
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 *              - Step by 8 for 64 bit access.
 * @param value Signed value to add.
 */
static inline void cvmx_hwfau_atomic_add64(cvmx_fau_reg64_t reg, s64 value)
{
	u64 addr = __cvmx_hwfau_store_address(0, reg);

	cvmx_write64_int64(addr, value);
}

/**
 * Perform an atomic 32 bit add
 *
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 *              - Step by 4 for 32 bit access.
 * @param value Signed value to add.
 */
static inline void cvmx_hwfau_atomic_add32(cvmx_fau_reg32_t reg, s32 value)
{
	u64 addr = __cvmx_hwfau_store_address(0, reg ^ SWIZZLE_32);

	cvmx_write64_int32(addr, value);
}

/**
 * Perform an atomic 16 bit add
 *
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 *              - Step by 2 for 16 bit access.
 * @param value Signed value to add.
 */
static inline void cvmx_hwfau_atomic_add16(cvmx_fau_reg16_t reg, s16 value)
{
	u64 addr = __cvmx_hwfau_store_address(0, reg ^ SWIZZLE_16);

	cvmx_write64_int16(addr, value);
}

/**
 * Perform an atomic 8 bit add
 *
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 * @param value Signed value to add.
 */
static inline void cvmx_hwfau_atomic_add8(cvmx_fau_reg8_t reg, int8_t value)
{
	u64 addr = __cvmx_hwfau_store_address(0, reg ^ SWIZZLE_8);

	cvmx_write64_int8(addr, value);
}
/**
 * Perform an atomic 64 bit write
 *
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 *              - Step by 8 for 64 bit access.
 * @param value Signed value to write.
 */
static inline void cvmx_hwfau_atomic_write64(cvmx_fau_reg64_t reg, s64 value)
{
	u64 addr = __cvmx_hwfau_store_address(1, reg);

	cvmx_write64_int64(addr, value);
}

/**
 * Perform an atomic 32 bit write
 *
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 *              - Step by 4 for 32 bit access.
 * @param value Signed value to write.
 */
static inline void cvmx_hwfau_atomic_write32(cvmx_fau_reg32_t reg, s32 value)
{
	u64 addr = __cvmx_hwfau_store_address(1, reg ^ SWIZZLE_32);

	cvmx_write64_int32(addr, value);
}

/**
 * Perform an atomic 16 bit write
 *
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 *              - Step by 2 for 16 bit access.
 * @param value Signed value to write.
 */
static inline void cvmx_hwfau_atomic_write16(cvmx_fau_reg16_t reg, s16 value)
{
	u64 addr = __cvmx_hwfau_store_address(1, reg ^ SWIZZLE_16);

	cvmx_write64_int16(addr, value);
}

/**
 * Perform an atomic 8 bit write
 *
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 * @param value Signed value to write.
 */
static inline void cvmx_hwfau_atomic_write8(cvmx_fau_reg8_t reg, int8_t value)
{
	u64 addr = __cvmx_hwfau_store_address(1, reg ^ SWIZZLE_8);

	cvmx_write64_int8(addr, value);
}
/** Allocates 64bit FAU register.
 * @param reserve Register to reserve (presumably CVMX_FAU_REG_ANY for
 *                any free register -- TODO confirm in implementation)
 * @return value is the base address of allocated FAU register
 */
int cvmx_fau64_alloc(int reserve);

/** Allocates 32bit FAU register.
 * @param reserve Register to reserve (see cvmx_fau64_alloc())
 * @return value is the base address of allocated FAU register
 */
int cvmx_fau32_alloc(int reserve);

/** Allocates 16bit FAU register.
 * @param reserve Register to reserve (see cvmx_fau64_alloc())
 * @return value is the base address of allocated FAU register
 */
int cvmx_fau16_alloc(int reserve);

/** Allocates 8bit FAU register.
 * @param reserve Register to reserve (see cvmx_fau64_alloc())
 * @return value is the base address of allocated FAU register
 */
int cvmx_fau8_alloc(int reserve);

/** Frees the specified FAU register.
 * @param address Base address of register to release.
 * @return 0 on success; -1 on failure
 */
int cvmx_fau_free(int address);

/** Display the fau registers array
 */
void cvmx_fau_show(void);
#endif /* __CVMX_HWFAU_H__ */

View File

@ -0,0 +1,570 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Interface to the hardware Packet Output unit.
*
* Starting with SDK 1.7.0, the PKO output functions now support
* two types of locking. CVMX_PKO_LOCK_ATOMIC_TAG continues to
* function similarly to previous SDKs by using POW atomic tags
* to preserve ordering and exclusivity. As a new option, you
* can now pass CVMX_PKO_LOCK_CMD_QUEUE which uses a ll/sc
* memory based locking instead. This locking has the advantage
* of not affecting the tag state but doesn't preserve packet
* ordering. CVMX_PKO_LOCK_CMD_QUEUE is appropriate in most
 * generic code while CVMX_PKO_LOCK_NONE should be used
* with hand tuned fast path code.
*
 * Some other SDK differences visible to the command
* queuing:
* - PKO indexes are no longer stored in the FAU. A large
* percentage of the FAU register block used to be tied up
* maintaining PKO queue pointers. These are now stored in a
* global named block.
* - The PKO <b>use_locking</b> parameter can now have a global
* effect. Since all application use the same named block,
* queue locking correctly applies across all operating
* systems when using CVMX_PKO_LOCK_CMD_QUEUE.
* - PKO 3 word commands are now supported. Use
* cvmx_pko_send_packet_finish3().
*/
#ifndef __CVMX_HWPKO_H__
#define __CVMX_HWPKO_H__
#include "cvmx-hwfau.h"
#include "cvmx-fpa.h"
#include "cvmx-pow.h"
#include "cvmx-cmd-queue.h"
#include "cvmx-helper.h"
#include "cvmx-helper-util.h"
#include "cvmx-helper-cfg.h"
/* Adjust the command buffer size by 1 word so that in the case of using only
** two word PKO commands no command words straddle buffers. The useful values
** for this are 0 and 1. */
#define CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST (1)
#define CVMX_PKO_MAX_OUTPUT_QUEUES_STATIC 256
#define CVMX_PKO_MAX_OUTPUT_QUEUES \
((OCTEON_IS_OCTEON2() || OCTEON_IS_MODEL(OCTEON_CN70XX)) ? 256 : 128)
#define CVMX_PKO_NUM_OUTPUT_PORTS \
((OCTEON_IS_MODEL(OCTEON_CN63XX)) ? 44 : (OCTEON_IS_MODEL(OCTEON_CN66XX) ? 48 : 40))
#define CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID 63
#define CVMX_PKO_QUEUE_STATIC_PRIORITY 9
#define CVMX_PKO_ILLEGAL_QUEUE 0xFFFF
#define CVMX_PKO_MAX_QUEUE_DEPTH 0
/** Return values for the PKO configuration and send functions. */
typedef enum {
	CVMX_PKO_SUCCESS,
	CVMX_PKO_INVALID_PORT,
	CVMX_PKO_INVALID_QUEUE,
	CVMX_PKO_INVALID_PRIORITY,
	CVMX_PKO_NO_MEMORY,
	CVMX_PKO_PORT_ALREADY_SETUP,
	CVMX_PKO_CMD_QUEUE_INIT_ERROR
} cvmx_pko_return_value_t;
/**
 * This enumeration represents the different locking modes supported by PKO.
 */
typedef enum {
	CVMX_PKO_LOCK_NONE = 0,	      /* caller must serialize queue access itself */
	CVMX_PKO_LOCK_ATOMIC_TAG = 1, /* POW atomic tag switch; preserves ordering */
	CVMX_PKO_LOCK_CMD_QUEUE = 2,  /* ll/sc command-queue lock; no tag effects */
} cvmx_pko_lock_t;
/* Counters returned by cvmx_pko_get_port_status(). */
typedef struct cvmx_pko_port_status {
	u32 packets;  /* packet counter */
	u64 octets;   /* octet counter */
	u64 doorbell; /* doorbell count; only the port's base queue is collected */
} cvmx_pko_port_status_t;
/**
* This structure defines the address to use on a packet enqueue
*/
typedef union {
u64 u64;
struct {
cvmx_mips_space_t mem_space : 2;
u64 reserved : 13;
u64 is_io : 1;
u64 did : 8;
u64 reserved2 : 4;
u64 reserved3 : 15;
u64 port : 9;
u64 queue : 9;
u64 reserved4 : 3;
} s;
} cvmx_pko_doorbell_address_t;
/**
* Structure of the first packet output command word.
*/
typedef union {
u64 u64;
struct {
cvmx_fau_op_size_t size1 : 2;
cvmx_fau_op_size_t size0 : 2;
u64 subone1 : 1;
u64 reg1 : 11;
u64 subone0 : 1;
u64 reg0 : 11;
u64 le : 1;
u64 n2 : 1;
u64 wqp : 1;
u64 rsp : 1;
u64 gather : 1;
u64 ipoffp1 : 7;
u64 ignore_i : 1;
u64 dontfree : 1;
u64 segs : 6;
u64 total_bytes : 16;
} s;
} cvmx_pko_command_word0_t;
/**
* Call before any other calls to initialize the packet
* output system.
*/
void cvmx_pko_hw_init(u8 pool, unsigned int bufsize);
/**
* Enables the packet output hardware. It must already be
* configured.
*/
void cvmx_pko_enable(void);
/**
* Disables the packet output. Does not affect any configuration.
*/
void cvmx_pko_disable(void);
/**
* Shutdown and free resources required by packet output.
*/
void cvmx_pko_shutdown(void);
/**
* Configure a output port and the associated queues for use.
*
* @param port Port to configure.
* @param base_queue First queue number to associate with this port.
 * @param num_queues Number of queues to associate with this port
* @param priority Array of priority levels for each queue. Values are
 *                 allowed to be 1-8. A value of 8 gets 8 times the traffic
* of a value of 1. There must be num_queues elements in the
* array.
*/
cvmx_pko_return_value_t cvmx_pko_config_port(int port, int base_queue, int num_queues,
const u8 priority[]);
/**
 * Ring the packet output doorbell. This tells the packet
 * output hardware that "len" command words have been added
 * to its pending list. This command includes the required
 * CVMX_SYNCWS before the doorbell ring.
 *
 * WARNING: This function may have to look up the proper PKO port in
 * the IPD port to PKO port map, and is thus slower than calling
 * cvmx_pko_doorbell_pkoid() directly if the PKO port identifier is
 * known.
 *
 * @param ipd_port The IPD port corresponding to the pko port the packet is for
 * @param queue    Queue the packet is for
 * @param len      Length of the command in 64 bit words
 */
static inline void cvmx_pko_doorbell(u64 ipd_port, u64 queue, u64 len)
{
	cvmx_pko_doorbell_address_t ptr;
	u64 pko_port = ipd_port;

	/* On PKND-capable chips the IPD port must first be mapped to the
	 * base PKO port.
	 */
	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		pko_port = cvmx_helper_cfg_ipd2pko_port_base(ipd_port);

	ptr.u64 = 0;
	ptr.s.mem_space = CVMX_IO_SEG;
	ptr.s.is_io = 1;
	ptr.s.did = CVMX_OCT_DID_PKT_SEND;
	ptr.s.port = pko_port;
	ptr.s.queue = queue;

	/* Need to make sure output queue data is in DRAM before doorbell write */
	CVMX_SYNCWS;
	cvmx_write_io(ptr.u64, len);
}
/**
 * Prepare to send a packet. This may initiate a tag switch to
 * get exclusive access to the output queue structure, and
 * performs other prep work for the packet send operation.
 *
 * cvmx_pko_send_packet_finish() MUST be called after this function is called,
 * and must be called with the same port/queue/use_locking arguments.
 *
 * The use_locking parameter allows the caller to use three
 * possible locking modes.
 * - CVMX_PKO_LOCK_NONE
 *     - PKO doesn't do any locking. It is the responsibility
 *       of the application to make sure that no other core
 *       is accessing the same queue at the same time.
 * - CVMX_PKO_LOCK_ATOMIC_TAG
 *     - PKO performs an atomic tagswitch to ensure exclusive
 *       access to the output queue. This will maintain
 *       packet ordering on output.
 * - CVMX_PKO_LOCK_CMD_QUEUE
 *     - PKO uses the common command queue locks to ensure
 *       exclusive access to the output queue. This is a
 *       memory based ll/sc. This is the most portable
 *       locking mechanism.
 *
 * NOTE: If atomic locking is used, the POW entry CANNOT be
 * descheduled, as it does not contain a valid WQE pointer.
 *
 * @param port Port to send it on, this can be either IPD port or PKO
 *             port.
 * @param queue Queue to use
 * @param use_locking
 *             CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or CVMX_PKO_LOCK_CMD_QUEUE
 */
static inline void cvmx_pko_send_packet_prepare(u64 port __attribute__((unused)), u64 queue,
						cvmx_pko_lock_t use_locking)
{
	u32 tag;

	/* Only the atomic-tag mode needs any preparation work. */
	if (use_locking != CVMX_PKO_LOCK_ATOMIC_TAG)
		return;

	/*
	 * Must do a full switch here to handle all cases. We use a
	 * fake WQE pointer, as the POW does not access this memory.
	 * The WQE pointer and group are only used if this work is
	 * descheduled, which is not supported by the
	 * cvmx_pko_send_packet_prepare/cvmx_pko_send_packet_finish
	 * combination. Note that this is a special case in which these
	 * fake values can be used - this is not a general technique.
	 */
	tag = CVMX_TAG_SW_BITS_INTERNAL << CVMX_TAG_SW_SHIFT |
	      CVMX_TAG_SUBGROUP_PKO << CVMX_TAG_SUBGROUP_SHIFT |
	      (CVMX_TAG_SUBGROUP_MASK & queue);
	cvmx_pow_tag_sw_full((cvmx_wqe_t *)cvmx_phys_to_ptr(0x80), tag,
			     CVMX_POW_TAG_TYPE_ATOMIC, 0);
}
#define cvmx_pko_send_packet_prepare_pkoid cvmx_pko_send_packet_prepare
/**
 * Complete packet output. cvmx_pko_send_packet_prepare() must be called exactly once before this,
 * and the same parameters must be passed to both cvmx_pko_send_packet_prepare() and
 * cvmx_pko_send_packet_finish().
 *
 * WARNING: This function may have to look up the proper PKO port in
 * the IPD port to PKO port map, and is thus slower than calling
 * cvmx_pko_send_packet_finish_pkoid() directly if the PKO port
 * identifier is known.
 *
 * @param ipd_port The IPD port corresponding to the pko port the packet is for
 * @param queue    Queue to use
 * @param pko_command
 *                 PKO HW command word
 * @param packet   Packet to send
 * @param use_locking
 *                 CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or CVMX_PKO_LOCK_CMD_QUEUE
 *
 * @return CVMX_PKO_SUCCESS on success, or error code on failure of output
 */
static inline cvmx_pko_return_value_t
cvmx_hwpko_send_packet_finish(u64 ipd_port, u64 queue, cvmx_pko_command_word0_t pko_command,
			      cvmx_buf_ptr_t packet, cvmx_pko_lock_t use_locking)
{
	cvmx_cmd_queue_result_t result;

	/* Wait for the tag switch started by the matching prepare() call. */
	if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
		cvmx_pow_tag_sw_wait();

	result = cvmx_cmd_queue_write2(CVMX_CMD_QUEUE_PKO(queue),
				       use_locking == CVMX_PKO_LOCK_CMD_QUEUE,
				       pko_command.u64, packet.u64);
	if (cvmx_likely(result == CVMX_CMD_QUEUE_SUCCESS)) {
		/* 2-word command queued: ring the doorbell. */
		cvmx_pko_doorbell(ipd_port, queue, 2);
		return CVMX_PKO_SUCCESS;
	}

	if (result == CVMX_CMD_QUEUE_NO_MEMORY || result == CVMX_CMD_QUEUE_FULL)
		return CVMX_PKO_NO_MEMORY;

	return CVMX_PKO_INVALID_QUEUE;
}
/**
 * Complete packet output. cvmx_pko_send_packet_prepare() must be called exactly once before this,
 * and the same parameters must be passed to both cvmx_pko_send_packet_prepare() and
 * cvmx_pko_send_packet_finish().
 *
 * WARNING: This function may have to look up the proper PKO port in
 * the IPD port to PKO port map, and is thus slower than calling
 * cvmx_pko_send_packet_finish3_pkoid() directly if the PKO port
 * identifier is known.
 *
 * @param ipd_port The IPD port corresponding to the pko port the packet is for
 * @param queue    Queue to use
 * @param pko_command
 *                 PKO HW command word
 * @param packet   Packet to send
 * @param addr     Physical address of a work queue entry or physical address to zero on complete.
 * @param use_locking
 *                 CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or CVMX_PKO_LOCK_CMD_QUEUE
 *
 * @return CVMX_PKO_SUCCESS on success, or error code on failure of output
 */
static inline cvmx_pko_return_value_t
cvmx_hwpko_send_packet_finish3(u64 ipd_port, u64 queue, cvmx_pko_command_word0_t pko_command,
			       cvmx_buf_ptr_t packet, u64 addr, cvmx_pko_lock_t use_locking)
{
	cvmx_cmd_queue_result_t result;

	/* Wait for the tag switch started by the matching prepare() call. */
	if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
		cvmx_pow_tag_sw_wait();

	result = cvmx_cmd_queue_write3(CVMX_CMD_QUEUE_PKO(queue),
				       use_locking == CVMX_PKO_LOCK_CMD_QUEUE,
				       pko_command.u64, packet.u64, addr);
	if (cvmx_likely(result == CVMX_CMD_QUEUE_SUCCESS)) {
		/* 3-word command queued: ring the doorbell. */
		cvmx_pko_doorbell(ipd_port, queue, 3);
		return CVMX_PKO_SUCCESS;
	}

	if (result == CVMX_CMD_QUEUE_NO_MEMORY || result == CVMX_CMD_QUEUE_FULL)
		return CVMX_PKO_NO_MEMORY;

	return CVMX_PKO_INVALID_QUEUE;
}
/**
* Get the first pko_port for the (interface, index)
*
* @param interface
* @param index
*/
int cvmx_pko_get_base_pko_port(int interface, int index);
/**
* Get the number of pko_ports for the (interface, index)
*
* @param interface
* @param index
*/
int cvmx_pko_get_num_pko_ports(int interface, int index);
/**
* For a given port number, return the base pko output queue
* for the port.
*
* @param port IPD port number
* @return Base output queue
*/
int cvmx_pko_get_base_queue(int port);
/**
* For a given port number, return the number of pko output queues.
*
* @param port IPD port number
* @return Number of output queues
*/
int cvmx_pko_get_num_queues(int port);
/**
 * Sets the internal FPA pool data structure for PKO command queue.
 * @param pool fpa pool number to use
 * @param buffer_size buffer size of pool
 * @param buffer_count number of buffers to allocate to pool
 *
 * @note the caller is responsible for setting up the pool with
 * an appropriate buffer size and sufficient buffer count.
 */
void cvmx_pko_set_cmd_que_pool_config(s64 pool, u64 buffer_size, u64 buffer_count);
/**
* Get the status counters for a port.
*
* @param ipd_port Port number (ipd_port) to get statistics for.
* @param clear Set to 1 to clear the counters after they are read
* @param status Where to put the results.
*
* Note:
* - Only the doorbell for the base queue of the ipd_port is
* collected.
* - Retrieving the stats involves writing the index through
* CVMX_PKO_REG_READ_IDX and reading the stat CSRs, in that
* order. It is not MP-safe and caller should guarantee
* atomicity.
*/
void cvmx_pko_get_port_status(u64 ipd_port, u64 clear, cvmx_pko_port_status_t *status);
/**
* Rate limit a PKO port to a max packets/sec. This function is only
* supported on CN57XX, CN56XX, CN55XX, and CN54XX.
*
* @param port Port to rate limit
* @param packets_s Maximum packet/sec
* @param burst Maximum number of packets to burst in a row before rate
* limiting cuts in.
*
* @return Zero on success, negative on failure
*/
int cvmx_pko_rate_limit_packets(int port, int packets_s, int burst);
/**
* Rate limit a PKO port to a max bits/sec. This function is only
* supported on CN57XX, CN56XX, CN55XX, and CN54XX.
*
* @param port Port to rate limit
* @param bits_s PKO rate limit in bits/sec
* @param burst Maximum number of bits to burst before rate
* limiting cuts in.
*
* @return Zero on success, negative on failure
*/
int cvmx_pko_rate_limit_bits(int port, u64 bits_s, int burst);
/**
* @INTERNAL
*
* Retrieve the PKO pipe number for a port
*
* @param interface
* @param index
*
* @return negative on error.
*
* This applies only to the non-loopback interfaces.
*
*/
int __cvmx_pko_get_pipe(int interface, int index);
/**
* For a given PKO port number, return the base output queue
* for the port.
*
* @param pko_port PKO port number
* @return Base output queue
*/
int cvmx_pko_get_base_queue_pkoid(int pko_port);
/**
* For a given PKO port number, return the number of output queues
* for the port.
*
* @param pko_port PKO port number
* @return the number of output queues
*/
int cvmx_pko_get_num_queues_pkoid(int pko_port);
/**
 * Ring the packet output doorbell. This tells the packet
 * output hardware that "len" command words have been added
 * to its pending list. This command includes the required
 * CVMX_SYNCWS before the doorbell ring.
 *
 * @param pko_port Port the packet is for
 * @param queue    Queue the packet is for
 * @param len      Length of the command in 64 bit words
 */
static inline void cvmx_pko_doorbell_pkoid(u64 pko_port, u64 queue, u64 len)
{
	cvmx_pko_doorbell_address_t ptr;

	ptr.u64 = 0;
	ptr.s.mem_space = CVMX_IO_SEG;
	ptr.s.is_io = 1;
	ptr.s.did = CVMX_OCT_DID_PKT_SEND;
	ptr.s.port = pko_port;
	ptr.s.queue = queue;

	/* Need to make sure output queue data is in DRAM before doorbell write */
	CVMX_SYNCWS;
	cvmx_write_io(ptr.u64, len);
}
/**
 * Complete packet output. cvmx_pko_send_packet_prepare() must be called exactly once before this,
 * and the same parameters must be passed to both cvmx_pko_send_packet_prepare() and
 * cvmx_pko_send_packet_finish_pkoid().
 *
 * @param pko_port Port to send it on
 * @param queue    Queue to use
 * @param pko_command
 *                 PKO HW command word
 * @param packet   Packet to send
 * @param use_locking
 *                 CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or CVMX_PKO_LOCK_CMD_QUEUE
 *
 * @return CVMX_PKO_SUCCESS on success, or error code on failure of output
 */
static inline cvmx_pko_return_value_t
cvmx_hwpko_send_packet_finish_pkoid(int pko_port, u64 queue, cvmx_pko_command_word0_t pko_command,
				    cvmx_buf_ptr_t packet, cvmx_pko_lock_t use_locking)
{
	cvmx_cmd_queue_result_t result;

	/* Wait for the tag switch started by the matching prepare() call. */
	if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
		cvmx_pow_tag_sw_wait();

	result = cvmx_cmd_queue_write2(CVMX_CMD_QUEUE_PKO(queue),
				       use_locking == CVMX_PKO_LOCK_CMD_QUEUE,
				       pko_command.u64, packet.u64);
	if (cvmx_likely(result == CVMX_CMD_QUEUE_SUCCESS)) {
		/* 2-word command queued: ring the doorbell. */
		cvmx_pko_doorbell_pkoid(pko_port, queue, 2);
		return CVMX_PKO_SUCCESS;
	}

	if (result == CVMX_CMD_QUEUE_NO_MEMORY || result == CVMX_CMD_QUEUE_FULL)
		return CVMX_PKO_NO_MEMORY;

	return CVMX_PKO_INVALID_QUEUE;
}
/**
 * Complete packet output. cvmx_pko_send_packet_prepare() must be called exactly once before this,
 * and the same parameters must be passed to both cvmx_pko_send_packet_prepare() and
 * cvmx_pko_send_packet_finish_pkoid().
 *
 * @param pko_port The PKO port the packet is for
 * @param queue    Queue to use
 * @param pko_command
 *                 PKO HW command word
 * @param packet   Packet to send
 * @param addr     Physical address of a work queue entry or physical address to zero on complete.
 * @param use_locking
 *                 CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or CVMX_PKO_LOCK_CMD_QUEUE
 *
 * @return CVMX_PKO_SUCCESS on success, or error code on failure of output
 */
static inline cvmx_pko_return_value_t
cvmx_hwpko_send_packet_finish3_pkoid(u64 pko_port, u64 queue, cvmx_pko_command_word0_t pko_command,
				     cvmx_buf_ptr_t packet, u64 addr, cvmx_pko_lock_t use_locking)
{
	cvmx_cmd_queue_result_t result;

	/* Wait for the tag switch started by the matching prepare() call. */
	if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
		cvmx_pow_tag_sw_wait();

	result = cvmx_cmd_queue_write3(CVMX_CMD_QUEUE_PKO(queue),
				       use_locking == CVMX_PKO_LOCK_CMD_QUEUE,
				       pko_command.u64, packet.u64, addr);
	if (cvmx_likely(result == CVMX_CMD_QUEUE_SUCCESS)) {
		/* 3-word command queued: ring the doorbell. */
		cvmx_pko_doorbell_pkoid(pko_port, queue, 3);
		return CVMX_PKO_SUCCESS;
	}

	if (result == CVMX_CMD_QUEUE_NO_MEMORY || result == CVMX_CMD_QUEUE_FULL)
		return CVMX_PKO_NO_MEMORY;

	return CVMX_PKO_INVALID_QUEUE;
}
/*
* Obtain the number of PKO commands pending in a queue
*
* @param queue is the queue identifier to be queried
* @return the number of commands pending transmission or -1 on error
*/
int cvmx_pko_queue_pend_count(cvmx_cmd_queue_id_t queue);
void cvmx_pko_set_cmd_queue_pool_buffer_count(u64 buffer_count);
#endif /* __CVMX_HWPKO_H__ */

View File

@ -0,0 +1,154 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* This file contains defines for the ILK interface
*/
#ifndef __CVMX_ILK_H__
#define __CVMX_ILK_H__
/* CSR typedefs have been moved to cvmx-ilk-defs.h */
/*
 * Note: this macro must match the first ilk port in the ipd_port_map_68xx[]
 * and ipd_port_map_78xx[] arrays.
 */
static inline int CVMX_ILK_GBL_BASE(void)
{
	/* Returns -1 for models without ILK support. */
	return OCTEON_IS_MODEL(OCTEON_CN68XX) ? 5 :
	       OCTEON_IS_MODEL(OCTEON_CN78XX) ? 6 : -1;
}
static inline int CVMX_ILK_QLM_BASE(void)
{
	/* Returns -1 for models without ILK support. */
	return OCTEON_IS_MODEL(OCTEON_CN68XX) ? 1 :
	       OCTEON_IS_MODEL(OCTEON_CN78XX) ? 4 : -1;
}
typedef struct {
int intf_en : 1;
int la_mode : 1;
int reserved : 14; /* unused */
int lane_speed : 16;
/* add more here */
} cvmx_ilk_intf_t;
#define CVMX_NUM_ILK_INTF 2
static inline int CVMX_ILK_MAX_LANES(void)
{
	/* Returns -1 for models without ILK support. */
	return OCTEON_IS_MODEL(OCTEON_CN68XX) ? 8 :
	       OCTEON_IS_MODEL(OCTEON_CN78XX) ? 16 : -1;
}
extern unsigned short cvmx_ilk_lane_mask[CVMX_MAX_NODES][CVMX_NUM_ILK_INTF];
typedef struct {
unsigned int pipe;
unsigned int chan;
} cvmx_ilk_pipe_chan_t;
#define CVMX_ILK_MAX_PIPES 45
/* Max number of channels allowed */
#define CVMX_ILK_MAX_CHANS 256
extern int cvmx_ilk_chans[CVMX_MAX_NODES][CVMX_NUM_ILK_INTF];
typedef struct {
unsigned int chan;
unsigned int pknd;
} cvmx_ilk_chan_pknd_t;
#define CVMX_ILK_MAX_PKNDS 16 /* must be <45 */
typedef struct {
int *chan_list; /* for discrete channels. or, must be null */
unsigned int num_chans;
unsigned int chan_start; /* for continuous channels */
unsigned int chan_end;
unsigned int chan_step;
unsigned int clr_on_rd;
} cvmx_ilk_stats_ctrl_t;
#define CVMX_ILK_MAX_CAL 288
#define CVMX_ILK_MAX_CAL_IDX (CVMX_ILK_MAX_CAL / 8)
#define CVMX_ILK_TX_MIN_CAL 1
#define CVMX_ILK_RX_MIN_CAL 1
#define CVMX_ILK_CAL_GRP_SZ 8
#define CVMX_ILK_PIPE_BPID_SZ 7
#define CVMX_ILK_ENT_CTRL_SZ 2
#define CVMX_ILK_RX_FIFO_WM 0x200
typedef enum { PIPE_BPID = 0, LINK, XOFF, XON } cvmx_ilk_cal_ent_ctrl_t;
typedef struct {
unsigned char pipe_bpid;
cvmx_ilk_cal_ent_ctrl_t ent_ctrl;
} cvmx_ilk_cal_entry_t;
typedef enum { CVMX_ILK_LPBK_DISA = 0, CVMX_ILK_LPBK_ENA } cvmx_ilk_lpbk_ena_t;
typedef enum { CVMX_ILK_LPBK_INT = 0, CVMX_ILK_LPBK_EXT } cvmx_ilk_lpbk_mode_t;
/**
* This header is placed in front of all received ILK look-aside mode packets
*/
typedef union {
u64 u64;
struct {
u32 reserved_63_57 : 7; /* bits 63...57 */
u32 nsp_cmd : 5; /* bits 56...52 */
u32 nsp_flags : 4; /* bits 51...48 */
u32 nsp_grp_id_upper : 6; /* bits 47...42 */
u32 reserved_41_40 : 2; /* bits 41...40 */
/* Protocol type, 1 for LA mode packet */
u32 la_mode : 1; /* bit 39 */
u32 nsp_grp_id_lower : 2; /* bits 38...37 */
u32 nsp_xid_upper : 4; /* bits 36...33 */
/* ILK channel number, 0 or 1 */
u32 ilk_channel : 1; /* bit 32 */
u32 nsp_xid_lower : 8; /* bits 31...24 */
/* Unpredictable, may be any value */
u32 reserved_23_0 : 24; /* bits 23...0 */
} s;
} cvmx_ilk_la_nsp_compact_hdr_t;
typedef struct cvmx_ilk_LA_mode_struct {
int ilk_LA_mode;
int ilk_LA_mode_cal_ena;
} cvmx_ilk_LA_mode_t;
extern cvmx_ilk_LA_mode_t cvmx_ilk_LA_mode[CVMX_NUM_ILK_INTF];
int cvmx_ilk_use_la_mode(int interface, int channel);
int cvmx_ilk_start_interface(int interface, unsigned short num_lanes);
int cvmx_ilk_start_interface_la(int interface, unsigned char num_lanes);
int cvmx_ilk_set_pipe(int interface, int pipe_base, unsigned int pipe_len);
int cvmx_ilk_tx_set_channel(int interface, cvmx_ilk_pipe_chan_t *pch, unsigned int num_chs);
int cvmx_ilk_rx_set_pknd(int interface, cvmx_ilk_chan_pknd_t *chpknd, unsigned int num_pknd);
int cvmx_ilk_enable(int interface);
int cvmx_ilk_disable(int interface);
int cvmx_ilk_get_intf_ena(int interface);
int cvmx_ilk_get_chan_info(int interface, unsigned char **chans, unsigned char *num_chan);
cvmx_ilk_la_nsp_compact_hdr_t cvmx_ilk_enable_la_header(int ipd_port, int mode);
void cvmx_ilk_show_stats(int interface, cvmx_ilk_stats_ctrl_t *pstats);
int cvmx_ilk_cal_setup_rx(int interface, int cal_depth, cvmx_ilk_cal_entry_t *pent, int hi_wm,
unsigned char cal_ena);
int cvmx_ilk_cal_setup_tx(int interface, int cal_depth, cvmx_ilk_cal_entry_t *pent,
unsigned char cal_ena);
int cvmx_ilk_lpbk(int interface, cvmx_ilk_lpbk_ena_t enable, cvmx_ilk_lpbk_mode_t mode);
int cvmx_ilk_la_mode_enable_rx_calendar(int interface);
#endif /* __CVMX_ILK_H__ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,233 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Interface to the hardware Input Packet Data unit.
*/
#ifndef __CVMX_IPD_H__
#define __CVMX_IPD_H__
#include "cvmx-pki.h"
/* CSR typedefs have been moved to cvmx-ipd-defs.h */
typedef cvmx_ipd_1st_mbuff_skip_t cvmx_ipd_mbuff_not_first_skip_t;
typedef cvmx_ipd_1st_next_ptr_back_t cvmx_ipd_second_next_ptr_back_t;
typedef struct cvmx_ipd_tag_fields {
u64 ipv6_src_ip : 1;
u64 ipv6_dst_ip : 1;
u64 ipv6_src_port : 1;
u64 ipv6_dst_port : 1;
u64 ipv6_next_header : 1;
u64 ipv4_src_ip : 1;
u64 ipv4_dst_ip : 1;
u64 ipv4_src_port : 1;
u64 ipv4_dst_port : 1;
u64 ipv4_protocol : 1;
u64 input_port : 1;
} cvmx_ipd_tag_fields_t;
typedef struct cvmx_pip_port_config {
u64 parse_mode;
u64 tag_type;
u64 tag_mode;
cvmx_ipd_tag_fields_t tag_fields;
} cvmx_pip_port_config_t;
typedef struct cvmx_ipd_config_struct {
u64 first_mbuf_skip;
u64 not_first_mbuf_skip;
u64 ipd_enable;
u64 enable_len_M8_fix;
u64 cache_mode;
cvmx_fpa_pool_config_t packet_pool;
cvmx_fpa_pool_config_t wqe_pool;
cvmx_pip_port_config_t port_config;
} cvmx_ipd_config_t;
extern cvmx_ipd_config_t cvmx_ipd_cfg;
/**
 * Gets the fpa pool number of packet pool
 */
static inline s64 cvmx_fpa_get_packet_pool(void)
{
	return cvmx_ipd_cfg.packet_pool.pool_num;
}
/**
 * Gets the buffer size of packet pool buffer
 */
static inline u64 cvmx_fpa_get_packet_pool_block_size(void)
{
	return cvmx_ipd_cfg.packet_pool.buffer_size;
}
/**
 * Gets the buffer count of packet pool
 */
static inline u64 cvmx_fpa_get_packet_pool_buffer_count(void)
{
	return cvmx_ipd_cfg.packet_pool.buffer_count;
}
/**
 * Gets the fpa pool number of wqe pool
 */
static inline s64 cvmx_fpa_get_wqe_pool(void)
{
	return cvmx_ipd_cfg.wqe_pool.pool_num;
}
/**
 * Gets the buffer size of wqe pool buffer
 */
static inline u64 cvmx_fpa_get_wqe_pool_block_size(void)
{
	return cvmx_ipd_cfg.wqe_pool.buffer_size;
}
/**
 * Gets the buffer count of wqe pool
 */
static inline u64 cvmx_fpa_get_wqe_pool_buffer_count(void)
{
	return cvmx_ipd_cfg.wqe_pool.buffer_count;
}
/**
 * Sets the ipd related configuration in internal structure which is then used
 * for setting up the IPD hardware block
 */
int cvmx_ipd_set_config(cvmx_ipd_config_t ipd_config);
/**
* Gets the ipd related configuration from internal structure.
*/
void cvmx_ipd_get_config(cvmx_ipd_config_t *ipd_config);
/**
 * Sets the internal FPA pool data structure for packet buffer pool.
 * @param pool fpa pool number to use
 * @param buffer_size buffer size of pool
 * @param buffer_count number of buffers to allocate to pool
 */
void cvmx_ipd_set_packet_pool_config(s64 pool, u64 buffer_size, u64 buffer_count);
/**
 * Sets the internal FPA pool data structure for wqe pool.
 * @param pool fpa pool number to use
 * @param buffer_size buffer size of pool
 * @param buffer_count number of buffers to allocate to pool
 */
void cvmx_ipd_set_wqe_pool_config(s64 pool, u64 buffer_size, u64 buffer_count);
/**
 * Gets the FPA packet buffer pool parameters.
 * Any output pointer may be NULL if that value is not needed.
 */
static inline void cvmx_fpa_get_packet_pool_config(s64 *pool, u64 *buffer_size, u64 *buffer_count)
{
	const cvmx_fpa_pool_config_t *cfg = &cvmx_ipd_cfg.packet_pool;

	if (pool)
		*pool = cfg->pool_num;
	if (buffer_size)
		*buffer_size = cfg->buffer_size;
	if (buffer_count)
		*buffer_count = cfg->buffer_count;
}
/**
 * Sets the FPA packet buffer pool parameters.
 */
static inline void cvmx_fpa_set_packet_pool_config(s64 pool, u64 buffer_size, u64 buffer_count)
{
	/* Thin wrapper: delegates to cvmx_ipd_set_packet_pool_config() */
	cvmx_ipd_set_packet_pool_config(pool, buffer_size, buffer_count);
}
/**
 * Gets the FPA WQE pool parameters.
 * Any output pointer may be NULL if that value is not needed.
 */
static inline void cvmx_fpa_get_wqe_pool_config(s64 *pool, u64 *buffer_size, u64 *buffer_count)
{
	const cvmx_fpa_pool_config_t *cfg = &cvmx_ipd_cfg.wqe_pool;

	if (pool)
		*pool = cfg->pool_num;
	if (buffer_size)
		*buffer_size = cfg->buffer_size;
	if (buffer_count)
		*buffer_count = cfg->buffer_count;
}
/**
 * Sets the FPA WQE pool parameters.
 */
static inline void cvmx_fpa_set_wqe_pool_config(s64 pool, u64 buffer_size, u64 buffer_count)
{
	/* Thin wrapper: delegates to cvmx_ipd_set_wqe_pool_config() */
	cvmx_ipd_set_wqe_pool_config(pool, buffer_size, buffer_count);
}
/**
* Configure IPD
*
* @param mbuff_size Packets buffer size in 8 byte words
* @param first_mbuff_skip
* Number of 8 byte words to skip in the first buffer
* @param not_first_mbuff_skip
* Number of 8 byte words to skip in each following buffer
* @param first_back Must be same as first_mbuff_skip / 128
* @param second_back
* Must be same as not_first_mbuff_skip / 128
* @param wqe_fpa_pool
* FPA pool to get work entries from
* @param cache_mode
* @param back_pres_enable_flag
* Enable or disable port back pressure at a global level.
* This should always be 1 as more accurate control can be
* found in IPD_PORTX_BP_PAGE_CNT[BP_ENB].
*/
void cvmx_ipd_config(u64 mbuff_size, u64 first_mbuff_skip, u64 not_first_mbuff_skip, u64 first_back,
u64 second_back, u64 wqe_fpa_pool, cvmx_ipd_mode_t cache_mode,
u64 back_pres_enable_flag);
/**
* Enable IPD
*/
void cvmx_ipd_enable(void);
/**
* Disable IPD
*/
void cvmx_ipd_disable(void);
void __cvmx_ipd_free_ptr(void);
void cvmx_ipd_set_packet_pool_buffer_count(u64 buffer_count);
void cvmx_ipd_set_wqe_pool_buffer_count(u64 buffer_count);
/**
* Setup Random Early Drop on a specific input queue
*
* @param queue Input queue to setup RED on (0-7)
* @param pass_thresh
* Packets will begin slowly dropping when there are less than
* this many packet buffers free in FPA 0.
* @param drop_thresh
* All incoming packets will be dropped when there are less
* than this many free packet buffers in FPA 0.
* @return Zero on success. Negative on failure
*/
int cvmx_ipd_setup_red_queue(int queue, int pass_thresh, int drop_thresh);
/**
* Setup Random Early Drop to automatically begin dropping packets.
*
* @param pass_thresh
* Packets will begin slowly dropping when there are less than
* this many packet buffers free in FPA 0.
* @param drop_thresh
* All incoming packets will be dropped when there are less
* than this many free packet buffers in FPA 0.
* @return Zero on success. Negative on failure
*/
int cvmx_ipd_setup_red(int pass_thresh, int drop_thresh);
#endif /* __CVMX_IPD_H__ */

View File

@ -0,0 +1,172 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef __CVMX_L2C_DEFS_H_
#define __CVMX_L2C_DEFS_H_
#define CVMX_L2C_CFG 0x0001180080000000ull
#define CVMX_L2C_CTL 0x0001180080800000ull
/*
* Mapping is done starting from 0x11800.80000000
* Use _REL for relative mapping
*/
#define CVMX_L2C_CTL_REL 0x00800000
#define CVMX_L2C_BIG_CTL_REL 0x00800030
#define CVMX_L2C_TADX_INT_REL(i) (0x00a00028 + (((i) & 7) * 0x40000))
#define CVMX_L2C_MCIX_INT_REL(i) (0x00c00028 + (((i) & 3) * 0x40000))
/**
* cvmx_l2c_cfg
*
* Specify the RSL base addresses for the block
*
* L2C_CFG = L2C Configuration
*
* Description:
*/
union cvmx_l2c_cfg {
u64 u64;
struct cvmx_l2c_cfg_s {
u64 reserved_20_63 : 44;
u64 bstrun : 1;
u64 lbist : 1;
u64 xor_bank : 1;
u64 dpres1 : 1;
u64 dpres0 : 1;
u64 dfill_dis : 1;
u64 fpexp : 4;
u64 fpempty : 1;
u64 fpen : 1;
u64 idxalias : 1;
u64 mwf_crd : 4;
u64 rsp_arb_mode : 1;
u64 rfb_arb_mode : 1;
u64 lrf_arb_mode : 1;
} s;
};
/**
* cvmx_l2c_ctl
*
* L2C_CTL = L2C Control
*
*
* Notes:
* (1) If MAXVAB is != 0, VAB_THRESH should be less than MAXVAB.
*
* (2) L2DFDBE and L2DFSBE allows software to generate L2DSBE, L2DDBE, VBFSBE,
* and VBFDBE errors for the purposes of testing error handling code. When
* one (or both) of these bits are set a PL2 which misses in the L2 will fill
* with the appropriate error in the first 2 OWs of the fill. Software can
* determine which OW pair gets the error by choosing the desired fill order
* (address<6:5>). A PL2 which hits in the L2 will not inject any errors.
* Therefore sending a WBIL2 prior to the PL2 is recommended to make a miss
* likely (if multiple processors are involved software must be careful to be
* sure no other processor or IO device can bring the block into the L2).
*
* To generate a VBFSBE or VBFDBE, software must first get the cache block
* into the cache with an error using a PL2 which misses the L2. Then a
* store partial to a portion of the cache block without the error must
* change the block to dirty. Then, a subsequent WBL2/WBIL2/victim will
* trigger the VBFSBE/VBFDBE error.
*/
union cvmx_l2c_ctl {
u64 u64;
struct cvmx_l2c_ctl_s {
u64 reserved_29_63 : 35;
u64 rdf_fast : 1;
u64 disstgl2i : 1;
u64 l2dfsbe : 1;
u64 l2dfdbe : 1;
u64 discclk : 1;
u64 maxvab : 4;
u64 maxlfb : 4;
u64 rsp_arb_mode : 1;
u64 xmc_arb_mode : 1;
u64 reserved_2_13 : 12;
u64 disecc : 1;
u64 disidxalias : 1;
} s;
struct cvmx_l2c_ctl_cn73xx {
u64 reserved_32_63 : 32;
u64 ocla_qos : 3;
u64 reserved_28_28 : 1;
u64 disstgl2i : 1;
u64 reserved_25_26 : 2;
u64 discclk : 1;
u64 reserved_16_23 : 8;
u64 rsp_arb_mode : 1;
u64 xmc_arb_mode : 1;
u64 rdf_cnt : 8;
u64 reserved_4_5 : 2;
u64 disldwb : 1;
u64 dissblkdty : 1;
u64 disecc : 1;
u64 disidxalias : 1;
} cn73xx;
struct cvmx_l2c_ctl_cn73xx cn78xx;
};
/**
* cvmx_l2c_big_ctl
*
* L2C_BIG_CTL = L2C Big memory control register
*
*
* Notes:
* (1) BIGRD interrupts can occur during normal operation as the PP's are
* allowed to prefetch to non-existent memory locations. Therefore,
* BIGRD is for informational purposes only.
*
* (2) When HOLEWR/BIGWR blocks a store L2C_VER_ID, L2C_VER_PP, L2C_VER_IOB,
* and L2C_VER_MSC will be loaded just like a store which is blocked by VRTWR.
* Additionally, L2C_ERR_XMC will be loaded.
*/
/* L2C_BIG_CTL register layout; see the HOLEWR/BIGWR notes above. */
union cvmx_l2c_big_ctl {
	u64 u64;		/* Raw 64-bit register value */
	struct cvmx_l2c_big_ctl_s {
		u64 reserved_8_63 : 56;
		u64 maxdram : 4;
		u64 reserved_0_3 : 4;
	} s;
	/* CN61XX-family layout: bit 0 is DISABLE */
	struct cvmx_l2c_big_ctl_cn61xx {
		u64 reserved_8_63 : 56;
		u64 maxdram : 4;
		u64 reserved_1_3 : 3;
		u64 disable : 1;
	} cn61xx;
	struct cvmx_l2c_big_ctl_cn61xx cn63xx;
	struct cvmx_l2c_big_ctl_cn61xx cn66xx;
	struct cvmx_l2c_big_ctl_cn61xx cn68xx;
	struct cvmx_l2c_big_ctl_cn61xx cn68xxp1;
	/* CN70XX-family layout: bit 0 is DISBIG */
	struct cvmx_l2c_big_ctl_cn70xx {
		u64 reserved_8_63 : 56;
		u64 maxdram : 4;
		u64 reserved_1_3 : 3;
		u64 disbig : 1;
	} cn70xx;
	struct cvmx_l2c_big_ctl_cn70xx cn70xxp1;
	struct cvmx_l2c_big_ctl_cn70xx cn73xx;
	struct cvmx_l2c_big_ctl_cn70xx cn78xx;
	struct cvmx_l2c_big_ctl_cn70xx cn78xxp1;
	struct cvmx_l2c_big_ctl_cn61xx cnf71xx;
	struct cvmx_l2c_big_ctl_cn70xx cnf75xx;
};
/*
 * Per-byte-lane working state: delay settings, error counts and the
 * best result found so far.
 * NOTE(review): "rlevel" presumably refers to DDR read-leveling
 * training -- confirm against the DRAM init code that fills this in.
 */
struct rlevel_byte_data {
	int delay;	/* Current delay setting for this byte lane */
	int loop_total;
	int loop_count;
	int best;	/* Best delay found so far */
	u64 bm;
	int bmerrs;
	int sqerrs;
	int bestsq;
};
#endif

View File

@ -0,0 +1,353 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef __CVMX_MIO_DEFS_H__
#define __CVMX_MIO_DEFS_H__
/* MIO register addresses (physical CSR addresses). */
#define CVMX_MIO_PTP_CLOCK_CFG (0x0001070000000F00ull)
#define CVMX_MIO_PTP_EVT_CNT   (0x0001070000000F28ull)
#define CVMX_MIO_RST_BOOT      (0x0001180000001600ull)
/*
 * MIO_RST_CTL0/1 are 64-bit registers at 0x...1618 and 0x...1620, so the
 * per-port stride is 8 bytes. The previous definition omitted the "* 8"
 * multiplier, which made CVMX_MIO_RST_CTLX(1) resolve to the unaligned,
 * wrong address 0x...1619 (compare CVMX_MIO_QLMX_CFG below, which
 * applies the same 8-byte stride correctly).
 */
#define CVMX_MIO_RST_CTLX(offset) (0x0001180000001618ull + ((offset) & 1) * 8)
#define CVMX_MIO_QLMX_CFG(offset) (0x0001180000001590ull + ((offset) & 7) * 8)
/**
* cvmx_mio_ptp_clock_cfg
*
* This register configures the timestamp architecture.
*
*/
/*
 * MIO_PTP_CLOCK_CFG register layout. Bit-fields are declared
 * most-significant-bit first to mirror the hardware layout; per-model
 * variants follow the generic "s" view.
 */
union cvmx_mio_ptp_clock_cfg {
	u64 u64;		/* Raw 64-bit register value */
	struct cvmx_mio_ptp_clock_cfg_s {
		u64 reserved_40_63 : 24;
		u64 ext_clk_edge : 2;
		u64 ckout_out4 : 1;
		u64 pps_out : 5;
		u64 pps_inv : 1;
		u64 pps_en : 1;
		u64 ckout_out : 4;
		u64 ckout_inv : 1;
		u64 ckout_en : 1;
		u64 evcnt_in : 6;
		u64 evcnt_edge : 1;
		u64 evcnt_en : 1;
		u64 tstmp_in : 6;
		u64 tstmp_edge : 1;
		u64 tstmp_en : 1;
		u64 ext_clk_in : 6;
		u64 ext_clk_en : 1;
		u64 ptp_en : 1;
	} s;
	/* CN61XX layout: adds PPS/CKOUT bits in [41:40] */
	struct cvmx_mio_ptp_clock_cfg_cn61xx {
		u64 reserved_42_63 : 22;
		u64 pps : 1;
		u64 ckout : 1;
		u64 ext_clk_edge : 2;
		u64 ckout_out4 : 1;
		u64 pps_out : 5;
		u64 pps_inv : 1;
		u64 pps_en : 1;
		u64 ckout_out : 4;
		u64 ckout_inv : 1;
		u64 ckout_en : 1;
		u64 evcnt_in : 6;
		u64 evcnt_edge : 1;
		u64 evcnt_en : 1;
		u64 tstmp_in : 6;
		u64 tstmp_edge : 1;
		u64 tstmp_en : 1;
		u64 ext_clk_in : 6;
		u64 ext_clk_en : 1;
		u64 ptp_en : 1;
	} cn61xx;
	/* CN63XX layout: no PPS/CKOUT output fields */
	struct cvmx_mio_ptp_clock_cfg_cn63xx {
		u64 reserved_24_63 : 40;
		u64 evcnt_in : 6;
		u64 evcnt_edge : 1;
		u64 evcnt_en : 1;
		u64 tstmp_in : 6;
		u64 tstmp_edge : 1;
		u64 tstmp_en : 1;
		u64 ext_clk_in : 6;
		u64 ext_clk_en : 1;
		u64 ptp_en : 1;
	} cn63xx;
	struct cvmx_mio_ptp_clock_cfg_cn63xx cn63xxp1;
	struct cvmx_mio_ptp_clock_cfg_s cn66xx;
	struct cvmx_mio_ptp_clock_cfg_cn61xx cn68xx;
	struct cvmx_mio_ptp_clock_cfg_cn63xx cn68xxp1;
	/* CN70XX layout: CKOUT/PPS bits swapped, output-select fields reserved */
	struct cvmx_mio_ptp_clock_cfg_cn70xx {
		u64 reserved_42_63 : 22;
		u64 ckout : 1;
		u64 pps : 1;
		u64 ext_clk_edge : 2;
		u64 reserved_32_37 : 6;
		u64 pps_inv : 1;
		u64 pps_en : 1;
		u64 reserved_26_29 : 4;
		u64 ckout_inv : 1;
		u64 ckout_en : 1;
		u64 evcnt_in : 6;
		u64 evcnt_edge : 1;
		u64 evcnt_en : 1;
		u64 tstmp_in : 6;
		u64 tstmp_edge : 1;
		u64 tstmp_en : 1;
		u64 ext_clk_in : 6;
		u64 ext_clk_en : 1;
		u64 ptp_en : 1;
	} cn70xx;
	struct cvmx_mio_ptp_clock_cfg_cn70xx cn70xxp1;
	struct cvmx_mio_ptp_clock_cfg_cn70xx cn73xx;
	struct cvmx_mio_ptp_clock_cfg_cn70xx cn78xx;
	struct cvmx_mio_ptp_clock_cfg_cn70xx cn78xxp1;
	struct cvmx_mio_ptp_clock_cfg_cn61xx cnf71xx;
	struct cvmx_mio_ptp_clock_cfg_cn70xx cnf75xx;
};

typedef union cvmx_mio_ptp_clock_cfg cvmx_mio_ptp_clock_cfg_t;
/**
* cvmx_mio_ptp_evt_cnt
*
* This register contains the PTP event counter.
*
*/
/* MIO_PTP_EVT_CNT: the full 64 bits hold the PTP event count. */
union cvmx_mio_ptp_evt_cnt {
	u64 u64;		/* Raw 64-bit register value */
	struct cvmx_mio_ptp_evt_cnt_s {
		u64 cntr : 64;	/* Event counter value */
	} s;
	/* All supported models share the same layout */
	struct cvmx_mio_ptp_evt_cnt_s cn61xx;
	struct cvmx_mio_ptp_evt_cnt_s cn63xx;
	struct cvmx_mio_ptp_evt_cnt_s cn63xxp1;
	struct cvmx_mio_ptp_evt_cnt_s cn66xx;
	struct cvmx_mio_ptp_evt_cnt_s cn68xx;
	struct cvmx_mio_ptp_evt_cnt_s cn68xxp1;
	struct cvmx_mio_ptp_evt_cnt_s cn70xx;
	struct cvmx_mio_ptp_evt_cnt_s cn70xxp1;
	struct cvmx_mio_ptp_evt_cnt_s cn73xx;
	struct cvmx_mio_ptp_evt_cnt_s cn78xx;
	struct cvmx_mio_ptp_evt_cnt_s cn78xxp1;
	struct cvmx_mio_ptp_evt_cnt_s cnf71xx;
	struct cvmx_mio_ptp_evt_cnt_s cnf75xx;
};

typedef union cvmx_mio_ptp_evt_cnt cvmx_mio_ptp_evt_cnt_t;
/**
* cvmx_mio_rst_boot
*
* Notes:
* JTCSRDIS, EJTAGDIS, ROMEN reset to 1 in authentik mode; in all other modes they reset to 0.
*
*/
/*
 * MIO_RST_BOOT register layout: boot-time configuration/status
 * (clock multipliers, QLM speed straps, last-boot cause bits).
 * Bit-fields are most-significant-bit first; per-model variants below.
 */
union cvmx_mio_rst_boot {
	u64 u64;		/* Raw 64-bit register value */
	struct cvmx_mio_rst_boot_s {
		u64 chipkill : 1;
		u64 jtcsrdis : 1;
		u64 ejtagdis : 1;
		u64 romen : 1;
		u64 ckill_ppdis : 1;
		u64 jt_tstmode : 1;
		u64 reserved_50_57 : 8;
		u64 lboot_ext : 2;
		u64 reserved_44_47 : 4;
		u64 qlm4_spd : 4;
		u64 qlm3_spd : 4;
		u64 c_mul : 6;		/* Core clock multiplier */
		u64 pnr_mul : 6;	/* IO/coprocessor clock multiplier */
		u64 qlm2_spd : 4;
		u64 qlm1_spd : 4;
		u64 qlm0_spd : 4;
		u64 lboot : 10;
		u64 rboot : 1;
		u64 rboot_pin : 1;
	} s;
	/* CN61XX: only QLM0..2 speed straps */
	struct cvmx_mio_rst_boot_cn61xx {
		u64 chipkill : 1;
		u64 jtcsrdis : 1;
		u64 ejtagdis : 1;
		u64 romen : 1;
		u64 ckill_ppdis : 1;
		u64 jt_tstmode : 1;
		u64 reserved_50_57 : 8;
		u64 lboot_ext : 2;
		u64 reserved_36_47 : 12;
		u64 c_mul : 6;
		u64 pnr_mul : 6;
		u64 qlm2_spd : 4;
		u64 qlm1_spd : 4;
		u64 qlm0_spd : 4;
		u64 lboot : 10;
		u64 rboot : 1;
		u64 rboot_pin : 1;
	} cn61xx;
	/* CN63XX: no chipkill/JTAG control bits */
	struct cvmx_mio_rst_boot_cn63xx {
		u64 reserved_36_63 : 28;
		u64 c_mul : 6;
		u64 pnr_mul : 6;
		u64 qlm2_spd : 4;
		u64 qlm1_spd : 4;
		u64 qlm0_spd : 4;
		u64 lboot : 10;
		u64 rboot : 1;
		u64 rboot_pin : 1;
	} cn63xx;
	struct cvmx_mio_rst_boot_cn63xx cn63xxp1;
	/* CN66XX: like CN61XX but without JT_TSTMODE */
	struct cvmx_mio_rst_boot_cn66xx {
		u64 chipkill : 1;
		u64 jtcsrdis : 1;
		u64 ejtagdis : 1;
		u64 romen : 1;
		u64 ckill_ppdis : 1;
		u64 reserved_50_58 : 9;
		u64 lboot_ext : 2;
		u64 reserved_36_47 : 12;
		u64 c_mul : 6;
		u64 pnr_mul : 6;
		u64 qlm2_spd : 4;
		u64 qlm1_spd : 4;
		u64 qlm0_spd : 4;
		u64 lboot : 10;
		u64 rboot : 1;
		u64 rboot_pin : 1;
	} cn66xx;
	/* CN68XX: five QLM speed straps, JT_TSTMODE only */
	struct cvmx_mio_rst_boot_cn68xx {
		u64 reserved_59_63 : 5;
		u64 jt_tstmode : 1;
		u64 reserved_44_57 : 14;
		u64 qlm4_spd : 4;
		u64 qlm3_spd : 4;
		u64 c_mul : 6;
		u64 pnr_mul : 6;
		u64 qlm2_spd : 4;
		u64 qlm1_spd : 4;
		u64 qlm0_spd : 4;
		u64 lboot : 10;
		u64 rboot : 1;
		u64 rboot_pin : 1;
	} cn68xx;
	struct cvmx_mio_rst_boot_cn68xxp1 {
		u64 reserved_44_63 : 20;
		u64 qlm4_spd : 4;
		u64 qlm3_spd : 4;
		u64 c_mul : 6;
		u64 pnr_mul : 6;
		u64 qlm2_spd : 4;
		u64 qlm1_spd : 4;
		u64 qlm0_spd : 4;
		u64 lboot : 10;
		u64 rboot : 1;
		u64 rboot_pin : 1;
	} cn68xxp1;
	struct cvmx_mio_rst_boot_cn61xx cnf71xx;
};

typedef union cvmx_mio_rst_boot cvmx_mio_rst_boot_t;
/**
* cvmx_mio_rst_ctl#
*
* Notes:
* GEN1_Only mode is enabled for PEM0 when QLM1_SPD[0] is set or when sclk < 550Mhz.
* GEN1_Only mode is enabled for PEM1 when QLM1_SPD[1] is set or when sclk < 550Mhz.
*/
/*
 * MIO_RST_CTL0/1 register layout: per-PCIe-port reset control (one
 * instance per PEM, indexed via CVMX_MIO_RST_CTLX()). See the GEN1_Only
 * notes above.
 */
union cvmx_mio_rst_ctlx {
	u64 u64;		/* Raw 64-bit register value */
	struct cvmx_mio_rst_ctlx_s {
		u64 reserved_13_63 : 51;
		u64 in_rev_ln : 1;
		u64 rev_lanes : 1;
		u64 gen1_only : 1;
		u64 prst_link : 1;
		u64 rst_done : 1;
		u64 rst_link : 1;
		u64 host_mode : 1;	/* Port is RC (host) vs. EP */
		u64 prtmode : 2;
		u64 rst_drv : 1;
		u64 rst_rcv : 1;
		u64 rst_chip : 1;
		u64 rst_val : 1;
	} s;
	struct cvmx_mio_rst_ctlx_s cn61xx;
	/* CN63XX: no lane-reversal / GEN1-only bits */
	struct cvmx_mio_rst_ctlx_cn63xx {
		u64 reserved_10_63 : 54;
		u64 prst_link : 1;
		u64 rst_done : 1;
		u64 rst_link : 1;
		u64 host_mode : 1;
		u64 prtmode : 2;
		u64 rst_drv : 1;
		u64 rst_rcv : 1;
		u64 rst_chip : 1;
		u64 rst_val : 1;
	} cn63xx;
	/* CN63XX pass 1: additionally lacks PRST_LINK */
	struct cvmx_mio_rst_ctlx_cn63xxp1 {
		u64 reserved_9_63 : 55;
		u64 rst_done : 1;
		u64 rst_link : 1;
		u64 host_mode : 1;
		u64 prtmode : 2;
		u64 rst_drv : 1;
		u64 rst_rcv : 1;
		u64 rst_chip : 1;
		u64 rst_val : 1;
	} cn63xxp1;
	struct cvmx_mio_rst_ctlx_cn63xx cn66xx;
	struct cvmx_mio_rst_ctlx_cn63xx cn68xx;
	struct cvmx_mio_rst_ctlx_cn63xx cn68xxp1;
	struct cvmx_mio_rst_ctlx_s cnf71xx;
};

typedef union cvmx_mio_rst_ctlx cvmx_mio_rst_ctlx_t;
/**
* cvmx_mio_qlm#_cfg
*
* Notes:
* Certain QLM_SPD is valid only for certain QLM_CFG configuration, refer to HRM for valid
* combinations. These csrs are reset only on COLD_RESET. The Reset values for QLM_SPD and QLM_CFG
* are as follows: MIO_QLM0_CFG SPD=F, CFG=2 SGMII (AGX0)
* MIO_QLM1_CFG SPD=0, CFG=1 PCIE 2x1 (PEM0/PEM1)
*/
/*
 * MIO_QLM0..n_CFG register layout: per-QLM (SerDes lane group) speed
 * and configuration, indexed via CVMX_MIO_QLMX_CFG(). See the notes
 * above for valid QLM_SPD/QLM_CFG combinations.
 */
union cvmx_mio_qlmx_cfg {
	u64 u64;		/* Raw 64-bit register value */
	struct cvmx_mio_qlmx_cfg_s {
		u64 reserved_15_63 : 49;
		u64 prtmode : 1;
		u64 reserved_12_13 : 2;
		u64 qlm_spd : 4;	/* QLM speed selection */
		u64 reserved_4_7 : 4;
		u64 qlm_cfg : 4;	/* QLM mode/protocol selection */
	} s;
	/* CN61XX: only 2 QLM_CFG bits */
	struct cvmx_mio_qlmx_cfg_cn61xx {
		u64 reserved_15_63 : 49;
		u64 prtmode : 1;
		u64 reserved_12_13 : 2;
		u64 qlm_spd : 4;
		u64 reserved_2_7 : 6;
		u64 qlm_cfg : 2;
	} cn61xx;
	/* CN66XX: no PRTMODE bit */
	struct cvmx_mio_qlmx_cfg_cn66xx {
		u64 reserved_12_63 : 52;
		u64 qlm_spd : 4;
		u64 reserved_4_7 : 4;
		u64 qlm_cfg : 4;
	} cn66xx;
	/* CN68XX: 3 QLM_CFG bits, no PRTMODE */
	struct cvmx_mio_qlmx_cfg_cn68xx {
		u64 reserved_12_63 : 52;
		u64 qlm_spd : 4;
		u64 reserved_3_7 : 5;
		u64 qlm_cfg : 3;
	} cn68xx;
	struct cvmx_mio_qlmx_cfg_cn68xx cn68xxp1;
	struct cvmx_mio_qlmx_cfg_cn61xx cnf71xx;
};

typedef union cvmx_mio_qlmx_cfg cvmx_mio_qlmx_cfg_t;
#endif

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,40 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Packet buffer defines.
*/
#ifndef __CVMX_PACKET_H__
#define __CVMX_PACKET_H__
/*
 * Packet buffer pointer in the PKI (Octeon III packet input) format.
 */
union cvmx_buf_ptr_pki {
	u64 u64;		/* Raw 64-bit value */
	struct {
		u64 size : 16;	/* Buffer size */
		u64 packet_outside_wqe : 1;
		u64 rsvd0 : 5;
		u64 addr : 42;	/* Buffer address */
	};
};

typedef union cvmx_buf_ptr_pki cvmx_buf_ptr_pki_t;
/**
* This structure defines a buffer pointer on Octeon
*/
/**
 * This structure defines a buffer pointer on Octeon
 * (legacy FPA/IPD format; compare cvmx_buf_ptr_pki above).
 */
union cvmx_buf_ptr {
	void *ptr;		/* Same bits viewed as a virtual pointer */
	u64 u64;		/* Raw 64-bit value */
	struct {
		u64 i : 1;
		u64 back : 4;	/* NOTE(review): presumably 128-byte units back
				 * to the buffer start -- confirm against HRM */
		u64 pool : 3;	/* FPA pool the buffer belongs to */
		u64 size : 16;	/* Buffer size */
		u64 addr : 40;	/* Buffer address */
	} s;
};

typedef union cvmx_buf_ptr cvmx_buf_ptr_t;
#endif /* __CVMX_PACKET_H__ */

View File

@ -0,0 +1,279 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef __CVMX_PCIE_H__
#define __CVMX_PCIE_H__
#define CVMX_PCIE_MAX_PORTS 4
#define CVMX_PCIE_PORTS \
((OCTEON_IS_MODEL(OCTEON_CN78XX) || OCTEON_IS_MODEL(OCTEON_CN73XX)) ? \
CVMX_PCIE_MAX_PORTS : \
(OCTEON_IS_MODEL(OCTEON_CN70XX) ? 3 : 2))
/*
* The physical memory base mapped by BAR1. 256MB at the end of the
* first 4GB.
*/
#define CVMX_PCIE_BAR1_PHYS_BASE ((1ull << 32) - (1ull << 28))
#define CVMX_PCIE_BAR1_PHYS_SIZE BIT_ULL(28)
/*
* The RC base of BAR1. gen1 has a 39-bit BAR2, gen2 has 41-bit BAR2,
* place BAR1 so it is the same for both.
*/
#define CVMX_PCIE_BAR1_RC_BASE BIT_ULL(41)
/**
 * Decomposition of the 64-bit (XKPHYS) addresses used to reach PCIe
 * space on Octeon. The three struct views give the address format for
 * each access type, selected by the PCIe SubDID field:
 * SubDID 1 = config space, 2 = IO space, 3-6 = memory space.
 */
typedef union {
	u64 u64;
	struct {
		u64 upper : 2;		 /* Normally 2 for XKPHYS */
		u64 reserved_49_61 : 13; /* Must be zero */
		u64 io : 1;		 /* 1 for IO space access */
		u64 did : 5;		 /* PCIe DID = 3 */
		u64 subdid : 3;		 /* PCIe SubDID = 1 */
		u64 reserved_38_39 : 2;	 /* Must be zero */
		u64 node : 2;		 /* Numa node number */
		u64 es : 2;		 /* Endian swap = 1 */
		u64 port : 2;		 /* PCIe port 0,1 */
		u64 reserved_29_31 : 3;	 /* Must be zero */
		u64 ty : 1;		 /* Config type 0/1 */
		u64 bus : 8;		 /* Target bus number */
		u64 dev : 5;		 /* Target device */
		u64 func : 3;		 /* Target function */
		u64 reg : 12;		 /* Config register offset */
	} config;
	struct {
		u64 upper : 2;		 /* Normally 2 for XKPHYS */
		u64 reserved_49_61 : 13; /* Must be zero */
		u64 io : 1;		 /* 1 for IO space access */
		u64 did : 5;		 /* PCIe DID = 3 */
		u64 subdid : 3;		 /* PCIe SubDID = 2 */
		u64 reserved_38_39 : 2;	 /* Must be zero */
		u64 node : 2;		 /* Numa node number */
		u64 es : 2;		 /* Endian swap = 1 */
		u64 port : 2;		 /* PCIe port 0,1 */
		u64 address : 32;	 /* PCIe IO address */
	} io;
	struct {
		u64 upper : 2;		 /* Normally 2 for XKPHYS */
		u64 reserved_49_61 : 13; /* Must be zero */
		u64 io : 1;		 /* 1 for IO space access */
		u64 did : 5;		 /* PCIe DID = 3 */
		u64 subdid : 3;		 /* PCIe SubDID = 3-6 */
		u64 reserved_38_39 : 2;	 /* Must be zero */
		u64 node : 2;		 /* Numa node number */
		u64 address : 36;	 /* PCIe Mem address */
	} mem;
} cvmx_pcie_address_t;
/**
* Return the Core virtual base address for PCIe IO access. IOs are
* read/written as an offset from this address.
*
* @param pcie_port PCIe port the IO is for
*
* @return 64bit Octeon IO base address for read/write
*/
u64 cvmx_pcie_get_io_base_address(int pcie_port);
/**
* Size of the IO address region returned at address
* cvmx_pcie_get_io_base_address()
*
* @param pcie_port PCIe port the IO is for
*
* @return Size of the IO window
*/
u64 cvmx_pcie_get_io_size(int pcie_port);
/**
* Return the Core virtual base address for PCIe MEM access. Memory is
* read/written as an offset from this address.
*
* @param pcie_port PCIe port the IO is for
*
* @return 64bit Octeon IO base address for read/write
*/
u64 cvmx_pcie_get_mem_base_address(int pcie_port);
/**
* Size of the Mem address region returned at address
* cvmx_pcie_get_mem_base_address()
*
* @param pcie_port PCIe port the IO is for
*
* @return Size of the Mem window
*/
u64 cvmx_pcie_get_mem_size(int pcie_port);
/**
* Initialize a PCIe port for use in host(RC) mode. It doesn't enumerate the bus.
*
* @param pcie_port PCIe port to initialize
*
* @return Zero on success
*/
int cvmx_pcie_rc_initialize(int pcie_port);
/**
* Shutdown a PCIe port and put it in reset
*
* @param pcie_port PCIe port to shutdown
*
* @return Zero on success
*/
int cvmx_pcie_rc_shutdown(int pcie_port);
/**
* Read 8bits from a Device's config space
*
* @param pcie_port PCIe port the device is on
* @param bus Sub bus
* @param dev Device ID
* @param fn Device sub function
* @param reg Register to access
*
* @return Result of the read
*/
u8 cvmx_pcie_config_read8(int pcie_port, int bus, int dev, int fn, int reg);
/**
* Read 16bits from a Device's config space
*
* @param pcie_port PCIe port the device is on
* @param bus Sub bus
* @param dev Device ID
* @param fn Device sub function
* @param reg Register to access
*
* @return Result of the read
*/
u16 cvmx_pcie_config_read16(int pcie_port, int bus, int dev, int fn, int reg);
/**
* Read 32bits from a Device's config space
*
* @param pcie_port PCIe port the device is on
* @param bus Sub bus
* @param dev Device ID
* @param fn Device sub function
* @param reg Register to access
*
* @return Result of the read
*/
u32 cvmx_pcie_config_read32(int pcie_port, int bus, int dev, int fn, int reg);
/**
* Write 8bits to a Device's config space
*
* @param pcie_port PCIe port the device is on
* @param bus Sub bus
* @param dev Device ID
* @param fn Device sub function
* @param reg Register to access
* @param val Value to write
*/
void cvmx_pcie_config_write8(int pcie_port, int bus, int dev, int fn, int reg, u8 val);
/**
* Write 16bits to a Device's config space
*
* @param pcie_port PCIe port the device is on
* @param bus Sub bus
* @param dev Device ID
* @param fn Device sub function
* @param reg Register to access
* @param val Value to write
*/
void cvmx_pcie_config_write16(int pcie_port, int bus, int dev, int fn, int reg, u16 val);
/**
* Write 32bits to a Device's config space
*
* @param pcie_port PCIe port the device is on
* @param bus Sub bus
* @param dev Device ID
* @param fn Device sub function
* @param reg Register to access
* @param val Value to write
*/
void cvmx_pcie_config_write32(int pcie_port, int bus, int dev, int fn, int reg, u32 val);
/**
* Read a PCIe config space register indirectly. This is used for
* registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
*
* @param pcie_port PCIe port to read from
* @param cfg_offset Address to read
*
* @return Value read
*/
u32 cvmx_pcie_cfgx_read(int pcie_port, u32 cfg_offset);
u32 cvmx_pcie_cfgx_read_node(int node, int pcie_port, u32 cfg_offset);
/**
* Write a PCIe config space register indirectly. This is used for
* registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
*
* @param pcie_port PCIe port to write to
* @param cfg_offset Address to write
* @param val Value to write
*/
void cvmx_pcie_cfgx_write(int pcie_port, u32 cfg_offset, u32 val);
void cvmx_pcie_cfgx_write_node(int node, int pcie_port, u32 cfg_offset, u32 val);
/**
 * Write a 32bit value to the Octeon NPEI register space
 *
 * @param address Address to write to
 * @param val Value to write
 *
 * NOTE(review): the address is XORed with 4 to select the other 32-bit
 * half of the 64-bit CSR slot -- presumably a big-endian word-swap for
 * the 32-bit NPEI CSRs; confirm against the HRM. The read-back after
 * the write forces the posted write to complete before returning.
 */
static inline void cvmx_pcie_npei_write32(u64 address, u32 val)
{
	cvmx_write64_uint32(address ^ 4, val);
	/* Read back so the write is no longer posted */
	cvmx_read64_uint32(address ^ 4);
}
/**
 * Read a 32bit value from the Octeon NPEI register space
 *
 * @param address Address to read
 * @return The result
 *
 * NOTE(review): uses the same "address ^ 4" word-swap as
 * cvmx_pcie_npei_write32().
 */
static inline u32 cvmx_pcie_npei_read32(u64 address)
{
	return cvmx_read64_uint32(address ^ 4);
}
/**
* Initialize a PCIe port for use in target(EP) mode.
*
* @param pcie_port PCIe port to initialize
*
* @return Zero on success
*/
int cvmx_pcie_ep_initialize(int pcie_port);
/**
* Wait for posted PCIe read/writes to reach the other side of
* the internal PCIe switch. This will insure that core
* read/writes are posted before anything after this function
* is called. This may be necessary when writing to memory that
* will later be read using the DMA/PKT engines.
*
* @param pcie_port PCIe port to wait for
*/
void cvmx_pcie_wait_for_pending(int pcie_port);
/**
* Returns if a PCIe port is in host or target mode.
*
* @param pcie_port PCIe port number (PEM number)
*
* @return 0 if PCIe port is in target mode, !0 if in host mode.
*/
int cvmx_pcie_is_host_mode(int pcie_port);
#endif

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,157 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Resource management for PKI resources.
*/
#ifndef __CVMX_PKI_RESOURCES_H__
#define __CVMX_PKI_RESOURCES_H__
/**
* This function allocates/reserves a style from pool of global styles per node.
* @param node node to allocate style from.
* @param style style to allocate, if -1 it will be allocated
first available style from style resource. If index is positive
number and in range, it will try to allocate specified style.
* @return style number on success, -1 on failure.
*/
int cvmx_pki_style_alloc(int node, int style);
/**
* This function allocates/reserves a cluster group from per node
cluster group resources.
* @param node node to allocate cluster group from.
@param cl_grp cluster group to allocate/reserve, if -1 ,
allocate any available cluster group.
* @return cluster group number or -1 on failure
*/
int cvmx_pki_cluster_grp_alloc(int node, int cl_grp);
/**
* This function allocates/reserves a cluster from per node
cluster resources.
* @param node node to allocate cluster group from.
@param cluster_mask mask of clusters to allocate/reserve, if -1 ,
allocate any available clusters.
* @param num_clusters number of clusters that will be allocated
*/
int cvmx_pki_cluster_alloc(int node, int num_clusters, u64 *cluster_mask);
/**
* This function allocates/reserves a pcam entry from node
* @param node node to allocate pcam entry from.
@param index index of pcam entry (0-191), if -1,
allocate any available pcam entry.
* @param bank pcam bank where to allocate/reserve pcan entry from
* @param cluster_mask mask of clusters from which pcam entry is needed.
 * @return pcam entry or -1 on failure
*/
int cvmx_pki_pcam_entry_alloc(int node, int index, int bank, u64 cluster_mask);
/**
* This function allocates/reserves QPG table entries per node.
* @param node node number.
* @param base_offset base_offset in qpg table. If -1, first available
qpg base_offset will be allocated. If base_offset is positive
number and in range, it will try to allocate specified base_offset.
@param count number of consecutive qpg entries to allocate. They will be consecutive
from base offset.
* @return qpg table base offset number on success, -1 on failure.
*/
int cvmx_pki_qpg_entry_alloc(int node, int base_offset, int count);
/**
* This function frees a style from pool of global styles per node.
* @param node node to free style from.
* @param style style to free
* @return 0 on success, -1 on failure.
*/
int cvmx_pki_style_free(int node, int style);
/**
* This function frees a cluster group from per node
cluster group resources.
* @param node node to free cluster group from.
@param cl_grp cluster group to free
* @return 0 on success or -1 on failure
*/
int cvmx_pki_cluster_grp_free(int node, int cl_grp);
/**
* This function frees QPG table entries per node.
* @param node node number.
* @param base_offset base_offset in qpg table. If -1, first available
* qpg base_offset will be allocated. If base_offset is positive
* number and in range, it will try to allocate specified base_offset.
* @param count number of consecutive qpg entries to allocate. They will be consecutive
* from base offset.
* @return qpg table base offset number on success, -1 on failure.
*/
int cvmx_pki_qpg_entry_free(int node, int base_offset, int count);
/**
* This function frees clusters from per node
clusters resources.
* @param node node to free clusters from.
* @param cluster_mask mask of clusters need freeing
* @return 0 on success or -1 on failure
*/
int cvmx_pki_cluster_free(int node, u64 cluster_mask);
/**
* This function frees a pcam entry from node
* @param node node to allocate pcam entry from.
@param index index of pcam entry (0-191) that needs to be freed.
* @param bank pcam bank where to free pcam entry from
* @param cluster_mask mask of clusters from which pcam entry is freed.
* @return 0 on success OR -1 on failure
*/
int cvmx_pki_pcam_entry_free(int node, int index, int bank, u64 cluster_mask);
/**
* This function allocates/reserves a bpid from pool of global bpid per node.
* @param node node to allocate bpid from.
* @param bpid bpid to allocate, if -1 it will be allocated
 * first available bpid from bpid resource. If index is positive
* number and in range, it will try to allocate specified bpid.
* @return bpid number on success,
* -1 on alloc failure.
* -2 on resource already reserved.
*/
int cvmx_pki_bpid_alloc(int node, int bpid);
/**
* This function frees a bpid from pool of global bpid per node.
* @param node node to free bpid from.
* @param bpid bpid to free
 * @return 0 on success, -1 on failure
*/
int cvmx_pki_bpid_free(int node, int bpid);
/**
* This function frees all the PKI software resources
* (clusters, styles, qpg_entry, pcam_entry etc) for the specified node
*/
/**
* This function allocates/reserves an index from pool of global MTAG-IDX per node.
* @param node node to allocate index from.
* @param idx index to allocate, if -1 it will be allocated
* @return MTAG index number on success,
* -1 on alloc failure.
* -2 on resource already reserved.
*/
int cvmx_pki_mtag_idx_alloc(int node, int idx);
/**
* This function frees an index from pool of global MTAG-IDX per node.
 * @param node node to free the index from.
 * @param idx index to free
 * @return 0 on success, -1 on failure
*/
int cvmx_pki_mtag_idx_free(int node, int idx);
void __cvmx_pki_global_rsrc_free(int node);
#endif /* __CVMX_PKI_RESOURCES_H__ */

View File

@ -0,0 +1,970 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Interface to the hardware Packet Input Data unit.
*/
#ifndef __CVMX_PKI_H__
#define __CVMX_PKI_H__
#include "cvmx-fpa3.h"
#include "cvmx-helper-util.h"
#include "cvmx-helper-cfg.h"
#include "cvmx-error.h"
/* PKI AURA and BPID count are equal to FPA AURA count */
#define CVMX_PKI_NUM_AURA (cvmx_fpa3_num_auras())
#define CVMX_PKI_NUM_BPID (cvmx_fpa3_num_auras())
#define CVMX_PKI_NUM_SSO_GROUP (cvmx_sso_num_xgrp())
#define CVMX_PKI_NUM_CLUSTER_GROUP_MAX 1
#define CVMX_PKI_NUM_CLUSTER_GROUP (cvmx_pki_num_cl_grp())
#define CVMX_PKI_NUM_CLUSTER (cvmx_pki_num_clusters())
/* FIXME: Reduce some of these values, convert to routines XXX */
#define CVMX_PKI_NUM_CHANNEL 4096
#define CVMX_PKI_NUM_PKIND 64
#define CVMX_PKI_NUM_INTERNAL_STYLE 256
#define CVMX_PKI_NUM_FINAL_STYLE 64
#define CVMX_PKI_NUM_QPG_ENTRY 2048
#define CVMX_PKI_NUM_MTAG_IDX (32 / 4) /* 32 registers grouped by 4*/
#define CVMX_PKI_NUM_LTYPE 32
#define CVMX_PKI_NUM_PCAM_BANK 2
#define CVMX_PKI_NUM_PCAM_ENTRY 192
#define CVMX_PKI_NUM_FRAME_CHECK 2
#define CVMX_PKI_NUM_BELTYPE 32
#define CVMX_PKI_MAX_FRAME_SIZE 65535
#define CVMX_PKI_FIND_AVAL_ENTRY (-1)
#define CVMX_PKI_CLUSTER_ALL 0xf
#ifdef CVMX_SUPPORT_SEPARATE_CLUSTER_CONFIG
#define CVMX_PKI_TOTAL_PCAM_ENTRY \
((CVMX_PKI_NUM_CLUSTER) * (CVMX_PKI_NUM_PCAM_BANK) * (CVMX_PKI_NUM_PCAM_ENTRY))
#else
#define CVMX_PKI_TOTAL_PCAM_ENTRY (CVMX_PKI_NUM_PCAM_BANK * CVMX_PKI_NUM_PCAM_ENTRY)
#endif
/*
 * Number of PKI parsing clusters on the running chip model:
 * two on CN73XX and CNF75XX, four on all other supported models.
 */
static inline unsigned int cvmx_pki_num_clusters(void)
{
	int is_two_cluster_chip = OCTEON_IS_MODEL(OCTEON_CN73XX) ||
				  OCTEON_IS_MODEL(OCTEON_CNF75XX);

	return is_two_cluster_chip ? 2 : 4;
}
/*
 * Number of PKI cluster groups on the running chip model:
 * one on CN73XX/CNF75XX/CN78XX, none otherwise.
 */
static inline unsigned int cvmx_pki_num_cl_grp(void)
{
	int has_cluster_group = OCTEON_IS_MODEL(OCTEON_CN73XX) ||
				OCTEON_IS_MODEL(OCTEON_CNF75XX) ||
				OCTEON_IS_MODEL(OCTEON_CN78XX);

	return has_cluster_group ? 1 : 0;
}
/* Initial parse mode for a pkind: which protocol layer parsing starts at. */
enum cvmx_pki_pkind_parse_mode {
	CVMX_PKI_PARSE_LA_TO_LG = 0,  /* Parse LA(L2) to LG */
	CVMX_PKI_PARSE_LB_TO_LG = 1,  /* Parse LB(custom) to LG */
	CVMX_PKI_PARSE_LC_TO_LG = 3,  /* Parse LC(L3) to LG */
	CVMX_PKI_PARSE_LG = 0x3f,     /* Parse LG */
	CVMX_PKI_PARSE_NOTHING = 0x7f /* Parse nothing */
};

/* Parse-mode override: skip parsing up to the named layer. */
enum cvmx_pki_parse_mode_chg {
	CVMX_PKI_PARSE_NO_CHG = 0x0,
	CVMX_PKI_PARSE_SKIP_TO_LB = 0x1,
	CVMX_PKI_PARSE_SKIP_TO_LC = 0x3,
	CVMX_PKI_PARSE_SKIP_TO_LD = 0x7,
	CVMX_PKI_PARSE_SKIP_TO_LG = 0x3f,
	CVMX_PKI_PARSE_SKIP_ALL = 0x7f,
};

/* L2 length check mode: exact match only, or equal-or-greater allowed. */
enum cvmx_pki_l2_len_mode { PKI_L2_LENCHK_EQUAL_GREATER = 0, PKI_L2_LENCHK_EQUAL_ONLY };

/* Where received packet data is placed: L2 cache vs. DRAM. */
enum cvmx_pki_cache_mode {
	CVMX_PKI_OPC_MODE_STT = 0LL,	  /* All blocks write through DRAM */
	CVMX_PKI_OPC_MODE_STF = 1LL,	  /* All blocks into L2 */
	CVMX_PKI_OPC_MODE_STF1_STT = 2LL, /* 1st block L2, rest DRAM */
	CVMX_PKI_OPC_MODE_STF2_STT = 3LL  /* 1st, 2nd blocks L2, rest DRAM */
};

/**
 * Tag type definitions (SSO work-queue entry tag types)
 */
enum cvmx_sso_tag_type {
	CVMX_SSO_TAG_TYPE_ORDERED = 0L,
	CVMX_SSO_TAG_TYPE_ATOMIC = 1L,
	CVMX_SSO_TAG_TYPE_UNTAGGED = 2L,
	CVMX_SSO_TAG_TYPE_EMPTY = 3L
};

/* Which packet field (if any) supplies the QPG QoS component. */
enum cvmx_pki_qpg_qos {
	CVMX_PKI_QPG_QOS_NONE = 0,
	CVMX_PKI_QPG_QOS_VLAN,
	CVMX_PKI_QPG_QOS_MPLS,
	CVMX_PKI_QPG_QOS_DSA_SRC,
	CVMX_PKI_QPG_QOS_DIFFSERV,
	CVMX_PKI_QPG_QOS_HIGIG,
};

/* Which VLAN header the WQE VLAN information is taken from. */
enum cvmx_pki_wqe_vlan { CVMX_PKI_USE_FIRST_VLAN = 0, CVMX_PKI_USE_SECOND_VLAN };

/**
 * Controls how the PKI statistics counters are handled
 * The PKI_STAT*_X registers can be indexed either by port kind (pkind), or
 * final style. (Does not apply to the PKI_STAT_INB* registers.)
 * 0 = X represents the packets pkind
 * 1 = X represents the low 6-bits of packets final style
 */
enum cvmx_pki_stats_mode { CVMX_PKI_STAT_MODE_PKIND, CVMX_PKI_STAT_MODE_STYLE };

/* Behavior on buffer exhaustion: drop the packet or wait for a buffer. */
enum cvmx_pki_fpa_wait { CVMX_PKI_DROP_PKT, CVMX_PKI_WAIT_PKT };
#define PKI_BELTYPE_E__NONE_M 0x0
#define PKI_BELTYPE_E__MISC_M 0x1
#define PKI_BELTYPE_E__IP4_M 0x2
#define PKI_BELTYPE_E__IP6_M 0x3
#define PKI_BELTYPE_E__TCP_M 0x4
#define PKI_BELTYPE_E__UDP_M 0x5
#define PKI_BELTYPE_E__SCTP_M 0x6
#define PKI_BELTYPE_E__SNAP_M 0x7
/* PKI_BELTYPE_E_t */
enum cvmx_pki_beltype {
CVMX_PKI_BELTYPE_NONE = PKI_BELTYPE_E__NONE_M,
CVMX_PKI_BELTYPE_MISC = PKI_BELTYPE_E__MISC_M,
CVMX_PKI_BELTYPE_IP4 = PKI_BELTYPE_E__IP4_M,
CVMX_PKI_BELTYPE_IP6 = PKI_BELTYPE_E__IP6_M,
CVMX_PKI_BELTYPE_TCP = PKI_BELTYPE_E__TCP_M,
CVMX_PKI_BELTYPE_UDP = PKI_BELTYPE_E__UDP_M,
CVMX_PKI_BELTYPE_SCTP = PKI_BELTYPE_E__SCTP_M,
CVMX_PKI_BELTYPE_SNAP = PKI_BELTYPE_E__SNAP_M,
CVMX_PKI_BELTYPE_MAX = CVMX_PKI_BELTYPE_SNAP
};
/* Min/max frame length bounds for one of the PKI frame checks. */
struct cvmx_pki_frame_len {
	u16 maxlen;
	u16 minlen;
};

/* One enable bit per packet field that can be folded into the SSO tag. */
struct cvmx_pki_tag_fields {
	u64 layer_g_src : 1;
	u64 layer_f_src : 1;
	u64 layer_e_src : 1;
	u64 layer_d_src : 1;
	u64 layer_c_src : 1;
	u64 layer_b_src : 1;
	u64 layer_g_dst : 1;
	u64 layer_f_dst : 1;
	u64 layer_e_dst : 1;
	u64 layer_d_dst : 1;
	u64 layer_c_dst : 1;
	u64 layer_b_dst : 1;
	u64 input_port : 1;
	u64 mpls_label : 1;
	u64 first_vlan : 1;
	u64 second_vlan : 1;
	u64 ip_prot_nexthdr : 1;
	u64 tag_sync : 1;
	u64 tag_spi : 1;
	u64 tag_gtp : 1;
	u64 tag_vni : 1;
};

/* Per-pkind parser enable bits (one per protocol/header type). */
struct cvmx_pki_pkind_parse {
	u64 mpls_en : 1;
	u64 inst_hdr : 1;
	u64 lg_custom : 1;
	u64 fulc_en : 1;	/* Fulcrum header */
	u64 dsa_en : 1;		/* DSA header */
	u64 hg2_en : 1;		/* HiGig2 header */
	u64 hg_en : 1;		/* HiGig header */
};

/* FPA3 pool setup parameters for packet buffers. */
struct cvmx_pki_pool_config {
	int pool_num;
	cvmx_fpa3_pool_t pool;
	u64 buffer_size;
	u64 buffer_count;
};

/* One QPG (QoS/port group) table entry: aura and SSO group selection. */
struct cvmx_pki_qpg_config {
	int qpg_base;
	int port_add;
	int aura_num;
	int grp_ok;		/* SSO group for good packets */
	int grp_bad;		/* SSO group for errored packets */
	int grptag_ok;
	int grptag_bad;
};

/* FPA3 aura setup parameters (aura on top of a backing pool). */
struct cvmx_pki_aura_config {
	int aura_num;
	int pool_num;
	cvmx_fpa3_pool_t pool;
	cvmx_fpa3_gaura_t aura;
	int buffer_count;
};

/* Mapping of parsing clusters to a cluster group. */
struct cvmx_pki_cluster_grp_config {
	int grp_num;
	u64 cluster_mask; /* Bit mask of cluster assigned to this cluster group */
};

/* SSO group scheduling parameters (priority/weight/affinity/core mask). */
struct cvmx_pki_sso_grp_config {
	int group;
	int priority;
	int weight;
	int affinity;
	u64 core_mask;
	u8 core_mask_set;
};
/* This is per style structure for configuring port parameters,
* it is kind of of profile which can be assigned to any port.
* If multiple ports are assigned same style be aware that modifying
* that style will modify the respective parameters for all the ports
* which are using this style
*/
/*
 * Per-style port parameter profile (shared by every port assigned this
 * style -- modifying it affects all of them).
 */
struct cvmx_pki_style_parm {
	bool ip6_udp_opt;
	bool lenerr_en;		/* Enable L2 length error check */
	bool maxerr_en;		/* Enable max-frame-length check */
	bool minerr_en;		/* Enable min-frame-length check */
	u8 lenerr_eqpad;
	u8 minmax_sel;		/* Which cvmx_pki_frame_len entry to use */
	bool qpg_dis_grptag;
	bool fcs_strip;		/* Strip FCS from the packet */
	bool fcs_chk;		/* Check FCS */
	bool rawdrp;
	bool force_drop;
	bool nodrop;
	bool qpg_dis_padd;
	bool qpg_dis_grp;
	bool qpg_dis_aura;
	u16 qpg_base;		/* Base index into the QPG table */
	enum cvmx_pki_qpg_qos qpg_qos;
	u8 qpg_port_sh;
	u8 qpg_port_msb;
	u8 apad_nip;
	u8 wqe_vs;
	enum cvmx_sso_tag_type tag_type;
	bool pkt_lend;		/* Packet data little-endian */
	u8 wqe_hsz;
	u16 wqe_skip;
	u16 first_skip;		/* Skip before packet data in first buffer */
	u16 later_skip;		/* Skip in subsequent buffers */
	enum cvmx_pki_cache_mode cache_mode;
	u8 dis_wq_dat;
	u64 mbuff_size;
	/* len_*: enable L2..LG header length checks */
	bool len_lg;
	bool len_lf;
	bool len_le;
	bool len_ld;
	bool len_lc;
	bool len_lb;
	/* csum_*: enable LB..LG checksum checks */
	bool csum_lg;
	bool csum_lf;
	bool csum_le;
	bool csum_ld;
	bool csum_lc;
	bool csum_lb;
};
/* This is per style structure for configuring port's tag configuration,
* it is kind of of profile which can be assigned to any port.
* If multiple ports are assigned same style be aware that modiying that style
* will modify the respective parameters for all the ports which are
* using this style */
/* Base-pointer selector for mask-tag extraction (start-of-packet or a
 * parsed layer pointer LA..LG / VLAN). */
enum cvmx_pki_mtag_ptrsel {
	CVMX_PKI_MTAG_PTRSEL_SOP = 0,
	CVMX_PKI_MTAG_PTRSEL_LA = 8,
	CVMX_PKI_MTAG_PTRSEL_LB = 9,
	CVMX_PKI_MTAG_PTRSEL_LC = 10,
	CVMX_PKI_MTAG_PTRSEL_LD = 11,
	CVMX_PKI_MTAG_PTRSEL_LE = 12,
	CVMX_PKI_MTAG_PTRSEL_LF = 13,
	CVMX_PKI_MTAG_PTRSEL_LG = 14,
	CVMX_PKI_MTAG_PTRSEL_VL = 15,
};

/* One mask-tag rule: which packet bytes (relative to base+offset) are
 * folded into the tag. */
struct cvmx_pki_mask_tag {
	bool enable;
	int base;   /* CVMX_PKI_MTAG_PTRSEL_XXX */
	int offset; /* Offset from base. */
	u64 val;    /* Bitmask: 1 = enable, 0 = disabled for each byte
		     * in the 64-byte array. */
};

/* Per-style tag configuration: field enables plus up to 4 mask tags. */
struct cvmx_pki_style_tag_cfg {
	struct cvmx_pki_tag_fields tag_fields;
	struct cvmx_pki_mask_tag mask_tag[4];
};

/* Complete style configuration: parameters plus tag setup. */
struct cvmx_pki_style_config {
	struct cvmx_pki_style_parm parm_cfg;
	struct cvmx_pki_style_tag_cfg tag_cfg;
};

/* Per-pkind configuration: parser enables, skips and initial style. */
struct cvmx_pki_pkind_config {
	u8 cluster_grp;
	bool fcs_pres;		/* FCS present on this pkind */
	struct cvmx_pki_pkind_parse parse_en;
	enum cvmx_pki_pkind_parse_mode initial_parse_mode;
	u8 fcs_skip;
	u8 inst_skip;
	int initial_style;
	bool custom_l2_hdr;
	u8 l2_scan_offset;
	u64 lg_scan_offset;
};

/* Complete per-port configuration: pkind plus style settings. */
struct cvmx_pki_port_config {
	struct cvmx_pki_pkind_config pkind_cfg;
	struct cvmx_pki_style_config style_cfg;
};
/* Global parser enable bits (one per protocol family). */
struct cvmx_pki_global_parse {
	u64 virt_pen : 1;	/* Virtualization (e.g. VXLAN) parsing */
	u64 clg_pen : 1;
	u64 cl2_pen : 1;
	u64 l4_pen : 1;
	u64 il3_pen : 1;	/* Inner L3 parsing */
	u64 l3_pen : 1;
	u64 mpls_pen : 1;
	u64 fulc_pen : 1;	/* Fulcrum header parsing */
	u64 dsa_pen : 1;	/* DSA header parsing */
	u64 hg_pen : 1;		/* HiGig header parsing */
};

/*
 * Tag "secret" values mixed into tag generation for src/dst fields.
 * NOTE(review): semantics inferred from field names (IPv6 vs. IPv4
 * dst/src) -- confirm against the PKI_TAG_SECRET register description.
 */
struct cvmx_pki_tag_sec {
	u16 dst6;
	u16 src6;
	u16 dst;
	u16 src;
};

/* Global (per-node) PKI configuration snapshot. */
struct cvmx_pki_global_config {
	u64 cluster_mask[CVMX_PKI_NUM_CLUSTER_GROUP_MAX];
	enum cvmx_pki_stats_mode stat_mode;
	enum cvmx_pki_fpa_wait fpa_wait;
	struct cvmx_pki_global_parse gbl_pen;
	struct cvmx_pki_tag_sec tag_secret;
	struct cvmx_pki_frame_len frm_len[CVMX_PKI_NUM_FRAME_CHECK];
	enum cvmx_pki_beltype ltype_map[CVMX_PKI_NUM_BELTYPE];
	int pki_enable;
};
#define CVMX_PKI_PCAM_TERM_E_NONE_M 0x0
#define CVMX_PKI_PCAM_TERM_E_L2_CUSTOM_M 0x2
#define CVMX_PKI_PCAM_TERM_E_HIGIGD_M 0x4
#define CVMX_PKI_PCAM_TERM_E_HIGIG_M 0x5
#define CVMX_PKI_PCAM_TERM_E_SMACH_M 0x8
#define CVMX_PKI_PCAM_TERM_E_SMACL_M 0x9
#define CVMX_PKI_PCAM_TERM_E_DMACH_M 0xA
#define CVMX_PKI_PCAM_TERM_E_DMACL_M 0xB
#define CVMX_PKI_PCAM_TERM_E_GLORT_M 0x12
#define CVMX_PKI_PCAM_TERM_E_DSA_M 0x13
#define CVMX_PKI_PCAM_TERM_E_ETHTYPE0_M 0x18
#define CVMX_PKI_PCAM_TERM_E_ETHTYPE1_M 0x19
#define CVMX_PKI_PCAM_TERM_E_ETHTYPE2_M 0x1A
#define CVMX_PKI_PCAM_TERM_E_ETHTYPE3_M 0x1B
#define CVMX_PKI_PCAM_TERM_E_MPLS0_M 0x1E
#define CVMX_PKI_PCAM_TERM_E_L3_SIPHH_M 0x1F
#define CVMX_PKI_PCAM_TERM_E_L3_SIPMH_M 0x20
#define CVMX_PKI_PCAM_TERM_E_L3_SIPML_M 0x21
#define CVMX_PKI_PCAM_TERM_E_L3_SIPLL_M 0x22
#define CVMX_PKI_PCAM_TERM_E_L3_FLAGS_M 0x23
#define CVMX_PKI_PCAM_TERM_E_L3_DIPHH_M 0x24
#define CVMX_PKI_PCAM_TERM_E_L3_DIPMH_M 0x25
#define CVMX_PKI_PCAM_TERM_E_L3_DIPML_M 0x26
#define CVMX_PKI_PCAM_TERM_E_L3_DIPLL_M 0x27
#define CVMX_PKI_PCAM_TERM_E_LD_VNI_M 0x28
#define CVMX_PKI_PCAM_TERM_E_IL3_FLAGS_M 0x2B
#define CVMX_PKI_PCAM_TERM_E_LF_SPI_M 0x2E
#define CVMX_PKI_PCAM_TERM_E_L4_SPORT_M 0x2f
#define CVMX_PKI_PCAM_TERM_E_L4_PORT_M 0x30
#define CVMX_PKI_PCAM_TERM_E_LG_CUSTOM_M 0x39
/*
 * PCAM match-field ("term") selectors used in struct cvmx_pki_pcam_input.
 * Values mirror the hardware CVMX_PKI_PCAM_TERM_E_*_M encodings above.
 */
enum cvmx_pki_term {
	CVMX_PKI_PCAM_TERM_NONE = CVMX_PKI_PCAM_TERM_E_NONE_M,
	CVMX_PKI_PCAM_TERM_L2_CUSTOM = CVMX_PKI_PCAM_TERM_E_L2_CUSTOM_M,
	CVMX_PKI_PCAM_TERM_HIGIGD = CVMX_PKI_PCAM_TERM_E_HIGIGD_M,
	CVMX_PKI_PCAM_TERM_HIGIG = CVMX_PKI_PCAM_TERM_E_HIGIG_M,
	CVMX_PKI_PCAM_TERM_SMACH = CVMX_PKI_PCAM_TERM_E_SMACH_M,
	CVMX_PKI_PCAM_TERM_SMACL = CVMX_PKI_PCAM_TERM_E_SMACL_M,
	CVMX_PKI_PCAM_TERM_DMACH = CVMX_PKI_PCAM_TERM_E_DMACH_M,
	CVMX_PKI_PCAM_TERM_DMACL = CVMX_PKI_PCAM_TERM_E_DMACL_M,
	CVMX_PKI_PCAM_TERM_GLORT = CVMX_PKI_PCAM_TERM_E_GLORT_M,
	CVMX_PKI_PCAM_TERM_DSA = CVMX_PKI_PCAM_TERM_E_DSA_M,
	CVMX_PKI_PCAM_TERM_ETHTYPE0 = CVMX_PKI_PCAM_TERM_E_ETHTYPE0_M,
	CVMX_PKI_PCAM_TERM_ETHTYPE1 = CVMX_PKI_PCAM_TERM_E_ETHTYPE1_M,
	CVMX_PKI_PCAM_TERM_ETHTYPE2 = CVMX_PKI_PCAM_TERM_E_ETHTYPE2_M,
	CVMX_PKI_PCAM_TERM_ETHTYPE3 = CVMX_PKI_PCAM_TERM_E_ETHTYPE3_M,
	CVMX_PKI_PCAM_TERM_MPLS0 = CVMX_PKI_PCAM_TERM_E_MPLS0_M,
	CVMX_PKI_PCAM_TERM_L3_SIPHH = CVMX_PKI_PCAM_TERM_E_L3_SIPHH_M,
	CVMX_PKI_PCAM_TERM_L3_SIPMH = CVMX_PKI_PCAM_TERM_E_L3_SIPMH_M,
	CVMX_PKI_PCAM_TERM_L3_SIPML = CVMX_PKI_PCAM_TERM_E_L3_SIPML_M,
	CVMX_PKI_PCAM_TERM_L3_SIPLL = CVMX_PKI_PCAM_TERM_E_L3_SIPLL_M,
	CVMX_PKI_PCAM_TERM_L3_FLAGS = CVMX_PKI_PCAM_TERM_E_L3_FLAGS_M,
	CVMX_PKI_PCAM_TERM_L3_DIPHH = CVMX_PKI_PCAM_TERM_E_L3_DIPHH_M,
	CVMX_PKI_PCAM_TERM_L3_DIPMH = CVMX_PKI_PCAM_TERM_E_L3_DIPMH_M,
	CVMX_PKI_PCAM_TERM_L3_DIPML = CVMX_PKI_PCAM_TERM_E_L3_DIPML_M,
	CVMX_PKI_PCAM_TERM_L3_DIPLL = CVMX_PKI_PCAM_TERM_E_L3_DIPLL_M,
	CVMX_PKI_PCAM_TERM_LD_VNI = CVMX_PKI_PCAM_TERM_E_LD_VNI_M,
	CVMX_PKI_PCAM_TERM_IL3_FLAGS = CVMX_PKI_PCAM_TERM_E_IL3_FLAGS_M,
	CVMX_PKI_PCAM_TERM_LF_SPI = CVMX_PKI_PCAM_TERM_E_LF_SPI_M,
	CVMX_PKI_PCAM_TERM_L4_PORT = CVMX_PKI_PCAM_TERM_E_L4_PORT_M,
	CVMX_PKI_PCAM_TERM_L4_SPORT = CVMX_PKI_PCAM_TERM_E_L4_SPORT_M,
	CVMX_PKI_PCAM_TERM_LG_CUSTOM = CVMX_PKI_PCAM_TERM_E_LG_CUSTOM_M
};
#define CVMX_PKI_DMACH_SHIFT 32
#define CVMX_PKI_DMACH_MASK cvmx_build_mask(16)
#define CVMX_PKI_DMACL_MASK CVMX_PKI_DATA_MASK_32
#define CVMX_PKI_DATA_MASK_32 cvmx_build_mask(32)
#define CVMX_PKI_DATA_MASK_16 cvmx_build_mask(16)
#define CVMX_PKI_DMAC_MATCH_EXACT cvmx_build_mask(48)
/*
 * One PCAM match key: the style, the term (field) selector and the data
 * value, each paired with a mask selecting which bits must match.
 */
struct cvmx_pki_pcam_input {
	u64 style;
	u64 style_mask; /* bits: 1-match, 0-dont care */
	enum cvmx_pki_term field;
	u32 field_mask; /* bits: 1-match, 0-dont care */
	u64 data;
	u64 data_mask; /* bits: 1-match, 0-dont care */
};
/*
 * Action applied when a PCAM entry matches; passed to
 * cvmx_pki_pcam_write_entry() together with the input key.
 */
struct cvmx_pki_pcam_action {
	enum cvmx_pki_parse_mode_chg parse_mode_chg;
	enum cvmx_pki_layer_type layer_type_set;
	int style_add;		/* value added to the current style */
	int parse_flag_set;
	int pointer_advance;	/* parse-pointer advance, in bytes -- TODO confirm units */
};
/*
 * One PCAM table entry: its location (entry_num within the clusters in
 * cluster_mask) plus the match key and the action taken on a match.
 */
struct cvmx_pki_pcam_config {
	int in_use;		/* non-zero when this entry is occupied */
	int entry_num;
	u64 cluster_mask;
	struct cvmx_pki_pcam_input pcam_input;
	struct cvmx_pki_pcam_action pcam_action;
};
/**
* Status statistics for a port
*/
struct cvmx_pki_port_stats {
u64 dropped_octets;
u64 dropped_packets;
u64 pci_raw_packets;
u64 octets;
u64 packets;
u64 multicast_packets;
u64 broadcast_packets;
u64 len_64_packets;
u64 len_65_127_packets;
u64 len_128_255_packets;
u64 len_256_511_packets;
u64 len_512_1023_packets;
u64 len_1024_1518_packets;
u64 len_1519_max_packets;
u64 fcs_align_err_packets;
u64 runt_packets;
u64 runt_crc_packets;
u64 oversize_packets;
u64 oversize_crc_packets;
u64 inb_packets;
u64 inb_octets;
u64 inb_errors;
u64 mcast_l2_red_packets;
u64 bcast_l2_red_packets;
u64 mcast_l3_red_packets;
u64 bcast_l3_red_packets;
};
/**
* PKI Packet Instruction Header Structure (PKI_INST_HDR_S)
*/
typedef union {
u64 u64;
struct {
u64 w : 1; /* INST_HDR size: 0 = 2 bytes, 1 = 4 or 8 bytes */
u64 raw : 1; /* RAW packet indicator in WQE[RAW]: 1 = enable */
u64 utag : 1; /* Use INST_HDR[TAG] to compute WQE[TAG]: 1 = enable */
u64 uqpg : 1; /* Use INST_HDR[QPG] to compute QPG: 1 = enable */
u64 rsvd1 : 1;
u64 pm : 3; /* Packet parsing mode. Legal values = 0x0..0x7 */
u64 sl : 8; /* Number of bytes in INST_HDR. */
/* The following fields are not present, if INST_HDR[W] = 0: */
u64 utt : 1; /* Use INST_HDR[TT] to compute WQE[TT]: 1 = enable */
u64 tt : 2; /* INST_HDR[TT] => WQE[TT], if INST_HDR[UTT] = 1 */
u64 rsvd2 : 2;
u64 qpg : 11; /* INST_HDR[QPG] => QPG, if INST_HDR[UQPG] = 1 */
u64 tag : 32; /* INST_HDR[TAG] => WQE[TAG], if INST_HDR[UTAG] = 1 */
} s;
} cvmx_pki_inst_hdr_t;
/**
 * This function assigns clusters to a group; a pkind can later be
 * configured to use that group depending on the number of clusters the
 * pkind would use. A given cluster can only be enabled in a single
 * cluster group. The number of clusters assigned to the group determines
 * how many engines can work in parallel to process the packet. Each
 * cluster can process x MPPS.
 *
 * @param node Node
 * @param cluster_group Group to attach clusters to.
 * @param cluster_mask The mask of clusters which needs to be assigned to the group.
 *
 * @return 0 on success, -1 if cluster_group is out of range.
 */
static inline int cvmx_pki_attach_cluster_to_group(int node, u64 cluster_group, u64 cluster_mask)
{
	cvmx_pki_icgx_cfg_t pki_cl_grp;

	if (cluster_group >= CVMX_PKI_NUM_CLUSTER_GROUP) {
		/* Add missing '\n' so the message matches the other debug() calls */
		debug("ERROR: config cluster group %d\n", (int)cluster_group);
		return -1;
	}
	/* Read-modify-write the cluster mask of the selected group */
	pki_cl_grp.u64 = cvmx_read_csr_node(node, CVMX_PKI_ICGX_CFG(cluster_group));
	pki_cl_grp.s.clusters = cluster_mask;
	cvmx_write_csr_node(node, CVMX_PKI_ICGX_CFG(cluster_group), pki_cl_grp.u64);
	return 0;
}
/*
 * Copy the global parser-enable bits from @gbl_pen into the PKI_GBL_PEN
 * register of @node, preserving all other register bits.
 */
static inline void cvmx_pki_write_global_parse(int node, struct cvmx_pki_global_parse gbl_pen)
{
	cvmx_pki_gbl_pen_t reg;

	reg.u64 = cvmx_read_csr_node(node, CVMX_PKI_GBL_PEN);
	reg.s.hg_pen = gbl_pen.hg_pen;
	reg.s.dsa_pen = gbl_pen.dsa_pen;
	reg.s.fulc_pen = gbl_pen.fulc_pen;
	reg.s.mpls_pen = gbl_pen.mpls_pen;
	reg.s.l3_pen = gbl_pen.l3_pen;
	reg.s.il3_pen = gbl_pen.il3_pen;
	reg.s.l4_pen = gbl_pen.l4_pen;
	reg.s.cl2_pen = gbl_pen.cl2_pen;
	reg.s.clg_pen = gbl_pen.clg_pen;
	reg.s.virt_pen = gbl_pen.virt_pen;
	cvmx_write_csr_node(node, CVMX_PKI_GBL_PEN, reg.u64);
}
/*
 * Program the tag secret values from @tag_secret into the PKI_TAG_SECRET
 * register of @node, preserving all other register bits.
 */
static inline void cvmx_pki_write_tag_secret(int node, struct cvmx_pki_tag_sec tag_secret)
{
	cvmx_pki_tag_secret_t reg;

	reg.u64 = cvmx_read_csr_node(node, CVMX_PKI_TAG_SECRET);
	reg.s.src = tag_secret.src;
	reg.s.dst = tag_secret.dst;
	reg.s.src6 = tag_secret.src6;
	reg.s.dst6 = tag_secret.dst6;
	cvmx_write_csr_node(node, CVMX_PKI_TAG_SECRET, reg.u64);
}
/*
 * Map layer type @layer to backend type @backend in the PKI_LTYPEX_MAP
 * register of @node. Out-of-range arguments are rejected with a debug
 * message and no register access.
 */
static inline void cvmx_pki_write_ltype_map(int node, enum cvmx_pki_layer_type layer,
					    enum cvmx_pki_beltype backend)
{
	cvmx_pki_ltypex_map_t map_reg;

	if (layer > CVMX_PKI_LTYPE_E_MAX || backend > CVMX_PKI_BELTYPE_MAX) {
		debug("ERROR: invalid ltype beltype mapping\n");
		return;
	}
	map_reg.u64 = cvmx_read_csr_node(node, CVMX_PKI_LTYPEX_MAP(layer));
	map_reg.s.beltype = backend;
	cvmx_write_csr_node(node, CVMX_PKI_LTYPEX_MAP(layer), map_reg.u64);
}
/**
* This function enables the cluster group to start parsing.
*
* @param node Node number.
* @param cl_grp Cluster group to enable parsing.
*/
static inline int cvmx_pki_parse_enable(int node, unsigned int cl_grp)
{
cvmx_pki_icgx_cfg_t pki_cl_grp;
if (cl_grp >= CVMX_PKI_NUM_CLUSTER_GROUP) {
debug("ERROR: pki parse en group %d", (int)cl_grp);
return -1;
}
pki_cl_grp.u64 = cvmx_read_csr_node(node, CVMX_PKI_ICGX_CFG(cl_grp));
pki_cl_grp.s.pena = 1;
cvmx_write_csr_node(node, CVMX_PKI_ICGX_CFG(cl_grp), pki_cl_grp.u64);
return 0;
}
/**
* This function enables the PKI to send bpid level backpressure to CN78XX inputs.
*
* @param node Node number.
*/
static inline void cvmx_pki_enable_backpressure(int node)
{
	cvmx_pki_buf_ctl_t buf_ctl;

	/* Set PKI_BUF_CTL[pbp_en], leaving all other control bits intact */
	buf_ctl.u64 = cvmx_read_csr_node(node, CVMX_PKI_BUF_CTL);
	buf_ctl.s.pbp_en = 1;
	cvmx_write_csr_node(node, CVMX_PKI_BUF_CTL, buf_ctl.u64);
}
/**
* Clear the statistics counters for a port.
*
* @param node Node number.
* @param port Port number (ipd_port) to get statistics for.
* Make sure PKI_STATS_CTL:mode is set to 0 for collecting per port/pkind stats.
*/
void cvmx_pki_clear_port_stats(int node, u64 port);
/**
* Get the status counters for index from PKI.
*
* @param node Node number.
* @param index PKIND number, if PKI_STATS_CTL:mode = 0 or
* style(flow) number, if PKI_STATS_CTL:mode = 1
* @param status Where to put the results.
*/
void cvmx_pki_get_stats(int node, int index, struct cvmx_pki_port_stats *status);
/**
* Get the statistics counters for a port.
*
* @param node Node number
* @param port Port number (ipd_port) to get statistics for.
* Make sure PKI_STATS_CTL:mode is set to 0 for collecting per port/pkind stats.
* @param status Where to put the results.
*/
static inline void cvmx_pki_get_port_stats(int node, u64 port, struct cvmx_pki_port_stats *status)
{
	/* Translate (node, port) to the port's pknd, then fetch stats by pknd. */
	int xipd = cvmx_helper_node_to_ipd_port(node, port);
	int xiface = cvmx_helper_get_interface_num(xipd);
	/* NOTE(review): index is derived from the raw 'port', not the
	 * node-qualified 'xipd' computed above -- confirm this is intentional. */
	int index = cvmx_helper_get_interface_index_num(port);
	int pknd = cvmx_helper_get_pknd(xiface, index);

	cvmx_pki_get_stats(node, pknd, status);
}
/**
* Get the statistics counters for a flow represented by style in PKI.
*
* @param node Node number.
* @param style_num Style number to get statistics for.
* Make sure PKI_STATS_CTL:mode is set to 1 for collecting per style/flow stats.
* @param status Where to put the results.
*/
static inline void cvmx_pki_get_flow_stats(int node, u64 style_num,
					   struct cvmx_pki_port_stats *status)
{
	/* In per-style stats mode the style number indexes the stats directly. */
	cvmx_pki_get_stats(node, style_num, status);
}
/**
* Show integrated PKI configuration.
*
* @param node node number
*/
int cvmx_pki_config_dump(unsigned int node);
/**
* Show integrated PKI statistics.
*
* @param node node number
*/
int cvmx_pki_stats_dump(unsigned int node);
/**
* Clear PKI statistics.
*
* @param node node number
*/
void cvmx_pki_stats_clear(unsigned int node);
/**
* This function enables PKI.
*
* @param node node to enable pki in.
*/
void cvmx_pki_enable(int node);
/**
* This function disables PKI.
*
* @param node node to disable pki in.
*/
void cvmx_pki_disable(int node);
/**
* This function soft resets PKI.
*
* @param node node to enable pki in.
*/
void cvmx_pki_reset(int node);
/**
* This function sets the clusters in PKI.
*
* @param node node to set clusters in.
*/
int cvmx_pki_setup_clusters(int node);
/**
* This function reads global configuration of PKI block.
*
* @param node Node number.
* @param gbl_cfg Pointer to struct to read global configuration
*/
void cvmx_pki_read_global_config(int node, struct cvmx_pki_global_config *gbl_cfg);
/**
* This function writes global configuration of PKI into hw.
*
* @param node Node number.
* @param gbl_cfg Pointer to struct to global configuration
*/
void cvmx_pki_write_global_config(int node, struct cvmx_pki_global_config *gbl_cfg);
/**
* This function reads per pkind parameters in hardware which defines how
* the incoming packet is processed.
*
* @param node Node number.
* @param pkind PKI supports a large number of incoming interfaces and packets
* arriving on different interfaces or channels may want to be processed
* differently. PKI uses the pkind to determine how the incoming packet
* is processed.
 * @param pkind_cfg Pointer to struct containing pkind configuration read
* from hardware.
*/
int cvmx_pki_read_pkind_config(int node, int pkind, struct cvmx_pki_pkind_config *pkind_cfg);
/**
* This function writes per pkind parameters in hardware which defines how
* the incoming packet is processed.
*
* @param node Node number.
* @param pkind PKI supports a large number of incoming interfaces and packets
* arriving on different interfaces or channels may want to be processed
* differently. PKI uses the pkind to determine how the incoming packet
* is processed.
 * @param pkind_cfg Pointer to struct containing pkind configuration that needs
* to be written in hardware.
*/
int cvmx_pki_write_pkind_config(int node, int pkind, struct cvmx_pki_pkind_config *pkind_cfg);
/**
* This function reads parameters associated with tag configuration in hardware.
*
* @param node Node number.
* @param style Style to configure tag for.
* @param cluster_mask Mask of clusters to configure the style for.
* @param tag_cfg Pointer to tag configuration struct.
*/
void cvmx_pki_read_tag_config(int node, int style, u64 cluster_mask,
struct cvmx_pki_style_tag_cfg *tag_cfg);
/**
* This function writes/configures parameters associated with tag
* configuration in hardware.
*
* @param node Node number.
* @param style Style to configure tag for.
* @param cluster_mask Mask of clusters to configure the style for.
 * @param tag_cfg Pointer to tag configuration struct.
*/
void cvmx_pki_write_tag_config(int node, int style, u64 cluster_mask,
struct cvmx_pki_style_tag_cfg *tag_cfg);
/**
* This function reads parameters associated with style in hardware.
*
* @param node Node number.
* @param style Style to read from.
* @param cluster_mask Mask of clusters style belongs to.
* @param style_cfg Pointer to style config struct.
*/
void cvmx_pki_read_style_config(int node, int style, u64 cluster_mask,
struct cvmx_pki_style_config *style_cfg);
/**
* This function writes/configures parameters associated with style in hardware.
*
* @param node Node number.
* @param style Style to configure.
* @param cluster_mask Mask of clusters to configure the style for.
* @param style_cfg Pointer to style config struct.
*/
void cvmx_pki_write_style_config(int node, u64 style, u64 cluster_mask,
struct cvmx_pki_style_config *style_cfg);
/**
* This function reads qpg entry at specified offset from qpg table
*
* @param node Node number.
* @param offset Offset in qpg table to read from.
* @param qpg_cfg Pointer to structure containing qpg values
*/
int cvmx_pki_read_qpg_entry(int node, int offset, struct cvmx_pki_qpg_config *qpg_cfg);
/**
* This function writes qpg entry at specified offset in qpg table
*
* @param node Node number.
* @param offset Offset in qpg table to write to.
* @param qpg_cfg Pointer to stricture containing qpg values.
*/
void cvmx_pki_write_qpg_entry(int node, int offset, struct cvmx_pki_qpg_config *qpg_cfg);
/**
* This function writes pcam entry at given offset in pcam table in hardware
*
* @param node Node number.
* @param index Offset in pcam table.
* @param cluster_mask Mask of clusters in which to write pcam entry.
* @param input Input keys to pcam match passed as struct.
* @param action PCAM match action passed as struct
*/
int cvmx_pki_pcam_write_entry(int node, int index, u64 cluster_mask,
struct cvmx_pki_pcam_input input, struct cvmx_pki_pcam_action action);
/**
* Configures the channel which will receive backpressure from the specified bpid.
* Each channel listens for backpressure on a specific bpid.
* Each bpid can backpressure multiple channels.
* @param node Node number.
* @param bpid BPID from which channel will receive backpressure.
 * @param channel Channel number to receive backpressure.
*/
int cvmx_pki_write_channel_bpid(int node, int channel, int bpid);
/**
* Configures the bpid on which, specified channel will
* assert backpressure.
* Each bpid receives backpressure from auras.
* Multiple auras can backpressure single bpid.
* @param node Node number.
* @param aura Number which will assert backpressure on that bpid.
* @param bpid To assert backpressure on.
*/
int cvmx_pki_write_aura_bpid(int node, int aura, int bpid);
/**
 * Enables/disables QoS (RED drop, tail drop & backpressure) for the PKI aura.
*
* @param node Node number
* @param aura To enable/disable QoS on.
* @param ena_red Enable/Disable RED drop between pass and drop level
* 1-enable 0-disable
* @param ena_drop Enable/disable tail drop when max drop level exceeds
* 1-enable 0-disable
* @param ena_bp Enable/Disable asserting backpressure on bpid when
* max DROP level exceeds.
* 1-enable 0-disable
*/
int cvmx_pki_enable_aura_qos(int node, int aura, bool ena_red, bool ena_drop, bool ena_bp);
/**
* This function gives the initial style used by that pkind.
*
* @param node Node number.
* @param pkind PKIND number.
*/
int cvmx_pki_get_pkind_style(int node, int pkind);
/**
* This function sets the wqe buffer mode. First packet data buffer can reside
 * either in the same buffer as the WQE OR it can go in a separate buffer. If the
 * latter mode is used, make sure software allocates enough buffers, since the
 * WQE is now separate from the packet data.
*
* @param node Node number.
* @param style Style to configure.
* @param pkt_outside_wqe
* 0 = The packet link pointer will be at word [FIRST_SKIP] immediately
* followed by packet data, in the same buffer as the work queue entry.
* 1 = The packet link pointer will be at word [FIRST_SKIP] in a new
* buffer separate from the work queue entry. Words following the
* WQE in the same cache line will be zeroed, other lines in the
* buffer will not be modified and will retain stale data (from the
* buffers previous use). This setting may decrease the peak PKI
* performance by up to half on small packets.
*/
void cvmx_pki_set_wqe_mode(int node, u64 style, bool pkt_outside_wqe);
/**
* This function sets the Packet mode of all ports and styles to little-endian.
* It Changes write operations of packet data to L2C to
* be in little-endian. Does not change the WQE header format, which is
* properly endian neutral.
*
* @param node Node number.
* @param style Style to configure.
*/
void cvmx_pki_set_little_endian(int node, u64 style);
/**
* Enables/Disables L2 length error check and max & min frame length checks.
*
* @param node Node number.
* @param pknd PKIND to disable error for.
* @param l2len_err L2 length error check enable.
* @param maxframe_err Max frame error check enable.
* @param minframe_err Min frame error check enable.
 * 1 -- Enable error checks
* 0 -- Disable error checks
*/
void cvmx_pki_endis_l2_errs(int node, int pknd, bool l2len_err, bool maxframe_err,
bool minframe_err);
/**
* Enables/Disables fcs check and fcs stripping on the pkind.
*
* @param node Node number.
* @param pknd PKIND to apply settings on.
* @param fcs_chk Enable/disable fcs check.
* 1 -- enable fcs error check.
* 0 -- disable fcs error check.
* @param fcs_strip Strip L2 FCS bytes from packet, decrease WQE[LEN] by 4 bytes
* 1 -- strip L2 FCS.
* 0 -- Do not strip L2 FCS.
*/
void cvmx_pki_endis_fcs_check(int node, int pknd, bool fcs_chk, bool fcs_strip);
/**
* This function shows the qpg table entries, read directly from hardware.
*
* @param node Node number.
* @param num_entry Number of entries to print.
*/
void cvmx_pki_show_qpg_entries(int node, u16 num_entry);
/**
* This function shows the pcam table in raw format read directly from hardware.
*
* @param node Node number.
*/
void cvmx_pki_show_pcam_entries(int node);
/**
* This function shows the valid entries in readable format,
* read directly from hardware.
*
* @param node Node number.
*/
void cvmx_pki_show_valid_pcam_entries(int node);
/**
* This function shows the pkind attributes in readable format,
* read directly from hardware.
* @param node Node number.
* @param pkind PKIND number to print.
*/
void cvmx_pki_show_pkind_attributes(int node, int pkind);
/**
* @INTERNAL
* This function is called by cvmx_helper_shutdown() to extract all FPA buffers
* out of the PKI. After this function completes, all FPA buffers that were
* prefetched by PKI will be in the appropriate FPA pool.
* This functions does not reset the PKI.
* WARNING: It is very important that PKI be reset soon after a call to this function.
*
* @param node Node number.
*/
void __cvmx_pki_free_ptr(int node);
#endif

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,43 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef __CVMX_INTERNAL_PORTS_RANGE__
#define __CVMX_INTERNAL_PORTS_RANGE__
/*
* Allocated a block of internal ports for the specified interface/port
*
* @param interface the interface for which the internal ports are requested
* @param port the index of the port within in the interface for which the internal ports
* are requested.
* @param count the number of internal ports requested
*
* @return 0 on success
* -1 on failure
*/
int cvmx_pko_internal_ports_alloc(int interface, int port, u64 count);
/*
* Free the internal ports associated with the specified interface/port
*
* @param interface the interface for which the internal ports are requested
* @param port the index of the port within in the interface for which the internal ports
* are requested.
*
* @return 0 on success
* -1 on failure
*/
int cvmx_pko_internal_ports_free(int interface, int port);
/*
* Frees up all the allocated internal ports.
*/
void cvmx_pko_internal_ports_range_free_all(void);
void cvmx_pko_internal_ports_range_show(void);
int __cvmx_pko_internal_ports_range_init(void);
#endif

View File

@ -0,0 +1,175 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef __CVMX_PKO3_QUEUE_H__
#define __CVMX_PKO3_QUEUE_H__
/**
* @INTERNAL
*
* Find or allocate global port/dq map table
* which is a named table, contains entries for
* all possible OCI nodes.
*
* The table global pointer is stored in core-local variable
* so that every core will call this function once, on first use.
*/
int __cvmx_pko3_dq_table_setup(void);
/*
* Get the base Descriptor Queue number for an IPD port on the local node
*/
int cvmx_pko3_get_queue_base(int ipd_port);
/*
* Get the number of Descriptor Queues assigned for an IPD port
*/
int cvmx_pko3_get_queue_num(int ipd_port);
/**
* Get L1/Port Queue number assigned to interface port.
*
* @param xiface is interface number.
* @param index is port index.
*/
int cvmx_pko3_get_port_queue(int xiface, int index);
/*
* Configure L3 through L5 Scheduler Queues and Descriptor Queues
*
* The Scheduler Queues in Levels 3 to 5 and Descriptor Queues are
* configured one-to-one or many-to-one to a single parent Scheduler
* Queues. The level of the parent SQ is specified in an argument,
* as well as the number of children to attach to the specific parent.
* The children can have fair round-robin or priority-based scheduling
* when multiple children are assigned a single parent.
*
* @param node is the OCI node location for the queues to be configured
* @param parent_level is the level of the parent queue, 2 to 5.
* @param parent_queue is the number of the parent Scheduler Queue
* @param child_base is the number of the first child SQ or DQ to assign to
* @param parent
* @param child_count is the number of consecutive children to assign
* @param stat_prio_count is the priority setting for the children L2 SQs
*
* If <stat_prio_count> is -1, the Ln children will have equal Round-Robin
 * relationship with each other. If <stat_prio_count> is 0, all Ln children
* will be arranged in Weighted-Round-Robin, with the first having the most
* precedence. If <stat_prio_count> is between 1 and 8, it indicates how
* many children will have static priority settings (with the first having
* the most precedence), with the remaining Ln children having WRR scheduling.
*
* @returns 0 on success, -1 on failure.
*
* Note: this function supports the configuration of node-local unit.
*/
int cvmx_pko3_sq_config_children(unsigned int node, unsigned int parent_level,
unsigned int parent_queue, unsigned int child_base,
unsigned int child_count, int stat_prio_count);
/*
* @INTERNAL
 * Register a range of Descriptor Queues with an interface port
*
 * This function populates the DQ-to-IPD translation table
* used by the application to retrieve the DQ range (typically ordered
* by priority) for a given IPD-port, which is either a physical port,
* or a channel on a channelized interface (i.e. ILK).
*
* @param xiface is the physical interface number
* @param index is either a physical port on an interface
* @param or a channel of an ILK interface
* @param dq_base is the first Descriptor Queue number in a consecutive range
* @param dq_count is the number of consecutive Descriptor Queues leading
* @param the same channel or port.
*
 * Only a consecutive range of Descriptor Queues can be associated with any
* given channel/port, and usually they are ordered from most to least
* in terms of scheduling priority.
*
 * Note: this function only populates the node-local translation table.
*
* @returns 0 on success, -1 on failure.
*/
int __cvmx_pko3_ipd_dq_register(int xiface, int index, unsigned int dq_base, unsigned int dq_count);
/**
* @INTERNAL
*
* Unregister DQs associated with CHAN_E (IPD port)
*/
int __cvmx_pko3_ipd_dq_unregister(int xiface, int index);
/*
* Map channel number in PKO
*
* @param node is to specify the node to which this configuration is applied.
* @param pq_num specifies the Port Queue (i.e. L1) queue number.
* @param l2_l3_q_num specifies L2/L3 queue number.
* @param channel specifies the channel number to map to the queue.
*
* The channel assignment applies to L2 or L3 Shaper Queues depending
* on the setting of channel credit level.
*
* @return returns none.
*/
void cvmx_pko3_map_channel(unsigned int node, unsigned int pq_num, unsigned int l2_l3_q_num,
u16 channel);
int cvmx_pko3_pq_config(unsigned int node, unsigned int mac_num, unsigned int pq_num);
int cvmx_pko3_port_cir_set(unsigned int node, unsigned int pq_num, unsigned long rate_kbips,
unsigned int burst_bytes, int adj_bytes);
int cvmx_pko3_dq_cir_set(unsigned int node, unsigned int pq_num, unsigned long rate_kbips,
unsigned int burst_bytes);
int cvmx_pko3_dq_pir_set(unsigned int node, unsigned int pq_num, unsigned long rate_kbips,
unsigned int burst_bytes);
/* Shaper RED actions; passed to cvmx_pko3_dq_red() below. */
typedef enum {
	CVMX_PKO3_SHAPE_RED_STALL,
	CVMX_PKO3_SHAPE_RED_DISCARD,
	CVMX_PKO3_SHAPE_RED_PASS
} red_action_t;
void cvmx_pko3_dq_red(unsigned int node, unsigned int dq_num, red_action_t red_act,
int8_t len_adjust);
/**
* Macros to deal with short floating point numbers,
* where unsigned exponent, and an unsigned normalized
* mantissa are represented each with a defined field width.
*
*/
#define CVMX_SHOFT_MANT_BITS 8
#define CVMX_SHOFT_EXP_BITS 4

/**
 * Convert short-float to an unsigned integer
 * Note that it will lose precision.
 */
#define CVMX_SHOFT_TO_U64(m, e)                                                \
	((((1ull << CVMX_SHOFT_MANT_BITS) | (m)) << (e)) >> CVMX_SHOFT_MANT_BITS)

/**
 * Convert to short-float from an unsigned integer.
 *
 * Note: no trailing semicolon after "while (0)" -- the caller supplies it.
 * The previous trailing semicolon defeated the do-while(0) idiom and made
 * "if (cond) CVMX_SHOFT_FROM_U64(...); else ..." a syntax error.
 */
#define CVMX_SHOFT_FROM_U64(ui, m, e)                                          \
	do {                                                                   \
		unsigned long long u;                                          \
		unsigned int k;                                                \
		k = (1ull << (CVMX_SHOFT_MANT_BITS + 1)) - 1;                  \
		(e) = 0;                                                       \
		u = (ui) << CVMX_SHOFT_MANT_BITS;                              \
		while ((u) > k) {                                              \
			u >>= 1;                                               \
			(e)++;                                                 \
		}                                                              \
		(m) = u & (k >> 1);                                            \
	} while (0)

#define CVMX_SHOFT_MAX()                                                       \
	CVMX_SHOFT_TO_U64((1 << CVMX_SHOFT_MANT_BITS) - 1, (1 << CVMX_SHOFT_EXP_BITS) - 1)
#define CVMX_SHOFT_MIN() CVMX_SHOFT_TO_U64(0, 0)
#endif /* __CVMX_PKO3_QUEUE_H__ */

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,304 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef __CVMX_QLM_H__
#define __CVMX_QLM_H__
/*
* Interface 0 on the 78xx can be connected to qlm 0 or qlm 2. When interface
* 0 is connected to qlm 0, this macro must be set to 0. When interface 0 is
* connected to qlm 2, this macro must be set to 1.
*/
#define MUX_78XX_IFACE0 0
/*
* Interface 1 on the 78xx can be connected to qlm 1 or qlm 3. When interface
* 1 is connected to qlm 1, this macro must be set to 0. When interface 1 is
* connected to qlm 3, this macro must be set to 1.
*/
#define MUX_78XX_IFACE1 0
/* Uncomment this line to print QLM JTAG state */
/* #define CVMX_QLM_DUMP_STATE 1 */
/* Descriptor for one named field in a QLM JTAG chain and its bit range. */
typedef struct {
	const char *name;	/* field name used for lookup by cvmx_qlm_jtag_get/set() */
	int stop_bit;
	int start_bit;
} __cvmx_qlm_jtag_field_t;
/**
* Return the number of QLMs supported by the chip
*
* @return Number of QLMs
*/
int cvmx_qlm_get_num(void);
/**
* Return the qlm number based on the interface
*
* @param xiface Interface to look
*/
int cvmx_qlm_interface(int xiface);
/**
* Return the qlm number based for a port in the interface
*
* @param xiface interface to look up
* @param index index in an interface
*
* @return the qlm number based on the xiface
*/
int cvmx_qlm_lmac(int xiface, int index);
/**
* Return if only DLM5/DLM6/DLM5+DLM6 is used by BGX
*
* @param BGX BGX to search for.
*
* @return muxes used 0 = DLM5+DLM6, 1 = DLM5, 2 = DLM6.
*/
int cvmx_qlm_mux_interface(int bgx);
/**
* Return number of lanes for a given qlm
*
* @param qlm QLM block to query
*
* @return Number of lanes
*/
int cvmx_qlm_get_lanes(int qlm);
/**
* Get the QLM JTAG fields based on Octeon model on the supported chips.
*
* @return qlm_jtag_field_t structure
*/
const __cvmx_qlm_jtag_field_t *cvmx_qlm_jtag_get_field(void);
/**
* Get the QLM JTAG length by going through qlm_jtag_field for each
* Octeon model that is supported
*
* @return return the length.
*/
int cvmx_qlm_jtag_get_length(void);
/**
* Initialize the QLM layer
*/
void cvmx_qlm_init(void);
/**
* Get a field in a QLM JTAG chain
*
* @param qlm QLM to get
* @param lane Lane in QLM to get
* @param name String name of field
*
* @return JTAG field value
*/
u64 cvmx_qlm_jtag_get(int qlm, int lane, const char *name);
/**
* Set a field in a QLM JTAG chain
*
* @param qlm QLM to set
* @param lane Lane in QLM to set, or -1 for all lanes
* @param name String name of field
* @param value Value of the field
*/
void cvmx_qlm_jtag_set(int qlm, int lane, const char *name, u64 value);
/**
* Errata G-16094: QLM Gen2 Equalizer Default Setting Change.
* CN68XX pass 1.x and CN66XX pass 1.x QLM tweak. This function tweaks the
* JTAG setting for a QLMs to run better at 5 and 6.25Ghz.
*/
void __cvmx_qlm_speed_tweak(void);
/**
* Errata G-16174: QLM Gen2 PCIe IDLE DAC change.
* CN68XX pass 1.x, CN66XX pass 1.x and CN63XX pass 1.0-2.2 QLM tweak.
* This function tweaks the JTAG setting for a QLMs for PCIe to run better.
*/
void __cvmx_qlm_pcie_idle_dac_tweak(void);
void __cvmx_qlm_pcie_cfg_rxd_set_tweak(int qlm, int lane);
/**
* Get the speed (Gbaud) of the QLM in Mhz.
*
* @param qlm QLM to examine
*
* @return Speed in Mhz
*/
int cvmx_qlm_get_gbaud_mhz(int qlm);
/**
* Get the speed (Gbaud) of the QLM in Mhz on specific node.
*
* @param node Target QLM node
* @param qlm QLM to examine
*
* @return Speed in Mhz
*/
int cvmx_qlm_get_gbaud_mhz_node(int node, int qlm);
enum cvmx_qlm_mode {
CVMX_QLM_MODE_DISABLED = -1,
CVMX_QLM_MODE_SGMII = 1,
CVMX_QLM_MODE_XAUI,
CVMX_QLM_MODE_RXAUI,
CVMX_QLM_MODE_PCIE, /* gen3 / gen2 / gen1 */
CVMX_QLM_MODE_PCIE_1X2, /* 1x2 gen2 / gen1 */
CVMX_QLM_MODE_PCIE_2X1, /* 2x1 gen2 / gen1 */
CVMX_QLM_MODE_PCIE_1X1, /* 1x1 gen2 / gen1 */
CVMX_QLM_MODE_SRIO_1X4, /* 1x4 short / long */
CVMX_QLM_MODE_SRIO_2X2, /* 2x2 short / long */
CVMX_QLM_MODE_SRIO_4X1, /* 4x1 short / long */
CVMX_QLM_MODE_ILK,
CVMX_QLM_MODE_QSGMII,
CVMX_QLM_MODE_SGMII_SGMII,
CVMX_QLM_MODE_SGMII_DISABLED,
CVMX_QLM_MODE_DISABLED_SGMII,
CVMX_QLM_MODE_SGMII_QSGMII,
CVMX_QLM_MODE_QSGMII_QSGMII,
CVMX_QLM_MODE_QSGMII_DISABLED,
CVMX_QLM_MODE_DISABLED_QSGMII,
CVMX_QLM_MODE_QSGMII_SGMII,
CVMX_QLM_MODE_RXAUI_1X2,
CVMX_QLM_MODE_SATA_2X1,
CVMX_QLM_MODE_XLAUI,
CVMX_QLM_MODE_XFI,
CVMX_QLM_MODE_10G_KR,
CVMX_QLM_MODE_40G_KR4,
CVMX_QLM_MODE_PCIE_1X8, /* 1x8 gen3 / gen2 / gen1 */
CVMX_QLM_MODE_RGMII_SGMII,
CVMX_QLM_MODE_RGMII_XFI,
CVMX_QLM_MODE_RGMII_10G_KR,
CVMX_QLM_MODE_RGMII_RXAUI,
CVMX_QLM_MODE_RGMII_XAUI,
CVMX_QLM_MODE_RGMII_XLAUI,
CVMX_QLM_MODE_RGMII_40G_KR4,
CVMX_QLM_MODE_MIXED, /* BGX2 is mixed mode, DLM5(SGMII) & DLM6(XFI) */
CVMX_QLM_MODE_SGMII_2X1, /* Configure BGX2 separate for DLM5 & DLM6 */
CVMX_QLM_MODE_10G_KR_1X2, /* Configure BGX2 separate for DLM5 & DLM6 */
CVMX_QLM_MODE_XFI_1X2, /* Configure BGX2 separate for DLM5 & DLM6 */
CVMX_QLM_MODE_RGMII_SGMII_1X1, /* Configure BGX2, applies to DLM5 */
CVMX_QLM_MODE_RGMII_SGMII_2X1, /* Configure BGX2, applies to DLM6 */
CVMX_QLM_MODE_RGMII_10G_KR_1X1, /* Configure BGX2, applies to DLM6 */
CVMX_QLM_MODE_RGMII_XFI_1X1, /* Configure BGX2, applies to DLM6 */
CVMX_QLM_MODE_SDL, /* RMAC Pipe */
CVMX_QLM_MODE_CPRI, /* RMAC */
CVMX_QLM_MODE_OCI
};
enum cvmx_gmx_inf_mode {
CVMX_GMX_INF_MODE_DISABLED = 0,
CVMX_GMX_INF_MODE_SGMII = 1, /* Other interface can be SGMII or QSGMII */
CVMX_GMX_INF_MODE_QSGMII = 2, /* Other interface can be SGMII or QSGMII */
CVMX_GMX_INF_MODE_RXAUI = 3, /* Only interface 0, interface 1 must be DISABLED */
};
/**
 * Eye diagram captures are stored in the following structure
 */
typedef struct {
	int width;	   /* Width in the x direction (time) */
	int height;	   /* Height in the y direction (voltage) */
	u32 data[64][128]; /* Error count at location, saturates as max; assumes [voltage][time] indexing — TODO confirm */
} cvmx_qlm_eye_t;
/**
 * These apply to DLM1 and DLM2 if it's not in SATA mode
 * Manual refers to lanes as follows:
 *  DLM 0 lane 0 == GSER0 lane 0
 *  DLM 0 lane 1 == GSER0 lane 1
 *  DLM 1 lane 2 == GSER1 lane 0
 *  DLM 1 lane 3 == GSER1 lane 1
 *  DLM 2 lane 4 == GSER2 lane 0
 *  DLM 2 lane 5 == GSER2 lane 1
 */
enum cvmx_pemx_cfg_mode {
	CVMX_PEM_MD_GEN2_2LANE = 0, /* Valid for PEM0(DLM1), PEM1(DLM2) */
	CVMX_PEM_MD_GEN2_1LANE = 1, /* Valid for PEM0(DLM1.0), PEM1(DLM1.1,DLM2.0), PEM2(DLM2.1) */
	CVMX_PEM_MD_GEN2_4LANE = 2, /* Valid for PEM0(DLM1-2) */
	/* Value 3 reserved */
	CVMX_PEM_MD_GEN1_2LANE = 4, /* Valid for PEM0(DLM1), PEM1(DLM2) */
	CVMX_PEM_MD_GEN1_1LANE = 5, /* Valid for PEM0(DLM1.0), PEM1(DLM1.1,DLM2.0), PEM2(DLM2.1) */
	CVMX_PEM_MD_GEN1_4LANE = 6, /* Valid for PEM0(DLM1-2) */
	/* Value 7 reserved */
};
/**
 * Read QLM and return mode.
 *
 * @param qlm	QLM to examine
 * @return operating mode of the QLM
 */
enum cvmx_qlm_mode cvmx_qlm_get_mode(int qlm);
/* Node-aware variant of cvmx_qlm_get_mode() for multi-node (CN78XX) parts */
enum cvmx_qlm_mode cvmx_qlm_get_mode_cn78xx(int node, int qlm);
/* Map a DLM mode/interface pair to the resulting QLM mode */
enum cvmx_qlm_mode cvmx_qlm_get_dlm_mode(int dlm_mode, int interface);
void __cvmx_qlm_set_mult(int qlm, int baud_mhz, int old_multiplier);
/* Dump the QLM's registers for debugging */
void cvmx_qlm_display_registers(int qlm);
/* Measure the reference clock of a QLM on node 0; returns the rate in Hz */
int cvmx_qlm_measure_clock(int qlm);
/**
 * Measure the reference clock of a QLM on a multi-node setup
 *
 * @param node   node to measure
 * @param qlm    QLM to measure
 *
 * @return Clock rate in Hz
 */
int cvmx_qlm_measure_clock_node(int node, int qlm);
/**
 * Perform RX equalization on a QLM
 *
 * @param node   Node the QLM is on
 * @param qlm    QLM to perform RX equalization on
 * @param lane   Lane to use, or -1 for all lanes
 *
 * @return Zero on success, negative if any lane failed RX equalization
 */
int __cvmx_qlm_rx_equalization(int node, int qlm, int lane);
/**
 * Errata GSER-27882 -GSER 10GBASE-KR Transmit Equalizer
 * Training may not update PHY Tx Taps. This function is not static
 * so we can share it with BGX KR
 *
 * @param node   Node to apply errata workaround
 * @param qlm    QLM to apply errata workaround
 * @param lane   Lane to apply the errata
 */
int cvmx_qlm_gser_errata_27882(int node, int qlm, int lane);
/* Apply errata GSER-25992 workaround to the given QLM */
void cvmx_qlm_gser_errata_25992(int node, int qlm);
#ifdef CVMX_DUMP_GSER
/**
 * Dump GSER configuration for node 0
 */
int cvmx_dump_gser_config(unsigned int gser);
/**
 * Dump GSER status for node 0
 */
int cvmx_dump_gser_status(unsigned int gser);
/**
 * Dump GSER configuration
 */
int cvmx_dump_gser_config_node(unsigned int node, unsigned int gser);
/**
 * Dump GSER status
 */
int cvmx_dump_gser_status_node(unsigned int node, unsigned int gser);
#endif
/* Display a previously captured eye diagram for the given QLM lane */
int cvmx_qlm_eye_display(int node, int qlm, int qlm_lane, int format, const cvmx_qlm_eye_t *eye);
/* PRBS (pseudo-random bit sequence) test command processing */
void cvmx_prbs_process_cmd(int node, int qlm, int mode);
#endif /* __CVMX_QLM_H__ */

View File

@ -9,6 +9,7 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <mach/cvmx-address.h>
/* General defines */
#define CVMX_MAX_CORES 48
@ -26,48 +27,117 @@
#define MAX_CORE_TADS 8
#define CAST_ULL(v) ((unsigned long long)(v))
#define CASTPTR(type, v) ((type *)(long)(v))
#define CAST64(v) ((long long)(long)(v))
/* Regs */
#define CVMX_CIU_PP_RST 0x0001010000000100ULL
#define CVMX_CIU3_NMI 0x0001010000000160ULL
#define CVMX_CIU_FUSE 0x00010100000001a0ULL
#define CVMX_CIU_NMI 0x0001070000000718ULL
#define CVMX_MIO_BOOT_LOC_CFGX(x) (0x0001180000000080ULL + ((x) & 1) * 8)
#define MIO_BOOT_LOC_CFG_BASE GENMASK_ULL(27, 3)
#define MIO_BOOT_LOC_CFG_EN BIT_ULL(31)
#define MIO_BOOT_LOC_CFG_BASE GENMASK_ULL(27, 3)
#define MIO_BOOT_LOC_CFG_EN BIT_ULL(31)
#define CVMX_MIO_BOOT_LOC_ADR 0x0001180000000090ULL
#define MIO_BOOT_LOC_ADR_ADR GENMASK_ULL(7, 3)
#define MIO_BOOT_LOC_ADR_ADR GENMASK_ULL(7, 3)
#define CVMX_MIO_BOOT_LOC_DAT 0x0001180000000098ULL
#define CVMX_MIO_FUS_DAT2 0x0001180000001410ULL
#define MIO_FUS_DAT2_NOCRYPTO BIT_ULL(26)
#define MIO_FUS_DAT2_NOMUL BIT_ULL(27)
#define MIO_FUS_DAT2_DORM_CRYPTO BIT_ULL(34)
#define MIO_FUS_DAT2_NOCRYPTO BIT_ULL(26)
#define MIO_FUS_DAT2_NOMUL BIT_ULL(27)
#define MIO_FUS_DAT2_DORM_CRYPTO BIT_ULL(34)
#define CVMX_MIO_FUS_RCMD 0x0001180000001500ULL
#define MIO_FUS_RCMD_ADDR GENMASK_ULL(7, 0)
#define MIO_FUS_RCMD_PEND BIT_ULL(12)
#define MIO_FUS_RCMD_DAT GENMASK_ULL(23, 16)
#define MIO_FUS_RCMD_ADDR GENMASK_ULL(7, 0)
#define MIO_FUS_RCMD_PEND BIT_ULL(12)
#define MIO_FUS_RCMD_DAT GENMASK_ULL(23, 16)
#define CVMX_RNM_CTL_STATUS 0x0001180040000000ULL
#define RNM_CTL_STATUS_EER_VAL BIT_ULL(9)
#define RNM_CTL_STATUS_EER_VAL BIT_ULL(9)
#define CVMX_IOBDMA_ORDERED_IO_ADDR 0xffffffffffffa200ull
/* turn the variable name into a string */
#define CVMX_TMP_STR(x) CVMX_TMP_STR2(x)
#define CVMX_TMP_STR2(x) #x
#define CVMX_RDHWR(result, regstr) \
asm volatile("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d"(result))
#define CVMX_RDHWRNV(result, regstr) \
asm volatile ("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d" (result))
asm("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d"(result))
#define CVMX_POP(result, input) \
asm("pop %[rd],%[rs]" : [rd] "=d"(result) : [rs] "d"(input))
#define CVMX_SYNCW \
asm volatile ("syncw\nsyncw\n" : : : "memory")
#define CVMX_SYNC asm volatile("sync\n" : : : "memory")
#define CVMX_SYNCW asm volatile("syncw\nsyncw\n" : : : "memory")
#define CVMX_SYNCS asm volatile("syncs\n" : : : "memory")
#define CVMX_SYNCWS asm volatile("syncws\n" : : : "memory")
#define CVMX_CACHE_LINE_SIZE 128 // In bytes
#define CVMX_CACHE_LINE_MASK (CVMX_CACHE_LINE_SIZE - 1) // In bytes
#define CVMX_CACHE_LINE_ALIGNED __aligned(CVMX_CACHE_LINE_SIZE)
#define CVMX_SYNCIOBDMA asm volatile("synciobdma" : : : "memory")
#define CVMX_MF_CHORD(dest) CVMX_RDHWR(dest, 30)
/*
 * The macros cvmx_likely and cvmx_unlikely use the
 * __builtin_expect GCC operation to control branch
 * probabilities for a conditional. For example, an "if"
 * statement in the code that will almost always be
 * executed should be written as "if (cvmx_likely(...))".
 * If the "else" section of an if statement is more
 * probable, use "if (cvmx_unlikely(...))".
 */
#define cvmx_likely(x) __builtin_expect(!!(x), 1)
#define cvmx_unlikely(x) __builtin_expect(!!(x), 0)
/*
 * Spin (polling every 100 us) until "c.s.field op value" becomes true for
 * the CSR at "address" read into union "type", or until the timeout
 * expires ((to_us) / 1000 in get_timer() units).
 * Evaluates to 0 on success, -1 on timeout.
 */
#define CVMX_WAIT_FOR_FIELD64(address, type, field, op, value, to_us)	\
	({								\
		int result;						\
		do {							\
			u64 done = get_timer(0);			\
			type c;						\
			while (1) {					\
				c.u64 = csr_rd(address);		\
				if ((c.s.field)op(value)) {		\
					result = 0;			\
					break;				\
				} else if (get_timer(done) > ((to_us) / 1000)) { \
					result = -1;			\
					break;				\
				} else					\
					udelay(100);			\
			}						\
		} while (0);						\
		result;							\
	})
/*
 * Node-aware variant of CVMX_WAIT_FOR_FIELD64: polls the CSR on the given
 * OCI node.  Previously the "node" argument was ignored (the body called
 * csr_rd(address)); use csr_rd_node() so the macro actually reads from the
 * requested node.  Behavior for node 0 is unchanged.
 * Evaluates to 0 on success, -1 on timeout.
 */
#define CVMX_WAIT_FOR_FIELD64_NODE(node, address, type, field, op, value, to_us) \
	({								\
		int result;						\
		do {							\
			u64 done = get_timer(0);			\
			type c;						\
			while (1) {					\
				c.u64 = csr_rd_node(node, address);	\
				if ((c.s.field)op(value)) {		\
					result = 0;			\
					break;				\
				} else if (get_timer(done) > ((to_us) / 1000)) { \
					result = -1;			\
					break;				\
				} else					\
					udelay(100);			\
			}						\
		} while (0);						\
		result;							\
	})
/* Return the OCI node this code runs on. ToDo: Currently only node = 0 supported */
#define cvmx_get_node_num() 0
static inline u64 csr_rd_node(int node, u64 addr)
{
void __iomem *base;
@ -76,11 +146,24 @@ static inline u64 csr_rd_node(int node, u64 addr)
return ioread64(base);
}
/* Read a 32-bit CSR on the given node (node is encoded in "addr" by callers) */
static inline u32 csr_rd32_node(int node, u64 addr)
{
	void __iomem *io = ioremap_nocache(addr, 0x100);

	return ioread32(io);
}
/* Read a 64-bit CSR on node 0 */
static inline u64 csr_rd(u64 addr)
{
	return csr_rd_node(0, addr);
}
/* Read a 32-bit CSR on node 0 */
static inline u32 csr_rd32(u64 addr)
{
	return csr_rd32_node(0, addr);
}
static inline void csr_wr_node(int node, u64 addr, u64 val)
{
void __iomem *base;
@ -89,11 +172,24 @@ static inline void csr_wr_node(int node, u64 addr, u64 val)
iowrite64(val, base);
}
/* Write a 32-bit CSR on the given node (node is encoded in "addr" by callers) */
static inline void csr_wr32_node(int node, u64 addr, u32 val)
{
	void __iomem *io = ioremap_nocache(addr, 0x100);

	iowrite32(val, io);
}
/* Write a 64-bit CSR on node 0 */
static inline void csr_wr(u64 addr, u64 val)
{
	csr_wr_node(0, addr, val);
}
/* Write a 32-bit CSR on node 0 */
static inline void csr_wr32(u64 addr, u32 val)
{
	csr_wr32_node(0, addr, val);
}
/*
* We need to use the volatile access here, otherwise the IO accessor
* functions might swap the bytes
@ -103,21 +199,173 @@ static inline u64 cvmx_read64_uint64(u64 addr)
return *(volatile u64 *)addr;
}
/*
 * Raw typed accessors for 64-bit physical addresses.  These deliberately
 * use plain volatile dereferences rather than the IO accessor functions,
 * which might swap the bytes (see the comment above cvmx_read64_uint64()).
 */
static inline s64 cvmx_read64_int64(u64 addr)
{
	return *(volatile s64 *)addr;
}
static inline void cvmx_write64_uint64(u64 addr, u64 val)
{
	*(volatile u64 *)addr = val;
}
static inline void cvmx_write64_int64(u64 addr, s64 val)
{
	*(volatile s64 *)addr = val;
}
static inline u32 cvmx_read64_uint32(u64 addr)
{
	return *(volatile u32 *)addr;
}
static inline s32 cvmx_read64_int32(u64 addr)
{
	return *(volatile s32 *)addr;
}
static inline void cvmx_write64_uint32(u64 addr, u32 val)
{
	*(volatile u32 *)addr = val;
}
static inline void cvmx_write64_int32(u64 addr, s32 val)
{
	*(volatile s32 *)addr = val;
}
static inline void cvmx_write64_int16(u64 addr, s16 val)
{
	*(volatile s16 *)addr = val;
}
static inline void cvmx_write64_uint16(u64 addr, u16 val)
{
	*(volatile u16 *)addr = val;
}
static inline void cvmx_write64_int8(u64 addr, int8_t val)
{
	*(volatile int8_t *)addr = val;
}
static inline void cvmx_write64_uint8(u64 addr, u8 val)
{
	*(volatile u8 *)addr = val;
}
static inline s16 cvmx_read64_int16(u64 addr)
{
	return *(volatile s16 *)addr;
}
static inline u16 cvmx_read64_uint16(u64 addr)
{
	return *(volatile u16 *)addr;
}
static inline int8_t cvmx_read64_int8(u64 addr)
{
	return *(volatile int8_t *)addr;
}
static inline u8 cvmx_read64_uint8(u64 addr)
{
	return *(volatile u8 *)addr;
}
/* Issue a single IOBDMA command word via the ordered IO address */
static inline void cvmx_send_single(u64 data)
{
	cvmx_write64_uint64(CVMX_IOBDMA_ORDERED_IO_ADDR, data);
}
/**
 * Perform a 64-bit write to an IO address
 * (plain 64-bit store; no barrier is implied).
 *
 * @param io_addr   I/O address to write to
 * @param val       64-bit value to write
 */
static inline void cvmx_write_io(u64 io_addr, u64 val)
{
	cvmx_write64_uint64(io_addr, val);
}
/**
 * Builds a memory address for I/O based on the Major and Sub DID.
 *
 * @param major_did 5 bit major did
 * @param sub_did   3 bit sub did
 * @return I/O base address
 */
static inline u64 cvmx_build_io_address(u64 major_did, u64 sub_did)
{
	u64 addr = 0x1ull << 48;	/* I/O space indicator */

	addr |= major_did << 43;
	addr |= sub_did << 40;

	return addr;
}
/**
 * Builds a bit mask given the required size in bits.
 *
 * @param bits Number of bits in the mask (1-64)
 * @return The mask
 */
static inline u64 cvmx_build_mask(u64 bits)
{
	/* A 64-bit shift would be undefined, handle the full mask up front */
	if (bits == 64)
		return ~0ull;

	return (1ull << bits) - 1;
}
/**
 * Extract bits out of a number
 *
 * @param input Number to extract from
 * @param lsb   Starting bit, least significant (0-63)
 * @param width Width in bits (1-64)
 *
 * @return Extracted number
 */
static inline u64 cvmx_bit_extract(u64 input, int lsb, int width)
{
	return (input >> lsb) & cvmx_build_mask(width);
}
/**
 * Perform mask and shift to place the supplied value into
 * the supplied bit range.
 *
 * Example: cvmx_build_bits(39, 24, value) places the low 16 bits of
 * "value" into bits 39..24 of the result; all other bits are zero.
 *
 * @param high_bit Highest bit value can occupy (inclusive) 0-63
 * @param low_bit  Lowest bit value can occupy inclusive 0-high_bit
 * @param value    Value to use
 * @return Value masked and shifted
 */
static inline u64 cvmx_build_bits(u64 high_bit, u64 low_bit, u64 value)
{
	u64 field_mask = cvmx_build_mask(high_bit - low_bit + 1);

	return (value & field_mask) << low_bit;
}
/* Strip the node bits, keeping only the node-local (low 40) address bits */
static inline u64 cvmx_mask_to_localaddr(u64 addr)
{
	return addr & ((1ull << 40) - 1);
}
/* Place the node number at bit 40 of the node-local part of "addr" */
static inline u64 cvmx_addr_on_node(u64 node, u64 addr)
{
	u64 local = cvmx_mask_to_localaddr(addr);

	return (node << 40) | local;
}
static inline void *cvmx_phys_to_ptr(u64 addr)
{
return (void *)CKSEG0ADDR(addr);
@ -141,4 +389,53 @@ static inline unsigned int cvmx_get_core_num(void)
return core_num;
}
/**
 * Node-local number of the core on which the program is currently running.
 *
 * @return core number on local node
 */
static inline unsigned int cvmx_get_local_core_num(void)
{
	unsigned int core;

	CVMX_RDHWRNV(core, 0);

	/* MAX_CORES may not be a power of 2, hence the explicit mask */
	return core & ((1 << CVMX_NODE_NO_SHIFT) - 1);
}
/**
 * Returns the number of bits set in the provided value.
 * Simple wrapper for the POP instruction.
 *
 * @param val 32 bit value to count set bits in
 *
 * @return Number of bits set
 */
static inline u32 cvmx_pop(u32 val)
{
	u32 count;

	CVMX_POP(count, val);

	return count;
}
/* Legacy node-aware accessor names; node is ignored (only node 0 supported) */
#define cvmx_read_csr_node(node, addr) csr_rd(addr)
#define cvmx_write_csr_node(node, addr, val) csr_wr(addr, val)
/* SDK compatibility aliases for stdio output */
#define cvmx_printf printf
#define cvmx_vprintf vprintf
#if defined(DEBUG)
/* Format-checked like printf when DEBUG is set */
void cvmx_warn(const char *format, ...) __printf(1, 2);
#else
void cvmx_warn(const char *format, ...);
#endif
/*
 * Emit a warning when "expression" is true.  Wrapped in do { } while (0)
 * so the macro expands to a single statement and cannot capture a
 * following "else" (dangling-else hazard of a bare if-macro).
 */
#define cvmx_warn_if(expression, format, ...)			\
	do {							\
		if (expression)					\
			cvmx_warn(format, ##__VA_ARGS__);	\
	} while (0)
#endif /* __CVMX_REGS_H__ */

View File

@ -0,0 +1,77 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef __CVMX_RST_DEFS_H__
#define __CVMX_RST_DEFS_H__
#define CVMX_RST_CTLX(offset) (0x0001180006001640ull + ((offset) & 3) * 8)
#define CVMX_RST_SOFT_PRSTX(offset) (0x00011800060016C0ull + ((offset) & 3) * 8)
/**
 * cvmx_rst_ctl#
 *
 * RST_CTL(0..3) reset control/status.  Bit-field order is MSB-first;
 * the reserved_a_b names give the reserved bit spans.
 */
union cvmx_rst_ctlx {
	u64 u64;
	struct cvmx_rst_ctlx_s {
		u64 reserved_10_63 : 54;
		u64 prst_link : 1; /* bit 9 */
		u64 rst_done : 1;  /* bit 8 */
		u64 rst_link : 1;  /* bit 7 */
		u64 host_mode : 1; /* bit 6 */
		u64 reserved_4_5 : 2;
		u64 rst_drv : 1;   /* bit 3 */
		u64 rst_rcv : 1;   /* bit 2 */
		u64 rst_chip : 1;  /* bit 1 */
		u64 rst_val : 1;   /* bit 0 */
	} s;
	struct cvmx_rst_ctlx_s cn70xx;
	struct cvmx_rst_ctlx_s cn70xxp1;
	struct cvmx_rst_ctlx_s cn73xx;
	struct cvmx_rst_ctlx_s cn78xx;
	struct cvmx_rst_ctlx_s cn78xxp1;
	struct cvmx_rst_ctlx_s cnf75xx;
};
typedef union cvmx_rst_ctlx cvmx_rst_ctlx_t;
/**
 * cvmx_rst_soft_prst#
 *
 * RST_SOFT_PRST(0..3) soft PCIe reset; single control bit at bit 0.
 */
union cvmx_rst_soft_prstx {
	u64 u64;
	struct cvmx_rst_soft_prstx_s {
		u64 reserved_1_63 : 63;
		u64 soft_prst : 1; /* bit 0 */
	} s;
	struct cvmx_rst_soft_prstx_s cn70xx;
	struct cvmx_rst_soft_prstx_s cn70xxp1;
	struct cvmx_rst_soft_prstx_s cn73xx;
	struct cvmx_rst_soft_prstx_s cn78xx;
	struct cvmx_rst_soft_prstx_s cn78xxp1;
	struct cvmx_rst_soft_prstx_s cnf75xx;
};
typedef union cvmx_rst_soft_prstx cvmx_rst_soft_prstx_t;
/**
 * cvmx_rst_soft_rst
 *
 * Soft reset control; single control bit at bit 0.
 */
union cvmx_rst_soft_rst {
	u64 u64;
	struct cvmx_rst_soft_rst_s {
		u64 reserved_1_63 : 63;
		u64 soft_rst : 1; /* bit 0 */
	} s;
	struct cvmx_rst_soft_rst_s cn70xx;
	struct cvmx_rst_soft_rst_s cn70xxp1;
	struct cvmx_rst_soft_rst_s cn73xx;
	struct cvmx_rst_soft_rst_s cn78xx;
	struct cvmx_rst_soft_rst_s cn78xxp1;
	struct cvmx_rst_soft_rst_s cnf75xx;
};
typedef union cvmx_rst_soft_rst cvmx_rst_soft_rst_t;
#endif

View File

@ -0,0 +1,311 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef __CVMX_SATA_DEFS_H__
#define __CVMX_SATA_DEFS_H__
#define CVMX_SATA_UCTL_CTL (0x000118006C000000ull)
#define CVMX_SATA_UCTL_SHIM_CFG (0x000118006C0000E8ull)
#define CVMX_SATA_UCTL_BIST_STATUS (0x000118006C000008ull)
#define CVMX_SATA_UAHC_GBL_PI (0x00016C000000000Cull)
#define CVMX_SATA_UAHC_GBL_TIMER1MS (0x00016C00000000E0ull)
#define CVMX_SATA_UAHC_GBL_CAP (0x00016C0000000000ull)
#define CVMX_SATA_UAHC_PX_CMD(offset) (0x00016C0000000118ull + ((offset) & 1) * 128)
#define CVMX_SATA_UAHC_PX_SCTL(offset) (0x00016C000000012Cull + ((offset) & 1) * 128)
#define CVMX_SATA_UAHC_PX_SERR(offset) (0x00016C0000000130ull + ((offset) & 1) * 128)
#define CVMX_SATA_UAHC_PX_IS(offset) (0x00016C0000000110ull + ((offset) & 1) * 128)
#define CVMX_SATA_UAHC_PX_SSTS(offset) (0x00016C0000000128ull + ((offset) & 1) * 128)
#define CVMX_SATA_UAHC_PX_TFD(offset) (0x00016C0000000120ull + ((offset) & 1) * 128)
/**
 * cvmx_sata_uctl_ctl
 *
 * This register controls clocks, resets, power, and BIST for the SATA.
 *
 * Accessible always.
 *
 * Reset by IOI reset.
 *
 * Bit-field order is MSB-first; the reserved_a_b names give the bit spans.
 */
union cvmx_sata_uctl_ctl {
	u64 u64;
	struct cvmx_sata_uctl_ctl_s {
		u64 clear_bist : 1;	/* bit 63 */
		u64 start_bist : 1;	/* bit 62 */
		u64 reserved_31_61 : 31;
		u64 a_clk_en : 1;	/* bit 30 */
		u64 a_clk_byp_sel : 1;	/* bit 29 */
		u64 a_clkdiv_rst : 1;	/* bit 28 */
		u64 reserved_27_27 : 1;
		u64 a_clkdiv_sel : 3;	/* bits 26:24 */
		u64 reserved_5_23 : 19;
		u64 csclk_en : 1;	/* bit 4 */
		u64 reserved_2_3 : 2;
		u64 sata_uahc_rst : 1;	/* bit 1 */
		u64 sata_uctl_rst : 1;	/* bit 0 */
	} s;
	struct cvmx_sata_uctl_ctl_s cn70xx;
	struct cvmx_sata_uctl_ctl_s cn70xxp1;
	struct cvmx_sata_uctl_ctl_s cn73xx;
};
typedef union cvmx_sata_uctl_ctl cvmx_sata_uctl_ctl_t;
/**
 * cvmx_sata_uctl_bist_status
 *
 * Results from BIST runs of SATA's memories.
 * Wait for NDONE==0, then look at defect indication.
 *
 * Accessible always.
 *
 * Reset by IOI reset.
 */
union cvmx_sata_uctl_bist_status {
	u64 u64;
	struct cvmx_sata_uctl_bist_status_s {
		u64 reserved_42_63 : 22;
		/* bits 41:32 -- BIST still running ("not done") flags */
		u64 uctl_xm_r_bist_ndone : 1;
		u64 uctl_xm_w_bist_ndone : 1;
		u64 reserved_36_39 : 4;
		u64 uahc_p0_rxram_bist_ndone : 1;
		u64 uahc_p1_rxram_bist_ndone : 1;
		u64 uahc_p0_txram_bist_ndone : 1;
		u64 uahc_p1_txram_bist_ndone : 1;
		u64 reserved_10_31 : 22;
		/* bits 9:0 -- defect indications, valid once NDONE clears */
		u64 uctl_xm_r_bist_status : 1;
		u64 uctl_xm_w_bist_status : 1;
		u64 reserved_4_7 : 4;
		u64 uahc_p0_rxram_bist_status : 1;
		u64 uahc_p1_rxram_bist_status : 1;
		u64 uahc_p0_txram_bist_status : 1;
		u64 uahc_p1_txram_bist_status : 1;
	} s;
	struct cvmx_sata_uctl_bist_status_s cn70xx;
	struct cvmx_sata_uctl_bist_status_s cn70xxp1;
	struct cvmx_sata_uctl_bist_status_s cn73xx;
};
typedef union cvmx_sata_uctl_bist_status cvmx_sata_uctl_bist_status_t;
/**
 * cvmx_sata_uctl_shim_cfg
 *
 * This register allows configuration of various shim (UCTL) features.
 *
 * Fields XS_NCB_OOB_* are captured when there are no outstanding OOB errors indicated in INTSTAT
 * and a new OOB error arrives.
 *
 * Fields XS_BAD_DMA_* are captured when there are no outstanding DMA errors indicated in INTSTAT
 * and a new DMA error arrives.
 *
 * Accessible only when SATA_UCTL_CTL[A_CLK_EN].
 *
 * Reset by IOI reset or SATA_UCTL_CTL[SATA_UCTL_RST].
 */
union cvmx_sata_uctl_shim_cfg {
	u64 u64;
	struct cvmx_sata_uctl_shim_cfg_s {
		u64 xs_ncb_oob_wrn : 1;	  /* bit 63 */
		u64 reserved_60_62 : 3;
		u64 xs_ncb_oob_osrc : 12; /* bits 59:48 */
		u64 xm_bad_dma_wrn : 1;	  /* bit 47 */
		u64 reserved_44_46 : 3;
		u64 xm_bad_dma_type : 4;  /* bits 43:40 */
		u64 reserved_14_39 : 26;
		u64 dma_read_cmd : 2;	  /* bits 13:12 */
		u64 reserved_11_11 : 1;
		u64 dma_write_cmd : 1;	  /* bit 10 */
		u64 dma_endian_mode : 2;  /* bits 9:8 */
		u64 reserved_2_7 : 6;
		u64 csr_endian_mode : 2;  /* bits 1:0 */
	} s;
	/* CN70XX layout: narrower OSRC, 1-bit DMA_READ_CMD, no DMA_WRITE_CMD */
	struct cvmx_sata_uctl_shim_cfg_cn70xx {
		u64 xs_ncb_oob_wrn : 1;
		u64 reserved_57_62 : 6;
		u64 xs_ncb_oob_osrc : 9;
		u64 xm_bad_dma_wrn : 1;
		u64 reserved_44_46 : 3;
		u64 xm_bad_dma_type : 4;
		u64 reserved_13_39 : 27;
		u64 dma_read_cmd : 1;
		u64 reserved_10_11 : 2;
		u64 dma_endian_mode : 2;
		u64 reserved_2_7 : 6;
		u64 csr_endian_mode : 2;
	} cn70xx;
	struct cvmx_sata_uctl_shim_cfg_cn70xx cn70xxp1;
	struct cvmx_sata_uctl_shim_cfg_s cn73xx;
};
typedef union cvmx_sata_uctl_shim_cfg cvmx_sata_uctl_shim_cfg_t;
/**
 * cvmx_sata_uahc_gbl_cap
 *
 * See AHCI specification v1.3 section 3.1.
 * Bit layout matches the AHCI CAP register, MSB-first:
 * S64A is bit 31 down to NP in bits 4:0.
 */
union cvmx_sata_uahc_gbl_cap {
	u32 u32;
	struct cvmx_sata_uahc_gbl_cap_s {
		u32 s64a : 1;
		u32 sncq : 1;
		u32 ssntf : 1;
		u32 smps : 1;
		u32 sss : 1;
		u32 salp : 1;
		u32 sal : 1;
		u32 sclo : 1;
		u32 iss : 4;
		u32 snzo : 1;
		u32 sam : 1;
		u32 spm : 1;
		u32 fbss : 1;
		u32 pmd : 1;
		u32 ssc : 1;
		u32 psc : 1;
		u32 ncs : 5;
		u32 cccs : 1;
		u32 ems : 1;
		u32 sxs : 1;
		u32 np : 5;
	} s;
	struct cvmx_sata_uahc_gbl_cap_s cn70xx;
	struct cvmx_sata_uahc_gbl_cap_s cn70xxp1;
	struct cvmx_sata_uahc_gbl_cap_s cn73xx;
};
typedef union cvmx_sata_uahc_gbl_cap cvmx_sata_uahc_gbl_cap_t;
/**
 * cvmx_sata_uahc_p#_sctl
 *
 * Port SATA control register (AHCI PxSCTL layout).
 */
union cvmx_sata_uahc_px_sctl {
	u32 u32;
	struct cvmx_sata_uahc_px_sctl_s {
		u32 reserved_10_31 : 22;
		u32 ipm : 2; /* bits 9:8 */
		u32 reserved_6_7 : 2;
		u32 spd : 2; /* bits 5:4 */
		u32 reserved_3_3 : 1;
		u32 det : 3; /* bits 2:0 */
	} s;
	struct cvmx_sata_uahc_px_sctl_s cn70xx;
	struct cvmx_sata_uahc_px_sctl_s cn70xxp1;
	struct cvmx_sata_uahc_px_sctl_s cn73xx;
};
typedef union cvmx_sata_uahc_px_sctl cvmx_sata_uahc_px_sctl_t;
/**
 * cvmx_sata_uahc_p#_cmd
 *
 * Port command and status register (AHCI PxCMD layout, MSB-first:
 * ICC in bits 31:28 down to ST at bit 0).
 */
union cvmx_sata_uahc_px_cmd {
	u32 u32;
	struct cvmx_sata_uahc_px_cmd_s {
		u32 icc : 4;
		u32 asp : 1;
		u32 alpe : 1;
		u32 dlae : 1;
		u32 atapi : 1;
		u32 apste : 1;
		u32 fbscp : 1;
		u32 esp : 1;
		u32 cpd : 1;
		u32 mpsp : 1;
		u32 hpcp : 1;
		u32 pma : 1;
		u32 cps : 1;
		u32 cr : 1;
		u32 fr : 1;
		u32 mpss : 1;
		u32 ccs : 5;
		u32 reserved_5_7 : 3;
		u32 fre : 1;
		u32 clo : 1;
		u32 pod : 1;
		u32 sud : 1;
		u32 st : 1;
	} s;
	struct cvmx_sata_uahc_px_cmd_s cn70xx;
	struct cvmx_sata_uahc_px_cmd_s cn70xxp1;
	struct cvmx_sata_uahc_px_cmd_s cn73xx;
};
typedef union cvmx_sata_uahc_px_cmd cvmx_sata_uahc_px_cmd_t;
/**
 * cvmx_sata_uahc_gbl_pi
 *
 * See AHCI specification v1.3 section 3.1.
 * Ports implemented bitmap; 2 bits, one per port.
 */
union cvmx_sata_uahc_gbl_pi {
	u32 u32;
	struct cvmx_sata_uahc_gbl_pi_s {
		u32 reserved_2_31 : 30;
		u32 pi : 2; /* bits 1:0 */
	} s;
	struct cvmx_sata_uahc_gbl_pi_s cn70xx;
	struct cvmx_sata_uahc_gbl_pi_s cn70xxp1;
	struct cvmx_sata_uahc_gbl_pi_s cn73xx;
};
typedef union cvmx_sata_uahc_gbl_pi cvmx_sata_uahc_gbl_pi_t;
/**
 * cvmx_sata_uahc_p#_ssts
 *
 * Port SATA status register (AHCI PxSSTS layout).
 */
union cvmx_sata_uahc_px_ssts {
	u32 u32;
	struct cvmx_sata_uahc_px_ssts_s {
		u32 reserved_12_31 : 20;
		u32 ipm : 4; /* bits 11:8 */
		u32 spd : 4; /* bits 7:4 */
		u32 det : 4; /* bits 3:0 */
	} s;
	struct cvmx_sata_uahc_px_ssts_s cn70xx;
	struct cvmx_sata_uahc_px_ssts_s cn70xxp1;
	struct cvmx_sata_uahc_px_ssts_s cn73xx;
};
typedef union cvmx_sata_uahc_px_ssts cvmx_sata_uahc_px_ssts_t;
/**
 * cvmx_sata_uahc_p#_tfd
 *
 * Port task file data register (AHCI PxTFD layout).
 */
union cvmx_sata_uahc_px_tfd {
	u32 u32;
	struct cvmx_sata_uahc_px_tfd_s {
		u32 reserved_16_31 : 16;
		u32 tferr : 8; /* bits 15:8 */
		u32 sts : 8;   /* bits 7:0 */
	} s;
	struct cvmx_sata_uahc_px_tfd_s cn70xx;
	struct cvmx_sata_uahc_px_tfd_s cn70xxp1;
	struct cvmx_sata_uahc_px_tfd_s cn73xx;
};
typedef union cvmx_sata_uahc_px_tfd cvmx_sata_uahc_px_tfd_t;
/**
 * cvmx_sata_uahc_gbl_timer1ms
 *
 * 1ms timer value in bits 19:0.
 */
union cvmx_sata_uahc_gbl_timer1ms {
	u32 u32;
	struct cvmx_sata_uahc_gbl_timer1ms_s {
		u32 reserved_20_31 : 12;
		u32 timv : 20; /* bits 19:0 */
	} s;
	struct cvmx_sata_uahc_gbl_timer1ms_s cn70xx;
	struct cvmx_sata_uahc_gbl_timer1ms_s cn70xxp1;
	struct cvmx_sata_uahc_gbl_timer1ms_s cn73xx;
};
typedef union cvmx_sata_uahc_gbl_timer1ms cvmx_sata_uahc_gbl_timer1ms_t;

View File

@ -0,0 +1,113 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* This file provides support for the processor local scratch memory.
* Scratch memory is byte addressable - all addresses are byte addresses.
*/
#ifndef __CVMX_SCRATCH_H__
#define __CVMX_SCRATCH_H__
/* Note: This define must be a long, not a long long in order to compile
without warnings for both 32bit and 64bit. */
#define CVMX_SCRATCH_BASE (-32768l) /* 0xffffffffffff8000 */
/* Scratch line for LMTST/LMTDMA on Octeon3 models */
#ifdef CVMX_CAVIUM_OCTEON3
#define CVMX_PKO_LMTLINE 2ull
#endif
/**
 * Reads an 8 bit value from the processor local scratchpad memory.
 * All scratchpad addresses are byte offsets applied to CVMX_SCRATCH_BASE.
 *
 * @param address byte address to read from
 *
 * @return value read
 */
static inline u8 cvmx_scratch_read8(u64 address)
{
	return *CASTPTR(volatile u8, CVMX_SCRATCH_BASE + address);
}
/**
 * Reads a 16 bit value from the processor local scratchpad memory.
 *
 * @param address byte address to read from
 *
 * @return value read
 */
static inline u16 cvmx_scratch_read16(u64 address)
{
	return *CASTPTR(volatile u16, CVMX_SCRATCH_BASE + address);
}
/**
 * Reads a 32 bit value from the processor local scratchpad memory.
 *
 * @param address byte address to read from
 *
 * @return value read
 */
static inline u32 cvmx_scratch_read32(u64 address)
{
	return *CASTPTR(volatile u32, CVMX_SCRATCH_BASE + address);
}
/**
 * Reads a 64 bit value from the processor local scratchpad memory.
 *
 * @param address byte address to read from
 *
 * @return value read
 */
static inline u64 cvmx_scratch_read64(u64 address)
{
	return *CASTPTR(volatile u64, CVMX_SCRATCH_BASE + address);
}
/**
 * Writes an 8 bit value to the processor local scratchpad memory.
 *
 * @param address byte address to write to
 * @param value   value to write (truncated to 8 bits)
 */
static inline void cvmx_scratch_write8(u64 address, u64 value)
{
	*CASTPTR(volatile u8, CVMX_SCRATCH_BASE + address) = (u8)value;
}
/**
 * Writes a 16 bit value to the processor local scratchpad memory.
 *
 * @param address byte address to write to
 * @param value   value to write (truncated to 16 bits)
 */
static inline void cvmx_scratch_write16(u64 address, u64 value)
{
	*CASTPTR(volatile u16, CVMX_SCRATCH_BASE + address) = (u16)value;
}
/**
 * Writes a 32 bit value to the processor local scratchpad memory.
 *
 * @param address byte address to write to
 * @param value   value to write (truncated to 32 bits)
 */
static inline void cvmx_scratch_write32(u64 address, u64 value)
{
	*CASTPTR(volatile u32, CVMX_SCRATCH_BASE + address) = (u32)value;
}
/**
 * Writes a 64 bit value to the processor local scratchpad memory.
 *
 * @param address byte address to write to
 * @param value   value to write
 */
static inline void cvmx_scratch_write64(u64 address, u64 value)
{
	*CASTPTR(volatile u64, CVMX_SCRATCH_BASE + address) = value;
}
#endif /* __CVMX_SCRATCH_H__ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,360 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* Configuration and status register (CSR) type definitions for
* Octeon smix.
*/
#ifndef __CVMX_SMIX_DEFS_H__
#define __CVMX_SMIX_DEFS_H__
static inline u64 CVMX_SMIX_CLK(unsigned long offset)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return 0x0001180000001818ull + (offset) * 256;
case OCTEON_CN78XX & OCTEON_FAMILY_MASK:
if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
return 0x0001180000003818ull + (offset) * 128;
if (OCTEON_IS_MODEL(OCTEON_CN78XX))
return 0x0001180000003818ull + (offset) * 128;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return 0x0001180000003818ull + (offset) * 128;
case OCTEON_CNF75XX & OCTEON_FAMILY_MASK:
case OCTEON_CN73XX & OCTEON_FAMILY_MASK:
return 0x0001180000003818ull + (offset) * 128;
}
return 0x0001180000003818ull + (offset) * 128;
}
/* Address of the SMI_X_CMD register for the given SMI bus */
static inline u64 CVMX_SMIX_CMD(unsigned long offset)
{
	switch (cvmx_get_octeon_family()) {
	case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
	case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
		return 0x0001180000001800ull + (offset) * 256;
	case OCTEON_CN78XX & OCTEON_FAMILY_MASK:
		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
			return 0x0001180000003800ull + (offset) * 128;
		if (OCTEON_IS_MODEL(OCTEON_CN78XX))
			return 0x0001180000003800ull + (offset) * 128;
		/* fall through -- same address/stride as CN68XX */
	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
		return 0x0001180000003800ull + (offset) * 128;
	case OCTEON_CNF75XX & OCTEON_FAMILY_MASK:
	case OCTEON_CN73XX & OCTEON_FAMILY_MASK:
		return 0x0001180000003800ull + (offset) * 128;
	}
	return 0x0001180000003800ull + (offset) * 128;
}
/* Address of the SMI_X_EN register for the given SMI bus */
static inline u64 CVMX_SMIX_EN(unsigned long offset)
{
	switch (cvmx_get_octeon_family()) {
	case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
	case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
		return 0x0001180000001820ull + (offset) * 256;
	case OCTEON_CN78XX & OCTEON_FAMILY_MASK:
		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
			return 0x0001180000003820ull + (offset) * 128;
		if (OCTEON_IS_MODEL(OCTEON_CN78XX))
			return 0x0001180000003820ull + (offset) * 128;
		/* fall through -- same address/stride as CN68XX */
	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
		return 0x0001180000003820ull + (offset) * 128;
	case OCTEON_CNF75XX & OCTEON_FAMILY_MASK:
	case OCTEON_CN73XX & OCTEON_FAMILY_MASK:
		return 0x0001180000003820ull + (offset) * 128;
	}
	return 0x0001180000003820ull + (offset) * 128;
}
/* Address of the SMI_X_RD_DAT register for the given SMI bus */
static inline u64 CVMX_SMIX_RD_DAT(unsigned long offset)
{
	switch (cvmx_get_octeon_family()) {
	case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
	case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
		return 0x0001180000001810ull + (offset) * 256;
	case OCTEON_CN78XX & OCTEON_FAMILY_MASK:
		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
			return 0x0001180000003810ull + (offset) * 128;
		if (OCTEON_IS_MODEL(OCTEON_CN78XX))
			return 0x0001180000003810ull + (offset) * 128;
		/* fall through -- same address/stride as CN68XX */
	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
		return 0x0001180000003810ull + (offset) * 128;
	case OCTEON_CNF75XX & OCTEON_FAMILY_MASK:
	case OCTEON_CN73XX & OCTEON_FAMILY_MASK:
		return 0x0001180000003810ull + (offset) * 128;
	}
	return 0x0001180000003810ull + (offset) * 128;
}
/* Address of the SMI_X_WR_DAT register for the given SMI bus */
static inline u64 CVMX_SMIX_WR_DAT(unsigned long offset)
{
	switch (cvmx_get_octeon_family()) {
	case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
	case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
	case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
	case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
	case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
		return 0x0001180000001808ull + (offset) * 256;
	case OCTEON_CN78XX & OCTEON_FAMILY_MASK:
		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
			return 0x0001180000003808ull + (offset) * 128;
		if (OCTEON_IS_MODEL(OCTEON_CN78XX))
			return 0x0001180000003808ull + (offset) * 128;
		/* fall through -- same address/stride as CN68XX */
	case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
		return 0x0001180000003808ull + (offset) * 128;
	case OCTEON_CNF75XX & OCTEON_FAMILY_MASK:
	case OCTEON_CN73XX & OCTEON_FAMILY_MASK:
		return 0x0001180000003808ull + (offset) * 128;
	}
	return 0x0001180000003808ull + (offset) * 128;
}
/**
 * cvmx_smi#_clk
 *
 * This register determines the SMI timing characteristics.
 * If software wants to change SMI CLK timing parameters ([SAMPLE]/[SAMPLE_HI]), software
 * must delay the SMI_()_CLK CSR write by at least 512 coprocessor-clock cycles after the
 * previous SMI operation is finished.
 *
 * Bit-field order is MSB-first; reserved_a_b names give the bit spans.
 */
union cvmx_smix_clk {
	u64 u64;
	struct cvmx_smix_clk_s {
		u64 reserved_25_63 : 39;
		u64 mode : 1;	     /* bit 24 */
		u64 reserved_21_23 : 3;
		u64 sample_hi : 5;   /* bits 20:16 */
		u64 sample_mode : 1; /* bit 15 */
		u64 reserved_14_14 : 1;
		u64 clk_idle : 1;    /* bit 13 */
		u64 preamble : 1;    /* bit 12 */
		u64 sample : 4;	     /* bits 11:8 */
		u64 phase : 8;	     /* bits 7:0 */
	} s;
	/* Early-chip layout: no MODE bit, register stops at SAMPLE_HI */
	struct cvmx_smix_clk_cn30xx {
		u64 reserved_21_63 : 43;
		u64 sample_hi : 5;
		u64 sample_mode : 1;
		u64 reserved_14_14 : 1;
		u64 clk_idle : 1;
		u64 preamble : 1;
		u64 sample : 4;
		u64 phase : 8;
	} cn30xx;
	struct cvmx_smix_clk_cn30xx cn31xx;
	struct cvmx_smix_clk_cn30xx cn38xx;
	struct cvmx_smix_clk_cn30xx cn38xxp2;
	struct cvmx_smix_clk_s cn50xx;
	struct cvmx_smix_clk_s cn52xx;
	struct cvmx_smix_clk_s cn52xxp1;
	struct cvmx_smix_clk_s cn56xx;
	struct cvmx_smix_clk_s cn56xxp1;
	struct cvmx_smix_clk_cn30xx cn58xx;
	struct cvmx_smix_clk_cn30xx cn58xxp1;
	struct cvmx_smix_clk_s cn61xx;
	struct cvmx_smix_clk_s cn63xx;
	struct cvmx_smix_clk_s cn63xxp1;
	struct cvmx_smix_clk_s cn66xx;
	struct cvmx_smix_clk_s cn68xx;
	struct cvmx_smix_clk_s cn68xxp1;
	struct cvmx_smix_clk_s cn70xx;
	struct cvmx_smix_clk_s cn70xxp1;
	struct cvmx_smix_clk_s cn73xx;
	struct cvmx_smix_clk_s cn78xx;
	struct cvmx_smix_clk_s cn78xxp1;
	struct cvmx_smix_clk_s cnf71xx;
	struct cvmx_smix_clk_s cnf75xx;
};
typedef union cvmx_smix_clk cvmx_smix_clk_t;
/**
 * cvmx_smi#_cmd
 *
 * This register forces a read or write command to the PHY. Write operations to this register
 * create SMI transactions. Software will poll (depending on the transaction type).
 */
union cvmx_smix_cmd {
	u64 u64;
	struct cvmx_smix_cmd_s {
		u64 reserved_18_63 : 46;
		u64 phy_op : 2;	 /* bits 17:16 */
		u64 reserved_13_15 : 3;
		u64 phy_adr : 5; /* bits 12:8 */
		u64 reserved_5_7 : 3;
		u64 reg_adr : 5; /* bits 4:0 */
	} s;
	/* Early-chip layout: PHY_OP is a single bit */
	struct cvmx_smix_cmd_cn30xx {
		u64 reserved_17_63 : 47;
		u64 phy_op : 1;
		u64 reserved_13_15 : 3;
		u64 phy_adr : 5;
		u64 reserved_5_7 : 3;
		u64 reg_adr : 5;
	} cn30xx;
	struct cvmx_smix_cmd_cn30xx cn31xx;
	struct cvmx_smix_cmd_cn30xx cn38xx;
	struct cvmx_smix_cmd_cn30xx cn38xxp2;
	struct cvmx_smix_cmd_s cn50xx;
	struct cvmx_smix_cmd_s cn52xx;
	struct cvmx_smix_cmd_s cn52xxp1;
	struct cvmx_smix_cmd_s cn56xx;
	struct cvmx_smix_cmd_s cn56xxp1;
	struct cvmx_smix_cmd_cn30xx cn58xx;
	struct cvmx_smix_cmd_cn30xx cn58xxp1;
	struct cvmx_smix_cmd_s cn61xx;
	struct cvmx_smix_cmd_s cn63xx;
	struct cvmx_smix_cmd_s cn63xxp1;
	struct cvmx_smix_cmd_s cn66xx;
	struct cvmx_smix_cmd_s cn68xx;
	struct cvmx_smix_cmd_s cn68xxp1;
	struct cvmx_smix_cmd_s cn70xx;
	struct cvmx_smix_cmd_s cn70xxp1;
	struct cvmx_smix_cmd_s cn73xx;
	struct cvmx_smix_cmd_s cn78xx;
	struct cvmx_smix_cmd_s cn78xxp1;
	struct cvmx_smix_cmd_s cnf71xx;
	struct cvmx_smix_cmd_s cnf75xx;
};
typedef union cvmx_smix_cmd cvmx_smix_cmd_t;
/**
 * cvmx_smi#_en
 *
 * Enables the SMI interface; single enable bit at bit 0.
 *
 */
union cvmx_smix_en {
	u64 u64;
	struct cvmx_smix_en_s {
		u64 reserved_1_63 : 63;
		u64 en : 1; /* bit 0 */
	} s;
	struct cvmx_smix_en_s cn30xx;
	struct cvmx_smix_en_s cn31xx;
	struct cvmx_smix_en_s cn38xx;
	struct cvmx_smix_en_s cn38xxp2;
	struct cvmx_smix_en_s cn50xx;
	struct cvmx_smix_en_s cn52xx;
	struct cvmx_smix_en_s cn52xxp1;
	struct cvmx_smix_en_s cn56xx;
	struct cvmx_smix_en_s cn56xxp1;
	struct cvmx_smix_en_s cn58xx;
	struct cvmx_smix_en_s cn58xxp1;
	struct cvmx_smix_en_s cn61xx;
	struct cvmx_smix_en_s cn63xx;
	struct cvmx_smix_en_s cn63xxp1;
	struct cvmx_smix_en_s cn66xx;
	struct cvmx_smix_en_s cn68xx;
	struct cvmx_smix_en_s cn68xxp1;
	struct cvmx_smix_en_s cn70xx;
	struct cvmx_smix_en_s cn70xxp1;
	struct cvmx_smix_en_s cn73xx;
	struct cvmx_smix_en_s cn78xx;
	struct cvmx_smix_en_s cn78xxp1;
	struct cvmx_smix_en_s cnf71xx;
	struct cvmx_smix_en_s cnf75xx;
};
typedef union cvmx_smix_en cvmx_smix_en_t;
/**
 * cvmx_smi#_rd_dat
 *
 * This register contains the data in a read operation.
 *
 * Bit-fields are declared most-significant bit first; reserved_18_63
 * covers bits [63:18].
 */
union cvmx_smix_rd_dat {
	u64 u64;	/* Whole-register access */
	struct cvmx_smix_rd_dat_s {
		u64 reserved_18_63 : 46;
		u64 pending : 1;	/* Presumably: read transaction still in flight — confirm in HRM */
		u64 val : 1;		/* Presumably: DAT holds valid read data — confirm in HRM */
		u64 dat : 16;		/* 16-bit read data from the PHY */
	} s;
	/* Identical layout on every supported chip */
	struct cvmx_smix_rd_dat_s cn30xx;
	struct cvmx_smix_rd_dat_s cn31xx;
	struct cvmx_smix_rd_dat_s cn38xx;
	struct cvmx_smix_rd_dat_s cn38xxp2;
	struct cvmx_smix_rd_dat_s cn50xx;
	struct cvmx_smix_rd_dat_s cn52xx;
	struct cvmx_smix_rd_dat_s cn52xxp1;
	struct cvmx_smix_rd_dat_s cn56xx;
	struct cvmx_smix_rd_dat_s cn56xxp1;
	struct cvmx_smix_rd_dat_s cn58xx;
	struct cvmx_smix_rd_dat_s cn58xxp1;
	struct cvmx_smix_rd_dat_s cn61xx;
	struct cvmx_smix_rd_dat_s cn63xx;
	struct cvmx_smix_rd_dat_s cn63xxp1;
	struct cvmx_smix_rd_dat_s cn66xx;
	struct cvmx_smix_rd_dat_s cn68xx;
	struct cvmx_smix_rd_dat_s cn68xxp1;
	struct cvmx_smix_rd_dat_s cn70xx;
	struct cvmx_smix_rd_dat_s cn70xxp1;
	struct cvmx_smix_rd_dat_s cn73xx;
	struct cvmx_smix_rd_dat_s cn78xx;
	struct cvmx_smix_rd_dat_s cn78xxp1;
	struct cvmx_smix_rd_dat_s cnf71xx;
	struct cvmx_smix_rd_dat_s cnf75xx;
};
typedef union cvmx_smix_rd_dat cvmx_smix_rd_dat_t;
/**
 * cvmx_smi#_wr_dat
 *
 * This register provides the data for a write operation.
 *
 * Same field layout as cvmx_smi#_rd_dat: fields declared MSB first,
 * reserved_18_63 covering bits [63:18].
 */
union cvmx_smix_wr_dat {
	u64 u64;	/* Whole-register access */
	struct cvmx_smix_wr_dat_s {
		u64 reserved_18_63 : 46;
		u64 pending : 1;	/* Presumably: write transaction still in flight — confirm in HRM */
		u64 val : 1;		/* Presumably: DAT contents are valid — confirm in HRM */
		u64 dat : 16;		/* 16-bit data to write to the PHY */
	} s;
	/* Identical layout on every supported chip */
	struct cvmx_smix_wr_dat_s cn30xx;
	struct cvmx_smix_wr_dat_s cn31xx;
	struct cvmx_smix_wr_dat_s cn38xx;
	struct cvmx_smix_wr_dat_s cn38xxp2;
	struct cvmx_smix_wr_dat_s cn50xx;
	struct cvmx_smix_wr_dat_s cn52xx;
	struct cvmx_smix_wr_dat_s cn52xxp1;
	struct cvmx_smix_wr_dat_s cn56xx;
	struct cvmx_smix_wr_dat_s cn56xxp1;
	struct cvmx_smix_wr_dat_s cn58xx;
	struct cvmx_smix_wr_dat_s cn58xxp1;
	struct cvmx_smix_wr_dat_s cn61xx;
	struct cvmx_smix_wr_dat_s cn63xx;
	struct cvmx_smix_wr_dat_s cn63xxp1;
	struct cvmx_smix_wr_dat_s cn66xx;
	struct cvmx_smix_wr_dat_s cn68xx;
	struct cvmx_smix_wr_dat_s cn68xxp1;
	struct cvmx_smix_wr_dat_s cn70xx;
	struct cvmx_smix_wr_dat_s cn70xxp1;
	struct cvmx_smix_wr_dat_s cn73xx;
	struct cvmx_smix_wr_dat_s cn78xx;
	struct cvmx_smix_wr_dat_s cn78xxp1;
	struct cvmx_smix_wr_dat_s cnf71xx;
	struct cvmx_smix_wr_dat_s cnf75xx;
};
typedef union cvmx_smix_wr_dat cvmx_smix_wr_dat_t;
#endif

View File

@ -0,0 +1,61 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef __CVMX_SRIOMAINTX_DEFS_H__
#define __CVMX_SRIOMAINTX_DEFS_H__
/**
 * CVMX_SRIOMAINTX_PORT_0_CTL2 - CSR address of SRIOMAINT(offset)_PORT_0_CTL2
 * @offset: SRIO interface index
 *
 * The register base and per-interface stride differ between Octeon
 * families; any family not matched below gets the CNF75XX-style address.
 */
static inline u64 CVMX_SRIOMAINTX_PORT_0_CTL2(unsigned long offset)
{
	const u64 family = cvmx_get_octeon_family();

	if (family == (OCTEON_CN66XX & OCTEON_FAMILY_MASK))
		return 0x0000000000000154ull;	/* single interface, no stride */
	if (family == (OCTEON_CN63XX & OCTEON_FAMILY_MASK))
		return 0x0000000000000154ull + offset * 0x100000000ull;
	/* CNF75XX and all other families */
	return 0x0000010000000154ull + offset * 0x100000000ull;
}
/**
 * cvmx_sriomaint#_port_0_ctl2
 *
 * These registers are accessed when a local processor or an external
 * device wishes to examine the port baudrate information. The automatic
 * baud rate feature is not available on this device. The SUP_* and ENB_*
 * fields are set directly by the SRIO()_STATUS_REG[SPD] bits as a
 * reference but otherwise have no effect.
 *
 * WARNING!! Writes to this register will reinitialize the SRIO link.
 *
 * 32-bit CSR; bit-fields declared most-significant bit first. The sup_*
 * / enb_* pairs advertise supported/enabled baud rates (125g = 1.25
 * Gbaud, 250g = 2.5 Gbaud, etc.).
 */
union cvmx_sriomaintx_port_0_ctl2 {
	u32 u32;	/* Whole-register access */
	struct cvmx_sriomaintx_port_0_ctl2_s {
		u32 sel_baud : 4;	/* Selected baud rate */
		u32 baud_sup : 1;
		u32 baud_enb : 1;
		u32 sup_125g : 1;	/* 1.25 Gbaud supported/enabled pair */
		u32 enb_125g : 1;
		u32 sup_250g : 1;	/* 2.5 Gbaud */
		u32 enb_250g : 1;
		u32 sup_312g : 1;	/* 3.125 Gbaud */
		u32 enb_312g : 1;
		/* NOTE(review): "sub_500g" is almost certainly a typo for
		 * sup_500g (cf. the sup_*/enb_* pattern above). Name kept
		 * as-is to avoid breaking existing users of this field. */
		u32 sub_500g : 1;	/* 5.0 Gbaud */
		u32 enb_500g : 1;
		u32 sup_625g : 1;	/* 6.25 Gbaud */
		u32 enb_625g : 1;
		u32 reserved_2_15 : 14;
		u32 tx_emph : 1;	/* Transmit emphasis control/status */
		u32 emph_en : 1;
	} s;
	/* Identical layout on every chip with SRIO */
	struct cvmx_sriomaintx_port_0_ctl2_s cn63xx;
	struct cvmx_sriomaintx_port_0_ctl2_s cn63xxp1;
	struct cvmx_sriomaintx_port_0_ctl2_s cn66xx;
	struct cvmx_sriomaintx_port_0_ctl2_s cnf75xx;
};
typedef union cvmx_sriomaintx_port_0_ctl2 cvmx_sriomaintx_port_0_ctl2_t;
#endif

View File

@ -0,0 +1,44 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef __CVMX_SRIOX_DEFS_H__
#define __CVMX_SRIOX_DEFS_H__
/* CSR address of SRIO(offset)_STATUS_REG; 0x1000000-byte stride per
 * interface, offset masked to 0..3. */
#define CVMX_SRIOX_STATUS_REG(offset) (0x00011800C8000100ull + ((offset) & 3) * 0x1000000ull)
/**
 * cvmx_srio#_status_reg
 *
 * The SRIO field displays if the port has been configured for SRIO operation. This register
 * can be read regardless of whether the SRIO is selected or being reset. Although some other
 * registers can be accessed while the ACCESS bit is zero (see individual registers for details),
 * the majority of SRIO registers and all the SRIOMAINT registers can be used only when the
 * ACCESS bit is asserted.
 *
 * This register is reset by the coprocessor-clock reset.
 *
 * Bit-fields declared most-significant bit first (reserved_9_63 = bits [63:9]).
 */
union cvmx_sriox_status_reg {
	u64 u64;	/* Whole-register access */
	/* Full layout (cnf75xx) */
	struct cvmx_sriox_status_reg_s {
		u64 reserved_9_63 : 55;
		u64 host : 1;		/* Presumably host vs. endpoint role — confirm in HRM */
		u64 spd : 4;		/* Speed selection; mirrored into SRIOMAINT SUP_*/ENB_* (see port_0_ctl2) */
		u64 run_type : 2;
		u64 access : 1;		/* Must be set before most SRIO/SRIOMAINT registers are usable */
		u64 srio : 1;		/* Port is configured for SRIO operation */
	} s;
	/* cn63xx/cn66xx expose only ACCESS and SRIO */
	struct cvmx_sriox_status_reg_cn63xx {
		u64 reserved_2_63 : 62;
		u64 access : 1;		/* Must be set before most SRIO/SRIOMAINT registers are usable */
		u64 srio : 1;		/* Port is configured for SRIO operation */
	} cn63xx;
	struct cvmx_sriox_status_reg_cn63xx cn63xxp1;
	struct cvmx_sriox_status_reg_cn63xx cn66xx;
	struct cvmx_sriox_status_reg_s cnf75xx;
};
typedef union cvmx_sriox_status_reg cvmx_sriox_status_reg_t;
#endif

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

Some files were not shown because too many files have changed in this diff Show More