- Octeon TX: Add NAND driver (Suneel)
- Octeon TX: Add NIC driver (Suneel)
- Octeon TX2: Add NIC driver (Suneel)
- Armada 8040: Add iEi Puzzle-M80 board support (Luka)
- Armada A37xx SPI: Add support for CS-GPIO (George)
- Espressobin: Use Linux model/compatible strings (Andre)
- Espressobin: Add armada-3720-espressobin-emmc.dts from Linux (Andre)
- Armada A37xx: Small cleanup of config header (Pali)
Tom Rini 2020-10-14 13:51:56 -04:00
commit 0f35d96bfd
46 changed files with 14918 additions and 179 deletions


@@ -202,6 +202,7 @@ dtb-$(CONFIG_ARCH_TEGRA) += tegra20-harmony.dtb \
dtb-$(CONFIG_ARCH_MVEBU) += \
armada-3720-db.dtb \
armada-3720-espressobin.dtb \
armada-3720-espressobin-emmc.dtb \
armada-3720-turris-mox.dtb \
armada-3720-uDPU.dtb \
armada-375-db.dtb \
@@ -218,6 +219,7 @@ dtb-$(CONFIG_ARCH_MVEBU) += \
armada-8040-clearfog-gt-8k.dtb \
armada-8040-db.dtb \
armada-8040-mcbin.dtb \
armada-8040-puzzle-m801.dtb \
armada-xp-crs305-1g-4s.dtb \
armada-xp-crs305-1g-4s-bit.dtb \
armada-xp-crs326-24g-2s.dtb \


@@ -0,0 +1,44 @@
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Device Tree file for Globalscale Marvell ESPRESSOBin Board with eMMC
* Copyright (C) 2018 Marvell
*
* Romain Perier <romain.perier@free-electrons.com>
* Konstantin Porotchkin <kostap@marvell.com>
*
*/
/*
* Schematic available at http://espressobin.net/wp-content/uploads/2017/08/ESPRESSObin_V5_Schematics.pdf
*/
/dts-v1/;
#include "armada-3720-espressobin.dtsi"
/ {
model = "Globalscale Marvell ESPRESSOBin Board (eMMC)";
compatible = "globalscale,espressobin-emmc", "globalscale,espressobin",
"marvell,armada3720", "marvell,armada3710";
};
/* U11 */
&sdhci1 {
non-removable;
bus-width = <8>;
mmc-ddr-1_8v;
mmc-hs400-1_8v;
marvell,xenon-emmc;
marvell,xenon-tun-count = <9>;
marvell,pad-type = "fixed-1-8v";
pinctrl-names = "default";
pinctrl-0 = <&mmc_pins>;
status = "okay";
#address-cells = <1>;
#size-cells = <0>;
mmccard: mmccard@0 {
compatible = "mmc-card";
reg = <0>;
};
};


@@ -1,170 +1,20 @@
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Device Tree file for Marvell Armada 3720 community board
* (ESPRESSOBin)
* Device Tree file for Globalscale Marvell ESPRESSOBin Board
* Copyright (C) 2016 Marvell
*
* Gregory CLEMENT <gregory.clement@free-electrons.com>
* Konstantin Porotchkin <kostap@marvell.com>
* Romain Perier <romain.perier@free-electrons.com>
*
* This file is dual-licensed: you can use it either under the terms
* of the GPL or the X11 license, at your option. Note that this dual
* licensing only applies to this file, and not this project as a
* whole.
*
* a) This file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This file is distributed in the hope that it will be useful
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* Or, alternatively
*
* b) Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use
* copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* Schematic available at http://espressobin.net/wp-content/uploads/2017/08/ESPRESSObin_V5_Schematics.pdf
*/
/dts-v1/;
#include "armada-372x.dtsi"
#include "armada-3720-espressobin.dtsi"
/ {
model = "Marvell Armada 3720 Community Board ESPRESSOBin";
compatible = "marvell,armada-3720-espressobin", "marvell,armada3720", "marvell,armada3710";
chosen {
stdout-path = "serial0:115200n8";
};
aliases {
ethernet0 = &eth0;
i2c0 = &i2c0;
spi0 = &spi0;
};
memory {
device_type = "memory";
reg = <0x00000000 0x00000000 0x00000000 0x20000000>;
};
vcc_sd_reg0: regulator@0 {
compatible = "regulator-gpio";
regulator-name = "vcc_sd0";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
regulator-type = "voltage";
states = <1800000 0x1
3300000 0x0>;
gpios = <&gpionb 4 GPIO_ACTIVE_HIGH>;
};
};
&comphy {
max-lanes = <3>;
phy0 {
phy-type = <PHY_TYPE_USB3_HOST0>;
phy-speed = <PHY_SPEED_5G>;
};
phy1 {
phy-type = <PHY_TYPE_PEX0>;
phy-speed = <PHY_SPEED_2_5G>;
};
phy2 {
phy-type = <PHY_TYPE_SATA0>;
phy-speed = <PHY_SPEED_5G>;
};
};
&eth0 {
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&rgmii_pins>, <&smi_pins>;
phy-mode = "rgmii";
phy_addr = <0x1>;
fixed-link {
speed = <1000>;
full-duplex;
};
};
&i2c0 {
pinctrl-names = "default";
pinctrl-0 = <&i2c1_pins>;
status = "okay";
};
/* CON3 */
&sata {
status = "okay";
};
&sdhci0 {
pinctrl-names = "default";
pinctrl-0 = <&sdio_pins>;
bus-width = <4>;
cd-gpios = <&gpionb 3 GPIO_ACTIVE_LOW>;
vqmmc-supply = <&vcc_sd_reg0>;
status = "okay";
};
&spi0 {
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&spi_quad_pins>;
spi-flash@0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "st,m25p128", "jedec,spi-nor";
reg = <0>; /* Chip select 0 */
spi-max-frequency = <50000000>;
m25p,fast-read;
};
};
/* Exported on the micro USB connector CON32 through an FTDI */
&uart0 {
pinctrl-names = "default";
pinctrl-0 = <&uart1_pins>;
status = "okay";
};
/* CON29 */
&usb2 {
status = "okay";
};
/* CON31 */
&usb3 {
status = "okay";
};
&pcie0 {
pinctrl-names = "default";
pinctrl-0 = <&pcie_pins>;
reset-gpios = <&gpiosb 3 GPIO_ACTIVE_LOW>;
status = "okay";
model = "Globalscale Marvell ESPRESSOBin Board";
compatible = "globalscale,espressobin", "marvell,armada3720", "marvell,armada3710";
};


@@ -0,0 +1,167 @@
/*
* Device Tree file for Marvell Armada 3720 community board
* (ESPRESSOBin)
* Copyright (C) 2016 Marvell
*
* Gregory CLEMENT <gregory.clement@free-electrons.com>
* Konstantin Porotchkin <kostap@marvell.com>
*
* This file is dual-licensed: you can use it either under the terms
* of the GPL or the X11 license, at your option. Note that this dual
* licensing only applies to this file, and not this project as a
* whole.
*
* a) This file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This file is distributed in the hope that it will be useful
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* Or, alternatively
*
* b) Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use
* copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/dts-v1/;
#include "armada-372x.dtsi"
/ {
chosen {
stdout-path = "serial0:115200n8";
};
aliases {
ethernet0 = &eth0;
i2c0 = &i2c0;
spi0 = &spi0;
};
memory {
device_type = "memory";
reg = <0x00000000 0x00000000 0x00000000 0x20000000>;
};
vcc_sd_reg0: regulator@0 {
compatible = "regulator-gpio";
regulator-name = "vcc_sd0";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
regulator-type = "voltage";
states = <1800000 0x1
3300000 0x0>;
gpios = <&gpionb 4 GPIO_ACTIVE_HIGH>;
};
};
&comphy {
max-lanes = <3>;
phy0 {
phy-type = <PHY_TYPE_USB3_HOST0>;
phy-speed = <PHY_SPEED_5G>;
};
phy1 {
phy-type = <PHY_TYPE_PEX0>;
phy-speed = <PHY_SPEED_2_5G>;
};
phy2 {
phy-type = <PHY_TYPE_SATA0>;
phy-speed = <PHY_SPEED_5G>;
};
};
&eth0 {
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&rgmii_pins>, <&smi_pins>;
phy-mode = "rgmii";
phy_addr = <0x1>;
fixed-link {
speed = <1000>;
full-duplex;
};
};
&i2c0 {
pinctrl-names = "default";
pinctrl-0 = <&i2c1_pins>;
status = "okay";
};
/* CON3 */
&sata {
status = "okay";
};
&sdhci0 {
pinctrl-names = "default";
pinctrl-0 = <&sdio_pins>;
bus-width = <4>;
cd-gpios = <&gpionb 3 GPIO_ACTIVE_LOW>;
vqmmc-supply = <&vcc_sd_reg0>;
status = "okay";
};
&spi0 {
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&spi_quad_pins>;
spi-flash@0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "st,m25p128", "jedec,spi-nor";
reg = <0>; /* Chip select 0 */
spi-max-frequency = <50000000>;
m25p,fast-read;
};
};
/* Exported on the micro USB connector CON32 through an FTDI */
&uart0 {
pinctrl-names = "default";
pinctrl-0 = <&uart1_pins>;
status = "okay";
};
/* CON29 */
&usb2 {
status = "okay";
};
/* CON31 */
&usb3 {
status = "okay";
};
&pcie0 {
pinctrl-names = "default";
pinctrl-0 = <&pcie_pins>;
reset-gpios = <&gpiosb 3 GPIO_ACTIVE_LOW>;
status = "okay";
};


@@ -0,0 +1,389 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 Marvell International Ltd.
* Copyright (C) 2020 Sartura Ltd.
*/
#include "armada-8040.dtsi" /* include SoC device tree */
/ {
model = "iEi-Puzzle-M801";
compatible = "marvell,armada8040-puzzle-m801",
"marvell,armada8040";
chosen {
stdout-path = "serial0:115200n8";
};
aliases {
i2c0 = &i2c0;
i2c1 = &cpm_i2c0;
i2c2 = &cpm_i2c1;
i2c3 = &i2c_switch;
spi0 = &spi0;
gpio0 = &ap_gpio0;
gpio1 = &cpm_gpio0;
gpio2 = &cpm_gpio1;
gpio3 = &sfpplus_gpio;
};
memory@00000000 {
device_type = "memory";
reg = <0x0 0x0 0x0 0x80000000>;
};
simple-bus {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <0>;
reg_usb3h0_vbus: usb3-vbus0 {
compatible = "regulator-fixed";
pinctrl-names = "default";
pinctrl-0 = <&cpm_xhci_vbus_pins>;
regulator-name = "reg-usb3h0-vbus";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
startup-delay-us = <500000>;
enable-active-high;
regulator-always-on;
regulator-boot-on;
gpio = <&cpm_gpio1 15 GPIO_ACTIVE_HIGH>; /* GPIO[47] */
};
};
};
&i2c0 {
status = "okay";
clock-frequency = <100000>;
rtc@32 {
compatible = "epson,rx8010";
reg = <0x32>;
};
};
&uart0 {
status = "okay";
};
&ap_pinctl {
/*
* MPP Bus:
* AP SPI0 [0-3]
* AP I2C [4-5]
* AP GPIO [6]
* AP UART 1 RX/TX [7-8]
* AP GPIO [9-10]
* AP GPIO [12]
* UART0 [11,19]
*/
/* 0 1 2 3 4 5 6 7 8 9 */
pin-func = < 3 3 3 3 3 3 3 3 3 0
0 3 0 0 0 0 0 0 0 3 >;
};
&cpm_pinctl {
/*
* MPP Bus:
* [0-31] = 0xff: Keep default CP0_shared_pins:
* [11] CLKOUT_MPP_11 (out)
* [23] LINK_RD_IN_CP2CP (in)
* [25] CLKOUT_MPP_25 (out)
* [29] AVS_FB_IN_CP2CP (in)
* [32,34] SMI
* [33] MSS power down
* [35-38] CP0 I2C1 and I2C0
* [39] MSS CKE Enable
* [40,41] CP0 UART1 TX/RX
* [42,43] XSMI (controls two 10G phys)
* [47] USB VBUS EN
* [48] FAN PWM
* [49] 10G port 1 interrupt
* [50] 10G port 0 interrupt
* [51] 2.5G SFP TX fault
* [52] PCIe reset out
* [53] 2.5G SFP mode
* [54] 2.5G SFP LOS
* [55] Micro SD card detect
* [56-61] Micro SD
* [62] CP1 SFI SFP FAULT
*/
/* 0 1 2 3 4 5 6 7 8 9 */
pin-func = < 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff
0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff
0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff
0xff 0 7 0xa 7 2 2 2 2 0xa
7 7 8 8 0 0 0 0 0 0
0 0 0 0 0 0 0xe 0xe 0xe 0xe
0xe 0xe 0 >;
cpm_xhci_vbus_pins: cpm-xhci-vbus-pins {
marvell,pins = < 47 >;
marvell,function = <0>;
};
cpm_pcie_reset_pins: cpm-pcie-reset-pins {
marvell,pins = < 52 >;
marvell,function = <0>;
};
};
&cpm_sdhci0 {
pinctrl-names = "default";
pinctrl-0 = <&cpm_sdhci_pins>;
bus-width = <4>;
status = "okay";
};
&cpm_pcie0 {
num-lanes = <1>;
pinctrl-names = "default";
pinctrl-0 = <&cpm_pcie_reset_pins>;
marvell,reset-gpio = <&cpm_gpio1 20 GPIO_ACTIVE_LOW>; /* GPIO[52] */
status = "okay";
};
&cpm_i2c0 {
pinctrl-names = "default";
pinctrl-0 = <&cpm_i2c0_pins>;
status = "okay";
clock-frequency = <100000>;
sfpplus_gpio: gpio@21 {
compatible = "nxp,pca9555";
reg = <0x21>;
gpio-controller;
#gpio-cells = <2>;
};
};
&cpm_i2c1 {
pinctrl-names = "default";
pinctrl-0 = <&cpm_i2c1_pins>;
status = "okay";
clock-frequency = <100000>;
i2c_switch: i2c-switch@70 {
compatible = "nxp,pca9544";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x70>;
};
};
&cpm_sata0 {
status = "okay";
};
&cpm_ethernet {
pinctrl-names = "default";
status = "okay";
};
&cpm_mdio {
status = "okay";
cpm_ge_phy0: ethernet-phy@1 {
reg = <0>;
};
cpm_ge_phy1: ethernet-phy@2 {
reg = <1>;
};
};
&cpm_eth0 {
status = "okay";
phy-mode = "sfi";
};
&cpm_eth1 {
status = "okay";
phy-mode = "sgmii";
phy = <&cpm_ge_phy0>;
};
&cpm_eth2 {
status = "okay";
phy-mode = "sgmii";
phy = <&cpm_ge_phy1>;
};
&cpm_comphy {
/*
* CP0 Serdes Configuration:
* Lane 0: PCIe0 (x1)
* Lane 1: SGMII2
* Lane 2: SATA0
* Lane 3: SGMII1
* Lane 4: SFI (10G)
* Lane 5: SATA1
*/
phy0 {
phy-type = <PHY_TYPE_PEX0>;
};
phy1 {
phy-type = <PHY_TYPE_SGMII2>;
phy-speed = <PHY_SPEED_1_25G>;
};
phy2 {
phy-type = <PHY_TYPE_SATA0>;
};
phy3 {
phy-type = <PHY_TYPE_SGMII1>;
phy-speed = <PHY_SPEED_1_25G>;
};
phy4 {
phy-type = <PHY_TYPE_SFI>;
};
phy5 {
phy-type = <PHY_TYPE_SATA1>;
};
};
&cps_mdio {
status = "okay";
cps_ge_phy0: ethernet-phy@3 {
reg = <1>;
};
cps_ge_phy1: ethernet-phy@4 {
reg = <0>;
};
};
&cps_pcie0 {
num-lanes = <2>;
pinctrl-names = "default";
status = "okay";
};
&cps_usb3_0 {
vbus-supply = <&reg_usb3h0_vbus>;
status = "okay";
};
&cps_utmi0 {
status = "okay";
};
&cps_ethernet {
status = "okay";
};
&cps_eth0 {
status = "okay";
phy-mode = "sfi";
};
&cps_eth1 {
status = "okay";
phy = <&cps_ge_phy0>;
phy-mode = "sgmii";
};
&cps_eth2 {
status = "okay";
phy = <&cps_ge_phy1>;
phy-mode = "sgmii";
};
&cps_pinctl {
/*
* MPP Bus:
* [0-5] TDM
* [6,7] CP1_UART 0
* [8] CP1 10G SFP LOS
* [9] CP1 10G PHY RESET
* [10] CP1 10G SFP TX Disable
* [11] CP1 10G SFP Mode
* [12] SPI1 CS1n
* [13] SPI1 MISO (TDM and SPI ROM shared)
* [14] SPI1 CS0n
* [15] SPI1 MOSI (TDM and SPI ROM shared)
* [16] SPI1 CLK (TDM and SPI ROM shared)
* [24] CP1 2.5G SFP TX Disable
* [26] CP0 10G SFP TX Fault
* [27] CP0 10G SFP Mode
* [28] CP0 10G SFP LOS
* [29] CP0 10G SFP TX Disable
* [30] USB Over current indication
* [31] 10G Port 0 phy reset
* [32-62] = 0xff: Keep default CP1_shared_pins:
*/
/* 0 1 2 3 4 5 6 7 8 9 */
pin-func = < 0x4 0x4 0x4 0x4 0x4 0x4 0x8 0x8 0x0 0x0
0x0 0x0 0x3 0x3 0x3 0x3 0x3 0xff 0xff 0xff
0xff 0xff 0xff 0xff 0x0 0xff 0x0 0x0 0x0 0x0
0x0 0x0 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff
0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff
0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff 0xff
0xff 0xff 0xff>;
};
&spi0 {
status = "okay";
spi-flash@0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <10000000>;
partitions {
compatible = "fixed-partitions";
#address-cells = <1>;
#size-cells = <1>;
partition@u-boot {
reg = <0x00000000 0x001f0000>;
label = "u-boot";
};
partition@u-boot-env {
reg = <0x001f0000 0x00010000>;
label = "u-boot-env";
};
partition@ubi1 {
reg = <0x00200000 0x03f00000>;
label = "ubi1";
};
partition@ubi2 {
reg = <0x04100000 0x03f00000>;
label = "ubi2";
};
};
};
};
&cps_comphy {
/*
* CP1 Serdes Configuration:
* Lane 0: PCIe0 (x2)
* Lane 1: PCIe0 (x2)
* Lane 2: USB HOST 0
* Lane 3: SGMII1
* Lane 4: SFI (10G)
* Lane 5: SGMII2
*/
phy0 {
phy-type = <PHY_TYPE_PEX0>;
};
phy1 {
phy-type = <PHY_TYPE_PEX0>;
};
phy2 {
phy-type = <PHY_TYPE_USB3_HOST0>;
};
phy3 {
phy-type = <PHY_TYPE_SGMII1>;
phy-speed = <PHY_SPEED_1_25G>;
};
phy4 {
phy-type = <PHY_TYPE_SFI>;
};
phy5 {
phy-type = <PHY_TYPE_SGMII2>;
phy-speed = <PHY_SPEED_1_25G>;
};
};


@@ -88,14 +88,14 @@ int board_late_init(void)
if (env_get("fdtfile"))
return 0;
if (!of_machine_is_compatible("marvell,armada-3720-espressobin"))
if (!of_machine_is_compatible("globalscale,espressobin"))
return 0;
/* If the memory controller has been configured for DDR4, we're running on v7 */
ddr4 = ((readl(A3700_CH0_MC_CTRL2_REG) >> A3700_MC_CTRL2_SDRAM_TYPE_OFFS)
& A3700_MC_CTRL2_SDRAM_TYPE_MASK) == A3700_MC_CTRL2_SDRAM_TYPE_DDR4;
emmc = of_machine_is_compatible("marvell,armada-3720-espressobin-emmc");
emmc = of_machine_is_compatible("globalscale,espressobin-emmc");
if (ddr4 && emmc)
env_set("fdtfile", "marvell/armada-3720-espressobin-v7-emmc.dtb");
@@ -248,7 +248,7 @@ static int mii_multi_chip_mode_write(struct mii_dev *bus, int dev_smi_addr,
/* Bring-up board-specific network stuff */
int board_network_enable(struct mii_dev *bus)
{
if (!of_machine_is_compatible("marvell,armada-3720-espressobin"))
if (!of_machine_is_compatible("globalscale,espressobin"))
return 0;
/*
@@ -300,7 +300,7 @@ int ft_board_setup(void *blob, struct bd_info *bd)
int part_off;
/* Fill SPI MTD partitions for Linux kernel on Espressobin */
if (!of_machine_is_compatible("marvell,armada-3720-espressobin"))
if (!of_machine_is_compatible("globalscale,espressobin"))
return 0;
spi_off = fdt_node_offset_by_compatible(blob, -1, "jedec,spi-nor");


@@ -10,3 +10,9 @@ MACCHIATOBin BOARD
M: Konstantin Porotchkin <kostap@marvell.com>
S: Maintained
F: configs/mvebu_mcbin-88f8040_defconfig
Puzzle-M801 BOARD
M: Luka Kovacic <luka.kovacic@sartura.hr>
S: Maintained
F: configs/mvebu_puzzle-m801-88f8040_defconfig
F: arch/arm/dts/armada-8040-puzzle-m801.dts


@@ -34,6 +34,17 @@ DECLARE_GLOBAL_DATA_PTR;
#define I2C_IO_REG_CL ((1 << I2C_IO_REG_0_USB_H0_CL) | \
(1 << I2C_IO_REG_0_USB_H1_CL))
/*
* Information specific to the iEi Puzzle-M801 board.
*/
/* Internal configuration registers */
#define CP1_CONF_REG_BASE 0xf4440000
#define CONF_REG_MPP0 0x0
#define CONF_REG_MPP1 0x4
#define CONF_REG_MPP2 0x8
#define CONF_REG_MPP3 0xC
static int usb_enabled = 0;
/* Board specific xHCI dis-/enable code */
@@ -141,7 +152,14 @@ int board_xhci_enable(fdt_addr_t base)
int board_early_init_f(void)
{
/* Nothing to do (yet), perhaps later some pin-muxing etc */
/* Initialize some platform specific memory locations */
if (of_machine_is_compatible("marvell,armada8040-puzzle-m801")) {
/* MPP setup */
writel(0x00444444, CP1_CONF_REG_BASE + CONF_REG_MPP0);
writel(0x00000000, CP1_CONF_REG_BASE + CONF_REG_MPP1);
writel(0x00000000, CP1_CONF_REG_BASE + CONF_REG_MPP2);
writel(0x08888000, CP1_CONF_REG_BASE + CONF_REG_MPP3);
}
return 0;
}


@@ -0,0 +1,91 @@
CONFIG_ARM=y
CONFIG_ARCH_CPU_INIT=y
CONFIG_ARCH_MVEBU=y
CONFIG_SYS_TEXT_BASE=0x00000000
CONFIG_SYS_MALLOC_F_LEN=0x2000
CONFIG_TARGET_MVEBU_ARMADA_8K=y
CONFIG_ENV_SIZE=0x10000
CONFIG_ENV_SECT_SIZE=0x10000
CONFIG_ENV_OFFSET=0x1F0000
CONFIG_NR_DRAM_BANKS=2
CONFIG_DEBUG_UART_BASE=0xf0512000
CONFIG_DEBUG_UART_CLOCK=200000000
CONFIG_DEBUG_UART=y
CONFIG_AHCI=y
CONFIG_DISTRO_DEFAULTS=y
# CONFIG_SYS_MALLOC_CLEAR_ON_INIT is not set
CONFIG_USE_PREBOOT=y
CONFIG_SYS_CONSOLE_INFO_QUIET=y
# CONFIG_DISPLAY_CPUINFO is not set
# CONFIG_DISPLAY_BOARDINFO is not set
CONFIG_DISPLAY_BOARDINFO_LATE=y
CONFIG_AUTOBOOT_KEYED=y
CONFIG_AUTOBOOT_PROMPT="Autoboot in %d seconds, to stop use 's' key\n"
CONFIG_AUTOBOOT_STOP_STR="s"
CONFIG_AUTOBOOT_KEYED_CTRLC=y
CONFIG_ARCH_EARLY_INIT_R=y
CONFIG_BOARD_EARLY_INIT_F=y
# CONFIG_EFI_LOADER is not set
# CONFIG_CMD_FLASH is not set
CONFIG_CMD_GPIO=y
CONFIG_CMD_I2C=y
CONFIG_CMD_MMC=y
CONFIG_CMD_PCI=y
CONFIG_CMD_SPI=y
CONFIG_CMD_USB=y
# CONFIG_CMD_SETEXPR is not set
CONFIG_CMD_TFTPPUT=y
CONFIG_CMD_CACHE=y
CONFIG_CMD_TIME=y
CONFIG_CMD_MVEBU_BUBT=y
CONFIG_CMD_REGULATOR=y
CONFIG_CMD_EXT4_WRITE=y
CONFIG_MAC_PARTITION=y
CONFIG_DEFAULT_DEVICE_TREE="armada-8040-puzzle-m801"
CONFIG_ENV_IS_IN_SPI_FLASH=y
CONFIG_SYS_RELOC_GD_ENV_ADDR=y
CONFIG_NET_RANDOM_ETHADDR=y
CONFIG_AHCI_MVEBU=y
CONFIG_DM_GPIO=y
CONFIG_DM_PCA953X=y
CONFIG_DM_I2C=y
CONFIG_SYS_I2C_MVTWSI=y
CONFIG_I2C_MUX=y
CONFIG_I2C_MUX_PCA954x=y
CONFIG_DM_RTC=y
CONFIG_RTC_RX8010SJ=y
CONFIG_MISC=y
CONFIG_DM_MMC=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_XENON=y
CONFIG_SF_DEFAULT_MODE=0
CONFIG_SPI_FLASH_SFDP_SUPPORT=y
CONFIG_SPI_FLASH_BAR=y
CONFIG_SPI_FLASH_MACRONIX=y
# CONFIG_SPI_FLASH_SPANSION is not set
# CONFIG_SPI_FLASH_STMICRO is not set
# CONFIG_SPI_FLASH_WINBOND is not set
CONFIG_PHY_MARVELL=y
CONFIG_PHY_GIGE=y
CONFIG_MVPP2=y
CONFIG_NVME=y
CONFIG_PCI=y
CONFIG_DM_PCI=y
CONFIG_PCIE_DW_MVEBU=y
CONFIG_MVEBU_COMPHY_SUPPORT=y
CONFIG_PINCTRL=y
CONFIG_PINCTRL_ARMADA_8K=y
CONFIG_DM_REGULATOR_FIXED=y
CONFIG_DEBUG_UART_SHIFT=2
CONFIG_DEBUG_UART_ANNOUNCE=y
CONFIG_SYS_NS16550=y
CONFIG_KIRKWOOD_SPI=y
CONFIG_USB=y
CONFIG_DM_USB=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_EHCI_HCD=y
# CONFIG_USB_HOST_ETHER is not set
# CONFIG_USB_ETHER_ASIX is not set
# CONFIG_USB_ETHER_MCS7830 is not set
# CONFIG_USB_ETHER_RTL8152 is not set
# CONFIG_USB_ETHER_SMSC95XX is not set


@@ -43,8 +43,11 @@ Build Procedure
In order to prevent this, the required device-tree MUST be set during compilation.
All device-tree files are located in the ./arch/arm/dts/ folder.
For other DB boards (MacchiatoBin, EspressoBin and 3700 DB board) compile u-boot with
just default device-tree from defconfig using:
For the EspressoBin board with a populated eMMC device, use:
# make DEVICE_TREE=armada-3720-espressobin-emmc
For other DB boards (MacchiatoBin, EspressoBin without soldered eMMC and 3700 DB board)
compile U-Boot with just the default device-tree from the defconfig using:
# make


@@ -291,6 +291,22 @@ config NAND_ZYNQ_USE_BOOTLOADER1_TIMINGS
This flag prevents U-Boot from reconfiguring the NAND flash controller
and reuses the NAND timing from the 1st stage bootloader.
config NAND_OCTEONTX
bool "Support for OcteonTX NAND controller"
select SYS_NAND_SELF_INIT
imply CMD_NAND
help
This enables the NAND flash controller hardware found on OcteonTX
processors.
config NAND_OCTEONTX_HW_ECC
bool "Support Hardware ECC for OcteonTX NAND controller"
depends on NAND_OCTEONTX
default y
help
This enables the hardware BCH engine found on OcteonTX processors to
provide ECC support for the NAND flash controller.
config NAND_STM32_FMC2
bool "Support for NAND controller on STM32MP SoCs"
depends on ARCH_STM32MP


@@ -58,6 +58,8 @@ obj-$(CONFIG_NAND_VF610_NFC) += vf610_nfc.o
obj-$(CONFIG_NAND_MXC) += mxc_nand.o
obj-$(CONFIG_NAND_MXS) += mxs_nand.o
obj-$(CONFIG_NAND_MXS_DT) += mxs_nand_dt.o
obj-$(CONFIG_NAND_OCTEONTX) += octeontx_nand.o
obj-$(CONFIG_NAND_OCTEONTX_HW_ECC) += octeontx_bch.o
obj-$(CONFIG_NAND_PXA3XX) += pxa3xx_nand.o
obj-$(CONFIG_NAND_SPEAR) += spr_nand.o
obj-$(CONFIG_TEGRA_NAND) += tegra_nand.o


@@ -0,0 +1,425 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Marvell International Ltd.
*/
#include <dm.h>
#include <dm/of_access.h>
#include <malloc.h>
#include <memalign.h>
#include <nand.h>
#include <pci.h>
#include <pci_ids.h>
#include <time.h>
#include <linux/bitfield.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/libfdt.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand_bch.h>
#include <linux/mtd/nand_ecc.h>
#include <asm/io.h>
#include <asm/types.h>
#include <asm/dma-mapping.h>
#include <asm/arch/clock.h>
#include "octeontx_bch.h"
#ifdef DEBUG
# undef CONFIG_LOGLEVEL
# define CONFIG_LOGLEVEL 8
#endif
LIST_HEAD(octeontx_bch_devices);
static unsigned int num_vfs = BCH_NR_VF;
static void *bch_pf;
static void *bch_vf;
static void *token;
static bool bch_pf_initialized;
static bool bch_vf_initialized;
static int pci_enable_sriov(struct udevice *dev, int nr_virtfn)
{
int ret;
ret = pci_sriov_init(dev, nr_virtfn);
if (ret)
printf("%s(%s): pci_sriov_init returned %d\n", __func__,
dev->name, ret);
return ret;
}
void *octeontx_bch_getv(void)
{
if (!bch_vf)
return NULL;
if (bch_vf_initialized && bch_pf_initialized)
return bch_vf;
else
return NULL;
}
void octeontx_bch_putv(void *token)
{
bch_vf_initialized = !!token;
bch_vf = token;
}
void *octeontx_bch_getp(void)
{
return token;
}
void octeontx_bch_putp(void *token)
{
bch_pf = token;
bch_pf_initialized = !!token;
}
static int do_bch_init(struct bch_device *bch)
{
return 0;
}
static void bch_reset(struct bch_device *bch)
{
writeq(1, bch->reg_base + BCH_CTL);
mdelay(2);
}
static void bch_disable(struct bch_device *bch)
{
writeq(~0ull, bch->reg_base + BCH_ERR_INT_ENA_W1C);
writeq(~0ull, bch->reg_base + BCH_ERR_INT);
bch_reset(bch);
}
static u32 bch_check_bist_status(struct bch_device *bch)
{
return readq(bch->reg_base + BCH_BIST_RESULT);
}
static int bch_device_init(struct bch_device *bch)
{
u64 bist;
int rc;
debug("%s: Resetting...\n", __func__);
/* Reset the PF when probed first */
bch_reset(bch);
debug("%s: Checking BIST...\n", __func__);
/* Check BIST status */
bist = (u64)bch_check_bist_status(bch);
if (bist) {
dev_err(bch->dev, "BCH BIST failed with code 0x%llx\n", bist);
return -ENODEV;
}
/* Get max VQs/VFs supported by the device */
bch->max_vfs = pci_sriov_get_totalvfs(bch->dev);
debug("%s: %d vfs\n", __func__, bch->max_vfs);
if (num_vfs > bch->max_vfs) {
dev_warn(bch->dev, "Num of VFs to enable %d is greater than max available. Enabling %d VFs.\n",
num_vfs, bch->max_vfs);
num_vfs = bch->max_vfs;
}
bch->vfs_enabled = bch->max_vfs;
/* Get number of VQs/VFs to be enabled */
/* TODO: Get CLK frequency */
/* Reset device parameters */
debug("%s: Doing initialization\n", __func__);
rc = do_bch_init(bch);
return rc;
}
static int bch_sriov_configure(struct udevice *dev, int numvfs)
{
struct bch_device *bch = dev_get_priv(dev);
int ret = -EBUSY;
debug("%s(%s, %d), bch: %p, vfs_in_use: %d, enabled: %d\n", __func__,
dev->name, numvfs, bch, bch->vfs_in_use, bch->vfs_enabled);
if (bch->vfs_in_use)
goto exit;
ret = 0;
if (numvfs > 0) {
debug("%s: Enabling sriov\n", __func__);
ret = pci_enable_sriov(dev, numvfs);
if (ret == 0) {
bch->flags |= BCH_FLAG_SRIOV_ENABLED;
ret = numvfs;
bch->vfs_enabled = numvfs;
}
}
debug("VFs enabled: %d\n", ret);
exit:
debug("%s: Returning %d\n", __func__, ret);
return ret;
}
static int octeontx_pci_bchpf_probe(struct udevice *dev)
{
struct bch_device *bch;
int ret;
debug("%s(%s)\n", __func__, dev->name);
bch = dev_get_priv(dev);
if (!bch)
return -ENOMEM;
bch->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM);
bch->dev = dev;
debug("%s: base address: %p\n", __func__, bch->reg_base);
ret = bch_device_init(bch);
if (ret) {
printf("%s(%s): init returned %d\n", __func__, dev->name, ret);
return ret;
}
INIT_LIST_HEAD(&bch->list);
list_add(&bch->list, &octeontx_bch_devices);
token = (void *)dev;
debug("%s: Configuring SRIOV\n", __func__);
bch_sriov_configure(dev, num_vfs);
debug("%s: Done.\n", __func__);
octeontx_bch_putp(bch);
return 0;
}
static const struct pci_device_id octeontx_bchpf_pci_id_table[] = {
{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_BCH) },
{},
};
static const struct pci_device_id octeontx_bchvf_pci_id_table[] = {
{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_BCHVF)},
{},
};
/**
* Given a data block calculate the ecc data and fill in the response
*
* @param[in] block 8-byte aligned pointer to data block to calculate ECC
* @param block_size Size of block in bytes, must be a multiple of two.
* @param bch_level Number of errors that must be corrected. The number of
* parity bytes is equal to ((15 * bch_level) + 7) / 8.
* Must be 4, 8, 16, 24, 32, 40, 48, 56, 60 or 64.
* @param[out] ecc 8-byte aligned pointer to where ecc data should go
* @param[in] resp pointer to where responses will be written.
*
* @return Zero on success, negative on failure.
*/
int octeontx_bch_encode(struct bch_vf *vf, dma_addr_t block, u16 block_size,
u8 bch_level, dma_addr_t ecc, dma_addr_t resp)
{
union bch_cmd cmd;
int rc;
memset(&cmd, 0, sizeof(cmd));
cmd.s.cword.ecc_gen = eg_gen;
cmd.s.cword.ecc_level = bch_level;
cmd.s.cword.size = block_size;
cmd.s.oword.ptr = ecc;
cmd.s.iword.ptr = block;
cmd.s.rword.ptr = resp;
rc = octeontx_cmd_queue_write(QID_BCH, 1,
sizeof(cmd) / sizeof(uint64_t), cmd.u);
if (rc)
return -1;
octeontx_bch_write_doorbell(1, vf);
return 0;
}
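A quick check of the parity-byte formula in the comment above: for bch_level = 8, ((15 * 8) + 7) / 8 = 127 / 8 = 15 parity bytes per block. As an illustrative helper, not part of the driver:
static inline unsigned int bch_parity_bytes(u8 bch_level)
{
	/* 15 parity bits per correction level, rounded up to whole bytes */
	return (15 * (unsigned int)bch_level + 7) / 8;
}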
/**
* Given a data block and ecc data correct the data block
*
* @param[in] block_ecc_in 8-byte aligned pointer to data block with ECC
* data concatenated to the end to correct
* @param block_size Size of block in bytes, must be a multiple of
* two.
* @param bch_level Number of errors that must be corrected. The
* number of parity bytes is equal to
* ((15 * bch_level) + 7) / 8.
* Must be 4, 8, 16, 24, 32, 40, 48, 56, 60 or 64.
* @param[out] block_out 8-byte aligned pointer to corrected data buffer.
* This should not be the same as block_ecc_in.
* @param[in] resp pointer to where responses will be written.
*
* @return Zero on success, negative on failure.
*/
int octeontx_bch_decode(struct bch_vf *vf, dma_addr_t block_ecc_in,
u16 block_size, u8 bch_level,
dma_addr_t block_out, dma_addr_t resp)
{
union bch_cmd cmd;
int rc;
memset(&cmd, 0, sizeof(cmd));
cmd.s.cword.ecc_gen = eg_correct;
cmd.s.cword.ecc_level = bch_level;
cmd.s.cword.size = block_size;
cmd.s.oword.ptr = block_out;
cmd.s.iword.ptr = block_ecc_in;
cmd.s.rword.ptr = resp;
rc = octeontx_cmd_queue_write(QID_BCH, 1,
sizeof(cmd) / sizeof(uint64_t), cmd.u);
if (rc)
return -1;
octeontx_bch_write_doorbell(1, vf);
return 0;
}
EXPORT_SYMBOL(octeontx_bch_decode);
int octeontx_bch_wait(struct bch_vf *vf, union bch_resp *resp,
dma_addr_t handle)
{
ulong start = get_timer(0);
__iormb(); /* HW is updating *resp */
while (!resp->s.done && get_timer(start) < 10)
__iormb(); /* HW is updating *resp */
if (resp->s.done)
return 0;
return -ETIMEDOUT;
}
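As a usage illustration of the encode/wait pair above, a sketch that generates ECC for one 512-byte block at BCH level 8 and polls for completion. DMA mapping of the data, ECC and response buffers is assumed to be done by the caller, and the function name is hypothetical:
static int example_encode_block(struct bch_vf *vf, dma_addr_t data,
				dma_addr_t ecc, union bch_resp *resp,
				dma_addr_t resp_dma)
{
	int ret;

	resp->u16 = 0;	/* the engine sets s.done when it finishes */
	ret = octeontx_bch_encode(vf, data, 512, 8, ecc, resp_dma);
	if (ret)
		return ret;
	return octeontx_bch_wait(vf, resp, resp_dma);
}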
struct bch_q octeontx_bch_q[QID_MAX];
static int octeontx_cmd_queue_initialize(struct udevice *dev, int queue_id,
int max_depth, int fpa_pool,
int pool_size)
{
/* some params are for later merge with CPT or cn83xx */
struct bch_q *q = &octeontx_bch_q[queue_id];
unsigned long paddr;
u64 *chunk_buffer;
int chunk = max_depth + 1;
int i, size;
if ((unsigned int)queue_id >= QID_MAX)
return -EINVAL;
if (max_depth & chunk) /* must be 2^N - 1 */
return -EINVAL;
size = NQS * chunk * sizeof(u64);
chunk_buffer = dma_alloc_coherent(size, &paddr);
if (!chunk_buffer)
return -ENOMEM;
q->base_paddr = paddr;
q->dev = dev;
q->index = 0;
q->max_depth = max_depth;
q->pool_size_m1 = pool_size;
q->base_vaddr = chunk_buffer;
for (i = 0; i < NQS; i++) {
u64 *ixp;
int inext = (i + 1) * chunk - 1;
int j = (i + 1) % NQS;
int jnext = j * chunk;
dma_addr_t jbase = q->base_paddr + jnext * sizeof(u64);
ixp = &chunk_buffer[inext];
*ixp = jbase;
}
return 0;
}
static int octeontx_pci_bchvf_probe(struct udevice *dev)
{
struct bch_vf *vf;
union bch_vqx_ctl ctl;
union bch_vqx_cmd_buf cbuf;
int err;
debug("%s(%s)\n", __func__, dev->name);
vf = dev_get_priv(dev);
if (!vf)
return -ENOMEM;
vf->dev = dev;
/* Map PF's configuration registers */
vf->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM);
debug("%s: reg base: %p\n", __func__, vf->reg_base);
err = octeontx_cmd_queue_initialize(dev, QID_BCH, QDEPTH - 1, 0,
sizeof(union bch_cmd) * QDEPTH);
if (err) {
dev_err(dev, "octeontx_cmd_queue_initialize() failed\n");
goto release;
}
ctl.u = readq(vf->reg_base + BCH_VQX_CTL(0));
cbuf.u = 0;
cbuf.s.ldwb = 1;
cbuf.s.dfb = 1;
cbuf.s.size = QDEPTH;
writeq(cbuf.u, vf->reg_base + BCH_VQX_CMD_BUF(0));
writeq(ctl.u, vf->reg_base + BCH_VQX_CTL(0));
writeq(octeontx_bch_q[QID_BCH].base_paddr,
vf->reg_base + BCH_VQX_CMD_PTR(0));
octeontx_bch_putv(vf);
debug("%s: bch vf initialization complete\n", __func__);
if (octeontx_bch_getv())
return octeontx_pci_nand_deferred_probe();
return -1;
release:
return err;
}
static int octeontx_pci_bchpf_remove(struct udevice *dev)
{
struct bch_device *bch = dev_get_priv(dev);
bch_disable(bch);
return 0;
}
U_BOOT_DRIVER(octeontx_pci_bchpf) = {
.name = BCHPF_DRIVER_NAME,
.id = UCLASS_MISC,
.probe = octeontx_pci_bchpf_probe,
.remove = octeontx_pci_bchpf_remove,
.priv_auto_alloc_size = sizeof(struct bch_device),
.flags = DM_FLAG_OS_PREPARE,
};
U_BOOT_DRIVER(octeontx_pci_bchvf) = {
.name = BCHVF_DRIVER_NAME,
.id = UCLASS_MISC,
.probe = octeontx_pci_bchvf_probe,
.priv_auto_alloc_size = sizeof(struct bch_vf),
};
U_BOOT_PCI_DEVICE(octeontx_pci_bchpf, octeontx_bchpf_pci_id_table);
U_BOOT_PCI_DEVICE(octeontx_pci_bchvf, octeontx_bchvf_pci_id_table);


@@ -0,0 +1,131 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2018 Marvell International Ltd.
*/
#ifndef __OCTEONTX_BCH_H__
#define __OCTEONTX_BCH_H__
#include "octeontx_bch_regs.h"
/* flags to indicate the features supported */
#define BCH_FLAG_SRIOV_ENABLED BIT(1)
/*
* BCH Registers map for 81xx
*/
/* PF registers */
#define BCH_CTL 0x0ull
#define BCH_ERR_CFG 0x10ull
#define BCH_BIST_RESULT 0x80ull
#define BCH_ERR_INT 0x88ull
#define BCH_ERR_INT_W1S 0x90ull
#define BCH_ERR_INT_ENA_W1C 0xA0ull
#define BCH_ERR_INT_ENA_W1S 0xA8ull
/* VF registers */
#define BCH_VQX_CTL(z) 0x0ull
#define BCH_VQX_CMD_BUF(z) 0x8ull
#define BCH_VQX_CMD_PTR(z) 0x20ull
#define BCH_VQX_DOORBELL(z) 0x800ull
#define BCHPF_DRIVER_NAME "octeontx-bchpf"
#define BCHVF_DRIVER_NAME "octeontx-bchvf"
struct bch_device {
struct list_head list;
u8 max_vfs;
u8 vfs_enabled;
u8 vfs_in_use;
u32 flags;
void __iomem *reg_base;
struct udevice *dev;
};
struct bch_vf {
u16 flags;
u8 vfid;
u8 node;
u8 priority;
struct udevice *dev;
void __iomem *reg_base;
};
struct buf_ptr {
u8 *vptr;
dma_addr_t dma_addr;
u16 size;
};
void *octeontx_bch_getv(void);
void octeontx_bch_putv(void *token);
void *octeontx_bch_getp(void);
void octeontx_bch_putp(void *token);
int octeontx_bch_wait(struct bch_vf *vf, union bch_resp *resp,
dma_addr_t handle);
/**
* Given a data block calculate the ecc data and fill in the response
*
* @param[in] block 8-byte aligned pointer to data block to calculate ECC
* @param block_size Size of block in bytes, must be a multiple of two.
* @param bch_level Number of errors that must be corrected. The number of
* parity bytes is equal to ((15 * bch_level) + 7) / 8.
* Must be 4, 8, 16, 24, 32, 40, 48, 56, 60 or 64.
* @param[out] ecc 8-byte aligned pointer to where ecc data should go
* @param[in] resp pointer to where responses will be written.
*
* @return Zero on success, negative on failure.
*/
int octeontx_bch_encode(struct bch_vf *vf, dma_addr_t block, u16 block_size,
u8 bch_level, dma_addr_t ecc, dma_addr_t resp);
/**
* Given a data block and ecc data correct the data block
*
* @param[in] block_ecc_in 8-byte aligned pointer to data block with ECC
* data concatenated to the end to correct
* @param block_size Size of block in bytes, must be a multiple of
* two.
* @param bch_level Number of errors that must be corrected. The
* number of parity bytes is equal to
* ((15 * bch_level) + 7) / 8.
* Must be 4, 8, 16, 24, 32, 40, 48, 56, 60 or 64.
* @param[out] block_out 8-byte aligned pointer to corrected data buffer.
* This should not be the same as block_ecc_in.
* @param[in] resp pointer to where responses will be written.
*
* @return Zero on success, negative on failure.
*/
int octeontx_bch_decode(struct bch_vf *vf, dma_addr_t block_ecc_in,
u16 block_size, u8 bch_level,
dma_addr_t block_out, dma_addr_t resp);
/**
* Ring the BCH doorbell telling it that new commands are
* available.
*
* @param num_commands Number of new commands
* @param vf virtual function handle
*/
static inline void octeontx_bch_write_doorbell(u64 num_commands,
struct bch_vf *vf)
{
u64 num_words = num_commands * sizeof(union bch_cmd) / sizeof(uint64_t);
writeq(num_words, vf->reg_base + BCH_VQX_DOORBELL(0));
}
/**
* Since it's possible (and even likely) that the NAND device will be probed
* before the BCH device has been probed, we may need to defer the probing.
*
* In this case, the initial probe returns success but the actual probing
* is deferred until the BCH VF has been probed.
*
* @return 0 for success, otherwise error
*/
int octeontx_pci_nand_deferred_probe(void);
#endif /* __OCTEONTX_BCH_H__ */
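A minimal sketch of the two-stage probe this comment describes, assuming a hypothetical octeontx_nand_attach() helper (the actual logic lives in the NAND driver, whose diff is suppressed below):
static struct udevice *pending_nand_dev;	/* hypothetical */

static int octeontx_nand_probe_sketch(struct udevice *dev)
{
	if (!octeontx_bch_getv()) {
		/* BCH VF not ready yet: remember the device, claim success */
		pending_nand_dev = dev;
		return 0;
	}
	return octeontx_nand_attach(dev);	/* hypothetical helper */
}

int octeontx_pci_nand_deferred_probe(void)
{
	/* illustrative body: called from the BCH VF probe once usable */
	return pending_nand_dev ? octeontx_nand_attach(pending_nand_dev) : 0;
}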


@@ -0,0 +1,167 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2018 Marvell International Ltd.
*/
#ifndef __OCTEONTX_BCH_REGS_H__
#define __OCTEONTX_BCH_REGS_H__
#define BCH_NR_VF 1
union bch_cmd {
u64 u[4];
struct fields {
struct {
u64 size:12;
u64 reserved_12_31:20;
u64 ecc_level:4;
u64 reserved_36_61:26;
u64 ecc_gen:2;
} cword;
struct {
u64 ptr:49;
u64 reserved_49_55:7;
u64 nc:1;
u64 fw:1;
u64 reserved_58_63:6;
} oword;
struct {
u64 ptr:49;
u64 reserved_49_55:7;
u64 nc:1;
u64 reserved_57_63:7;
} iword;
struct {
u64 ptr:49;
u64 reserved_49_63:15;
} rword;
} s;
};
enum ecc_gen {
eg_correct,
eg_copy,
eg_gen,
eg_copy3,
};
/** Response from BCH instruction */
union bch_resp {
u16 u16;
struct {
u16 num_errors:7; /** Number of errors in block */
u16 zero:6; /** Always zero, ignore */
u16 erased:1; /** Block is erased */
u16 uncorrectable:1;/** too many bits flipped */
u16 done:1; /** Block is done */
} s;
};
union bch_vqx_ctl {
u64 u;
struct {
u64 reserved_0:1;
u64 cmd_be:1;
u64 max_read:4;
u64 reserved_6_15:10;
u64 erase_disable:1;
u64 one_cmd:1;
u64 early_term:4;
u64 reserved_22_63:42;
} s;
};
union bch_vqx_cmd_buf {
u64 u;
struct {
u64 reserved_0_32:33;
u64 size:13;
u64 dfb:1;
u64 ldwb:1;
u64 reserved_48_63:16;
} s;
};
/* keep queue state indexed, even though just one supported here,
* for later generalization to similarly-shaped queues on other Cavium devices
*/
enum {
QID_BCH,
QID_MAX
};
struct bch_q {
struct udevice *dev;
int index;
u16 max_depth;
u16 pool_size_m1;
u64 *base_vaddr;
dma_addr_t base_paddr;
};
extern struct bch_q octeontx_bch_q[QID_MAX];
/* with one dma-mapped area, virt<->phys conversions by +/- (vaddr-paddr) */
static inline dma_addr_t qphys(int qid, void *v)
{
struct bch_q *q = &octeontx_bch_q[qid];
int off = (u8 *)v - (u8 *)q->base_vaddr;
return q->base_paddr + off;
}
#define octeontx_ptr_to_phys(v) qphys(QID_BCH, (v))
static inline void *qvirt(int qid, dma_addr_t p)
{
struct bch_q *q = &octeontx_bch_q[qid];
int off = p - q->base_paddr;
return q->base_vaddr + off;
}
#define octeontx_phys_to_ptr(p) qvirt(QID_BCH, (p))
/* plenty for interleaved r/w on two planes with 16k page, ecc_size 1k */
/* QDEPTH >= 16, as successive chunks must align on 128-byte boundaries */
#define QDEPTH 256 /* u64s in a command queue chunk, incl next-pointer */
#define NQS 1 /* linked chunks in the chain */
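/* Check: QDEPTH of 256 u64s is 2 KiB per chunk, a multiple of the 128-byte
 * alignment required between successive chunks (128 bytes = 16 u64s, hence
 * QDEPTH >= 16).
 */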
/**
* Write an arbitrary number of command words to a command queue.
* This is a generic function; the fixed number of command word
* functions yield higher performance.
*
* Could merge with crypto version for FPA use on cn83xx
*/
static inline int octeontx_cmd_queue_write(int queue_id, bool use_locking,
int cmd_count, const u64 *cmds)
{
int ret = 0;
u64 *cmd_ptr;
struct bch_q *qptr = &octeontx_bch_q[queue_id];
if (unlikely(cmd_count < 1 || cmd_count > 32))
return -EINVAL;
if (unlikely(!cmds))
return -EINVAL;
cmd_ptr = qptr->base_vaddr;
while (cmd_count > 0) {
int slot = qptr->index % (QDEPTH * NQS);
if (slot % QDEPTH != QDEPTH - 1) {
cmd_ptr[slot] = *cmds++;
cmd_count--;
}
qptr->index++;
}
__iowmb(); /* flush commands before ringing bell */
return ret;
}
#endif /* __OCTEONTX_BCH_REGS_H__ */

File diff suppressed because it is too large.


@@ -407,6 +407,37 @@ config MT7628_ETH
The MediaTek MT7628 ethernet interface is used on MT7628 and
MT7688 based boards.
config NET_OCTEONTX
bool "OcteonTX Ethernet support"
depends on ARCH_OCTEONTX
depends on PCI_SRIOV
help
You must select Y to enable network device support for
OcteonTX SoCs. If unsure, say N.
config NET_OCTEONTX2
bool "OcteonTX2 Ethernet support"
depends on ARCH_OCTEONTX2
select OCTEONTX2_CGX_INTF
help
You must select Y to enable network device support for
OcteonTX2 SoCs. If unsure, say N.
config OCTEONTX_SMI
bool "OcteonTX SMI Device support"
depends on ARCH_OCTEONTX || ARCH_OCTEONTX2
help
You must select Y to enable SMI controller support for
OcteonTX or OcteonTX2 SoCs. If unsure, say N.
config OCTEONTX2_CGX_INTF
bool "OcteonTX2 CGX ATF interface support"
depends on ARCH_OCTEONTX2
default y if ARCH_OCTEONTX2
help
You must select Y to enable CGX ATF interface support for
OcteonTX2 SoCs. If unsure, say N.
config PCH_GBE
bool "Intel Platform Controller Hub EG20T GMAC driver"
depends on DM_ETH && DM_PCI


@@ -65,6 +65,10 @@ obj-$(CONFIG_RENESAS_RAVB) += ravb.o
obj-$(CONFIG_SMC91111) += smc91111.o
obj-$(CONFIG_SMC911X) += smc911x.o
obj-$(CONFIG_TSEC_ENET) += tsec.o fsl_mdio.o
obj-$(CONFIG_NET_OCTEONTX) += octeontx/
obj-$(CONFIG_NET_OCTEONTX2) += octeontx2/
obj-$(CONFIG_OCTEONTX_SMI) += octeontx/smi.o
obj-$(CONFIG_OCTEONTX2_CGX_INTF) += octeontx2/cgx_intf.o
obj-$(CONFIG_FMAN_ENET) += fsl_mdio.o
obj-$(CONFIG_ULI526X) += uli526x.o
obj-$(CONFIG_VSC7385_ENET) += vsc7385.o


@@ -0,0 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (C) 2018 Marvell International Ltd.
#
obj-$(CONFIG_NET_OCTEONTX) += bgx.o nic_main.o nicvf_queues.o nicvf_main.o \
xcv.o

drivers/net/octeontx/bgx.c (new file, 1565 lines): file diff suppressed because it is too large.

drivers/net/octeontx/bgx.h (new file, 259 lines):

@@ -0,0 +1,259 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2018 Marvell International Ltd.
*/
#ifndef BGX_H
#define BGX_H
#include <asm/arch/board.h>
/* PCI device IDs */
#define PCI_DEVICE_ID_OCTEONTX_BGX 0xA026
#define PCI_DEVICE_ID_OCTEONTX_RGX 0xA054
#define MAX_LMAC_PER_BGX 4
#define MAX_BGX_CHANS_PER_LMAC 16
#define MAX_DMAC_PER_LMAC 8
#define MAX_FRAME_SIZE 9216
#define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE 2
#define MAX_LMAC (MAX_BGX_PER_NODE * MAX_LMAC_PER_BGX)
#define NODE_ID_MASK 0x300000000000
#define NODE_ID(x) (((x) & NODE_ID_MASK) >> 44)
/* Registers */
#define GSERX_CFG(x) (0x87E090000080ull + (x) * 0x1000000ull)
#define GSERX_SCRATCH(x) (0x87E090000020ull + (x) * 0x1000000ull)
#define GSERX_PHY_CTL(x) (0x87E090000000ull + (x) * 0x1000000ull)
#define GSERX_CFG_BGX BIT(2)
#define GSER_RX_EIE_DETSTS(x) (0x87E090000150ull + (x) * 0x1000000ull)
#define GSER_CDRLOCK (8)
#define GSER_BR_RXX_CTL(x, y) (0x87E090000400ull + (x) * 0x1000000ull + \
(y) * 0x80)
#define GSER_BR_RXX_CTL_RXT_SWM BIT(2)
#define GSER_BR_RXX_EER(x, y) (0x87E090000418ull + (x) * 0x1000000ull + \
(y) * 0x80)
#define GSER_BR_RXX_EER_RXT_ESV BIT(14)
#define GSER_BR_RXX_EER_RXT_EER BIT(15)
#define EER_RXT_ESV (14)
#define BGX_CMRX_CFG 0x00
#define CMR_PKT_TX_EN BIT_ULL(13)
#define CMR_PKT_RX_EN BIT_ULL(14)
#define CMR_EN BIT_ULL(15)
#define BGX_CMR_GLOBAL_CFG 0x08
#define CMR_GLOBAL_CFG_FCS_STRIP BIT_ULL(6)
#define BGX_CMRX_RX_ID_MAP 0x60
#define BGX_CMRX_RX_STAT0 0x70
#define BGX_CMRX_RX_STAT1 0x78
#define BGX_CMRX_RX_STAT2 0x80
#define BGX_CMRX_RX_STAT3 0x88
#define BGX_CMRX_RX_STAT4 0x90
#define BGX_CMRX_RX_STAT5 0x98
#define BGX_CMRX_RX_STAT6 0xA0
#define BGX_CMRX_RX_STAT7 0xA8
#define BGX_CMRX_RX_STAT8 0xB0
#define BGX_CMRX_RX_STAT9 0xB8
#define BGX_CMRX_RX_STAT10 0xC0
#define BGX_CMRX_RX_BP_DROP 0xC8
#define BGX_CMRX_RX_DMAC_CTL 0x0E8
#define BGX_CMR_RX_DMACX_CAM 0x200
#define RX_DMACX_CAM_EN BIT_ULL(48)
#define RX_DMACX_CAM_LMACID(x) ((x) << 49)
#define RX_DMAC_COUNT 32
#define BGX_CMR_RX_STREERING 0x300
#define RX_TRAFFIC_STEER_RULE_COUNT 8
#define BGX_CMR_CHAN_MSK_AND 0x450
#define BGX_CMR_BIST_STATUS 0x460
#define BGX_CMR_RX_LMACS 0x468
#define BGX_CMRX_TX_STAT0 0x600
#define BGX_CMRX_TX_STAT1 0x608
#define BGX_CMRX_TX_STAT2 0x610
#define BGX_CMRX_TX_STAT3 0x618
#define BGX_CMRX_TX_STAT4 0x620
#define BGX_CMRX_TX_STAT5 0x628
#define BGX_CMRX_TX_STAT6 0x630
#define BGX_CMRX_TX_STAT7 0x638
#define BGX_CMRX_TX_STAT8 0x640
#define BGX_CMRX_TX_STAT9 0x648
#define BGX_CMRX_TX_STAT10 0x650
#define BGX_CMRX_TX_STAT11 0x658
#define BGX_CMRX_TX_STAT12 0x660
#define BGX_CMRX_TX_STAT13 0x668
#define BGX_CMRX_TX_STAT14 0x670
#define BGX_CMRX_TX_STAT15 0x678
#define BGX_CMRX_TX_STAT16 0x680
#define BGX_CMRX_TX_STAT17 0x688
#define BGX_CMR_TX_LMACS 0x1000
#define BGX_SPUX_CONTROL1 0x10000
#define SPU_CTL_LOW_POWER BIT_ULL(11)
#define SPU_CTL_LOOPBACK BIT_ULL(14)
#define SPU_CTL_RESET BIT_ULL(15)
#define BGX_SPUX_STATUS1 0x10008
#define SPU_STATUS1_RCV_LNK BIT_ULL(2)
#define BGX_SPUX_STATUS2 0x10020
#define SPU_STATUS2_RCVFLT BIT_ULL(10)
#define BGX_SPUX_BX_STATUS 0x10028
#define SPU_BX_STATUS_RX_ALIGN BIT_ULL(12)
#define BGX_SPUX_BR_STATUS1 0x10030
#define SPU_BR_STATUS_BLK_LOCK BIT_ULL(0)
#define SPU_BR_STATUS_RCV_LNK BIT_ULL(12)
#define BGX_SPUX_BR_PMD_CRTL 0x10068
#define SPU_PMD_CRTL_TRAIN_EN BIT_ULL(1)
#define BGX_SPUX_BR_PMD_LP_CUP 0x10078
#define BGX_SPUX_BR_PMD_LD_CUP 0x10088
#define BGX_SPUX_BR_PMD_LD_REP 0x10090
#define BGX_SPUX_FEC_CONTROL 0x100A0
#define SPU_FEC_CTL_FEC_EN BIT_ULL(0)
#define SPU_FEC_CTL_ERR_EN BIT_ULL(1)
#define BGX_SPUX_AN_CONTROL 0x100C8
#define SPU_AN_CTL_AN_EN BIT_ULL(12)
#define SPU_AN_CTL_XNP_EN BIT_ULL(13)
#define SPU_AN_CTL_AN_RESTART BIT_ULL(15)
#define BGX_SPUX_AN_STATUS 0x100D0
#define SPU_AN_STS_AN_COMPLETE BIT_ULL(5)
#define BGX_SPUX_AN_ADV 0x100D8
#define BGX_SPUX_MISC_CONTROL 0x10218
#define SPU_MISC_CTL_INTLV_RDISP BIT_ULL(10)
#define SPU_MISC_CTL_RX_DIS BIT_ULL(12)
#define BGX_SPUX_INT 0x10220 /* +(0..3) << 20 */
#define BGX_SPUX_INT_W1S 0x10228
#define BGX_SPUX_INT_ENA_W1C 0x10230
#define BGX_SPUX_INT_ENA_W1S 0x10238
#define BGX_SPU_DBG_CONTROL 0x10300
#define SPU_DBG_CTL_AN_ARB_LINK_CHK_EN BIT_ULL(18)
#define SPU_DBG_CTL_AN_NONCE_MCT_DIS BIT_ULL(29)
#define BGX_SMUX_RX_INT 0x20000
#define BGX_SMUX_RX_JABBER 0x20030
#define BGX_SMUX_RX_CTL 0x20048
#define SMU_RX_CTL_STATUS (3ull << 0)
#define BGX_SMUX_TX_APPEND 0x20100
#define SMU_TX_APPEND_FCS_D BIT_ULL(2)
#define BGX_SMUX_TX_MIN_PKT 0x20118
#define BGX_SMUX_TX_INT 0x20140
#define BGX_SMUX_TX_CTL 0x20178
#define SMU_TX_CTL_DIC_EN BIT_ULL(0)
#define SMU_TX_CTL_UNI_EN BIT_ULL(1)
#define SMU_TX_CTL_LNK_STATUS (3ull << 4)
#define BGX_SMUX_TX_THRESH 0x20180
#define BGX_SMUX_CTL 0x20200
#define SMU_CTL_RX_IDLE BIT_ULL(0)
#define SMU_CTL_TX_IDLE BIT_ULL(1)
#define BGX_GMP_PCS_MRX_CTL 0x30000
#define PCS_MRX_CTL_RST_AN BIT_ULL(9)
#define PCS_MRX_CTL_PWR_DN BIT_ULL(11)
#define PCS_MRX_CTL_AN_EN BIT_ULL(12)
#define PCS_MRX_CTL_LOOPBACK1 BIT_ULL(14)
#define PCS_MRX_CTL_RESET BIT_ULL(15)
#define BGX_GMP_PCS_MRX_STATUS 0x30008
#define PCS_MRX_STATUS_AN_CPT BIT_ULL(5)
#define BGX_GMP_PCS_ANX_AN_RESULTS 0x30020
#define BGX_GMP_PCS_SGM_AN_ADV 0x30068
#define BGX_GMP_PCS_MISCX_CTL 0x30078
#define PCS_MISCX_CTL_DISP_EN BIT_ULL(13)
#define PCS_MISC_CTL_GMX_ENO BIT_ULL(11)
#define PCS_MISC_CTL_SAMP_PT_MASK 0x7Full
#define PCS_MISC_CTL_MODE BIT_ULL(8)
#define BGX_GMP_GMI_PRTX_CFG 0x38020
#define GMI_PORT_CFG_SPEED BIT_ULL(1)
#define GMI_PORT_CFG_DUPLEX BIT_ULL(2)
#define GMI_PORT_CFG_SLOT_TIME BIT_ULL(3)
#define GMI_PORT_CFG_SPEED_MSB BIT_ULL(8)
#define BGX_GMP_GMI_RXX_JABBER 0x38038
#define BGX_GMP_GMI_TXX_THRESH 0x38210
#define BGX_GMP_GMI_TXX_APPEND 0x38218
#define BGX_GMP_GMI_TXX_SLOT 0x38220
#define BGX_GMP_GMI_TXX_BURST 0x38228
#define BGX_GMP_GMI_TXX_MIN_PKT 0x38240
#define BGX_GMP_GMI_TXX_SGMII_CTL 0x38300
#define BGX_MSIX_VEC_0_29_ADDR 0x400000 /* +(0..29) << 4 */
#define BGX_MSIX_VEC_0_29_CTL 0x400008
#define BGX_MSIX_PBA_0 0x4F0000
/* MSI-X interrupts */
#define BGX_MSIX_VECTORS 30
#define BGX_LMAC_VEC_OFFSET 7
#define BGX_MSIX_VEC_SHIFT 4
#define CMRX_INT 0
#define SPUX_INT 1
#define SMUX_RX_INT 2
#define SMUX_TX_INT 3
#define GMPX_PCS_INT 4
#define GMPX_GMI_RX_INT 5
#define GMPX_GMI_TX_INT 6
#define CMR_MEM_INT 28
#define SPU_MEM_INT 29
#define LMAC_INTR_LINK_UP BIT(0)
#define LMAC_INTR_LINK_DOWN BIT(1)
/* RX_DMAC_CTL configuration */
enum MCAST_MODE {
MCAST_MODE_REJECT,
MCAST_MODE_ACCEPT,
MCAST_MODE_CAM_FILTER,
RSVD
};
#define BCAST_ACCEPT 1
#define CAM_ACCEPT 1
int octeontx_bgx_initialize(unsigned int bgx_idx, unsigned int node);
void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac);
void bgx_get_count(int node, int *bgx_count);
int bgx_get_lmac_count(int node, int bgx);
void bgx_print_stats(int bgx_idx, int lmac);
void xcv_init_hw(void);
void xcv_setup_link(bool link_up, int link_speed);
#undef LINK_INTR_ENABLE
enum qlm_mode {
QLM_MODE_SGMII, /* SGMII, each lane independent */
QLM_MODE_XAUI, /* 1 XAUI or DXAUI, 4 lanes */
QLM_MODE_RXAUI, /* 2 RXAUI, 2 lanes each */
QLM_MODE_XFI, /* 4 XFI, 1 lane each */
QLM_MODE_XLAUI, /* 1 XLAUI, 4 lanes each */
QLM_MODE_10G_KR, /* 4 10GBASE-KR, 1 lane each */
QLM_MODE_40G_KR4, /* 1 40GBASE-KR4, 4 lanes each */
QLM_MODE_QSGMII, /* 4 QSGMII, each lane independent */
QLM_MODE_RGMII, /* 1 RGX */
};
struct phy_info {
int mdio_bus;
int phy_addr;
bool autoneg_dis;
};
struct bgx_board_info {
struct phy_info phy_info[MAX_LMAC_PER_BGX];
bool lmac_reg[MAX_LMAC_PER_BGX];
bool lmac_enable[MAX_LMAC_PER_BGX];
};
enum LMAC_TYPE {
BGX_MODE_SGMII = 0, /* 1 lane, 1.250 Gbaud */
BGX_MODE_XAUI = 1, /* 4 lanes, 3.125 Gbaud */
BGX_MODE_DXAUI = 1, /* 4 lanes, 6.250 Gbaud */
BGX_MODE_RXAUI = 2, /* 2 lanes, 6.250 Gbaud */
BGX_MODE_XFI = 3, /* 1 lane, 10.3125 Gbaud */
BGX_MODE_XLAUI = 4, /* 4 lanes, 10.3125 Gbaud */
BGX_MODE_10G_KR = 3,/* 1 lane, 10.3125 Gbaud */
BGX_MODE_40G_KR = 4,/* 4 lanes, 10.3125 Gbaud */
BGX_MODE_RGMII = 5,
BGX_MODE_QSGMII = 6,
BGX_MODE_INVALID = 7,
};
int rxaui_phy_xs_init(struct mii_dev *bus, int phy_addr);
#endif /* BGX_H */

drivers/net/octeontx/nic.h (new file, 508 lines):

@@ -0,0 +1,508 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2018 Marvell International Ltd.
*/
#ifndef NIC_H
#define NIC_H
#include <linux/netdevice.h>
#include "bgx.h"
#define PCI_DEVICE_ID_CAVIUM_NICVF_1 0x0011
/* Subsystem device IDs */
#define PCI_SUBSYS_DEVID_88XX_NIC_PF 0xA11E
#define PCI_SUBSYS_DEVID_81XX_NIC_PF 0xA21E
#define PCI_SUBSYS_DEVID_83XX_NIC_PF 0xA31E
#define PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF 0xA11E
#define PCI_SUBSYS_DEVID_88XX_NIC_VF 0xA134
#define PCI_SUBSYS_DEVID_81XX_NIC_VF 0xA234
#define PCI_SUBSYS_DEVID_83XX_NIC_VF 0xA334
#define NIC_INTF_COUNT 2 /* Interfaces between VNIC and TNS/BGX */
#define NIC_CHANS_PER_INF 128
#define NIC_MAX_CHANS (NIC_INTF_COUNT * NIC_CHANS_PER_INF)
/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM 0
#define PCI_MSIX_REG_BAR_NUM 4
/* NIC SRIOV VF count */
#define MAX_NUM_VFS_SUPPORTED 128
#define DEFAULT_NUM_VF_ENABLED 8
#define NIC_TNS_BYPASS_MODE 0
#define NIC_TNS_MODE 1
/* NIC priv flags */
#define NIC_SRIOV_ENABLED BIT(0)
#define NIC_TNS_ENABLED BIT(1)
/* VNIC HW optimization features */
#define VNIC_RX_CSUM_OFFLOAD_SUPPORT
#undef VNIC_TX_CSUM_OFFLOAD_SUPPORT
#undef VNIC_SG_SUPPORT
#undef VNIC_TSO_SUPPORT
#undef VNIC_LRO_SUPPORT
#undef VNIC_RSS_SUPPORT
/* TSO not supported in Thunder pass1 */
#ifdef VNIC_TSO_SUPPORT
#define VNIC_SW_TSO_SUPPORT
#undef VNIC_HW_TSO_SUPPORT
#endif
/* ETHTOOL enable or disable, undef this to disable */
#define NICVF_ETHTOOL_ENABLE
/* Min/Max packet size */
#define NIC_HW_MIN_FRS 64
#define NIC_HW_MAX_FRS 9200 /* 9216 max packet including FCS */
/* Max pkinds */
#define NIC_MAX_PKIND 16
/* Max when CPI_ALG is IP diffserv */
#define NIC_MAX_CPI_PER_LMAC 64
/* NIC VF Interrupts */
#define NICVF_INTR_CQ 0
#define NICVF_INTR_SQ 1
#define NICVF_INTR_RBDR 2
#define NICVF_INTR_PKT_DROP 3
#define NICVF_INTR_TCP_TIMER 4
#define NICVF_INTR_MBOX 5
#define NICVF_INTR_QS_ERR 6
#define NICVF_INTR_CQ_SHIFT 0
#define NICVF_INTR_SQ_SHIFT 8
#define NICVF_INTR_RBDR_SHIFT 16
#define NICVF_INTR_PKT_DROP_SHIFT 20
#define NICVF_INTR_TCP_TIMER_SHIFT 21
#define NICVF_INTR_MBOX_SHIFT 22
#define NICVF_INTR_QS_ERR_SHIFT 23
#define NICVF_INTR_CQ_MASK (0xFF << NICVF_INTR_CQ_SHIFT)
#define NICVF_INTR_SQ_MASK (0xFF << NICVF_INTR_SQ_SHIFT)
#define NICVF_INTR_RBDR_MASK (0x03 << NICVF_INTR_RBDR_SHIFT)
#define NICVF_INTR_PKT_DROP_MASK BIT(NICVF_INTR_PKT_DROP_SHIFT)
#define NICVF_INTR_TCP_TIMER_MASK BIT(NICVF_INTR_TCP_TIMER_SHIFT)
#define NICVF_INTR_MBOX_MASK BIT(NICVF_INTR_MBOX_SHIFT)
#define NICVF_INTR_QS_ERR_MASK BIT(NICVF_INTR_QS_ERR_SHIFT)
/* MSI-X interrupts */
#define NIC_PF_MSIX_VECTORS 10
#define NIC_VF_MSIX_VECTORS 20
#define NIC_PF_INTR_ID_ECC0_SBE 0
#define NIC_PF_INTR_ID_ECC0_DBE 1
#define NIC_PF_INTR_ID_ECC1_SBE 2
#define NIC_PF_INTR_ID_ECC1_DBE 3
#define NIC_PF_INTR_ID_ECC2_SBE 4
#define NIC_PF_INTR_ID_ECC2_DBE 5
#define NIC_PF_INTR_ID_ECC3_SBE 6
#define NIC_PF_INTR_ID_ECC3_DBE 7
#define NIC_PF_INTR_ID_MBOX0 8
#define NIC_PF_INTR_ID_MBOX1 9
/* Global timer for CQ timer thresh interrupts
* Calculated for SCLK of 700 MHz;
* the value written should be 1/16th of what is expected
*
* 1 tick per ms
*/
#define NICPF_CLK_PER_INT_TICK 43750
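/* Sanity check of the value above: 700,000,000 cycles/s / 1000 = 700,000
 * cycles per ms; 700,000 / 16 = 43,750.
 */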
struct nicvf_cq_poll {
u8 cq_idx; /* Completion queue index */
};
#define NIC_MAX_RSS_HASH_BITS 8
#define NIC_MAX_RSS_IDR_TBL_SIZE BIT(NIC_MAX_RSS_HASH_BITS)
#define RSS_HASH_KEY_SIZE 5 /* 320 bit key */
#ifdef VNIC_RSS_SUPPORT
struct nicvf_rss_info {
bool enable;
#define RSS_L2_EXTENDED_HASH_ENA BIT(0)
#define RSS_IP_HASH_ENA BIT(1)
#define RSS_TCP_HASH_ENA BIT(2)
#define RSS_TCP_SYN_DIS BIT(3)
#define RSS_UDP_HASH_ENA BIT(4)
#define RSS_L4_EXTENDED_HASH_ENA BIT(5)
#define RSS_ROCE_ENA BIT(6)
#define RSS_L3_BI_DIRECTION_ENA BIT(7)
#define RSS_L4_BI_DIRECTION_ENA BIT(8)
u64 cfg;
u8 hash_bits;
u16 rss_size;
u8 ind_tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
u64 key[RSS_HASH_KEY_SIZE];
};
#endif
enum rx_stats_reg_offset {
RX_OCTS = 0x0,
RX_UCAST = 0x1,
RX_BCAST = 0x2,
RX_MCAST = 0x3,
RX_RED = 0x4,
RX_RED_OCTS = 0x5,
RX_ORUN = 0x6,
RX_ORUN_OCTS = 0x7,
RX_FCS = 0x8,
RX_L2ERR = 0x9,
RX_DRP_BCAST = 0xa,
RX_DRP_MCAST = 0xb,
RX_DRP_L3BCAST = 0xc,
RX_DRP_L3MCAST = 0xd,
RX_STATS_ENUM_LAST,
};
enum tx_stats_reg_offset {
TX_OCTS = 0x0,
TX_UCAST = 0x1,
TX_BCAST = 0x2,
TX_MCAST = 0x3,
TX_DROP = 0x4,
TX_STATS_ENUM_LAST,
};
struct nicvf_hw_stats {
u64 rx_bytes_ok;
u64 rx_ucast_frames_ok;
u64 rx_bcast_frames_ok;
u64 rx_mcast_frames_ok;
u64 rx_fcs_errors;
u64 rx_l2_errors;
u64 rx_drop_red;
u64 rx_drop_red_bytes;
u64 rx_drop_overrun;
u64 rx_drop_overrun_bytes;
u64 rx_drop_bcast;
u64 rx_drop_mcast;
u64 rx_drop_l3_bcast;
u64 rx_drop_l3_mcast;
u64 tx_bytes_ok;
u64 tx_ucast_frames_ok;
u64 tx_bcast_frames_ok;
u64 tx_mcast_frames_ok;
u64 tx_drops;
};
struct nicvf_drv_stats {
/* Rx */
u64 rx_frames_ok;
u64 rx_frames_64;
u64 rx_frames_127;
u64 rx_frames_255;
u64 rx_frames_511;
u64 rx_frames_1023;
u64 rx_frames_1518;
u64 rx_frames_jumbo;
u64 rx_drops;
/* Tx */
u64 tx_frames_ok;
u64 tx_drops;
u64 tx_busy;
u64 tx_tso;
};
struct hw_info {
u8 bgx_cnt;
u8 chans_per_lmac;
u8 chans_per_bgx; /* Rx/Tx chans */
u8 chans_per_rgx;
u8 chans_per_lbk;
u16 cpi_cnt;
u16 rssi_cnt;
u16 rss_ind_tbl_size;
u16 tl4_cnt;
u16 tl3_cnt;
u8 tl2_cnt;
u8 tl1_cnt;
bool tl1_per_bgx; /* TL1 per BGX or per LMAC */
u8 model_id;
};
struct nicvf {
struct udevice *dev;
u8 vf_id;
bool sqs_mode:1;
bool loopback_supported:1;
u8 tns_mode;
u8 node;
u16 mtu;
struct queue_set *qs;
#define MAX_SQS_PER_VF_SINGLE_NODE 5
#define MAX_SQS_PER_VF 11
u8 num_qs;
void *addnl_qs;
u16 vf_mtu;
void __iomem *reg_base;
#define MAX_QUEUES_PER_QSET 8
struct nicvf_cq_poll *napi[8];
u8 cpi_alg;
struct nicvf_hw_stats stats;
struct nicvf_drv_stats drv_stats;
struct nicpf *nicpf;
/* VF <-> PF mailbox communication */
bool pf_acked;
bool pf_nacked;
bool set_mac_pending;
bool link_up;
u8 duplex;
u32 speed;
u8 rev_id;
u8 rx_queues;
u8 tx_queues;
bool open;
bool rb_alloc_fail;
void *rcv_buf;
bool hw_tso;
};
static inline int node_id(void *addr)
{
return ((uintptr_t)addr >> 44) & 0x3;
}
struct nicpf {
struct udevice *udev;
struct hw_info *hw;
u8 node;
unsigned int flags;
u16 total_vf_cnt; /* Total num of VF supported */
u16 num_vf_en; /* No of VF enabled */
void __iomem *reg_base; /* Register start address */
u16 rss_ind_tbl_size;
u8 num_sqs_en; /* Secondary qsets enabled */
u64 nicvf[MAX_NUM_VFS_SUPPORTED];
u8 vf_sqs[MAX_NUM_VFS_SUPPORTED][MAX_SQS_PER_VF];
u8 pqs_vf[MAX_NUM_VFS_SUPPORTED];
bool sqs_used[MAX_NUM_VFS_SUPPORTED];
struct pkind_cfg pkind;
u8 bgx_cnt;
u8 rev_id;
#define NIC_SET_VF_LMAC_MAP(bgx, lmac) ((((bgx) & 0xF) << 4) | ((lmac) & 0xF))
#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) (((map) >> 4) & 0xF)
#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) ((map) & 0xF)
u8 vf_lmac_map[MAX_LMAC];
u16 cpi_base[MAX_NUM_VFS_SUPPORTED];
u64 mac[MAX_NUM_VFS_SUPPORTED];
bool mbx_lock[MAX_NUM_VFS_SUPPORTED];
u8 link[MAX_LMAC];
u8 duplex[MAX_LMAC];
u32 speed[MAX_LMAC];
bool vf_enabled[MAX_NUM_VFS_SUPPORTED];
u16 rssi_base[MAX_NUM_VFS_SUPPORTED];
u8 lmac_cnt;
};
/* PF <--> VF mailbox communication
 * Two 64-bit registers (NIC_PF_VF_MAILBOX_SIZE below) are shared
 * between the PF and each VF; writing the word that triggers the
 * mailbox interrupt marks the end of a message.
 */
#define NIC_PF_VF_MAILBOX_SIZE 2
#define NIC_PF_VF_MBX_TIMEOUT 2000 /* ms */
/* Mailbox message types */
#define NIC_MBOX_MSG_READY 0x01 /* Is PF ready to rcv msgs */
#define NIC_MBOX_MSG_ACK 0x02 /* ACK the message received */
#define NIC_MBOX_MSG_NACK 0x03 /* NACK the message received */
#define NIC_MBOX_MSG_QS_CFG 0x04 /* Configure Qset */
#define NIC_MBOX_MSG_RQ_CFG 0x05 /* Configure receive queue */
#define NIC_MBOX_MSG_SQ_CFG 0x06 /* Configure Send queue */
#define NIC_MBOX_MSG_RQ_DROP_CFG 0x07 /* Configure receive queue */
#define NIC_MBOX_MSG_SET_MAC 0x08 /* Add MAC ID to DMAC filter */
#define NIC_MBOX_MSG_SET_MAX_FRS 0x09 /* Set max frame size */
#define NIC_MBOX_MSG_CPI_CFG 0x0A /* Config CPI, RSSI */
#define NIC_MBOX_MSG_RSS_SIZE 0x0B /* Get RSS indir_tbl size */
#define NIC_MBOX_MSG_RSS_CFG 0x0C /* Config RSS table */
#define NIC_MBOX_MSG_RSS_CFG_CONT 0x0D /* RSS config continuation */
#define NIC_MBOX_MSG_RQ_BP_CFG 0x0E /* RQ backpressure config */
#define NIC_MBOX_MSG_RQ_SW_SYNC 0x0F /* Flush inflight pkts to RQ */
#define NIC_MBOX_MSG_BGX_STATS 0x10 /* Get stats from BGX */
#define NIC_MBOX_MSG_BGX_LINK_CHANGE 0x11 /* BGX:LMAC link status */
#define NIC_MBOX_MSG_ALLOC_SQS 0x12 /* Allocate secondary Qset */
#define NIC_MBOX_MSG_NICVF_PTR 0x13 /* Send nicvf ptr to PF */
#define NIC_MBOX_MSG_PNICVF_PTR 0x14 /* Get primary qset nicvf ptr */
#define NIC_MBOX_MSG_SNICVF_PTR 0x15 /* Send secondary Qset nicvf ptr to primary VF */
#define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */
#define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */
#define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */
struct nic_cfg_msg {
u8 msg;
u8 vf_id;
u8 node_id;
bool tns_mode:1;
bool sqs_mode:1;
bool loopback_supported:1;
u8 mac_addr[6];
};
/* Qset configuration */
struct qs_cfg_msg {
u8 msg;
u8 num;
u8 sqs_count;
u64 cfg;
};
/* Receive queue configuration */
struct rq_cfg_msg {
u8 msg;
u8 qs_num;
u8 rq_num;
u64 cfg;
};
/* Send queue configuration */
struct sq_cfg_msg {
u8 msg;
u8 qs_num;
u8 sq_num;
bool sqs_mode;
u64 cfg;
};
/* Set VF's MAC address */
struct set_mac_msg {
u8 msg;
u8 vf_id;
u8 mac_addr[6];
};
/* Set Maximum frame size */
struct set_frs_msg {
u8 msg;
u8 vf_id;
u16 max_frs;
};
/* Set CPI algorithm type */
struct cpi_cfg_msg {
u8 msg;
u8 vf_id;
u8 rq_cnt;
u8 cpi_alg;
};
/* Get RSS table size */
struct rss_sz_msg {
u8 msg;
u8 vf_id;
u16 ind_tbl_size;
};
/* Set RSS configuration */
struct rss_cfg_msg {
u8 msg;
u8 vf_id;
u8 hash_bits;
u8 tbl_len;
u8 tbl_offset;
#define RSS_IND_TBL_LEN_PER_MBX_MSG 8
u8 ind_tbl[RSS_IND_TBL_LEN_PER_MBX_MSG];
};
struct bgx_stats_msg {
u8 msg;
u8 vf_id;
u8 rx;
u8 idx;
u64 stats;
};
/* Physical interface link status */
struct bgx_link_status {
u8 msg;
u8 link_up;
u8 duplex;
u32 speed;
};
#ifdef VNIC_MULTI_QSET_SUPPORT
/* Get Extra Qset IDs */
struct sqs_alloc {
u8 msg;
u8 vf_id;
u8 qs_count;
};
struct nicvf_ptr {
u8 msg;
u8 vf_id;
bool sqs_mode;
u8 sqs_id;
u64 nicvf;
};
#endif
/* Set interface in loopback mode */
struct set_loopback {
u8 msg;
u8 vf_id;
bool enable;
};
/* 128 bit shared memory between PF and each VF */
union nic_mbx {
struct { u8 msg; } msg;
struct nic_cfg_msg nic_cfg;
struct qs_cfg_msg qs;
struct rq_cfg_msg rq;
struct sq_cfg_msg sq;
struct set_mac_msg mac;
struct set_frs_msg frs;
struct cpi_cfg_msg cpi_cfg;
struct rss_sz_msg rss_size;
struct rss_cfg_msg rss_cfg;
struct bgx_stats_msg bgx_stats;
struct bgx_link_status link_status;
#ifdef VNIC_MULTI_QSET_SUPPORT
struct sqs_alloc sqs_alloc;
struct nicvf_ptr nicvf;
#endif
struct set_loopback lbk;
};
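/*
 * Typical VF-side usage (sketch): zero the union, fill one member and
 * hand it to nicvf_send_msg_to_pf(), e.g. to request a max frame size
 * of 1518 bytes (value chosen for illustration only):
 *
 *	union nic_mbx mbx = {};
 *	mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
 *	mbx.frs.vf_id = nic->vf_id;
 *	mbx.frs.max_frs = 1518;
 *	nicvf_send_msg_to_pf(nic, &mbx);
 */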
int nicvf_set_real_num_queues(struct udevice *dev,
int tx_queues, int rx_queues);
int nicvf_open(struct udevice *dev);
void nicvf_stop(struct udevice *dev);
int nicvf_send_msg_to_pf(struct nicvf *vf, union nic_mbx *mbx);
void nicvf_update_stats(struct nicvf *nic);
void nic_handle_mbx_intr(struct nicpf *nic, int vf);
int bgx_poll_for_link(int node, int bgx_idx, int lmacid);
const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid);
void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable);
void bgx_lmac_internal_loopback(int node, int bgx_idx,
int lmac_idx, bool enable);
static inline bool pass1_silicon(unsigned int revision, int model_id)
{
return ((revision < 8) && (model_id == 0x88));
}
static inline bool pass2_silicon(unsigned int revision, int model_id)
{
return ((revision >= 8) && (model_id == 0x88));
}
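/*
 * Example: an 88xx part with PCI revision ID 0 is pass1 silicon;
 * revision ID 8 or above is pass2.
 */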
#endif /* NIC_H */

@ -0,0 +1,778 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Marvell International Ltd.
*/
#include <config.h>
#include <net.h>
#include <netdev.h>
#include <malloc.h>
#include <miiphy.h>
#include <dm.h>
#include <misc.h>
#include <pci.h>
#include <pci_ids.h>
#include <asm/io.h>
#include <linux/delay.h>
#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
unsigned long rounddown_pow_of_two(unsigned long n)
{
/* Smear the highest set bit into every lower bit position */
n |= n >> 1;
n |= n >> 2;
n |= n >> 4;
n |= n >> 8;
n |= n >> 16;
n |= n >> 32;
/* Keep only the highest set bit: e.g. 65 -> 64, 64 -> 64 */
return n - (n >> 1);
}
static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg);
static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
struct sq_cfg_msg *sq);
static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf);
static int nic_rcv_queue_sw_sync(struct nicpf *nic);
/* Register read/write APIs */
static void nic_reg_write(struct nicpf *nic, u64 offset, u64 val)
{
writeq(val, nic->reg_base + offset);
}
static u64 nic_reg_read(struct nicpf *nic, u64 offset)
{
return readq(nic->reg_base + offset);
}
static u64 nic_get_mbx_addr(int vf)
{
return NIC_PF_VF_0_127_MAILBOX_0_1 + (vf << NIC_VF_NUM_SHIFT);
}
static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
{
void __iomem *mbx_addr = (void *)(nic->reg_base + nic_get_mbx_addr(vf));
u64 *msg = (u64 *)mbx;
/* In first-revision HW the mailbox interrupt is triggered
 * when the PF writes to MBOX(1); in later revisions, when
 * the PF writes to MBOX(0). Write the triggering word last
 * so the full message is in place before the interrupt fires.
 */
if (pass1_silicon(nic->rev_id, nic->hw->model_id)) {
writeq(msg[0], mbx_addr);
writeq(msg[1], mbx_addr + 8);
} else {
writeq(msg[1], mbx_addr + 8);
writeq(msg[0], mbx_addr);
}
}
static void nic_mbx_send_ready(struct nicpf *nic, int vf)
{
union nic_mbx mbx = {};
int bgx_idx, lmac, timeout = 5, link = -1;
const u8 *mac;
mbx.nic_cfg.msg = NIC_MBOX_MSG_READY;
mbx.nic_cfg.vf_id = vf;
if (nic->flags & NIC_TNS_ENABLED)
mbx.nic_cfg.tns_mode = NIC_TNS_MODE;
else
mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;
if (vf < nic->num_vf_en) {
bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
if (mac)
memcpy((u8 *)&mbx.nic_cfg.mac_addr, mac, 6);
while (timeout-- && (link <= 0)) {
link = bgx_poll_for_link(nic->node, bgx_idx, lmac);
debug("Link status: %d\n", link);
if (link <= 0)
mdelay(2000);
}
}
#ifdef VNIC_MULTI_QSET_SUPPORT
mbx.nic_cfg.sqs_mode = (vf >= nic->num_vf_en);
#endif
mbx.nic_cfg.node_id = nic->node;
mbx.nic_cfg.loopback_supported = vf < nic->num_vf_en;
nic_send_msg_to_vf(nic, vf, &mbx);
}
/* ACK a VF's mailbox message
 * @vf: VF to which the ACK is to be sent
 */
static void nic_mbx_send_ack(struct nicpf *nic, int vf)
{
union nic_mbx mbx = {};
mbx.msg.msg = NIC_MBOX_MSG_ACK;
nic_send_msg_to_vf(nic, vf, &mbx);
}
/* NACK a VF's mailbox message when the PF is not able to
 * complete the requested action
 * @vf: VF to which the NACK is to be sent
 */
static void nic_mbx_send_nack(struct nicpf *nic, int vf)
{
union nic_mbx mbx = {};
mbx.msg.msg = NIC_MBOX_MSG_NACK;
nic_send_msg_to_vf(nic, vf, &mbx);
}
static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
{
int bgx_idx, lmac_idx;
if (lbk->vf_id >= nic->num_vf_en)
return -1;
bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
lmac_idx = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable);
return 0;
}
/* Interrupt handler to handle mailbox messages from VFs */
void nic_handle_mbx_intr(struct nicpf *nic, int vf)
{
union nic_mbx mbx = {};
u64 *mbx_data;
u64 mbx_addr;
u64 reg_addr;
u64 cfg;
int bgx, lmac;
int i;
int ret = 0;
nic->mbx_lock[vf] = true;
mbx_addr = nic_get_mbx_addr(vf);
mbx_data = (u64 *)&mbx;
for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
*mbx_data = nic_reg_read(nic, mbx_addr);
mbx_data++;
mbx_addr += sizeof(u64);
}
debug("%s: Mailbox msg %d from VF%d\n", __func__, mbx.msg.msg, vf);
switch (mbx.msg.msg) {
case NIC_MBOX_MSG_READY:
nic_mbx_send_ready(nic, vf);
if (vf < nic->num_vf_en) {
nic->link[vf] = 0;
nic->duplex[vf] = 0;
nic->speed[vf] = 0;
}
ret = 1;
break;
case NIC_MBOX_MSG_QS_CFG:
reg_addr = NIC_PF_QSET_0_127_CFG |
(mbx.qs.num << NIC_QS_ID_SHIFT);
cfg = mbx.qs.cfg;
#ifdef VNIC_MULTI_QSET_SUPPORT
/* Check if it's a secondary Qset */
if (vf >= nic->num_vf_en) {
cfg = cfg & (~0x7FULL);
/* Assign this Qset to primary Qset's VF */
cfg |= nic->pqs_vf[vf];
}
#endif
nic_reg_write(nic, reg_addr, cfg);
break;
case NIC_MBOX_MSG_RQ_CFG:
reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
(mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
(mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
nic_reg_write(nic, reg_addr, mbx.rq.cfg);
/* Enable the CQE_RX2_S extension in the CQE_RX descriptor.
 * It is appended by default on 81xx/83xx chips; for
 * consistency, enable it as well on 88xx pass2, where the
 * extension was first introduced.
 */
if (pass2_silicon(nic->rev_id, nic->hw->model_id))
nic_reg_write(nic, NIC_PF_RX_CFG, 0x01);
break;
case NIC_MBOX_MSG_RQ_BP_CFG:
reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG |
(mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
(mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
nic_reg_write(nic, reg_addr, mbx.rq.cfg);
break;
case NIC_MBOX_MSG_RQ_SW_SYNC:
ret = nic_rcv_queue_sw_sync(nic);
break;
case NIC_MBOX_MSG_RQ_DROP_CFG:
reg_addr = NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG |
(mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
(mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
nic_reg_write(nic, reg_addr, mbx.rq.cfg);
break;
case NIC_MBOX_MSG_SQ_CFG:
reg_addr = NIC_PF_QSET_0_127_SQ_0_7_CFG |
(mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
(mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
nic_reg_write(nic, reg_addr, mbx.sq.cfg);
nic_tx_channel_cfg(nic, mbx.qs.num,
(struct sq_cfg_msg *)&mbx.sq);
break;
case NIC_MBOX_MSG_SET_MAC:
#ifdef VNIC_MULTI_QSET_SUPPORT
if (vf >= nic->num_vf_en)
break;
#endif
lmac = mbx.mac.vf_id;
bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
bgx_set_lmac_mac(nic->node, bgx, lmac, mbx.mac.mac_addr);
break;
case NIC_MBOX_MSG_SET_MAX_FRS:
ret = nic_update_hw_frs(nic, mbx.frs.max_frs,
mbx.frs.vf_id);
break;
case NIC_MBOX_MSG_CPI_CFG:
nic_config_cpi(nic, &mbx.cpi_cfg);
break;
#ifdef VNIC_RSS_SUPPORT
case NIC_MBOX_MSG_RSS_SIZE:
nic_send_rss_size(nic, vf);
goto unlock;
case NIC_MBOX_MSG_RSS_CFG:
case NIC_MBOX_MSG_RSS_CFG_CONT:
nic_config_rss(nic, &mbx.rss_cfg);
break;
#endif
case NIC_MBOX_MSG_CFG_DONE:
/* Last message of VF config msg sequence */
nic->vf_enabled[vf] = true;
if (vf >= nic->lmac_cnt)
goto unlock;
bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, true);
goto unlock;
case NIC_MBOX_MSG_SHUTDOWN:
/* First msg in VF teardown sequence */
nic->vf_enabled[vf] = false;
#ifdef VNIC_MULTI_QSET_SUPPORT
if (vf >= nic->num_vf_en)
nic->sqs_used[vf - nic->num_vf_en] = false;
nic->pqs_vf[vf] = 0;
#endif
if (vf >= nic->lmac_cnt)
break;
bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, false);
break;
#ifdef VNIC_MULTI_QSET_SUPPORT
case NIC_MBOX_MSG_ALLOC_SQS:
nic_alloc_sqs(nic, &mbx.sqs_alloc);
goto unlock;
case NIC_MBOX_MSG_NICVF_PTR:
nic->nicvf[vf] = mbx.nicvf.nicvf;
break;
case NIC_MBOX_MSG_PNICVF_PTR:
nic_send_pnicvf(nic, vf);
goto unlock;
case NIC_MBOX_MSG_SNICVF_PTR:
nic_send_snicvf(nic, &mbx.nicvf);
goto unlock;
#endif
case NIC_MBOX_MSG_LOOPBACK:
ret = nic_config_loopback(nic, &mbx.lbk);
break;
default:
printf("Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
break;
}
if (!ret)
nic_mbx_send_ack(nic, vf);
else if (mbx.msg.msg != NIC_MBOX_MSG_READY)
nic_mbx_send_nack(nic, vf);
unlock:
nic->mbx_lock[vf] = false;
}
static int nic_rcv_queue_sw_sync(struct nicpf *nic)
{
int timeout = 20;
nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x01);
while (timeout) {
if (nic_reg_read(nic, NIC_PF_SW_SYNC_RX_DONE) & 0x1)
break;
udelay(2000);
timeout--;
}
nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x00);
if (!timeout) {
printf("Recevie queue software sync failed");
return 1;
}
return 0;
}
static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
{
u64 *pkind = (u64 *)&nic->pkind;
if (new_frs > NIC_HW_MAX_FRS || new_frs < NIC_HW_MIN_FRS) {
printf("Invalid MTU setting from VF%d rejected,", vf);
printf(" should be between %d and %d\n", NIC_HW_MIN_FRS,
NIC_HW_MAX_FRS);
return 1;
}
new_frs += ETH_HLEN;
if (new_frs <= nic->pkind.maxlen)
return 0;
nic->pkind.maxlen = new_frs;
nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *pkind);
return 0;
}
/* Set minimum transmit packet size */
static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
{
int lmac;
u64 lmac_cfg;
struct hw_info *hw = nic->hw;
int max_lmac = nic->hw->bgx_cnt * MAX_LMAC_PER_BGX;
/* The pad length field holds size / 4, so the max programmable
 * value is 60; this driver caps it at 52.
 */
if (size > 52)
size = 52;
/* CN81XX has RGX configured as a fake BGX; adjust max_lmac accordingly */
if (hw->chans_per_rgx)
max_lmac = ((nic->hw->bgx_cnt - 1) * MAX_LMAC_PER_BGX) + 1;
for (lmac = 0; lmac < max_lmac; lmac++) {
lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
lmac_cfg &= ~(0xF << 2);
lmac_cfg |= ((size / 4) << 2);
nic_reg_write(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3), lmac_cfg);
}
}
/* Function to check number of LMACs present and set VF to LMAC mapping.
* Mapping will be used while initializing channels.
*/
static void nic_set_lmac_vf_mapping(struct nicpf *nic)
{
int bgx, bgx_count, next_bgx_lmac = 0;
int lmac, lmac_cnt = 0;
u64 lmac_credit;
nic->num_vf_en = 0;
if (nic->flags & NIC_TNS_ENABLED) {
nic->num_vf_en = DEFAULT_NUM_VF_ENABLED;
return;
}
bgx_get_count(nic->node, &bgx_count);
debug("bgx_count: %d\n", bgx_count);
for (bgx = 0; bgx < nic->hw->bgx_cnt; bgx++) {
if (!(bgx_count & (1 << bgx)))
continue;
nic->bgx_cnt++;
lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
debug("lmac_cnt: %d for BGX%d\n", lmac_cnt, bgx);
for (lmac = 0; lmac < lmac_cnt; lmac++)
nic->vf_lmac_map[next_bgx_lmac++] =
NIC_SET_VF_LMAC_MAP(bgx, lmac);
nic->num_vf_en += lmac_cnt;
/* Program LMAC credits */
lmac_credit = (1ull << 1); /* channel credit enable */
lmac_credit |= (0x1ff << 2);
lmac_credit |= (((((48 * 1024) / lmac_cnt) -
NIC_HW_MAX_FRS) / 16) << 12);
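/* Worked example (assuming NIC_HW_MAX_FRS is 9190, as in the Linux
 * thunder driver): with 4 LMACs on a BGX the credit works out to
 * ((48 * 1024 / 4) - 9190) / 16 = 193 16-byte units per LMAC.
 */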
lmac = bgx * MAX_LMAC_PER_BGX;
for (; lmac < lmac_cnt + (bgx * MAX_LMAC_PER_BGX); lmac++)
nic_reg_write(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8),
lmac_credit);
}
}
static void nic_get_hw_info(struct nicpf *nic)
{
u16 sdevid;
struct hw_info *hw = nic->hw;
dm_pci_read_config16(nic->udev, PCI_SUBSYSTEM_ID, &sdevid);
switch (sdevid) {
case PCI_SUBSYS_DEVID_88XX_NIC_PF:
hw->bgx_cnt = MAX_BGX_PER_NODE;
hw->chans_per_lmac = 16;
hw->chans_per_bgx = 128;
hw->cpi_cnt = 2048;
hw->rssi_cnt = 4096;
hw->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;
hw->tl3_cnt = 256;
hw->tl2_cnt = 64;
hw->tl1_cnt = 2;
hw->tl1_per_bgx = true;
hw->model_id = 0x88;
break;
case PCI_SUBSYS_DEVID_81XX_NIC_PF:
hw->bgx_cnt = MAX_BGX_PER_NODE;
hw->chans_per_lmac = 8;
hw->chans_per_bgx = 32;
hw->chans_per_rgx = 8;
hw->chans_per_lbk = 24;
hw->cpi_cnt = 512;
hw->rssi_cnt = 256;
hw->rss_ind_tbl_size = 32; /* Max RSSI / Max interfaces */
hw->tl3_cnt = 64;
hw->tl2_cnt = 16;
hw->tl1_cnt = 10;
hw->tl1_per_bgx = false;
hw->model_id = 0x81;
break;
case PCI_SUBSYS_DEVID_83XX_NIC_PF:
hw->bgx_cnt = MAX_BGX_PER_NODE;
hw->chans_per_lmac = 8;
hw->chans_per_bgx = 32;
hw->chans_per_lbk = 64;
hw->cpi_cnt = 2048;
hw->rssi_cnt = 1024;
hw->rss_ind_tbl_size = 64; /* Max RSSI / Max interfaces */
hw->tl3_cnt = 256;
hw->tl2_cnt = 64;
hw->tl1_cnt = 18;
hw->tl1_per_bgx = false;
hw->model_id = 0x83;
break;
}
hw->tl4_cnt = MAX_QUEUES_PER_QSET * pci_sriov_get_totalvfs(nic->udev);
}
static void nic_init_hw(struct nicpf *nic)
{
int i;
u64 reg;
u64 *pkind = (u64 *)&nic->pkind;
/* Get HW capability info */
nic_get_hw_info(nic);
/* Enable NIC HW block */
nic_reg_write(nic, NIC_PF_CFG, 0x3);
/* Enable backpressure */
nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03);
nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG, (1ULL << 63) | 0x08);
nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
(1ULL << 63) | 0x09);
for (i = 0; i < NIC_MAX_CHANS; i++)
nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (i << 3), 1);
if (nic->flags & NIC_TNS_ENABLED) {
reg = NIC_TNS_MODE << 7;
reg |= 0x06;
nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG, reg);
reg &= ~0xFull;
reg |= 0x07;
nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8), reg);
} else {
/* Disable TNS mode on both interfaces */
reg = NIC_TNS_BYPASS_MODE << 7;
reg |= 0x08; /* Block identifier */
nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG, reg);
reg &= ~0xFull;
reg |= 0x09;
nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8), reg);
}
/* PKIND configuration */
nic->pkind.minlen = 0;
nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN;
nic->pkind.lenerr_en = 1;
nic->pkind.rx_hdr = 0;
nic->pkind.hdr_sl = 0;
for (i = 0; i < NIC_MAX_PKIND; i++)
nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (i << 3), *pkind);
nic_set_tx_pkt_pad(nic, NIC_HW_MIN_FRS);
/* Timer config */
nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);
}
/* Channel parse index configuration */
static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
{
struct hw_info *hw = nic->hw;
u32 vnic, bgx, lmac, chan;
u32 padd, cpi_count = 0;
u64 cpi_base, cpi, rssi_base, rssi;
u8 qset, rq_idx = 0;
vnic = cfg->vf_id;
bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
cpi_base = vnic * NIC_MAX_CPI_PER_LMAC;
rssi_base = vnic * hw->rss_ind_tbl_size;
/* Rx channel configuration */
nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3),
(1ull << 63) | (vnic << 0));
nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_CFG | (chan << 3),
((u64)cfg->cpi_alg << 62) | (cpi_base << 48));
if (cfg->cpi_alg == CPI_ALG_NONE)
cpi_count = 1;
else if (cfg->cpi_alg == CPI_ALG_VLAN) /* 3 bits of PCP */
cpi_count = 8;
else if (cfg->cpi_alg == CPI_ALG_VLAN16) /* 3 bits PCP + DEI */
cpi_count = 16;
else if (cfg->cpi_alg == CPI_ALG_DIFF) /* 6bits DSCP */
cpi_count = NIC_MAX_CPI_PER_LMAC;
/* RSS Qset, Qidx mapping */
qset = cfg->vf_id;
rssi = rssi_base;
for (; rssi < (rssi_base + cfg->rq_cnt); rssi++) {
nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
(qset << 3) | rq_idx);
rq_idx++;
}
rssi = 0;
cpi = cpi_base;
for (; cpi < (cpi_base + cpi_count); cpi++) {
/* Determine port to channel adder */
if (cfg->cpi_alg != CPI_ALG_DIFF)
padd = cpi % cpi_count;
else
padd = cpi % 8; /* 3 bits CS out of 6bits DSCP */
/* Leave RSS_SIZE as '0' to disable RSS */
if (pass1_silicon(nic->rev_id, nic->hw->model_id)) {
nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
(vnic << 24) | (padd << 16) |
(rssi_base + rssi));
} else {
/* Set MPI_ALG to '0' to disable MCAM parsing */
nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
(padd << 16));
/* MPI index is same as CPI if MPI_ALG is not enabled */
nic_reg_write(nic, NIC_PF_MPI_0_2047_CFG | (cpi << 3),
(vnic << 24) | (rssi_base + rssi));
}
if ((rssi + 1) >= cfg->rq_cnt)
continue;
if (cfg->cpi_alg == CPI_ALG_VLAN)
rssi++;
else if (cfg->cpi_alg == CPI_ALG_VLAN16)
rssi = ((cpi - cpi_base) & 0xe) >> 1;
else if (cfg->cpi_alg == CPI_ALG_DIFF)
rssi = ((cpi - cpi_base) & 0x38) >> 3;
}
nic->cpi_base[cfg->vf_id] = cpi_base;
nic->rssi_base[cfg->vf_id] = rssi_base;
}
/* Transmit channel configuration (TL4 -> TL3 -> Chan)
* VNIC0-SQ0 -> TL4(0) -> TL4A(0) -> TL3[0] -> BGX0/LMAC0/Chan0
* VNIC1-SQ0 -> TL4(8) -> TL4A(2) -> TL3[2] -> BGX0/LMAC1/Chan0
* VNIC2-SQ0 -> TL4(16) -> TL4A(4) -> TL3[4] -> BGX0/LMAC2/Chan0
* VNIC3-SQ0 -> TL4(24) -> TL4A(6) -> TL3[6] -> BGX0/LMAC3/Chan0
* VNIC4-SQ0 -> TL4(512) -> TL4A(128) -> TL3[128] -> BGX1/LMAC0/Chan0
* VNIC5-SQ0 -> TL4(520) -> TL4A(130) -> TL3[130] -> BGX1/LMAC1/Chan0
* VNIC6-SQ0 -> TL4(528) -> TL4A(132) -> TL3[132] -> BGX1/LMAC2/Chan0
* VNIC7-SQ0 -> TL4(536) -> TL4A(134) -> TL3[134] -> BGX1/LMAC3/Chan0
*/
static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
struct sq_cfg_msg *sq)
{
struct hw_info *hw = nic->hw;
u32 bgx, lmac, chan;
u32 tl2, tl3, tl4;
u32 rr_quantum;
u8 sq_idx = sq->sq_num;
u8 pqs_vnic = vnic;
int svf;
u16 sdevid;
dm_pci_read_config16(nic->udev, PCI_SUBSYSTEM_ID, &sdevid);
bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
/* 24 bytes for FCS, IPG and preamble */
rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);
/* On 88xx, TL4s 0-511 transmit via BGX0 and
 * TL4s 512-1023 transmit via BGX1.
 */
if (hw->tl1_per_bgx) {
tl4 = bgx * (hw->tl4_cnt / hw->bgx_cnt);
if (!sq->sqs_mode) {
tl4 += (lmac * MAX_QUEUES_PER_QSET);
} else {
for (svf = 0; svf < MAX_SQS_PER_VF_SINGLE_NODE; svf++) {
if (nic->vf_sqs[pqs_vnic][svf] == vnic)
break;
}
tl4 += (MAX_LMAC_PER_BGX * MAX_QUEUES_PER_QSET);
tl4 += (lmac * MAX_QUEUES_PER_QSET *
MAX_SQS_PER_VF_SINGLE_NODE);
tl4 += (svf * MAX_QUEUES_PER_QSET);
}
} else {
tl4 = (vnic * MAX_QUEUES_PER_QSET);
}
tl4 += sq_idx;
tl3 = tl4 / (hw->tl4_cnt / hw->tl3_cnt);
nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
((u64)vnic << NIC_QS_ID_SHIFT) |
((u32)sq_idx << NIC_Q_NUM_SHIFT), tl4);
nic_reg_write(nic, NIC_PF_TL4_0_1023_CFG | (tl4 << 3),
((u64)vnic << 27) | ((u32)sq_idx << 24) | rr_quantum);
nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum);
/* On 88xx, channels 0-127 belong to BGX0 and
 * channels 128-255 to BGX1.
 *
 * On 81xx/83xx the TL3_CHAN register must be configured with the
 * channel within the LMAC, i.e. 0-7, not the absolute channel
 * number as on 88xx.
 */
chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
if (hw->tl1_per_bgx)
nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
else
nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), 0);
/* Enable backpressure on the channel */
nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1);
tl2 = tl3 >> 2;
nic_reg_write(nic, NIC_PF_TL3A_0_63_CFG | (tl2 << 3), tl2);
nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum);
/* No priorities as of now */
nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);
/* Unlike 88xx where TL2s 0-31 transmits to TL1 '0' and rest to TL1 '1'
* on 81xx/83xx TL2 needs to be configured to transmit to one of the
* possible LMACs.
*
* This register doesn't exist on 88xx.
*/
if (!hw->tl1_per_bgx)
nic_reg_write(nic, NIC_PF_TL2_LMAC | (tl2 << 3),
lmac + (bgx * MAX_LMAC_PER_BGX));
}
int nic_initialize(struct udevice *dev)
{
struct nicpf *nic = dev_get_priv(dev);
nic->udev = dev;
nic->hw = calloc(1, sizeof(struct hw_info));
if (!nic->hw)
return -ENOMEM;
/* MAP PF's configuration registers */
nic->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
PCI_REGION_MEM);
if (!nic->reg_base) {
printf("Cannot map config register space, aborting\n");
goto exit;
}
nic->node = node_id(nic->reg_base);
dm_pci_read_config8(dev, PCI_REVISION_ID, &nic->rev_id);
/* By default set NIC in TNS bypass mode */
nic->flags &= ~NIC_TNS_ENABLED;
/* Initialize hardware */
nic_init_hw(nic);
nic_set_lmac_vf_mapping(nic);
/* Set RSS TBL size for each VF */
nic->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;
nic->rss_ind_tbl_size = rounddown_pow_of_two(nic->rss_ind_tbl_size);
return 0;
exit:
free(nic->hw);
return -ENODEV;
}
int octeontx_nic_probe(struct udevice *dev)
{
int ret = 0;
struct nicpf *nicpf = dev_get_priv(dev);
nicpf->udev = dev;
ret = nic_initialize(dev);
if (ret < 0) {
printf("couldn't initialize NIC PF\n");
return ret;
}
ret = pci_sriov_init(dev, nicpf->num_vf_en);
if (ret < 0)
printf("enabling SRIOV failed for num VFs %d\n",
nicpf->num_vf_en);
return ret;
}
U_BOOT_DRIVER(octeontx_nic) = {
.name = "octeontx_nic",
.id = UCLASS_MISC,
.probe = octeontx_nic_probe,
.priv_auto_alloc_size = sizeof(struct nicpf),
};
static struct pci_device_id octeontx_nic_supported[] = {
{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_NIC) },
{}
};
U_BOOT_PCI_DEVICE(octeontx_nic, octeontx_nic_supported);

@ -0,0 +1,250 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2018 Marvell International Ltd.
*/
#ifndef NIC_REG_H
#define NIC_REG_H
#define NIC_PF_REG_COUNT 29573
#define NIC_VF_REG_COUNT 249
/* Physical function register offsets */
#define NIC_PF_CFG (0x0000)
#define NIC_PF_STATUS (0x0010)
#define NIC_PF_INTR_TIMER_CFG (0x0030)
#define NIC_PF_BIST_STATUS (0x0040)
#define NIC_PF_SOFT_RESET (0x0050)
#define NIC_PF_TCP_TIMER (0x0060)
#define NIC_PF_BP_CFG (0x0080)
#define NIC_PF_RRM_CFG (0x0088)
#define NIC_PF_CQM_CF (0x00A0)
#define NIC_PF_CNM_CF (0x00A8)
#define NIC_PF_CNM_STATUS (0x00B0)
#define NIC_PF_CQ_AVG_CFG (0x00C0)
#define NIC_PF_RRM_AVG_CFG (0x00C8)
#define NIC_PF_INTF_0_1_SEND_CFG (0x0200)
#define NIC_PF_INTF_0_1_BP_CFG (0x0208)
#define NIC_PF_INTF_0_1_BP_DIS_0_1 (0x0210)
#define NIC_PF_INTF_0_1_BP_SW_0_1 (0x0220)
#define NIC_PF_RBDR_BP_STATE_0_3 (0x0240)
#define NIC_PF_MAILBOX_INT (0x0410)
#define NIC_PF_MAILBOX_INT_W1S (0x0430)
#define NIC_PF_MAILBOX_ENA_W1C (0x0450)
#define NIC_PF_MAILBOX_ENA_W1S (0x0470)
#define NIC_PF_RX_ETYPE_0_7 (0x0500)
#define NIC_PF_RX_CFG (0x05D0)
#define NIC_PF_PKIND_0_15_CFG (0x0600)
#define NIC_PF_ECC0_FLIP0 (0x1000)
#define NIC_PF_ECC1_FLIP0 (0x1008)
#define NIC_PF_ECC2_FLIP0 (0x1010)
#define NIC_PF_ECC3_FLIP0 (0x1018)
#define NIC_PF_ECC0_FLIP1 (0x1080)
#define NIC_PF_ECC1_FLIP1 (0x1088)
#define NIC_PF_ECC2_FLIP1 (0x1090)
#define NIC_PF_ECC3_FLIP1 (0x1098)
#define NIC_PF_ECC0_CDIS (0x1100)
#define NIC_PF_ECC1_CDIS (0x1108)
#define NIC_PF_ECC2_CDIS (0x1110)
#define NIC_PF_ECC3_CDIS (0x1118)
#define NIC_PF_BIST0_STATUS (0x1280)
#define NIC_PF_BIST1_STATUS (0x1288)
#define NIC_PF_BIST2_STATUS (0x1290)
#define NIC_PF_BIST3_STATUS (0x1298)
#define NIC_PF_ECC0_SBE_INT (0x2000)
#define NIC_PF_ECC0_SBE_INT_W1S (0x2008)
#define NIC_PF_ECC0_SBE_ENA_W1C (0x2010)
#define NIC_PF_ECC0_SBE_ENA_W1S (0x2018)
#define NIC_PF_ECC0_DBE_INT (0x2100)
#define NIC_PF_ECC0_DBE_INT_W1S (0x2108)
#define NIC_PF_ECC0_DBE_ENA_W1C (0x2110)
#define NIC_PF_ECC0_DBE_ENA_W1S (0x2118)
#define NIC_PF_ECC1_SBE_INT (0x2200)
#define NIC_PF_ECC1_SBE_INT_W1S (0x2208)
#define NIC_PF_ECC1_SBE_ENA_W1C (0x2210)
#define NIC_PF_ECC1_SBE_ENA_W1S (0x2218)
#define NIC_PF_ECC1_DBE_INT (0x2300)
#define NIC_PF_ECC1_DBE_INT_W1S (0x2308)
#define NIC_PF_ECC1_DBE_ENA_W1C (0x2310)
#define NIC_PF_ECC1_DBE_ENA_W1S (0x2318)
#define NIC_PF_ECC2_SBE_INT (0x2400)
#define NIC_PF_ECC2_SBE_INT_W1S (0x2408)
#define NIC_PF_ECC2_SBE_ENA_W1C (0x2410)
#define NIC_PF_ECC2_SBE_ENA_W1S (0x2418)
#define NIC_PF_ECC2_DBE_INT (0x2500)
#define NIC_PF_ECC2_DBE_INT_W1S (0x2508)
#define NIC_PF_ECC2_DBE_ENA_W1C (0x2510)
#define NIC_PF_ECC2_DBE_ENA_W1S (0x2518)
#define NIC_PF_ECC3_SBE_INT (0x2600)
#define NIC_PF_ECC3_SBE_INT_W1S (0x2608)
#define NIC_PF_ECC3_SBE_ENA_W1C (0x2610)
#define NIC_PF_ECC3_SBE_ENA_W1S (0x2618)
#define NIC_PF_ECC3_DBE_INT (0x2700)
#define NIC_PF_ECC3_DBE_INT_W1S (0x2708)
#define NIC_PF_ECC3_DBE_ENA_W1C (0x2710)
#define NIC_PF_ECC3_DBE_ENA_W1S (0x2718)
#define NIC_PF_CPI_0_2047_CFG (0x200000)
#define NIC_PF_MPI_0_2047_CFG (0x210000)
#define NIC_PF_RSSI_0_4097_RQ (0x220000)
#define NIC_PF_LMAC_0_7_CFG (0x240000)
#define NIC_PF_LMAC_0_7_SW_XOFF (0x242000)
#define NIC_PF_LMAC_0_7_CREDIT (0x244000)
#define NIC_PF_CHAN_0_255_TX_CFG (0x400000)
#define NIC_PF_CHAN_0_255_RX_CFG (0x420000)
#define NIC_PF_CHAN_0_255_SW_XOFF (0x440000)
#define NIC_PF_CHAN_0_255_CREDIT (0x460000)
#define NIC_PF_CHAN_0_255_RX_BP_CFG (0x480000)
#define NIC_PF_SW_SYNC_RX (0x490000)
#define NIC_PF_SW_SYNC_RX_DONE (0x490008)
#define NIC_PF_TL2_0_63_CFG (0x500000)
#define NIC_PF_TL2_0_63_PRI (0x520000)
#define NIC_PF_TL2_LMAC (0x540000)
#define NIC_PF_TL2_0_63_SH_STATUS (0x580000)
#define NIC_PF_TL3A_0_63_CFG (0x5F0000)
#define NIC_PF_TL3_0_255_CFG (0x600000)
#define NIC_PF_TL3_0_255_CHAN (0x620000)
#define NIC_PF_TL3_0_255_PIR (0x640000)
#define NIC_PF_TL3_0_255_SW_XOFF (0x660000)
#define NIC_PF_TL3_0_255_CNM_RATE (0x680000)
#define NIC_PF_TL3_0_255_SH_STATUS (0x6A0000)
#define NIC_PF_TL4A_0_255_CFG (0x6F0000)
#define NIC_PF_TL4_0_1023_CFG (0x800000)
#define NIC_PF_TL4_0_1023_SW_XOFF (0x820000)
#define NIC_PF_TL4_0_1023_SH_STATUS (0x840000)
#define NIC_PF_TL4A_0_1023_CNM_RATE (0x880000)
#define NIC_PF_TL4A_0_1023_CNM_STATUS (0x8A0000)
#define NIC_PF_VF_0_127_MAILBOX_0_1 (0x20002030)
#define NIC_PF_VNIC_0_127_TX_STAT_0_4 (0x20004000)
#define NIC_PF_VNIC_0_127_RX_STAT_0_13 (0x20004100)
#define NIC_PF_QSET_0_127_LOCK_0_15 (0x20006000)
#define NIC_PF_QSET_0_127_CFG (0x20010000)
#define NIC_PF_QSET_0_127_RQ_0_7_CFG (0x20010400)
#define NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG (0x20010420)
#define NIC_PF_QSET_0_127_RQ_0_7_BP_CFG (0x20010500)
#define NIC_PF_QSET_0_127_RQ_0_7_STAT_0_1 (0x20010600)
#define NIC_PF_QSET_0_127_SQ_0_7_CFG (0x20010C00)
#define NIC_PF_QSET_0_127_SQ_0_7_CFG2 (0x20010C08)
#define NIC_PF_QSET_0_127_SQ_0_7_STAT_0_1 (0x20010D00)
#define NIC_PF_MSIX_VEC_0_18_ADDR (0x000000)
#define NIC_PF_MSIX_VEC_0_CTL (0x000008)
#define NIC_PF_MSIX_PBA_0 (0x0F0000)
/* Virtual function register offsets */
#define NIC_VNIC_CFG (0x000020)
#define NIC_VF_PF_MAILBOX_0_1 (0x000130)
#define NIC_VF_INT (0x000200)
#define NIC_VF_INT_W1S (0x000220)
#define NIC_VF_ENA_W1C (0x000240)
#define NIC_VF_ENA_W1S (0x000260)
#define NIC_VNIC_RSS_CFG (0x0020E0)
#define NIC_VNIC_RSS_KEY_0_4 (0x002200)
#define NIC_VNIC_TX_STAT_0_4 (0x004000)
#define NIC_VNIC_RX_STAT_0_13 (0x004100)
#define NIC_QSET_RQ_GEN_CFG (0x010010)
#define NIC_QSET_CQ_0_7_CFG (0x010400)
#define NIC_QSET_CQ_0_7_CFG2 (0x010408)
#define NIC_QSET_CQ_0_7_THRESH (0x010410)
#define NIC_QSET_CQ_0_7_BASE (0x010420)
#define NIC_QSET_CQ_0_7_HEAD (0x010428)
#define NIC_QSET_CQ_0_7_TAIL (0x010430)
#define NIC_QSET_CQ_0_7_DOOR (0x010438)
#define NIC_QSET_CQ_0_7_STATUS (0x010440)
#define NIC_QSET_CQ_0_7_STATUS2 (0x010448)
#define NIC_QSET_CQ_0_7_DEBUG (0x010450)
#define NIC_QSET_RQ_0_7_CFG (0x010600)
#define NIC_QSET_RQ_0_7_STAT_0_1 (0x010700)
#define NIC_QSET_SQ_0_7_CFG (0x010800)
#define NIC_QSET_SQ_0_7_THRESH (0x010810)
#define NIC_QSET_SQ_0_7_BASE (0x010820)
#define NIC_QSET_SQ_0_7_HEAD (0x010828)
#define NIC_QSET_SQ_0_7_TAIL (0x010830)
#define NIC_QSET_SQ_0_7_DOOR (0x010838)
#define NIC_QSET_SQ_0_7_STATUS (0x010840)
#define NIC_QSET_SQ_0_7_DEBUG (0x010848)
#define NIC_QSET_SQ_0_7_CNM_CHG (0x010860)
#define NIC_QSET_SQ_0_7_STAT_0_1 (0x010900)
#define NIC_QSET_RBDR_0_1_CFG (0x010C00)
#define NIC_QSET_RBDR_0_1_THRESH (0x010C10)
#define NIC_QSET_RBDR_0_1_BASE (0x010C20)
#define NIC_QSET_RBDR_0_1_HEAD (0x010C28)
#define NIC_QSET_RBDR_0_1_TAIL (0x010C30)
#define NIC_QSET_RBDR_0_1_DOOR (0x010C38)
#define NIC_QSET_RBDR_0_1_STATUS0 (0x010C40)
#define NIC_QSET_RBDR_0_1_STATUS1 (0x010C48)
#define NIC_QSET_RBDR_0_1_PREFETCH_STATUS (0x010C50)
#define NIC_VF_MSIX_VECTOR_0_19_ADDR (0x000000)
#define NIC_VF_MSIX_VECTOR_0_19_CTL (0x000008)
#define NIC_VF_MSIX_PBA (0x0F0000)
/* Offsets within registers */
#define NIC_MSIX_VEC_SHIFT 4
#define NIC_Q_NUM_SHIFT 18
#define NIC_QS_ID_SHIFT 21
#define NIC_VF_NUM_SHIFT 21
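/*
 * Worked example: with NIC_VF_NUM_SHIFT = 21, the VF1 mailbox window
 * sits at NIC_PF_VF_0_127_MAILBOX_0_1 + (1 << 21) = 0x20202030 in BAR0.
 */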
/* Port kind configuration register */
struct pkind_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
uint64_t reserved_42_63:22;
uint64_t hdr_sl:5; /* Header skip length */
uint64_t rx_hdr:3; /* TNS Receive header present */
uint64_t lenerr_en:1; /* L2 length error check enable */
uint64_t reserved_32_32:1;
uint64_t maxlen:16; /* Max frame size */
uint64_t minlen:16; /* Min frame size */
#elif defined(__LITTLE_ENDIAN_BITFIELD)
uint64_t minlen:16;
uint64_t maxlen:16;
uint64_t reserved_32_32:1;
uint64_t lenerr_en:1;
uint64_t rx_hdr:3;
uint64_t hdr_sl:5;
uint64_t reserved_42_63:22;
#endif
};
static inline uint64_t BGXX_PF_BAR0(unsigned long param1)
__attribute__ ((pure, always_inline));
static inline uint64_t BGXX_PF_BAR0(unsigned long param1)
{
assert(param1 <= 1);
return 0x87E0E0000000 + (param1 << 24);
}
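/* Example: BGXX_PF_BAR0(1) = 0x87E0E0000000 + (1 << 24) = 0x87E0E1000000 */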
#define BGXX_PF_BAR0_SIZE 0x400000
#define NIC_PF_BAR0 0x843000000000
#define NIC_PF_BAR0_SIZE 0x40000000
static inline uint64_t NIC_VFX_BAR0(unsigned long param1)
__attribute__ ((pure, always_inline));
static inline uint64_t NIC_VFX_BAR0(unsigned long param1)
{
assert(param1 <= 127);
return 0x8430A0000000 + (param1 << 21);
}
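/* Example: NIC_VFX_BAR0(1) = 0x8430A0000000 + (1 << 21) = 0x8430A0200000 */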
#define NIC_VFX_BAR0_SIZE 0x200000
#endif /* NIC_REG_H */

@ -0,0 +1,581 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Marvell International Ltd.
*/
#include <dm.h>
#include <malloc.h>
#include <misc.h>
#include <net.h>
#include <pci.h>
#include <pci_ids.h>
#include <phy.h>
#include <asm/io.h>
#include <linux/delay.h>
#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"
/* Register read/write APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
{
writeq(val, nic->reg_base + offset);
}
u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
{
return readq(nic->reg_base + offset);
}
void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
u64 qidx, u64 val)
{
void *addr = nic->reg_base + offset;
writeq(val, (void *)(addr + (qidx << NIC_Q_NUM_SHIFT)));
}
u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
{
void *addr = nic->reg_base + offset;
return readq((void *)(addr + (qidx << NIC_Q_NUM_SHIFT)));
}
static void nicvf_handle_mbx_intr(struct nicvf *nic);
/* VF -> PF mailbox communication */
static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
{
u64 *msg = (u64 *)mbx;
nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
}
int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
{
int timeout = NIC_PF_VF_MBX_TIMEOUT;
int sleep = 10;
nic->pf_acked = false;
nic->pf_nacked = false;
nicvf_write_to_mbx(nic, mbx);
nic_handle_mbx_intr(nic->nicpf, nic->vf_id);
/* Wait for the message to be acked, timeout 2 sec */
while (!nic->pf_acked) {
if (nic->pf_nacked)
return -1;
mdelay(sleep);
nicvf_handle_mbx_intr(nic);
if (nic->pf_acked)
break;
timeout -= sleep;
if (!timeout) {
printf("PF didn't ack to mbox msg %d from VF%d\n",
(mbx->msg.msg & 0xFF), nic->vf_id);
return -1;
}
}
return 0;
}
/* Check whether the VF is able to communicate with the PF
 * and also get the VNIC number this VF is associated with.
 */
static int nicvf_check_pf_ready(struct nicvf *nic)
{
union nic_mbx mbx = {};
mbx.msg.msg = NIC_MBOX_MSG_READY;
if (nicvf_send_msg_to_pf(nic, &mbx)) {
printf("PF didn't respond to READY msg\n");
return 0;
}
return 1;
}
static void nicvf_handle_mbx_intr(struct nicvf *nic)
{
union nic_mbx mbx = {};
struct eth_pdata *pdata = dev_get_platdata(nic->dev);
u64 *mbx_data;
u64 mbx_addr;
int i;
mbx_addr = NIC_VF_PF_MAILBOX_0_1;
mbx_data = (u64 *)&mbx;
for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
*mbx_data = nicvf_reg_read(nic, mbx_addr);
mbx_data++;
mbx_addr += sizeof(u64);
}
debug("Mbox message: msg: 0x%x\n", mbx.msg.msg);
switch (mbx.msg.msg) {
case NIC_MBOX_MSG_READY:
nic->pf_acked = true;
nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
nic->node = mbx.nic_cfg.node_id;
if (!nic->set_mac_pending)
memcpy(pdata->enetaddr,
mbx.nic_cfg.mac_addr, 6);
nic->loopback_supported = mbx.nic_cfg.loopback_supported;
nic->link_up = false;
nic->duplex = 0;
nic->speed = 0;
break;
case NIC_MBOX_MSG_ACK:
nic->pf_acked = true;
break;
case NIC_MBOX_MSG_NACK:
nic->pf_nacked = true;
break;
case NIC_MBOX_MSG_BGX_LINK_CHANGE:
nic->pf_acked = true;
nic->link_up = mbx.link_status.link_up;
nic->duplex = mbx.link_status.duplex;
nic->speed = mbx.link_status.speed;
if (nic->link_up) {
printf("%s: Link is Up %d Mbps %s\n",
nic->dev->name, nic->speed,
nic->duplex == 1 ?
"Full duplex" : "Half duplex");
} else {
printf("%s: Link is Down\n", nic->dev->name);
}
break;
default:
printf("Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
break;
}
nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
}
static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct udevice *dev)
{
union nic_mbx mbx = {};
struct eth_pdata *pdata = dev_get_platdata(dev);
mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
mbx.mac.vf_id = nic->vf_id;
memcpy(mbx.mac.mac_addr, pdata->enetaddr, 6);
return nicvf_send_msg_to_pf(nic, &mbx);
}
static void nicvf_config_cpi(struct nicvf *nic)
{
union nic_mbx mbx = {};
mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
mbx.cpi_cfg.vf_id = nic->vf_id;
mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;
nicvf_send_msg_to_pf(nic, &mbx);
}
static int nicvf_init_resources(struct nicvf *nic)
{
int err;
nic->num_qs = 1;
/* Enable Qset */
nicvf_qset_config(nic, true);
/* Initialize queues and HW for data transfer */
err = nicvf_config_data_transfer(nic, true);
if (err) {
printf("Failed to alloc/config VF's QSet resources\n");
return err;
}
return 0;
}
static void nicvf_snd_pkt_handler(struct nicvf *nic,
struct cmp_queue *cq,
void *cq_desc, int cqe_type)
{
struct cqe_send_t *cqe_tx;
struct snd_queue *sq;
struct sq_hdr_subdesc *hdr;
cqe_tx = (struct cqe_send_t *)cq_desc;
sq = &nic->qs->sq[cqe_tx->sq_idx];
hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
return;
nicvf_check_cqe_tx_errs(nic, cq, cq_desc);
nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
}
static int nicvf_rcv_pkt_handler(struct nicvf *nic,
struct cmp_queue *cq, void *cq_desc,
void **ppkt, int cqe_type)
{
void *pkt;
size_t pkt_len;
struct cqe_rx_t *cqe_rx = (struct cqe_rx_t *)cq_desc;
int err = 0;
/* Check for errors */
err = nicvf_check_cqe_rx_errs(nic, cq, cq_desc);
if (err && !cqe_rx->rb_cnt)
return -1;
pkt = nicvf_get_rcv_pkt(nic, cq_desc, &pkt_len);
if (!pkt) {
debug("Packet not received\n");
return -1;
}
*ppkt = pkt;
return pkt_len;
}
int nicvf_cq_handler(struct nicvf *nic, void **ppkt, int *pkt_len)
{
int cq_qnum = 0;
int processed_sq_cqe = 0;
int processed_rq_cqe = 0;
int processed_cqe = 0;
unsigned long cqe_count, cqe_head;
struct queue_set *qs = nic->qs;
struct cmp_queue *cq = &qs->cq[cq_qnum];
struct cqe_rx_t *cq_desc;
/* Get the number of valid CQ entries to process */
cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_qnum);
cqe_count &= 0xFFFF;
if (!cqe_count)
return 0;
/* Get head of the valid CQ entries */
cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_qnum);
cqe_head >>= 9;
cqe_head &= 0xFFFF;
if (cqe_count) {
/* Get the CQ descriptor */
cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
cqe_head++;
cqe_head &= (cq->dmem.q_len - 1);
/* Initiate prefetch for next descriptor */
prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));
switch (cq_desc->cqe_type) {
case CQE_TYPE_RX:
debug("%s: Got Rx CQE\n", nic->dev->name);
*pkt_len = nicvf_rcv_pkt_handler(nic, cq, cq_desc,
ppkt, CQE_TYPE_RX);
processed_rq_cqe++;
break;
case CQE_TYPE_SEND:
debug("%s: Got Tx CQE\n", nic->dev->name);
nicvf_snd_pkt_handler(nic, cq, cq_desc, CQE_TYPE_SEND);
processed_sq_cqe++;
break;
default:
debug("%s: Got CQ type %u\n", nic->dev->name,
cq_desc->cqe_type);
break;
}
processed_cqe++;
}
/* Dequeue CQE */
nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
cq_qnum, processed_cqe);
asm volatile ("dsb sy");
return (processed_sq_cqe | processed_rq_cqe);
}
/* Qset error interrupt handler
*
* As of now only CQ errors are handled
*/
void nicvf_handle_qs_err(struct nicvf *nic)
{
struct queue_set *qs = nic->qs;
int qidx;
u64 status;
/* Check if it is CQ err */
for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
qidx);
if (!(status & CQ_ERR_MASK))
continue;
/* Process already queued CQEs and reconfig CQ */
nicvf_sq_disable(nic, qidx);
nicvf_cmp_queue_config(nic, qs, qidx, true);
nicvf_sq_free_used_descs(nic->dev, &qs->sq[qidx], qidx);
nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
}
}
static int nicvf_free_pkt(struct udevice *dev, uchar *pkt, int pkt_len)
{
struct nicvf *nic = dev_get_priv(dev);
if (pkt && pkt_len)
free(pkt);
nicvf_refill_rbdr(nic);
return 0;
}
static int nicvf_xmit(struct udevice *dev, void *pkt, int pkt_len)
{
struct nicvf *nic = dev_get_priv(dev);
int ret = 0;
int rcv_len = 0;
unsigned int timeout = 5000;
void *rpkt = NULL;
if (!nicvf_sq_append_pkt(nic, pkt, pkt_len)) {
printf("VF%d: TX ring full\n", nic->vf_id);
return -1;
}
/* check and update CQ for pkt sent */
while (!ret && timeout--) {
ret = nicvf_cq_handler(nic, &rpkt, &rcv_len);
if (!ret) {
debug("%s: %d, Not sent\n", __func__, __LINE__);
udelay(10);
}
}
return 0;
}
static int nicvf_recv(struct udevice *dev, int flags, uchar **packetp)
{
struct nicvf *nic = dev_get_priv(dev);
void *pkt;
int pkt_len = 0;
#ifdef DEBUG
u8 *dpkt;
int i, j;
#endif
nicvf_cq_handler(nic, &pkt, &pkt_len);
if (pkt_len) {
#ifdef DEBUG
dpkt = pkt;
printf("RX packet contents:\n");
for (i = 0; i < 8; i++) {
puts("\t");
for (j = 0; j < 10; j++)
printf("%02x ", dpkt[i * 10 + j]);
puts("\n");
}
#endif
*packetp = pkt;
}
return pkt_len;
}
void nicvf_stop(struct udevice *dev)
{
struct nicvf *nic = dev_get_priv(dev);
if (!nic->open)
return;
/* Free resources */
nicvf_config_data_transfer(nic, false);
/* Disable HW Qset */
nicvf_qset_config(nic, false);
nic->open = false;
}
int nicvf_open(struct udevice *dev)
{
int err;
struct nicvf *nic = dev_get_priv(dev);
nicvf_hw_set_mac_addr(nic, dev);
/* Configure CPI algorithm */
nic->cpi_alg = CPI_ALG_NONE;
nicvf_config_cpi(nic);
/* Initialize the queues */
err = nicvf_init_resources(nic);
if (err)
return -1;
if (!nicvf_check_pf_ready(nic))
return -1;
nic->open = true;
/* Make sure queue initialization is written */
asm volatile("dsb sy");
return 0;
}
int nicvf_write_hwaddr(struct udevice *dev)
{
unsigned char ethaddr[ARP_HLEN];
struct eth_pdata *pdata = dev_get_platdata(dev);
struct nicvf *nic = dev_get_priv(dev);
/* If the lower-level firmware fails to set a proper MAC
 * address, the U-Boot framework falls back to a random one.
 * Use this hook to record the MAC address in the environment.
 */
if (!eth_env_get_enetaddr_by_index("eth", dev->seq, ethaddr)) {
eth_env_set_enetaddr_by_index("eth", dev->seq, pdata->enetaddr);
debug("%s: pMAC %pM\n", __func__, pdata->enetaddr);
}
eth_env_get_enetaddr_by_index("eth", dev->seq, ethaddr);
if (memcmp(ethaddr, pdata->enetaddr, ARP_HLEN)) {
debug("%s: pMAC %pM\n", __func__, pdata->enetaddr);
nicvf_hw_set_mac_addr(nic, dev);
}
return 0;
}
static void nicvf_probe_mdio_devices(void)
{
struct udevice *pdev;
int err;
static int probed;
if (probed)
return;
err = dm_pci_find_device(PCI_VENDOR_ID_CAVIUM,
PCI_DEVICE_ID_CAVIUM_SMI, 0,
&pdev);
if (err)
debug("%s couldn't find SMI device\n", __func__);
probed = 1;
}
int nicvf_initialize(struct udevice *dev)
{
struct nicvf *nicvf = dev_get_priv(dev);
struct eth_pdata *pdata = dev_get_platdata(dev);
int ret = 0, bgx, lmac;
char name[16];
unsigned char ethaddr[ARP_HLEN];
struct udevice *pfdev;
struct nicpf *pf;
static int vfid;
if (dm_pci_find_device(PCI_VENDOR_ID_CAVIUM,
PCI_DEVICE_ID_CAVIUM_NIC, 0, &pfdev)) {
printf("%s NIC PF device not found..VF probe failed\n",
__func__);
return -1;
}
pf = dev_get_priv(pfdev);
nicvf->vf_id = vfid++;
nicvf->dev = dev;
nicvf->nicpf = pf;
nicvf_probe_mdio_devices();
/* Enable TSO support */
nicvf->hw_tso = true;
nicvf->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
PCI_REGION_MEM);
debug("nicvf->reg_base: %p\n", nicvf->reg_base);
if (!nicvf->reg_base) {
printf("Cannot map config register space, aborting\n");
ret = -1;
goto fail;
}
ret = nicvf_set_qset_resources(nicvf);
if (ret)
return -1;
sprintf(name, "vnic%u", nicvf->vf_id);
debug("%s name %s\n", __func__, name);
device_set_name(dev, name);
bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(pf->vf_lmac_map[nicvf->vf_id]);
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(pf->vf_lmac_map[nicvf->vf_id]);
debug("%s VF %d BGX %d LMAC %d\n", __func__, nicvf->vf_id, bgx, lmac);
debug("%s PF %p pfdev %p VF %p vfdev %p vf->pdata %p\n",
__func__, nicvf->nicpf, nicvf->nicpf->udev, nicvf, nicvf->dev,
pdata);
fdt_board_get_ethaddr(bgx, lmac, ethaddr);
debug("%s bgx %d lmac %d ethaddr %pM\n", __func__, bgx, lmac, ethaddr);
if (is_valid_ethaddr(ethaddr)) {
memcpy(pdata->enetaddr, ethaddr, ARP_HLEN);
eth_env_set_enetaddr_by_index("eth", dev->seq, ethaddr);
}
debug("%s enetaddr %pM ethaddr %pM\n", __func__,
pdata->enetaddr, ethaddr);
fail:
return ret;
}
int octeontx_vnic_probe(struct udevice *dev)
{
return nicvf_initialize(dev);
}
static const struct eth_ops octeontx_vnic_ops = {
.start = nicvf_open,
.stop = nicvf_stop,
.send = nicvf_xmit,
.recv = nicvf_recv,
.free_pkt = nicvf_free_pkt,
.write_hwaddr = nicvf_write_hwaddr,
};
U_BOOT_DRIVER(octeontx_vnic) = {
.name = "vnic",
.id = UCLASS_ETH,
.probe = octeontx_vnic_probe,
.ops = &octeontx_vnic_ops,
.priv_auto_alloc_size = sizeof(struct nicvf),
.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};
static struct pci_device_id octeontx_vnic_supported[] = {
{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_NICVF) },
{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_NICVF_1) },
{}
};
U_BOOT_PCI_DEVICE(octeontx_vnic, octeontx_vnic_supported);

File diff suppressed because it is too large

@ -0,0 +1,353 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2018 Marvell International Ltd.
*/
#ifndef NICVF_QUEUES_H
#define NICVF_QUEUES_H
#include "q_struct.h"
#define MAX_QUEUE_SET 128
#define MAX_RCV_QUEUES_PER_QS 8
#define MAX_RCV_BUF_DESC_RINGS_PER_QS 2
#define MAX_SND_QUEUES_PER_QS 8
#define MAX_CMP_QUEUES_PER_QS 8
/* VF's queue interrupt ranges */
#define NICVF_INTR_ID_CQ 0
#define NICVF_INTR_ID_SQ 8
#define NICVF_INTR_ID_RBDR 16
#define NICVF_INTR_ID_MISC 18
#define NICVF_INTR_ID_QS_ERR 19
#define RBDR_SIZE0 0ULL /* 8K entries */
#define RBDR_SIZE1 1ULL /* 16K entries */
#define RBDR_SIZE2 2ULL /* 32K entries */
#define RBDR_SIZE3 3ULL /* 64K entries */
#define RBDR_SIZE4 4ULL /* 128K entries */
#define RBDR_SIZE5 5ULL /* 256K entries */
#define RBDR_SIZE6 6ULL /* 512K entries */
#define SND_QUEUE_SIZE0 0ULL /* 1K entries */
#define SND_QUEUE_SIZE1 1ULL /* 2K entries */
#define SND_QUEUE_SIZE2 2ULL /* 4K entries */
#define SND_QUEUE_SIZE3 3ULL /* 8K entries */
#define SND_QUEUE_SIZE4 4ULL /* 16K entries */
#define SND_QUEUE_SIZE5 5ULL /* 32K entries */
#define SND_QUEUE_SIZE6 6ULL /* 64K entries */
#define CMP_QUEUE_SIZE0 0ULL /* 1K entries */
#define CMP_QUEUE_SIZE1 1ULL /* 2K entries */
#define CMP_QUEUE_SIZE2 2ULL /* 4K entries */
#define CMP_QUEUE_SIZE3 3ULL /* 8K entries */
#define CMP_QUEUE_SIZE4 4ULL /* 16K entries */
#define CMP_QUEUE_SIZE5 5ULL /* 32K entries */
#define CMP_QUEUE_SIZE6 6ULL /* 64K entries */
/* Default queue counts per QS, their lengths and threshold values */
#define RBDR_CNT 1
#define RCV_QUEUE_CNT 1
#define SND_QUEUE_CNT 1
#define CMP_QUEUE_CNT 1 /* Max of RCV and SND qcount */
#define SND_QSIZE SND_QUEUE_SIZE0
#define SND_QUEUE_LEN BIT_ULL((SND_QSIZE + 10))
#define SND_QUEUE_THRESH 2ULL
#define MIN_SQ_DESC_PER_PKT_XMIT 2
#define MAX_CQE_PER_PKT_XMIT 2
#define CMP_QSIZE CMP_QUEUE_SIZE0
#define CMP_QUEUE_LEN BIT_ULL((CMP_QSIZE + 10))
#define CMP_QUEUE_CQE_THRESH 0
#define CMP_QUEUE_TIMER_THRESH 1 /* 1 ms */
#define RBDR_SIZE RBDR_SIZE0
#define RCV_BUF_COUNT BIT_ULL((RBDR_SIZE + 13))
#define RBDR_THRESH (RCV_BUF_COUNT / 2)
#define DMA_BUFFER_LEN 2048 /* In multiples of 128 bytes */
#define RCV_FRAG_LEN DMA_BUFFER_LEN
#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) *\
MAX_CQE_PER_PKT_XMIT)
#define RQ_CQ_DROP ((CMP_QUEUE_LEN - MAX_CQES_FOR_TX) / 256)
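/*
 * Worked example with the defaults above: SND_QUEUE_LEN and
 * CMP_QUEUE_LEN are both BIT(10) = 1024, so MAX_CQES_FOR_TX =
 * (1024 / 2) * 2 = 1024 and RQ_CQ_DROP = (1024 - 1024) / 256 = 0.
 * Likewise RCV_BUF_COUNT = BIT(13) = 8192 and RBDR_THRESH = 4096.
 */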
/* Descriptor size */
#define SND_QUEUE_DESC_SIZE 16 /* 128 bits */
#define CMP_QUEUE_DESC_SIZE 512
/* Buffer / descriptor alignments */
#define NICVF_RCV_BUF_ALIGN 7
#define NICVF_RCV_BUF_ALIGN_BYTES BIT_ULL(NICVF_RCV_BUF_ALIGN)
#define NICVF_CQ_BASE_ALIGN_BYTES 512 /* 9 bits */
#define NICVF_SQ_BASE_ALIGN_BYTES 128 /* 7 bits */
#define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES) ALIGN(ADDR, ALIGN_BYTES)
/* Queue enable/disable */
#define NICVF_SQ_EN BIT_ULL(19)
/* Queue reset */
#define NICVF_CQ_RESET BIT_ULL(41)
#define NICVF_SQ_RESET BIT_ULL(17)
#define NICVF_RBDR_RESET BIT_ULL(43)
enum CQ_RX_ERRLVL_E {
CQ_ERRLVL_MAC,
CQ_ERRLVL_L2,
CQ_ERRLVL_L3,
CQ_ERRLVL_L4,
};
enum CQ_RX_ERROP_E {
CQ_RX_ERROP_RE_NONE = 0x0,
CQ_RX_ERROP_RE_PARTIAL = 0x1,
CQ_RX_ERROP_RE_JABBER = 0x2,
CQ_RX_ERROP_RE_FCS = 0x7,
CQ_RX_ERROP_RE_TERMINATE = 0x9,
CQ_RX_ERROP_RE_RX_CTL = 0xb,
CQ_RX_ERROP_PREL2_ERR = 0x1f,
CQ_RX_ERROP_L2_FRAGMENT = 0x20,
CQ_RX_ERROP_L2_OVERRUN = 0x21,
CQ_RX_ERROP_L2_PFCS = 0x22,
CQ_RX_ERROP_L2_PUNY = 0x23,
CQ_RX_ERROP_L2_MAL = 0x24,
CQ_RX_ERROP_L2_OVERSIZE = 0x25,
CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
CQ_RX_ERROP_L2_LENMISM = 0x27,
CQ_RX_ERROP_L2_PCLP = 0x28,
CQ_RX_ERROP_IP_NOT = 0x41,
CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
CQ_RX_ERROP_IP_MAL = 0x43,
CQ_RX_ERROP_IP_MALD = 0x44,
CQ_RX_ERROP_IP_HOP = 0x45,
CQ_RX_ERROP_L3_ICRC = 0x46,
CQ_RX_ERROP_L3_PCLP = 0x47,
CQ_RX_ERROP_L4_MAL = 0x61,
CQ_RX_ERROP_L4_CHK = 0x62,
CQ_RX_ERROP_UDP_LEN = 0x63,
CQ_RX_ERROP_L4_PORT = 0x64,
CQ_RX_ERROP_TCP_FLAG = 0x65,
CQ_RX_ERROP_TCP_OFFSET = 0x66,
CQ_RX_ERROP_L4_PCLP = 0x67,
CQ_RX_ERROP_RBDR_TRUNC = 0x70,
};
enum CQ_TX_ERROP_E {
CQ_TX_ERROP_GOOD = 0x0,
CQ_TX_ERROP_DESC_FAULT = 0x10,
CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
CQ_TX_ERROP_SUBDC_ERR = 0x12,
CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
CQ_TX_ERROP_LOCK_VIOL = 0x83,
CQ_TX_ERROP_DATA_FAULT = 0x84,
CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
CQ_TX_ERROP_MEM_FAULT = 0x87,
CQ_TX_ERROP_CK_OVERLAP = 0x88,
CQ_TX_ERROP_CK_OFLOW = 0x89,
CQ_TX_ERROP_ENUM_LAST = 0x8a,
};
struct cmp_queue_stats {
struct rx_stats {
struct {
u64 mac_errs;
u64 l2_errs;
u64 l3_errs;
u64 l4_errs;
} errlvl;
struct {
u64 good;
u64 partial_pkts;
u64 jabber_errs;
u64 fcs_errs;
u64 terminate_errs;
u64 bgx_rx_errs;
u64 prel2_errs;
u64 l2_frags;
u64 l2_overruns;
u64 l2_pfcs;
u64 l2_puny;
u64 l2_hdr_malformed;
u64 l2_oversize;
u64 l2_undersize;
u64 l2_len_mismatch;
u64 l2_pclp;
u64 non_ip;
u64 ip_csum_err;
u64 ip_hdr_malformed;
u64 ip_payload_malformed;
u64 ip_hop_errs;
u64 l3_icrc_errs;
u64 l3_pclp;
u64 l4_malformed;
u64 l4_csum_errs;
u64 udp_len_err;
u64 bad_l4_port;
u64 bad_tcp_flag;
u64 tcp_offset_errs;
u64 l4_pclp;
u64 pkt_truncated;
} errop;
} rx;
struct tx_stats {
u64 good;
u64 desc_fault;
u64 hdr_cons_err;
u64 subdesc_err;
u64 imm_size_oflow;
u64 data_seq_err;
u64 mem_seq_err;
u64 lock_viol;
u64 data_fault;
u64 tstmp_conflict;
u64 tstmp_timeout;
u64 mem_fault;
u64 csum_overlap;
u64 csum_overflow;
} tx;
};
enum RQ_SQ_STATS {
RQ_SQ_STATS_OCTS,
RQ_SQ_STATS_PKTS,
};
struct rx_tx_queue_stats {
u64 bytes;
u64 pkts;
};
struct q_desc_mem {
uintptr_t dma;
u64 size;
u16 q_len;
uintptr_t phys_base;
void *base;
void *unalign_base;
bool allocated;
};
struct rbdr {
bool enable;
u32 dma_size;
u32 thresh; /* Threshold level for interrupt */
void *desc;
u32 head;
u32 tail;
struct q_desc_mem dmem;
uintptr_t buf_mem;
uintptr_t buffers;
};
struct rcv_queue {
bool enable;
struct rbdr *rbdr_start;
struct rbdr *rbdr_cont;
bool en_tcp_reassembly;
u8 cq_qs; /* CQ's QS to which this RQ is assigned */
u8 cq_idx; /* CQ index (0 to 7) in the QS */
u8 cont_rbdr_qs; /* Continue buffer ptrs - QS num */
u8 cont_qs_rbdr_idx; /* RBDR idx in the cont QS */
u8 start_rbdr_qs; /* First buffer ptrs - QS num */
u8 start_qs_rbdr_idx; /* RBDR idx in the above QS */
u8 caching;
struct rx_tx_queue_stats stats;
};
struct cmp_queue {
bool enable;
u16 intr_timer_thresh;
u16 thresh;
void *desc;
struct q_desc_mem dmem;
struct cmp_queue_stats stats;
};
struct snd_queue {
bool enable;
u8 cq_qs; /* CQ's QS to which this SQ is pointing */
u8 cq_idx; /* CQ index (0 to 7) in the above QS */
u16 thresh;
u32 free_cnt;
u32 head;
u32 tail;
u64 *skbuff;
void *desc;
struct q_desc_mem dmem;
struct rx_tx_queue_stats stats;
};
struct queue_set {
bool enable;
bool be_en;
u8 vnic_id;
u8 rq_cnt;
u8 cq_cnt;
u64 cq_len;
u8 sq_cnt;
u64 sq_len;
u8 rbdr_cnt;
u64 rbdr_len;
struct rcv_queue rq[MAX_RCV_QUEUES_PER_QS];
struct cmp_queue cq[MAX_CMP_QUEUES_PER_QS];
struct snd_queue sq[MAX_SND_QUEUES_PER_QS];
struct rbdr rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS];
};
#define GET_RBDR_DESC(RING, idx)\
(&(((struct rbdr_entry_t *)((RING)->desc))[idx]))
#define GET_SQ_DESC(RING, idx)\
(&(((struct sq_hdr_subdesc *)((RING)->desc))[idx]))
#define GET_CQ_DESC(RING, idx)\
(&(((union cq_desc_t *)((RING)->desc))[idx]))
/* CQ status bits */
#define CQ_WR_FULL BIT(26)
#define CQ_WR_DISABLE BIT(25)
#define CQ_WR_FAULT BIT(24)
#define CQ_CQE_COUNT (0xFFFF << 0)
#define CQ_ERR_MASK (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)
int nicvf_set_qset_resources(struct nicvf *nic);
int nicvf_config_data_transfer(struct nicvf *nic, bool enable);
void nicvf_qset_config(struct nicvf *nic, bool enable);
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
int qidx, bool enable);
void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
void nicvf_sq_disable(struct nicvf *nic, int qidx);
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt);
void nicvf_sq_free_used_descs(struct udevice *dev,
struct snd_queue *sq, int qidx);
int nicvf_sq_append_pkt(struct nicvf *nic, void *pkt, size_t pkt_len);
void *nicvf_get_rcv_pkt(struct nicvf *nic, void *cq_desc, size_t *pkt_len);
void nicvf_refill_rbdr(struct nicvf *nic);
void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);
/* Register access APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_reg_read(struct nicvf *nic, u64 offset);
void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset);
void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
u64 qidx, u64 val);
u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx);
/* Stats */
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
int nicvf_check_cqe_rx_errs(struct nicvf *nic,
struct cmp_queue *cq, void *cq_desc);
int nicvf_check_cqe_tx_errs(struct nicvf *nic,
struct cmp_queue *cq, void *cq_desc);
#endif /* NICVF_QUEUES_H */

@ -0,0 +1,695 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2018 Marvell International Ltd.
*/
#ifndef Q_STRUCT_H
#define Q_STRUCT_H
/* Load transaction types for reading segment bytes specified by
* NIC_SEND_GATHER_S[LD_TYPE].
*/
enum nic_send_ld_type_e {
NIC_SEND_LD_TYPE_E_LDD = 0x0,
NIC_SEND_LD_TYPE_E_LDT = 0x1,
NIC_SEND_LD_TYPE_E_LDWB = 0x2,
NIC_SEND_LD_TYPE_E_ENUM_LAST = 0x3,
};
enum ether_type_algorithm {
ETYPE_ALG_NONE = 0x0,
ETYPE_ALG_SKIP = 0x1,
ETYPE_ALG_ENDPARSE = 0x2,
ETYPE_ALG_VLAN = 0x3,
ETYPE_ALG_VLAN_STRIP = 0x4,
};
enum layer3_type {
L3TYPE_NONE = 0x00,
L3TYPE_GRH = 0x01,
L3TYPE_IPV4 = 0x04,
L3TYPE_IPV4_OPTIONS = 0x05,
L3TYPE_IPV6 = 0x06,
L3TYPE_IPV6_OPTIONS = 0x07,
L3TYPE_ET_STOP = 0x0D,
L3TYPE_OTHER = 0x0E,
};
enum layer4_type {
L4TYPE_NONE = 0x00,
L4TYPE_IPSEC_ESP = 0x01,
L4TYPE_IPFRAG = 0x02,
L4TYPE_IPCOMP = 0x03,
L4TYPE_TCP = 0x04,
L4TYPE_UDP = 0x05,
L4TYPE_SCTP = 0x06,
L4TYPE_GRE = 0x07,
L4TYPE_ROCE_BTH = 0x08,
L4TYPE_OTHER = 0x0E,
};
/* CPI and RSSI configuration */
enum cpi_algorithm_type {
CPI_ALG_NONE = 0x0,
CPI_ALG_VLAN = 0x1,
CPI_ALG_VLAN16 = 0x2,
CPI_ALG_DIFF = 0x3,
};
enum rss_algorithm_type {
RSS_ALG_NONE = 0x00,
RSS_ALG_PORT = 0x01,
RSS_ALG_IP = 0x02,
RSS_ALG_TCP_IP = 0x03,
RSS_ALG_UDP_IP = 0x04,
RSS_ALG_SCTP_IP = 0x05,
RSS_ALG_GRE_IP = 0x06,
RSS_ALG_ROCE = 0x07,
};
enum rss_hash_cfg {
RSS_HASH_L2ETC = 0x00,
RSS_HASH_IP = 0x01,
RSS_HASH_TCP = 0x02,
RSS_TCP_SYN_DIS = 0x03,
RSS_HASH_UDP = 0x04,
RSS_HASH_L4ETC = 0x05,
RSS_HASH_ROCE = 0x06,
RSS_L3_BIDI = 0x07,
RSS_L4_BIDI = 0x08,
};
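/*
 * These values are bit positions: e.g. RSS_HASH_IP (0x01) matches
 * RSS_IP_HASH_ENA = BIT(1) in the nicvf_rss_info cfg word in nic.h.
 */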
/* Completion queue entry types */
enum cqe_type {
CQE_TYPE_INVALID = 0x0,
CQE_TYPE_RX = 0x2,
CQE_TYPE_RX_SPLIT = 0x3,
CQE_TYPE_RX_TCP = 0x4,
CQE_TYPE_SEND = 0x8,
CQE_TYPE_SEND_PTP = 0x9,
};
enum cqe_rx_tcp_status {
CQE_RX_STATUS_VALID_TCP_CNXT = 0x00,
CQE_RX_STATUS_INVALID_TCP_CNXT = 0x0F,
};
enum cqe_send_status {
CQE_SEND_STATUS_GOOD = 0x00,
CQE_SEND_STATUS_DESC_FAULT = 0x01,
CQE_SEND_STATUS_HDR_CONS_ERR = 0x11,
CQE_SEND_STATUS_SUBDESC_ERR = 0x12,
CQE_SEND_STATUS_IMM_SIZE_OFLOW = 0x80,
CQE_SEND_STATUS_CRC_SEQ_ERR = 0x81,
CQE_SEND_STATUS_DATA_SEQ_ERR = 0x82,
CQE_SEND_STATUS_MEM_SEQ_ERR = 0x83,
CQE_SEND_STATUS_LOCK_VIOL = 0x84,
CQE_SEND_STATUS_LOCK_UFLOW = 0x85,
CQE_SEND_STATUS_DATA_FAULT = 0x86,
CQE_SEND_STATUS_TSTMP_CONFLICT = 0x87,
CQE_SEND_STATUS_TSTMP_TIMEOUT = 0x88,
CQE_SEND_STATUS_MEM_FAULT = 0x89,
CQE_SEND_STATUS_CSUM_OVERLAP = 0x8A,
CQE_SEND_STATUS_CSUM_OVERFLOW = 0x8B,
};
enum cqe_rx_tcp_end_reason {
CQE_RX_TCP_END_FIN_FLAG_DET = 0,
CQE_RX_TCP_END_INVALID_FLAG = 1,
CQE_RX_TCP_END_TIMEOUT = 2,
CQE_RX_TCP_END_OUT_OF_SEQ = 3,
CQE_RX_TCP_END_PKT_ERR = 4,
CQE_RX_TCP_END_QS_DISABLED = 0x0F,
};
/* Packet protocol level error enumeration */
enum cqe_rx_err_level {
CQE_RX_ERRLVL_RE = 0x0,
CQE_RX_ERRLVL_L2 = 0x1,
CQE_RX_ERRLVL_L3 = 0x2,
CQE_RX_ERRLVL_L4 = 0x3,
};
/* Packet protocol level error type enumeration */
enum cqe_rx_err_opcode {
CQE_RX_ERR_RE_NONE = 0x0,
CQE_RX_ERR_RE_PARTIAL = 0x1,
CQE_RX_ERR_RE_JABBER = 0x2,
CQE_RX_ERR_RE_FCS = 0x7,
CQE_RX_ERR_RE_TERMINATE = 0x9,
CQE_RX_ERR_RE_RX_CTL = 0xb,
CQE_RX_ERR_PREL2_ERR = 0x1f,
CQE_RX_ERR_L2_FRAGMENT = 0x20,
CQE_RX_ERR_L2_OVERRUN = 0x21,
CQE_RX_ERR_L2_PFCS = 0x22,
CQE_RX_ERR_L2_PUNY = 0x23,
CQE_RX_ERR_L2_MAL = 0x24,
CQE_RX_ERR_L2_OVERSIZE = 0x25,
CQE_RX_ERR_L2_UNDERSIZE = 0x26,
CQE_RX_ERR_L2_LENMISM = 0x27,
CQE_RX_ERR_L2_PCLP = 0x28,
CQE_RX_ERR_IP_NOT = 0x41,
CQE_RX_ERR_IP_CHK = 0x42,
CQE_RX_ERR_IP_MAL = 0x43,
CQE_RX_ERR_IP_MALD = 0x44,
CQE_RX_ERR_IP_HOP = 0x45,
CQE_RX_ERR_L3_ICRC = 0x46,
CQE_RX_ERR_L3_PCLP = 0x47,
CQE_RX_ERR_L4_MAL = 0x61,
CQE_RX_ERR_L4_CHK = 0x62,
CQE_RX_ERR_UDP_LEN = 0x63,
CQE_RX_ERR_L4_PORT = 0x64,
CQE_RX_ERR_TCP_FLAG = 0x65,
CQE_RX_ERR_TCP_OFFSET = 0x66,
CQE_RX_ERR_L4_PCLP = 0x67,
CQE_RX_ERR_RBDR_TRUNC = 0x70,
};
struct cqe_rx_t {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 cqe_type:4; /* W0 */
u64 stdn_fault:1;
u64 rsvd0:1;
u64 rq_qs:7;
u64 rq_idx:3;
u64 rsvd1:12;
u64 rss_alg:4;
u64 rsvd2:4;
u64 rb_cnt:4;
u64 vlan_found:1;
u64 vlan_stripped:1;
u64 vlan2_found:1;
u64 vlan2_stripped:1;
u64 l4_type:4;
u64 l3_type:4;
u64 l2_present:1;
u64 err_level:3;
u64 err_opcode:8;
u64 pkt_len:16; /* W1 */
u64 l2_ptr:8;
u64 l3_ptr:8;
u64 l4_ptr:8;
u64 cq_pkt_len:8;
u64 align_pad:3;
u64 rsvd3:1;
u64 chan:12;
u64 rss_tag:32; /* W2 */
u64 vlan_tci:16;
u64 vlan_ptr:8;
u64 vlan2_ptr:8;
u64 rb3_sz:16; /* W3 */
u64 rb2_sz:16;
u64 rb1_sz:16;
u64 rb0_sz:16;
u64 rb7_sz:16; /* W4 */
u64 rb6_sz:16;
u64 rb5_sz:16;
u64 rb4_sz:16;
u64 rb11_sz:16; /* W5 */
u64 rb10_sz:16;
u64 rb9_sz:16;
u64 rb8_sz:16;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 err_opcode:8;
u64 err_level:3;
u64 l2_present:1;
u64 l3_type:4;
u64 l4_type:4;
u64 vlan2_stripped:1;
u64 vlan2_found:1;
u64 vlan_stripped:1;
u64 vlan_found:1;
u64 rb_cnt:4;
u64 rsvd2:4;
u64 rss_alg:4;
u64 rsvd1:12;
u64 rq_idx:3;
u64 rq_qs:7;
u64 rsvd0:1;
u64 stdn_fault:1;
u64 cqe_type:4; /* W0 */
u64 chan:12;
u64 rsvd3:1;
u64 align_pad:3;
u64 cq_pkt_len:8;
u64 l4_ptr:8;
u64 l3_ptr:8;
u64 l2_ptr:8;
u64 pkt_len:16; /* W1 */
u64 vlan2_ptr:8;
u64 vlan_ptr:8;
u64 vlan_tci:16;
u64 rss_tag:32; /* W2 */
u64 rb0_sz:16;
u64 rb1_sz:16;
u64 rb2_sz:16;
u64 rb3_sz:16; /* W3 */
u64 rb4_sz:16;
u64 rb5_sz:16;
u64 rb6_sz:16;
u64 rb7_sz:16; /* W4 */
u64 rb8_sz:16;
u64 rb9_sz:16;
u64 rb10_sz:16;
u64 rb11_sz:16; /* W5 */
#endif
u64 rb0_ptr:64;
u64 rb1_ptr:64;
u64 rb2_ptr:64;
u64 rb3_ptr:64;
u64 rb4_ptr:64;
u64 rb5_ptr:64;
u64 rb6_ptr:64;
u64 rb7_ptr:64;
u64 rb8_ptr:64;
u64 rb9_ptr:64;
u64 rb10_ptr:64;
u64 rb11_ptr:64;
};
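/* Note on the mirrored layouts above: C bitfields are allocated from the
* most significant bit on big-endian targets and from the least significant
* bit on little-endian ones, so each 64-bit word is spelled out twice, in
* opposite member order, to keep every field at the same hardware bit
* position on either host endianness.
*/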
struct cqe_rx_tcp_err_t {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 cqe_type:4; /* W0 */
u64 rsvd0:60;
u64 rsvd1:4; /* W1 */
u64 partial_first:1;
u64 rsvd2:27;
u64 rbdr_bytes:8;
u64 rsvd3:24;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 rsvd0:60;
u64 cqe_type:4;
u64 rsvd3:24;
u64 rbdr_bytes:8;
u64 rsvd2:27;
u64 partial_first:1;
u64 rsvd1:4;
#endif
};
struct cqe_rx_tcp_t {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 cqe_type:4; /* W0 */
u64 rsvd0:52;
u64 cq_tcp_status:8;
u64 rsvd1:32; /* W1 */
u64 tcp_cntx_bytes:8;
u64 rsvd2:8;
u64 tcp_err_bytes:16;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 cq_tcp_status:8;
u64 rsvd0:52;
u64 cqe_type:4; /* W0 */
u64 tcp_err_bytes:16;
u64 rsvd2:8;
u64 tcp_cntx_bytes:8;
u64 rsvd1:32; /* W1 */
#endif
};
struct cqe_send_t {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 cqe_type:4; /* W0 */
u64 rsvd0:4;
u64 sqe_ptr:16;
u64 rsvd1:4;
u64 rsvd2:10;
u64 sq_qs:7;
u64 sq_idx:3;
u64 rsvd3:8;
u64 send_status:8;
u64 ptp_timestamp:64; /* W1 */
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 send_status:8;
u64 rsvd3:8;
u64 sq_idx:3;
u64 sq_qs:7;
u64 rsvd2:10;
u64 rsvd1:4;
u64 sqe_ptr:16;
u64 rsvd0:4;
u64 cqe_type:4; /* W0 */
u64 ptp_timestamp:64; /* W1 */
#endif
};
union cq_desc_t {
u64 u[64];
struct cqe_send_t snd_hdr;
struct cqe_rx_t rx_hdr;
struct cqe_rx_tcp_t rx_tcp_hdr;
struct cqe_rx_tcp_err_t rx_tcp_err_hdr;
};
struct rbdr_entry_t {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 rsvd0:15;
u64 buf_addr:42;
u64 cache_align:7;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 cache_align:7;
u64 buf_addr:42;
u64 rsvd0:15;
#endif
};
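/* Illustrative helper, not part of the original commit: assuming the seven
* cache_align bits encode 128-byte buffer alignment, the physical buffer
* address is recovered by shifting buf_addr back up by 7.
*/
static inline u64 rbdr_entry_buf_addr(const struct rbdr_entry_t *entry)
{
return (u64)entry->buf_addr << 7;
}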
/* TCP reassembly context */
struct rbe_tcp_cnxt_t {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 tcp_pkt_cnt:12;
u64 rsvd1:4;
u64 align_hdr_bytes:4;
u64 align_ptr_bytes:4;
u64 ptr_bytes:16;
u64 rsvd2:24;
u64 cqe_type:4;
u64 rsvd0:54;
u64 tcp_end_reason:2;
u64 tcp_status:4;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 tcp_status:4;
u64 tcp_end_reason:2;
u64 rsvd0:54;
u64 cqe_type:4;
u64 rsvd2:24;
u64 ptr_bytes:16;
u64 align_ptr_bytes:4;
u64 align_hdr_bytes:4;
u64 rsvd1:4;
u64 tcp_pkt_cnt:12;
#endif
};
/* Always Big endian */
struct rx_hdr_t {
u64 opaque:32;
u64 rss_flow:8;
u64 skip_length:6;
u64 disable_rss:1;
u64 disable_tcp_reassembly:1;
u64 nodrop:1;
u64 dest_alg:2;
u64 rsvd0:2;
u64 dest_rq:11;
};
enum send_l4_csum_type {
SEND_L4_CSUM_DISABLE = 0x00,
SEND_L4_CSUM_UDP = 0x01,
SEND_L4_CSUM_TCP = 0x02,
SEND_L4_CSUM_SCTP = 0x03,
};
enum send_crc_alg {
SEND_CRCALG_CRC32 = 0x00,
SEND_CRCALG_CRC32C = 0x01,
SEND_CRCALG_ICRC = 0x02,
};
enum send_load_type {
SEND_LD_TYPE_LDD = 0x00,
SEND_LD_TYPE_LDT = 0x01,
SEND_LD_TYPE_LDWB = 0x02,
};
enum send_mem_alg_type {
SEND_MEMALG_SET = 0x00,
SEND_MEMALG_ADD = 0x08,
SEND_MEMALG_SUB = 0x09,
SEND_MEMALG_ADDLEN = 0x0A,
SEND_MEMALG_SUBLEN = 0x0B,
};
enum send_mem_dsz_type {
SEND_MEMDSZ_B64 = 0x00,
SEND_MEMDSZ_B32 = 0x01,
SEND_MEMDSZ_B8 = 0x03,
};
enum sq_subdesc_type {
SQ_DESC_TYPE_INVALID = 0x00,
SQ_DESC_TYPE_HEADER = 0x01,
SQ_DESC_TYPE_CRC = 0x02,
SQ_DESC_TYPE_IMMEDIATE = 0x03,
SQ_DESC_TYPE_GATHER = 0x04,
SQ_DESC_TYPE_MEMORY = 0x05,
};
struct sq_crc_subdesc {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 rsvd1:32;
u64 crc_ival:32;
u64 subdesc_type:4;
u64 crc_alg:2;
u64 rsvd0:10;
u64 crc_insert_pos:16;
u64 hdr_start:16;
u64 crc_len:16;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 crc_len:16;
u64 hdr_start:16;
u64 crc_insert_pos:16;
u64 rsvd0:10;
u64 crc_alg:2;
u64 subdesc_type:4;
u64 crc_ival:32;
u64 rsvd1:32;
#endif
};
struct sq_gather_subdesc {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 subdesc_type:4; /* W0 */
u64 ld_type:2;
u64 rsvd0:42;
u64 size:16;
u64 rsvd1:15; /* W1 */
u64 addr:49;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 size:16;
u64 rsvd0:42;
u64 ld_type:2;
u64 subdesc_type:4; /* W0 */
u64 addr:49;
u64 rsvd1:15; /* W1 */
#endif
};
/* SQ immediate subdescriptor */
struct sq_imm_subdesc {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 subdesc_type:4; /* W0 */
u64 rsvd0:46;
u64 len:14;
u64 data:64; /* W1 */
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 len:14;
u64 rsvd0:46;
u64 subdesc_type:4; /* W0 */
u64 data:64; /* W1 */
#endif
};
struct sq_mem_subdesc {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 subdesc_type:4; /* W0 */
u64 mem_alg:4;
u64 mem_dsz:2;
u64 wmem:1;
u64 rsvd0:21;
u64 offset:32;
u64 rsvd1:15; /* W1 */
u64 addr:49;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 offset:32;
u64 rsvd0:21;
u64 wmem:1;
u64 mem_dsz:2;
u64 mem_alg:4;
u64 subdesc_type:4; /* W0 */
u64 addr:49;
u64 rsvd1:15; /* W1 */
#endif
};
struct sq_hdr_subdesc {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 subdesc_type:4;
u64 tso:1;
u64 post_cqe:1; /* Post CQE on no error also */
u64 dont_send:1;
u64 tstmp:1;
u64 subdesc_cnt:8;
u64 csum_l4:2;
u64 csum_l3:1;
u64 rsvd0:5;
u64 l4_offset:8;
u64 l3_offset:8;
u64 rsvd1:4;
u64 tot_len:20; /* W0 */
u64 tso_sdc_cont:8;
u64 tso_sdc_first:8;
u64 tso_l4_offset:8;
u64 tso_flags_last:12;
u64 tso_flags_first:12;
u64 rsvd2:2;
u64 tso_max_paysize:14; /* W1 */
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 tot_len:20;
u64 rsvd1:4;
u64 l3_offset:8;
u64 l4_offset:8;
u64 rsvd0:5;
u64 csum_l3:1;
u64 csum_l4:2;
u64 subdesc_cnt:8;
u64 tstmp:1;
u64 dont_send:1;
u64 post_cqe:1; /* Post CQE on no error also */
u64 tso:1;
u64 subdesc_type:4; /* W0 */
u64 tso_max_paysize:14;
u64 rsvd2:2;
u64 tso_flags_first:12;
u64 tso_flags_last:12;
u64 tso_l4_offset:8;
u64 tso_sdc_first:8;
u64 tso_sdc_cont:8; /* W1 */
#endif
};
/* Queue config register formats */
struct rq_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 reserved_2_63:62;
u64 ena:1;
u64 tcp_ena:1;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 tcp_ena:1;
u64 ena:1;
u64 reserved_2_63:62;
#endif
};
struct cq_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 reserved_43_63:21;
u64 ena:1;
u64 reset:1;
u64 caching:1;
u64 reserved_35_39:5;
u64 qsize:3;
u64 reserved_25_31:7;
u64 avg_con:9;
u64 reserved_0_15:16;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 reserved_0_15:16;
u64 avg_con:9;
u64 reserved_25_31:7;
u64 qsize:3;
u64 reserved_35_39:5;
u64 caching:1;
u64 reset:1;
u64 ena:1;
u64 reserved_43_63:21;
#endif
};
struct sq_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 reserved_20_63:44;
u64 ena:1;
u64 reserved_18_18:1;
u64 reset:1;
u64 ldwb:1;
u64 reserved_11_15:5;
u64 qsize:3;
u64 reserved_3_7:5;
u64 tstmp_bgx_intf:3;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 tstmp_bgx_intf:3;
u64 reserved_3_7:5;
u64 qsize:3;
u64 reserved_11_15:5;
u64 ldwb:1;
u64 reset:1;
u64 reserved_18_18:1;
u64 ena:1;
u64 reserved_20_63:44;
#endif
};
struct rbdr_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 reserved_45_63:19;
u64 ena:1;
u64 reset:1;
u64 ldwb:1;
u64 reserved_36_41:6;
u64 qsize:4;
u64 reserved_25_31:7;
u64 avg_con:9;
u64 reserved_12_15:4;
u64 lines:12;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 lines:12;
u64 reserved_12_15:4;
u64 avg_con:9;
u64 reserved_25_31:7;
u64 qsize:4;
u64 reserved_36_41:6;
u64 ldwb:1;
u64 reset:1;
u64 ena:1;
u64 reserved_45_63:19;
#endif
};
struct qs_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 reserved_32_63:32;
u64 ena:1;
u64 reserved_27_30:4;
u64 sq_ins_ena:1;
u64 sq_ins_pos:6;
u64 lock_ena:1;
u64 lock_viol_cqe_ena:1;
u64 send_tstmp_ena:1;
u64 be:1;
u64 reserved_7_15:9;
u64 vnic:7;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 vnic:7;
u64 reserved_7_15:9;
u64 be:1;
u64 send_tstmp_ena:1;
u64 lock_viol_cqe_ena:1;
u64 lock_ena:1;
u64 sq_ins_pos:6;
u64 sq_ins_ena:1;
u64 reserved_27_30:4;
u64 ena:1;
u64 reserved_32_63:32;
#endif
};
#endif /* Q_STRUCT_H */
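Each packet on the send queue is described by a header subdescriptor followed by subdesc_cnt further subdescriptors (gather, immediate, CRC or memory). A hedged sketch of filling the header for a packet carried in a single gather entry (pkt_len is an illustrative variable, not from the commit):
struct sq_hdr_subdesc hdr = { 0 };
hdr.subdesc_type = SQ_DESC_TYPE_HEADER;
hdr.subdesc_cnt = 1; /* one gather subdescriptor follows */
hdr.post_cqe = 1; /* post a CQE even on success */
hdr.tot_len = pkt_len; /* total bytes across all gather entries */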

drivers/net/octeontx/smi.c Normal file
@@ -0,0 +1,380 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Marvell International Ltd.
*/
#include <dm.h>
#include <malloc.h>
#include <miiphy.h>
#include <misc.h>
#include <pci.h>
#include <pci_ids.h>
#include <phy.h>
#include <asm/io.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#define PCI_DEVICE_ID_OCTEONTX_SMI 0xA02B
DECLARE_GLOBAL_DATA_PTR;
enum octeontx_smi_mode {
CLAUSE22 = 0,
CLAUSE45 = 1,
};
enum {
SMI_OP_C22_WRITE = 0,
SMI_OP_C22_READ = 1,
SMI_OP_C45_ADDR = 0,
SMI_OP_C45_WRITE = 1,
SMI_OP_C45_PRIA = 2,
SMI_OP_C45_READ = 3,
};
union smi_x_clk {
u64 u;
struct smi_x_clk_s {
int phase:8;
int sample:4;
int preamble:1;
int clk_idle:1;
int reserved_14_14:1;
int sample_mode:1;
int sample_hi:5;
int reserved_21_23:3;
int mode:1;
} s;
};
union smi_x_cmd {
u64 u;
struct smi_x_cmd_s {
int reg_adr:5;
int reserved_5_7:3;
int phy_adr:5;
int reserved_13_15:3;
int phy_op:2;
} s;
};
union smi_x_wr_dat {
u64 u;
struct smi_x_wr_dat_s {
unsigned int dat:16;
int val:1;
int pending:1;
} s;
};
union smi_x_rd_dat {
u64 u;
struct smi_x_rd_dat_s {
unsigned int dat:16;
int val:1;
int pending:1;
} s;
};
union smi_x_en {
u64 u;
struct smi_x_en_s {
int en:1;
} s;
};
#define SMI_X_RD_DAT 0x10ull
#define SMI_X_WR_DAT 0x08ull
#define SMI_X_CMD 0x00ull
#define SMI_X_CLK 0x18ull
#define SMI_X_EN 0x20ull
struct octeontx_smi_priv {
void __iomem *baseaddr;
enum octeontx_smi_mode mode;
};
#define MDIO_TIMEOUT 10000
void octeontx_smi_setmode(struct mii_dev *bus, enum octeontx_smi_mode mode)
{
struct octeontx_smi_priv *priv = bus->priv;
union smi_x_clk smix_clk;
smix_clk.u = readq(priv->baseaddr + SMI_X_CLK);
smix_clk.s.mode = mode;
smix_clk.s.preamble = mode == CLAUSE45;
writeq(smix_clk.u, priv->baseaddr + SMI_X_CLK);
priv->mode = mode;
}
int octeontx_c45_addr(struct mii_dev *bus, int addr, int devad, int regnum)
{
struct octeontx_smi_priv *priv = bus->priv;
union smi_x_cmd smix_cmd;
union smi_x_wr_dat smix_wr_dat;
unsigned long timeout = MDIO_TIMEOUT;
smix_wr_dat.u = 0;
smix_wr_dat.s.dat = regnum;
writeq(smix_wr_dat.u, priv->baseaddr + SMI_X_WR_DAT);
smix_cmd.u = 0;
smix_cmd.s.phy_op = SMI_OP_C45_ADDR;
smix_cmd.s.phy_adr = addr;
smix_cmd.s.reg_adr = devad;
writeq(smix_cmd.u, priv->baseaddr + SMI_X_CMD);
do {
smix_wr_dat.u = readq(priv->baseaddr + SMI_X_WR_DAT);
udelay(100);
timeout--;
} while (smix_wr_dat.s.pending && timeout);
return timeout == 0;
}
int octeontx_phy_read(struct mii_dev *bus, int addr, int devad, int regnum)
{
struct octeontx_smi_priv *priv = bus->priv;
union smi_x_cmd smix_cmd;
union smi_x_rd_dat smix_rd_dat;
unsigned long timeout = MDIO_TIMEOUT;
int ret;
enum octeontx_smi_mode mode = (devad < 0) ? CLAUSE22 : CLAUSE45;
debug("RD: Mode: %u, baseaddr: %p, addr: %d, devad: %d, reg: %d\n",
mode, priv->baseaddr, addr, devad, regnum);
octeontx_smi_setmode(bus, mode);
if (mode == CLAUSE45) {
ret = octeontx_c45_addr(bus, addr, devad, regnum);
debug("RD: ret: %u\n", ret);
if (ret)
return 0;
}
smix_cmd.u = 0;
smix_cmd.s.phy_adr = addr;
if (mode == CLAUSE45) {
smix_cmd.s.reg_adr = devad;
smix_cmd.s.phy_op = SMI_OP_C45_READ;
} else {
smix_cmd.s.reg_adr = regnum;
smix_cmd.s.phy_op = SMI_OP_C22_READ;
}
writeq(smix_cmd.u, priv->baseaddr + SMI_X_CMD);
do {
smix_rd_dat.u = readq(priv->baseaddr + SMI_X_RD_DAT);
udelay(10);
timeout--;
} while (smix_rd_dat.s.pending && timeout);
debug("SMIX_RD_DAT: %lx\n", (unsigned long)smix_rd_dat.u);
return smix_rd_dat.s.dat;
}
int octeontx_phy_write(struct mii_dev *bus, int addr, int devad, int regnum,
u16 value)
{
struct octeontx_smi_priv *priv = bus->priv;
union smi_x_cmd smix_cmd;
union smi_x_wr_dat smix_wr_dat;
unsigned long timeout = MDIO_TIMEOUT;
int ret;
enum octeontx_smi_mode mode = (devad < 0) ? CLAUSE22 : CLAUSE45;
debug("WR: Mode: %u, baseaddr: %p, addr: %d, devad: %d, reg: %d\n",
mode, priv->baseaddr, addr, devad, regnum);
if (mode == CLAUSE45) {
ret = octeontx_c45_addr(bus, addr, devad, regnum);
debug("WR: ret: %u\n", ret);
if (ret)
return ret;
}
smix_wr_dat.u = 0;
smix_wr_dat.s.dat = value;
writeq(smix_wr_dat.u, priv->baseaddr + SMI_X_WR_DAT);
smix_cmd.u = 0;
smix_cmd.s.phy_adr = addr;
if (mode == CLAUSE45) {
smix_cmd.s.reg_adr = devad;
smix_cmd.s.phy_op = SMI_OP_C45_WRITE;
} else {
smix_cmd.s.reg_adr = regnum;
smix_cmd.s.phy_op = SMI_OP_C22_WRITE;
}
writeq(smix_cmd.u, priv->baseaddr + SMI_X_CMD);
do {
smix_wr_dat.u = readq(priv->baseaddr + SMI_X_WR_DAT);
udelay(10);
timeout--;
} while (smix_wr_dat.s.pending && timeout);
debug("SMIX_WR_DAT: %lx\n", (unsigned long)smix_wr_dat.u);
return timeout == 0;
}
int octeontx_smi_reset(struct mii_dev *bus)
{
struct octeontx_smi_priv *priv = bus->priv;
union smi_x_en smi_en;
smi_en.s.en = 0;
writeq(smi_en.u, priv->baseaddr + SMI_X_EN);
smi_en.s.en = 1;
writeq(smi_en.u, priv->baseaddr + SMI_X_EN);
octeontx_smi_setmode(bus, CLAUSE22);
return 0;
}
/* PHY XS initialization, primarily for RXAUI */
int rxaui_phy_xs_init(struct mii_dev *bus, int phy_addr)
{
int reg;
ulong start_time;
int phy_id1, phy_id2;
int oui, model_number;
phy_id1 = octeontx_phy_read(bus, phy_addr, 1, 0x2);
phy_id2 = octeontx_phy_read(bus, phy_addr, 1, 0x3);
model_number = (phy_id2 >> 4) & 0x3F;
debug("%s model %x\n", __func__, model_number);
oui = phy_id1;
oui <<= 6;
oui |= (phy_id2 >> 10) & 0x3F;
debug("%s oui %x\n", __func__, oui);
switch (oui) {
case 0x5016:
if (model_number == 9) {
debug("%s +\n", __func__);
/* Perform hardware reset in XGXS control */
reg = octeontx_phy_read(bus, phy_addr, 4, 0x0);
if (reg < 0)
goto read_error;
reg |= 0x8000;
octeontx_phy_write(bus, phy_addr, 4, 0x0, reg);
start_time = get_timer(0);
do {
reg = octeontx_phy_read(bus, phy_addr, 4, 0x0);
if (reg < 0)
goto read_error;
} while ((reg & 0x8000) && get_timer(start_time) < 500);
if (reg & 0x8000) {
printf("HW reset for M88X3120 PHY failed");
printf("MII_BMCR: 0x%x\n", reg);
return -1;
}
/* program 4.49155 with 0x5 */
octeontx_phy_write(bus, phy_addr, 4, 0xc003, 0x5);
}
break;
default:
break;
}
return 0;
read_error:
debug("M88X3120 PHY config read failed\n");
return -1;
}
int octeontx_smi_probe(struct udevice *dev)
{
int ret, subnode, cnt = 0, node = dev->node.of_offset;
struct mii_dev *bus;
struct octeontx_smi_priv *priv;
pci_dev_t bdf = dm_pci_get_bdf(dev);
debug("SMI PCI device: %x\n", bdf);
dev->req_seq = PCI_FUNC(bdf);
if (!dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM)) {
printf("Failed to map PCI region for bdf %x\n", bdf);
return -1;
}
fdt_for_each_subnode(subnode, gd->fdt_blob, node) {
ret = fdt_node_check_compatible(gd->fdt_blob, subnode,
"cavium,thunder-8890-mdio");
if (ret)
continue;
bus = mdio_alloc();
priv = malloc(sizeof(*priv));
if (!bus || !priv) {
printf("Failed to allocate OcteonTX MDIO bus # %u\n",
dev->seq);
return -1;
}
bus->read = octeontx_phy_read;
bus->write = octeontx_phy_write;
bus->reset = octeontx_smi_reset;
bus->priv = priv;
priv->mode = CLAUSE22;
priv->baseaddr = (void __iomem *)fdtdec_get_addr(gd->fdt_blob,
subnode,
"reg");
debug("mdio base addr %p\n", priv->baseaddr);
/* generate a unique name for each MDIO bus */
snprintf(bus->name, MDIO_NAME_LEN, "smi%d", cnt++);
ret = mdio_register(bus);
if (ret)
return ret;
}
return 0;
}
static const struct udevice_id octeontx_smi_ids[] = {
{ .compatible = "cavium,thunder-8890-mdio-nexus" },
{}
};
U_BOOT_DRIVER(octeontx_smi) = {
.name = "octeontx_smi",
.id = UCLASS_MISC,
.probe = octeontx_smi_probe,
.of_match = octeontx_smi_ids,
};
static struct pci_device_id octeontx_smi_supported[] = {
{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_SMI) },
{}
};
U_BOOT_PCI_DEVICE(octeontx_smi, octeontx_smi_supported);
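A negative devad selects Clause 22 in octeontx_phy_read()/octeontx_phy_write() and anything else selects Clause 45, so one bus serves both PHY flavours. A minimal usage sketch against a registered bus (PHY address 0 and the bus name are illustrative; MDIO_DEVAD_NONE and MII_PHYSID1 come from U-Boot's phy and MII headers):
struct mii_dev *bus = miiphy_get_dev_by_name("smi0");
if (bus) {
int id1 = octeontx_phy_read(bus, 0, MDIO_DEVAD_NONE, MII_PHYSID1);
int pma2 = octeontx_phy_read(bus, 0, 1, 2); /* Clause 45: devad 1, reg 2 */
printf("PHY ID1 %04x, PMA ID %04x\n", id1, pma2);
}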

drivers/net/octeontx/xcv.c Normal file
@@ -0,0 +1,124 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Marvell International Ltd.
*/
#include <config.h>
#include <dm.h>
#include <errno.h>
#include <fdt_support.h>
#include <pci.h>
#include <malloc.h>
#include <miiphy.h>
#include <misc.h>
#include <net.h>
#include <netdev.h>
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/libfdt.h>
#include <asm/arch/csrs/csrs-xcv.h>
#define XCVX_BASE 0x87E0DB000000ULL
/* Initialize XCV block */
void xcv_init_hw(void)
{
union xcvx_reset reset;
union xcvx_dll_ctl xcv_dll_ctl;
/* Take the DLL out of reset */
reset.u = readq(XCVX_BASE + XCVX_RESET(0));
reset.s.dllrst = 0;
writeq(reset.u, XCVX_BASE + XCVX_RESET(0));
/* Take the clock tree out of reset */
reset.u = readq(XCVX_BASE + XCVX_RESET(0));
reset.s.clkrst = 0;
writeq(reset.u, XCVX_BASE + XCVX_RESET(0));
/* Once the 125MHz ref clock is stable, wait 10us for DLL to lock */
udelay(10);
/* Optionally, bypass the DLL setting */
xcv_dll_ctl.u = readq(XCVX_BASE + XCVX_DLL_CTL(0));
xcv_dll_ctl.s.clkrx_set = 0;
xcv_dll_ctl.s.clkrx_byp = 1;
xcv_dll_ctl.s.clktx_byp = 0;
writeq(xcv_dll_ctl.u, XCVX_BASE + XCVX_DLL_CTL(0));
/* Enable the compensation controller */
reset.u = readq(XCVX_BASE + XCVX_RESET(0));
reset.s.comp = 1;
writeq(reset.u, XCVX_BASE + XCVX_RESET(0));
reset.u = readq(XCVX_BASE + XCVX_RESET(0));
/* Wait for 1040 reference clock cycles for the compensation state
* machine lock.
*/
udelay(100);
/* Enable the XCV block */
reset.u = readq(XCVX_BASE + XCVX_RESET(0));
reset.s.enable = 1;
writeq(reset.u, XCVX_BASE + XCVX_RESET(0));
/* set XCV(0)_RESET[CLKRST] to 1 */
reset.u = readq(XCVX_BASE + XCVX_RESET(0));
reset.s.clkrst = 1;
writeq(reset.u, XCVX_BASE + XCVX_RESET(0));
}
/*
* Configure XCV link based on the speed
* link_up : Set to 1 when link is up otherwise 0
* link_speed: The speed of the link.
*/
void xcv_setup_link(bool link_up, int link_speed)
{
union xcvx_ctl xcv_ctl;
union xcvx_reset reset;
union xcvx_batch_crd_ret xcv_crd_ret;
int speed = 2;
/* Check RGMII link */
if (link_speed == 100)
speed = 1;
else if (link_speed == 10)
speed = 0;
if (link_up) {
/* Set operating speed */
xcv_ctl.u = readq(XCVX_BASE + XCVX_CTL(0));
xcv_ctl.s.speed = speed;
writeq(xcv_ctl.u, XCVX_BASE + XCVX_CTL(0));
/* Datapaths come out of reset
* - The datapath resets will disengage BGX from the
* RGMII interface
* - XCV will continue to return TX credits for each tick
* that is sent on the TX data path
*/
reset.u = readq(XCVX_BASE + XCVX_RESET(0));
reset.s.tx_dat_rst_n = 1;
reset.s.rx_dat_rst_n = 1;
writeq(reset.u, XCVX_BASE + XCVX_RESET(0));
/* Enable packet flow */
reset.u = readq(XCVX_BASE + XCVX_RESET(0));
reset.s.tx_pkt_rst_n = 1;
reset.s.rx_pkt_rst_n = 1;
writeq(reset.u, XCVX_BASE + XCVX_RESET(0));
xcv_crd_ret.u = readq(XCVX_BASE + XCVX_BATCH_CRD_RET(0));
xcv_crd_ret.s.crd_ret = 1;
writeq(xcv_crd_ret.u, XCVX_BASE + XCVX_BATCH_CRD_RET(0));
} else {
/* Disable packet flow */
reset.u = readq(XCVX_BASE + XCVX_RESET(0));
reset.s.tx_pkt_rst_n = 0;
reset.s.rx_pkt_rst_n = 0;
writeq(reset.u, XCVX_BASE + XCVX_RESET(0));
reset.u = readq(XCVX_BASE + XCVX_RESET(0));
}
}
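Taken together, xcv_init_hw() is the one-time block bring-up and xcv_setup_link() follows every PHY autonegotiation result; speeds other than 10 and 100 fall through to the 1G encoding. A hedged call sequence for a 1 Gbps link-up event:
xcv_init_hw(); /* one-time DLL/clock/compensation bring-up */
xcv_setup_link(true, 1000); /* link came up at 1000 Mbps */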

drivers/net/octeontx2/Makefile Normal file
@@ -0,0 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (C) 2018 Marvell International Ltd.
#
obj-$(CONFIG_NET_OCTEONTX2) += cgx.o nix_af.o nix.o rvu_pf.o \
rvu_af.o rvu_common.o

drivers/net/octeontx2/cgx.c Normal file
@@ -0,0 +1,296 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Marvell International Ltd.
*/
#include <dm.h>
#include <errno.h>
#include <malloc.h>
#include <misc.h>
#include <net.h>
#include <pci_ids.h>
#include <linux/list.h>
#include <asm/arch/board.h>
#include <asm/arch/csrs/csrs-cgx.h>
#include <asm/io.h>
#include "cgx.h"
char lmac_type_to_str[][8] = {
"SGMII",
"XAUI",
"RXAUI",
"10G_R",
"40G_R",
"RGMII",
"QSGMII",
"25G_R",
"50G_R",
"100G_R",
"USXGMII",
};
char lmac_speed_to_str[][8] = {
"0",
"10M",
"100M",
"1G",
"2.5G",
"5G",
"10G",
"20G",
"25G",
"40G",
"50G",
"80G",
"100G",
};
/**
* Given an LMAC/PF instance number, return the lmac
* Per design, each PF has only one LMAC mapped.
*
* @param lmac_instance LMAC/PF instance to find
*
* @return pointer to lmac data structure or NULL if not found
*/
struct lmac *nix_get_cgx_lmac(int lmac_instance)
{
struct cgx *cgx;
struct udevice *dev;
int i, idx, err;
for (i = 0; i < CGX_PER_NODE; i++) {
err = dm_pci_find_device(PCI_VENDOR_ID_CAVIUM,
PCI_DEVICE_ID_OCTEONTX2_CGX, i,
&dev);
if (err)
continue;
cgx = dev_get_priv(dev);
debug("%s udev %p cgx %p instance %d\n", __func__, dev, cgx,
lmac_instance);
for (idx = 0; idx < cgx->lmac_count; idx++) {
if (cgx->lmac[idx]->instance == lmac_instance)
return cgx->lmac[idx];
}
}
return NULL;
}
void cgx_lmac_mac_filter_clear(struct lmac *lmac)
{
union cgxx_cmrx_rx_dmac_ctl0 dmac_ctl0;
union cgxx_cmr_rx_dmacx_cam0 dmac_cam0;
void *reg_addr;
dmac_cam0.u = 0x0;
reg_addr = lmac->cgx->reg_base +
CGXX_CMR_RX_DMACX_CAM0(lmac->lmac_id * 8);
writeq(dmac_cam0.u, reg_addr);
debug("%s: reg %p dmac_cam0 %llx\n", __func__, reg_addr, dmac_cam0.u);
dmac_ctl0.u = 0x0;
dmac_ctl0.s.bcst_accept = 1;
dmac_ctl0.s.mcst_mode = 1;
dmac_ctl0.s.cam_accept = 0;
reg_addr = lmac->cgx->reg_base +
CGXX_CMRX_RX_DMAC_CTL0(lmac->lmac_id);
writeq(dmac_ctl0.u, reg_addr);
debug("%s: reg %p dmac_ctl0 %llx\n", __func__, reg_addr, dmac_ctl0.u);
}
void cgx_lmac_mac_filter_setup(struct lmac *lmac)
{
union cgxx_cmrx_rx_dmac_ctl0 dmac_ctl0;
union cgxx_cmr_rx_dmacx_cam0 dmac_cam0;
u64 mac, tmp;
void *reg_addr;
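/* Pack the six MAC bytes into the low bytes of a u64, then byte-swap and
* drop the two unused high bytes so that mac_addr[0] lands in the most
* significant byte of the 48-bit CAM address field.
*/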
memcpy((void *)&tmp, lmac->mac_addr, 6);
debug("%s: tmp %llx\n", __func__, tmp);
debug("%s: swab tmp %llx\n", __func__, swab64(tmp));
mac = swab64(tmp) >> 16;
debug("%s: mac %llx\n", __func__, mac);
dmac_cam0.u = 0x0;
dmac_cam0.s.id = lmac->lmac_id;
dmac_cam0.s.adr = mac;
dmac_cam0.s.en = 1;
reg_addr = lmac->cgx->reg_base +
CGXX_CMR_RX_DMACX_CAM0(lmac->lmac_id * 8);
writeq(dmac_cam0.u, reg_addr);
debug("%s: reg %p dmac_cam0 %llx\n", __func__, reg_addr, dmac_cam0.u);
dmac_ctl0.u = 0x0;
dmac_ctl0.s.bcst_accept = 1;
dmac_ctl0.s.mcst_mode = 0;
dmac_ctl0.s.cam_accept = 1;
reg_addr = lmac->cgx->reg_base +
CGXX_CMRX_RX_DMAC_CTL0(lmac->lmac_id);
writeq(dmac_ctl0.u, reg_addr);
debug("%s: reg %p dmac_ctl0 %llx\n", __func__, reg_addr, dmac_ctl0.u);
}
int cgx_lmac_set_pkind(struct lmac *lmac, u8 lmac_id, int pkind)
{
cgx_write(lmac->cgx, lmac_id, CGXX_CMRX_RX_ID_MAP(0),
(pkind & 0x3f));
return 0;
}
int cgx_lmac_link_status(struct lmac *lmac, int lmac_id, u64 *status)
{
int ret = 0;
ret = cgx_intf_get_link_sts(lmac->cgx->cgx_id, lmac_id, status);
if (ret) {
debug("%s request failed for cgx%d lmac%d\n",
__func__, lmac->cgx->cgx_id, lmac->lmac_id);
ret = -1;
}
return ret;
}
int cgx_lmac_rx_tx_enable(struct lmac *lmac, int lmac_id, bool enable)
{
struct cgx *cgx = lmac->cgx;
union cgxx_cmrx_config cmrx_config;
if (!cgx || lmac_id >= cgx->lmac_count)
return -ENODEV;
cmrx_config.u = cgx_read(cgx, lmac_id, CGXX_CMRX_CONFIG(0));
cmrx_config.s.data_pkt_rx_en =
cmrx_config.s.data_pkt_tx_en = enable ? 1 : 0;
cgx_write(cgx, lmac_id, CGXX_CMRX_CONFIG(0), cmrx_config.u);
return 0;
}
int cgx_lmac_link_enable(struct lmac *lmac, int lmac_id, bool enable,
u64 *status)
{
int ret = 0;
ret = cgx_intf_link_up_dwn(lmac->cgx->cgx_id, lmac_id, enable,
status);
if (ret) {
debug("%s request failed for cgx%d lmac%d\n",
__func__, lmac->cgx->cgx_id, lmac->lmac_id);
ret = -1;
}
return ret;
}
int cgx_lmac_internal_loopback(struct lmac *lmac, int lmac_id, bool enable)
{
struct cgx *cgx = lmac->cgx;
union cgxx_cmrx_config cmrx_cfg;
union cgxx_gmp_pcs_mrx_control mrx_control;
union cgxx_spux_control1 spux_control1;
enum lmac_type lmac_type;
if (!cgx || lmac_id >= cgx->lmac_count)
return -ENODEV;
cmrx_cfg.u = cgx_read(cgx, lmac_id, CGXX_CMRX_CONFIG(0));
lmac_type = cmrx_cfg.s.lmac_type;
if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) {
mrx_control.u = cgx_read(cgx, lmac_id,
CGXX_GMP_PCS_MRX_CONTROL(0));
mrx_control.s.loopbck1 = enable ? 1 : 0;
cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CONTROL(0),
mrx_control.u);
} else {
spux_control1.u = cgx_read(cgx, lmac_id,
CGXX_SPUX_CONTROL1(0));
spux_control1.s.loopbck = enable ? 1 : 0;
cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1(0),
spux_control1.u);
}
return 0;
}
static int cgx_lmac_init(struct cgx *cgx)
{
struct lmac *lmac;
union cgxx_cmrx_config cmrx_cfg;
static int instance = 1;
int i;
cgx->lmac_count = cgx_read(cgx, 0, CGXX_CMR_RX_LMACS());
debug("%s: Found %d lmacs for cgx %d@%p\n", __func__, cgx->lmac_count,
cgx->cgx_id, cgx->reg_base);
for (i = 0; i < cgx->lmac_count; i++) {
lmac = calloc(1, sizeof(*lmac));
if (!lmac)
return -ENOMEM;
lmac->instance = instance++;
snprintf(lmac->name, sizeof(lmac->name), "cgx_fwi_%d_%d",
cgx->cgx_id, i);
/* Get LMAC type */
cmrx_cfg.u = cgx_read(cgx, i, CGXX_CMRX_CONFIG(0));
lmac->lmac_type = cmrx_cfg.s.lmac_type;
lmac->lmac_id = i;
lmac->cgx = cgx;
cgx->lmac[i] = lmac;
debug("%s: map id %d to lmac %p (%s), type:%d instance %d\n",
__func__, i, lmac, lmac->name, lmac->lmac_type,
lmac->instance);
lmac->init_pend = 1;
printf("CGX%d LMAC%d [%s]\n", lmac->cgx->cgx_id,
lmac->lmac_id, lmac_type_to_str[lmac->lmac_type]);
octeontx2_board_get_mac_addr((lmac->instance - 1),
lmac->mac_addr);
debug("%s: MAC %pM\n", __func__, lmac->mac_addr);
cgx_lmac_mac_filter_setup(lmac);
}
return 0;
}
int cgx_probe(struct udevice *dev)
{
struct cgx *cgx = dev_get_priv(dev);
int err;
cgx->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
PCI_REGION_MEM);
cgx->dev = dev;
cgx->cgx_id = ((u64)(cgx->reg_base) >> 24) & 0x7;
debug("%s CGX BAR %p, id: %d\n", __func__, cgx->reg_base,
cgx->cgx_id);
debug("%s CGX %p, udev: %p\n", __func__, cgx, dev);
err = cgx_lmac_init(cgx);
return err;
}
int cgx_remove(struct udevice *dev)
{
struct cgx *cgx = dev_get_priv(dev);
int i;
debug("%s: cgx remove reg_base %p cgx_id %d",
__func__, cgx->reg_base, cgx->cgx_id);
for (i = 0; i < cgx->lmac_count; i++)
cgx_lmac_mac_filter_clear(cgx->lmac[i]);
return 0;
}
U_BOOT_DRIVER(cgx) = {
.name = "cgx",
.id = UCLASS_MISC,
.probe = cgx_probe,
.remove = cgx_remove,
.priv_auto_alloc_size = sizeof(struct cgx),
};
static struct pci_device_id cgx_supported[] = {
{PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_CGX) },
{}
};
U_BOOT_PCI_DEVICE(cgx, cgx_supported);
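Per the one-LMAC-per-PF design noted above, a network PF resolves its LMAC handle by instance number and then drives it through these helpers. A hedged usage sketch (instance 1 is illustrative):
struct lmac *lmac = nix_get_cgx_lmac(1); /* PF instance 1 */
if (lmac)
cgx_lmac_rx_tx_enable(lmac, lmac->lmac_id, true);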

drivers/net/octeontx2/cgx.h Normal file
@@ -0,0 +1,105 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2018 Marvell International Ltd.
*/
#ifndef __CGX_H__
#define __CGX_H__
#include "cgx_intf.h"
#define PCI_DEVICE_ID_OCTEONTX2_CGX 0xA059
#define MAX_LMAC_PER_CGX 4
#define CGX_PER_NODE 3
enum lmac_type {
LMAC_MODE_SGMII = 0,
LMAC_MODE_XAUI = 1,
LMAC_MODE_RXAUI = 2,
LMAC_MODE_10G_R = 3,
LMAC_MODE_40G_R = 4,
LMAC_MODE_QSGMII = 6,
LMAC_MODE_25G_R = 7,
LMAC_MODE_50G_R = 8,
LMAC_MODE_100G_R = 9,
LMAC_MODE_USXGMII = 10,
};
extern char lmac_type_to_str[][8];
extern char lmac_speed_to_str[][8];
struct lmac_priv {
u8 enable:1;
u8 full_duplex:1;
u8 speed:4;
u8 mode:1;
u8 rsvd:1;
u8 mac_addr[6];
};
struct cgx;
struct nix;
struct nix_af;
struct lmac {
struct cgx *cgx;
struct nix *nix;
char name[16];
enum lmac_type lmac_type;
bool init_pend;
u8 instance;
u8 lmac_id;
u8 pknd;
u8 link_num;
u32 chan_num;
u8 mac_addr[6];
};
struct cgx {
struct nix_af *nix_af;
void __iomem *reg_base;
struct udevice *dev;
struct lmac *lmac[MAX_LMAC_PER_CGX];
u8 cgx_id;
u8 lmac_count;
};
static inline void cgx_write(struct cgx *cgx, u8 lmac, u64 offset, u64 val)
{
writeq(val, cgx->reg_base + CMR_SHIFT(lmac) + offset);
}
static inline u64 cgx_read(struct cgx *cgx, u8 lmac, u64 offset)
{
return readq(cgx->reg_base + CMR_SHIFT(lmac) + offset);
}
/**
* Given an LMAC/PF instance number, return the lmac
* Per design, each PF has only one LMAC mapped.
*
* @param lmac_instance LMAC/PF instance to find
*
* @return pointer to lmac data structure or NULL if not found
*/
struct lmac *nix_get_cgx_lmac(int lmac_instance);
int cgx_lmac_set_pkind(struct lmac *lmac, u8 lmac_id, int pkind);
int cgx_lmac_internal_loopback(struct lmac *lmac, int lmac_id, bool enable);
int cgx_lmac_rx_tx_enable(struct lmac *lmac, int lmac_id, bool enable);
int cgx_lmac_link_enable(struct lmac *lmac, int lmac_id, bool enable,
u64 *status);
int cgx_lmac_link_status(struct lmac *lmac, int lmac_id, u64 *status);
void cgx_lmac_mac_filter_setup(struct lmac *lmac);
int cgx_intf_get_link_sts(u8 cgx, u8 lmac, u64 *lnk_sts);
int cgx_intf_link_up_dwn(u8 cgx, u8 lmac, u8 up_dwn, u64 *lnk_sts);
int cgx_intf_get_mac_addr(u8 cgx, u8 lmac, u8 *mac);
int cgx_intf_set_macaddr(struct udevice *dev);
int cgx_intf_prbs(u8 qlm, u8 mode, u32 time, u8 lane);
int cgx_intf_display_eye(u8 qlm, u8 lane);
int cgx_intf_display_serdes(u8 qlm, u8 lane);
#endif /* __CGX_H__ */

drivers/net/octeontx2/cgx_intf.c Normal file
@@ -0,0 +1,715 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Marvell International Ltd.
*/
#include <dm.h>
#include <errno.h>
#include <malloc.h>
#include <misc.h>
#include <net.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <asm/arch/board.h>
#include <asm/io.h>
#include "cgx_intf.h"
#include "cgx.h"
#include "nix.h"
static u64 cgx_rd_scrx(u8 cgx, u8 lmac, u8 index)
{
u64 addr;
addr = (index == 1) ? CGX_CMR_SCRATCH1 : CGX_CMR_SCRATCH0;
addr += CGX_SHIFT(cgx) + CMR_SHIFT(lmac);
return readq(addr);
}
static void cgx_wr_scrx(u8 cgx, u8 lmac, u8 index, u64 val)
{
u64 addr;
addr = (index == 1) ? CGX_CMR_SCRATCH1 : CGX_CMR_SCRATCH0;
addr += CGX_SHIFT(cgx) + CMR_SHIFT(lmac);
writeq(val, addr);
}
static u64 cgx_rd_scr0(u8 cgx, u8 lmac)
{
return cgx_rd_scrx(cgx, lmac, 0);
}
static u64 cgx_rd_scr1(u8 cgx, u8 lmac)
{
return cgx_rd_scrx(cgx, lmac, 1);
}
static void cgx_wr_scr0(u8 cgx, u8 lmac, u64 val)
{
return cgx_wr_scrx(cgx, lmac, 0, val);
}
static void cgx_wr_scr1(u8 cgx, u8 lmac, u64 val)
{
return cgx_wr_scrx(cgx, lmac, 1, val);
}
static inline void set_ownership(u8 cgx, u8 lmac, u8 val)
{
union cgx_scratchx1 scr1;
scr1.u = cgx_rd_scr1(cgx, lmac);
scr1.s.own_status = val;
cgx_wr_scr1(cgx, lmac, scr1.u);
}
static int wait_for_ownership(u8 cgx, u8 lmac)
{
union cgx_scratchx1 scr1;
union cgx_scratchx0 scr0;
u64 cmrx_int;
int timeout = 5000;
do {
scr1.u = cgx_rd_scr1(cgx, lmac);
scr0.u = cgx_rd_scr0(cgx, lmac);
/* clear async events if any */
if (scr0.s.evt_sts.evt_type == CGX_EVT_ASYNC &&
scr0.s.evt_sts.ack) {
/* clear interrupt */
cmrx_int = readq(CGX_CMR_INT +
CGX_SHIFT(cgx) + CMR_SHIFT(lmac));
cmrx_int |= 0x2; /* overflow bit */
writeq(cmrx_int, CGX_CMR_INT +
CGX_SHIFT(cgx) + CMR_SHIFT(lmac));
/* clear ack */
scr0.s.evt_sts.ack = 0;
cgx_wr_scr0(cgx, lmac, scr0.u);
}
if (timeout-- < 0) {
debug("timeout waiting for ownership\n");
return -ETIMEDOUT;
}
mdelay(1);
} while ((scr1.s.own_status == CGX_OWN_FIRMWARE) &&
scr0.s.evt_sts.ack);
return 0;
}
int cgx_intf_req(u8 cgx, u8 lmac, union cgx_cmd_s cmd_args, u64 *rsp,
int use_cmd_id_only)
{
union cgx_scratchx1 scr1;
union cgx_scratchx0 scr0;
u64 cmrx_int;
int timeout = 500;
int err = 0;
u8 cmd = cmd_args.cmd.id;
if (wait_for_ownership(cgx, lmac)) {
err = -ETIMEDOUT;
goto error;
}
/* send command */
scr1.u = cgx_rd_scr1(cgx, lmac);
if (use_cmd_id_only) {
scr1.s.cmd.id = cmd;
} else {
cmd_args.own_status = scr1.s.own_status;
scr1.s = cmd_args;
}
cgx_wr_scr1(cgx, lmac, scr1.u);
set_ownership(cgx, lmac, CGX_OWN_FIRMWARE);
/* wait for response and ownership */
do {
scr0.u = cgx_rd_scr0(cgx, lmac);
scr1.u = cgx_rd_scr1(cgx, lmac);
mdelay(10);
} while (timeout-- && (!scr0.s.evt_sts.ack) &&
(scr1.s.own_status == CGX_OWN_FIRMWARE));
if (timeout < 0) {
debug("%s timeout waiting for ack\n", __func__);
err = -ETIMEDOUT;
goto error;
}
if (cmd == CGX_CMD_INTF_SHUTDOWN)
goto error;
if (scr0.s.evt_sts.evt_type != CGX_EVT_CMD_RESP) {
debug("%s received async event instead of cmd resp event\n",
__func__);
err = -1;
goto error;
}
if (scr0.s.evt_sts.id != cmd) {
debug("%s received resp for cmd %d expected cmd %d\n",
__func__, scr0.s.evt_sts.id, cmd);
err = -1;
goto error;
}
if (scr0.s.evt_sts.stat != CGX_STAT_SUCCESS) {
debug("%s cmd%d failed on cgx%u lmac%u with errcode %d\n",
__func__, cmd, cgx, lmac, scr0.s.link_sts.err_type);
err = -1;
}
error:
/* clear interrupt */
cmrx_int = readq(CGX_CMR_INT + CGX_SHIFT(cgx) + CMR_SHIFT(lmac));
cmrx_int |= 0x2; /* overflow bit */
writeq(cmrx_int, CGX_CMR_INT + CGX_SHIFT(cgx) + CMR_SHIFT(lmac));
/* clear ack */
scr0.s.evt_sts.ack = 0;
cgx_wr_scr0(cgx, lmac, scr0.u);
*rsp = err ? 0 : scr0.u;
return err;
}
int cgx_intf_get_mac_addr(u8 cgx, u8 lmac, u8 *mac)
{
union cgx_scratchx0 scr0;
int ret;
union cgx_cmd_s cmd;
cmd.cmd.id = CGX_CMD_GET_MAC_ADDR;
ret = cgx_intf_req(cgx, lmac, cmd, &scr0.u, 1);
if (ret)
return -1;
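/* The low 9 bits of SCRATCH(0) hold the cgx_evt_sts_s header (ack,
* event type, status, id); shift it off so the payload starts at bit 0.
*/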
scr0.u >>= 9;
memcpy(mac, &scr0.u, 6);
return 0;
}
int cgx_intf_get_ver(u8 cgx, u8 lmac, u8 *ver)
{
union cgx_scratchx0 scr0;
int ret;
union cgx_cmd_s cmd;
cmd.cmd.id = CGX_CMD_GET_FW_VER;
ret = cgx_intf_req(cgx, lmac, cmd, &scr0.u, 1);
if (ret)
return -1;
scr0.u >>= 9;
*ver = scr0.u & 0xFFFF;
return 0;
}
int cgx_intf_get_link_sts(u8 cgx, u8 lmac, u64 *lnk_sts)
{
union cgx_scratchx0 scr0;
int ret;
union cgx_cmd_s cmd;
cmd.cmd.id = CGX_CMD_GET_LINK_STS;
ret = cgx_intf_req(cgx, lmac, cmd, &scr0.u, 1);
if (ret)
return -1;
scr0.u >>= 9;
/* pass the same format as cgx_lnk_sts_s
* err_type:10, speed:4, full_duplex:1, link_up:1
*/
*lnk_sts = scr0.u & 0xFFFF;
return 0;
}
int cgx_intf_link_up_dwn(u8 cgx, u8 lmac, u8 up_dwn, u64 *lnk_sts)
{
union cgx_scratchx0 scr0;
int ret;
union cgx_cmd_s cmd;
cmd.cmd.id = up_dwn ? CGX_CMD_LINK_BRING_UP : CGX_CMD_LINK_BRING_DOWN;
ret = cgx_intf_req(cgx, lmac, cmd, &scr0.u, 1);
if (ret)
return -1;
scr0.u >>= 9;
/* pass the same format as cgx_lnk_sts_s
* err_type:10, speed:4, full_duplex:1, link_up:1
*/
*lnk_sts = scr0.u & 0xFFFF;
return 0;
}
void cgx_intf_shutdown(void)
{
union cgx_scratchx0 scr0;
union cgx_cmd_s cmd;
cmd.cmd.id = CGX_CMD_INTF_SHUTDOWN;
cgx_intf_req(0, 0, cmd, &scr0.u, 1);
}
int cgx_intf_prbs(u8 qlm, u8 mode, u32 time, u8 lane)
{
union cgx_scratchx0 scr0;
int ret;
union cgx_cmd_s cmd;
cmd.cmd.id = CGX_CMD_PRBS;
cmd.prbs_args.qlm = qlm;
cmd.prbs_args.mode = mode;
cmd.prbs_args.time = time;
cmd.prbs_args.lane = lane;
ret = cgx_intf_req(0, 0, cmd, &scr0.u, 0);
if (ret)
return -1;
return 0;
}
enum cgx_mode {
MODE_10G_C2C,
MODE_10G_C2M,
MODE_10G_KR,
MODE_25G_C2C,
MODE_25G_2_C2C,
MODE_50G_C2C,
MODE_50G_4_C2C
};
static char intf_speed_to_str[][8] = {
"10M",
"100M",
"1G",
"2.5G",
"5G",
"10G",
"20G",
"25G",
"40G",
"50G",
"80G",
"100G",
};
static void mode_to_args(int mode, struct cgx_mode_change_args *args)
{
args->an = 0;
args->duplex = 0;
args->port = 0;
switch (mode) {
case MODE_10G_C2C:
args->speed = CGX_LINK_10G;
args->mode = BIT_ULL(CGX_MODE_10G_C2C_BIT);
break;
case MODE_10G_C2M:
args->speed = CGX_LINK_10G;
args->mode = BIT_ULL(CGX_MODE_10G_C2M_BIT);
break;
case MODE_10G_KR:
args->speed = CGX_LINK_10G;
args->mode = BIT_ULL(CGX_MODE_10G_KR_BIT);
args->an = 1;
break;
case MODE_25G_C2C:
args->speed = CGX_LINK_25G;
args->mode = BIT_ULL(CGX_MODE_25G_C2C_BIT);
break;
case MODE_25G_2_C2C:
args->speed = CGX_LINK_25G;
args->mode = BIT_ULL(CGX_MODE_25G_2_C2C_BIT);
break;
case MODE_50G_C2C:
args->speed = CGX_LINK_50G;
args->mode = BIT_ULL(CGX_MODE_50G_C2C_BIT);
break;
case MODE_50G_4_C2C:
args->speed = CGX_LINK_50G;
args->mode = BIT_ULL(CGX_MODE_50G_4_C2C_BIT);
}
}
int cgx_intf_set_mode(struct udevice *ethdev, int mode)
{
struct rvu_pf *rvu = dev_get_priv(ethdev);
struct nix *nix = rvu->nix;
union cgx_scratchx0 scr0;
int ret;
union cgx_cmd_s cmd;
cmd.cmd.id = CGX_CMD_MODE_CHANGE;
mode_to_args(mode, &cmd.mode_change_args);
ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id,
cmd, &scr0.u, 0);
if (ret) {
printf("Mode change command failed for %s\n", ethdev->name);
return -1;
}
cmd.cmd.id = CGX_CMD_GET_LINK_STS;
ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id,
cmd, &scr0.u, 1);
if (ret) {
printf("Get Link Status failed for %s\n", ethdev->name);
return -1;
}
printf("Current Link Status: ");
if (scr0.s.link_sts.speed) {
printf("%s\n", intf_speed_to_str[scr0.s.link_sts.speed]);
switch (scr0.s.link_sts.fec) {
case 0:
printf("FEC_NONE\n");
break;
case 1:
printf("FEC_BASE_R\n");
break;
case 2:
printf("FEC_RS\n");
break;
}
printf("Auto Negotiation %sabled\n",
scr0.s.link_sts.an ? "En" : "Dis");
printf("%s Duplex\n",
scr0.s.link_sts.full_duplex ? "Full" : "Half");
} else {
printf("Down\n");
}
return 0;
}
int cgx_intf_get_mode(struct udevice *ethdev)
{
struct rvu_pf *rvu = dev_get_priv(ethdev);
struct nix *nix = rvu->nix;
union cgx_scratchx0 scr0;
int ret;
union cgx_cmd_s cmd;
cmd.cmd.id = CGX_CMD_GET_LINK_STS;
ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id,
cmd, &scr0.u, 1);
if (ret) {
printf("Get link status failed for %s\n", ethdev->name);
return -1;
}
printf("Current Interface Mode: ");
switch (scr0.s.link_sts.mode) {
case CGX_MODE_10G_C2C_BIT:
printf("10G_C2C\n");
break;
case CGX_MODE_10G_C2M_BIT:
printf("10G_C2M\n");
break;
case CGX_MODE_10G_KR_BIT:
printf("10G_KR\n");
break;
case CGX_MODE_25G_C2C_BIT:
printf("25G_C2C\n");
break;
case CGX_MODE_25G_2_C2C_BIT:
printf("25G_2_C2C\n");
break;
case CGX_MODE_50G_C2C_BIT:
printf("50G_C2C\n");
break;
case CGX_MODE_50G_4_C2C_BIT:
printf("50G_4_C2C\n");
break;
default:
printf("Unknown\n");
break;
}
return 0;
}
int cgx_intf_get_fec(struct udevice *ethdev)
{
struct rvu_pf *rvu = dev_get_priv(ethdev);
struct nix *nix = rvu->nix;
union cgx_scratchx0 scr0;
int ret;
union cgx_cmd_s cmd;
cmd.cmd.id = CGX_CMD_GET_SUPPORTED_FEC;
ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id,
cmd, &scr0.u, 1);
if (ret) {
printf("Get supported FEC failed for %s\n", ethdev->name);
return -1;
}
printf("Supported FEC type: ");
switch (scr0.s.supported_fec.fec) {
case 0:
printf("FEC_NONE\n");
break;
case 1:
printf("FEC_BASE_R\n");
break;
case 2:
printf("FEC_RS\n");
break;
case 3:
printf("FEC_BASE_R FEC_RS\n");
break;
}
cmd.cmd.id = CGX_CMD_GET_LINK_STS;
ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id,
cmd, &scr0.u, 1);
if (ret) {
printf("Get active fec failed for %s\n", ethdev->name);
return -1;
}
printf("Active FEC type: ");
switch (scr0.s.link_sts.fec) {
case 0:
printf("FEC_NONE\n");
break;
case 1:
printf("FEC_BASE_R\n");
break;
case 2:
printf("FEC_RS\n");
break;
}
return 0;
}
int cgx_intf_set_fec(struct udevice *ethdev, int type)
{
struct rvu_pf *rvu = dev_get_priv(ethdev);
struct nix *nix = rvu->nix;
union cgx_scratchx0 scr0;
int ret;
union cgx_cmd_s cmd;
cmd.cmd.id = CGX_CMD_SET_FEC;
cmd.fec_args.fec = type;
ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id,
cmd, &scr0.u, 0);
if (ret) {
printf("Set FEC type %d failed for %s\n", type, ethdev->name);
return -1;
}
return 0;
}
int cgx_intf_get_phy_mod_type(struct udevice *ethdev)
{
struct rvu_pf *rvu = dev_get_priv(ethdev);
struct nix *nix = rvu->nix;
union cgx_scratchx0 scr0;
int ret;
union cgx_cmd_s cmd;
cmd.cmd.id = CGX_CMD_GET_PHY_MOD_TYPE;
ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id,
cmd, &scr0.u, 1);
if (ret) {
printf("Get PHYMOD type failed for %s\n", ethdev->name);
return -1;
}
printf("Current phy mod type %s\n",
scr0.s.phy_mod_type.mod ? "PAM4" : "NRZ");
return 0;
}
int cgx_intf_set_phy_mod_type(struct udevice *ethdev, int type)
{
struct rvu_pf *rvu = dev_get_priv(ethdev);
struct nix *nix = rvu->nix;
union cgx_scratchx0 scr0;
int ret;
union cgx_cmd_s cmd;
cmd.cmd.id = CGX_CMD_SET_PHY_MOD_TYPE;
cmd.phy_mod_args.mod = type;
ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id,
cmd, &scr0.u, 0);
if (ret) {
printf("Set PHYMOD type %d failed for %s\n", type,
ethdev->name);
return -1;
}
return 0;
}
int cgx_intf_set_an_lbk(struct udevice *ethdev, int enable)
{
struct rvu_pf *rvu = dev_get_priv(ethdev);
struct nix *nix = rvu->nix;
union cgx_scratchx0 scr0;
int ret;
union cgx_cmd_s cmd;
cmd.cmd.id = CGX_CMD_AN_LOOPBACK;
cmd.cmd_args.enable = enable;
ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id,
cmd, &scr0.u, 0);
if (ret) {
printf("Set AN loopback command failed on %s\n", ethdev->name);
return -1;
}
printf("AN loopback %s for %s\n", enable ? "set" : "clear",
ethdev->name);
return 0;
}
int cgx_intf_get_ignore(struct udevice *ethdev, int cgx, int lmac)
{
struct rvu_pf *rvu;
struct nix *nix;
union cgx_scratchx0 scr0;
int ret, cgx_id = cgx, lmac_id = lmac;
union cgx_cmd_s cmd;
if (ethdev) {
rvu = dev_get_priv(ethdev);
nix = rvu->nix;
cgx_id = nix->lmac->cgx->cgx_id;
lmac_id = nix->lmac->lmac_id;
}
cmd.cmd.id = CGX_CMD_GET_PERSIST_IGNORE;
ret = cgx_intf_req(cgx_id, lmac_id, cmd, &scr0.u, 1);
if (ret) {
if (ethdev)
printf("Get ignore command failed for %s\n",
ethdev->name);
else
printf("Get ignore command failed for CGX%d LMAC%d\n",
cgx_id, lmac_id);
return -1;
}
if (ethdev)
printf("Persist settings %signored for %s\n",
scr0.s.persist.ignore ? "" : "not ", ethdev->name);
else
printf("Persist settings %signored for CGX%d LMAC%d\n",
scr0.s.persist.ignore ? "" : "not ", cgx_id, lmac_id);
return 0;
}
int cgx_intf_set_ignore(struct udevice *ethdev, int cgx, int lmac, int ignore)
{
struct rvu_pf *rvu;
struct nix *nix;
union cgx_scratchx0 scr0;
int ret, cgx_id = cgx, lmac_id = lmac;
union cgx_cmd_s cmd;
if (ethdev) {
rvu = dev_get_priv(ethdev);
nix = rvu->nix;
cgx_id = nix->lmac->cgx->cgx_id;
lmac_id = nix->lmac->lmac_id;
}
cmd.cmd.id = CGX_CMD_SET_PERSIST_IGNORE;
cmd.persist_args.ignore = ignore;
ret = cgx_intf_req(cgx_id, lmac_id, cmd, &scr0.u, 0);
if (ret) {
if (ethdev)
printf("Set ignore command failed for %s\n",
ethdev->name);
else
printf("Set ignore command failed for CGX%d LMAC%d\n",
cgx_id, lmac_id);
return -1;
}
return 0;
}
int cgx_intf_set_macaddr(struct udevice *ethdev)
{
struct rvu_pf *rvu = dev_get_priv(ethdev);
struct nix *nix = rvu->nix;
union cgx_scratchx0 scr0;
int ret;
union cgx_cmd_s cmd;
u64 mac, tmp;
memcpy((void *)&tmp, nix->lmac->mac_addr, 6);
mac = swab64(tmp) >> 16;
cmd.cmd.id = CGX_CMD_SET_MAC_ADDR;
cmd.mac_args.addr = mac;
cmd.mac_args.pf_id = rvu->pfid;
ret = cgx_intf_req(nix->lmac->cgx->cgx_id, nix->lmac->lmac_id,
cmd, &scr0.u, 0);
if (ret) {
printf("Set user mac addr failed for %s\n", ethdev->name);
return -1;
}
return 0;
}
int cgx_intf_display_eye(u8 qlm, u8 lane)
{
union cgx_scratchx0 scr0;
int ret;
union cgx_cmd_s cmd;
cmd.cmd.id = CGX_CMD_DISPLAY_EYE;
cmd.dsp_eye_args.qlm = qlm;
cmd.dsp_eye_args.lane = lane;
ret = cgx_intf_req(0, 0, cmd, &scr0.u, 0);
if (ret)
return -1;
return 0;
}
int cgx_intf_display_serdes(u8 qlm, u8 lane)
{
union cgx_scratchx0 scr0;
int ret;
union cgx_cmd_s cmd;
cmd.cmd.id = CGX_CMD_DISPLAY_SERDES;
cmd.dsp_eye_args.qlm = qlm;
cmd.dsp_eye_args.lane = lane;
ret = cgx_intf_req(0, 0, cmd, &scr0.u, 0);
if (ret)
return -1;
return 0;
}

drivers/net/octeontx2/cgx_intf.h Normal file
@@ -0,0 +1,448 @@
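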
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2018 Marvell International Ltd.
*/
#ifndef __CGX_INTF_H__
#define __CGX_INTF_H__
#define CGX_FIRMWARE_MAJOR_VER 1
#define CGX_FIRMWARE_MINOR_VER 0
/* Register offsets */
#define CGX_CMR_INT 0x87e0e0000040
#define CGX_CMR_SCRATCH0 0x87e0e0001050
#define CGX_CMR_SCRATCH1 0x87e0e0001058
#define CGX_SHIFT(x) (0x1000000 * ((x) & 0x3))
#define CMR_SHIFT(x) (0x40000 * ((x) & 0x3))
/* CGX error types. set for cmd response status as CGX_STAT_FAIL */
enum cgx_error_type {
CGX_ERR_NONE,
CGX_ERR_LMAC_NOT_ENABLED,
CGX_ERR_LMAC_MODE_INVALID,
CGX_ERR_REQUEST_ID_INVALID,
CGX_ERR_PREV_ACK_NOT_CLEAR,
CGX_ERR_PHY_LINK_DOWN,
CGX_ERR_PCS_RESET_FAIL,
CGX_ERR_AN_CPT_FAIL,
CGX_ERR_TX_NOT_IDLE,
CGX_ERR_RX_NOT_IDLE,
CGX_ERR_SPUX_BR_BLKLOCK_FAIL,
CGX_ERR_SPUX_RX_ALIGN_FAIL,
CGX_ERR_SPUX_TX_FAULT,
CGX_ERR_SPUX_RX_FAULT,
CGX_ERR_SPUX_RESET_FAIL,
CGX_ERR_SPUX_AN_RESET_FAIL,
CGX_ERR_SPUX_USX_AN_RESET_FAIL,
CGX_ERR_SMUX_RX_LINK_NOT_OK,
CGX_ERR_PCS_LINK_FAIL,
CGX_ERR_TRAINING_FAIL,
CGX_ERR_RX_EQU_FAIL,
CGX_ERR_SPUX_BER_FAIL,
CGX_ERR_SPUX_RSFEC_ALGN_FAIL,
CGX_ERR_SPUX_MARKER_LOCK_FAIL,
CGX_ERR_SET_FEC_INVALID,
CGX_ERR_SET_FEC_FAIL,
CGX_ERR_MODULE_INVALID,
CGX_ERR_MODULE_NOT_PRESENT,
CGX_ERR_SPEED_CHANGE_INVALID, /* = 28 */
/* FIXME : add more error types when adding support for new modes */
};
/* LINK speed types */
enum cgx_link_speed {
CGX_LINK_NONE,
CGX_LINK_10M,
CGX_LINK_100M,
CGX_LINK_1G,
CGX_LINK_2HG, /* 2.5 Gbps */
CGX_LINK_5G,
CGX_LINK_10G,
CGX_LINK_20G,
CGX_LINK_25G,
CGX_LINK_40G,
CGX_LINK_50G,
CGX_LINK_80G,
CGX_LINK_100G,
CGX_LINK_MAX,
};
/* REQUEST ID types. Input to firmware */
enum cgx_cmd_id {
CGX_CMD_NONE = 0,
CGX_CMD_GET_FW_VER,
CGX_CMD_GET_MAC_ADDR,
CGX_CMD_SET_MTU,
CGX_CMD_GET_LINK_STS, /* optional to user */
CGX_CMD_LINK_BRING_UP, /* = 5 */
CGX_CMD_LINK_BRING_DOWN,
CGX_CMD_INTERNAL_LBK,
CGX_CMD_EXTERNAL_LBK,
CGX_CMD_HIGIG,
CGX_CMD_LINK_STAT_CHANGE, /* = 10 */
CGX_CMD_MODE_CHANGE, /* hot plug support */
CGX_CMD_INTF_SHUTDOWN,
CGX_CMD_GET_MKEX_SIZE,
CGX_CMD_GET_MKEX_PROFILE,
CGX_CMD_GET_FWD_BASE, /* get base address of shared FW data */
CGX_CMD_GET_LINK_MODES, /* Supported Link Modes */
CGX_CMD_SET_LINK_MODE,
CGX_CMD_GET_SUPPORTED_FEC,
CGX_CMD_SET_FEC,
CGX_CMD_GET_AN, /* = 20 */
CGX_CMD_SET_AN,
CGX_CMD_GET_ADV_LINK_MODES,
CGX_CMD_GET_ADV_FEC,
CGX_CMD_GET_PHY_MOD_TYPE, /* line-side modulation type: NRZ or PAM4 */
CGX_CMD_SET_PHY_MOD_TYPE, /* = 25 */
CGX_CMD_PRBS,
CGX_CMD_DISPLAY_EYE,
CGX_CMD_GET_PHY_FEC_STATS,
CGX_CMD_DISPLAY_SERDES,
CGX_CMD_AN_LOOPBACK, /* = 30 */
CGX_CMD_GET_PERSIST_IGNORE,
CGX_CMD_SET_PERSIST_IGNORE,
CGX_CMD_SET_MAC_ADDR,
};
/* async event ids */
enum cgx_evt_id {
CGX_EVT_NONE,
CGX_EVT_LINK_CHANGE,
};
/* event types - cause of interrupt */
enum cgx_evt_type {
CGX_EVT_ASYNC,
CGX_EVT_CMD_RESP
};
enum cgx_stat {
CGX_STAT_SUCCESS,
CGX_STAT_FAIL
};
enum cgx_cmd_own {
/* default ownership with kernel/uefi/u-boot */
CGX_OWN_NON_SECURE_SW,
/* set by kernel/uefi/u-boot after posting a new request to ATF */
CGX_OWN_FIRMWARE,
};
/* Supported LINK MODE enums
* Each link mode is a bit mask of these
* enums which are represented as bits
*/
enum cgx_mode_t {
CGX_MODE_SGMII_BIT = 0,
CGX_MODE_1000_BASEX_BIT,
CGX_MODE_QSGMII_BIT,
CGX_MODE_10G_C2C_BIT,
CGX_MODE_10G_C2M_BIT,
CGX_MODE_10G_KR_BIT,
CGX_MODE_20G_C2C_BIT,
CGX_MODE_25G_C2C_BIT,
CGX_MODE_25G_C2M_BIT,
CGX_MODE_25G_2_C2C_BIT,
CGX_MODE_25G_CR_BIT,
CGX_MODE_25G_KR_BIT,
CGX_MODE_40G_C2C_BIT,
CGX_MODE_40G_C2M_BIT,
CGX_MODE_40G_CR4_BIT,
CGX_MODE_40G_KR4_BIT,
CGX_MODE_40GAUI_C2C_BIT,
CGX_MODE_50G_C2C_BIT,
CGX_MODE_50G_C2M_BIT,
CGX_MODE_50G_4_C2C_BIT,
CGX_MODE_50G_CR_BIT,
CGX_MODE_50G_KR_BIT,
CGX_MODE_80GAUI_C2C_BIT,
CGX_MODE_100G_C2C_BIT,
CGX_MODE_100G_C2M_BIT,
CGX_MODE_100G_CR4_BIT,
CGX_MODE_100G_KR4_BIT,
CGX_MODE_MAX_BIT /* = 29 */
};
/* scratchx(0) CSR used for ATF->non-secure SW communication.
* This acts as the status register
* Provides details on command ack/status, link status, error details
*/
/* CAUTION: the structures below are placed in order based on bit positions.
* For any updates or new bitfields, the corresponding structures need to be updated
*/
struct cgx_evt_sts_s { /* start from bit 0 */
u64 ack:1;
u64 evt_type:1; /* cgx_evt_type */
u64 stat:1; /* cgx_stat */
u64 id:6; /* cgx_evt_id/cgx_cmd_id */
u64 reserved:55;
};
/* all the below structures are in the same memory location of SCRATCHX(0)
* value can be read/written based on command ID
*/
/* Resp to command IDs with command status as CGX_STAT_FAIL
* Not applicable for commands :
* CGX_CMD_LINK_BRING_UP/DOWN/CGX_EVT_LINK_CHANGE
* check struct cgx_lnk_sts_s comments
*/
struct cgx_err_sts_s { /* start from bit 9 */
u64 reserved1:9;
u64 type:10; /* cgx_error_type */
u64 reserved2:35;
};
/* Resp to cmd ID as CGX_CMD_GET_FW_VER with cmd status as CGX_STAT_SUCCESS */
struct cgx_ver_s { /* start from bit 9 */
u64 reserved1:9;
u64 major_ver:4;
u64 minor_ver:4;
u64 reserved2:47;
};
/* Resp to cmd ID as CGX_CMD_GET_MAC_ADDR with cmd status as CGX_STAT_SUCCESS
* Returns each byte of MAC address in a separate bit field
*/
struct cgx_mac_addr_s { /* start from bit 9 */
u64 reserved1:9;
u64 addr_0:8;
u64 addr_1:8;
u64 addr_2:8;
u64 addr_3:8;
u64 addr_4:8;
u64 addr_5:8;
u64 reserved2:7;
};
/* Resp to cmd ID - CGX_CMD_LINK_BRING_UP/DOWN, event ID CGX_EVT_LINK_CHANGE
* status can be either CGX_STAT_FAIL or CGX_STAT_SUCCESS
* In case of CGX_STAT_FAIL, it indicates CGX configuration failed when
* processing link up/down/change command. Both err_type and current link status
* will be updated
* In case of CGX_STAT_SUCCESS, err_type will be CGX_ERR_NONE and current
* link status will be updated
*/
struct cgx_lnk_sts_s {
u64 reserved1:9;
u64 link_up:1;
u64 full_duplex:1;
u64 speed:4; /* cgx_link_speed */
u64 err_type:10;
u64 an:1; /* Current AN state : enabled/disabled */
u64 fec:2; /* Current FEC type if enabled, if not 0 */
u64 port:8; /* Share the current port info if required */
u64 mode:8; /* cgx_mode_t enum integer value */
u64 reserved2:20;
};
struct sh_fwd_base_s {
u64 reserved1:9;
u64 addr:55;
};
struct cgx_link_modes_s {
u64 reserved1:9;
u64 modes:55;
};
/* Resp to cmd ID - CGX_CMD_GET_ADV_FEC/CGX_CMD_GET_SUPPORTED_FEC
* fec : 2 bits
* typedef enum cgx_fec_type {
* CGX_FEC_NONE,
* CGX_FEC_BASE_R,
* CGX_FEC_RS
* } fec_type_t;
*/
struct cgx_fec_types_s {
u64 reserved1:9;
u64 fec:2;
u64 reserved2:53;
};
/* Resp to cmd ID - CGX_CMD_GET_AN */
struct cgx_get_an_s {
u64 reserved1:9;
u64 an:1;
u64 reserved2:54;
};
/* Resp to cmd ID - CGX_CMD_GET_PHY_MOD_TYPE */
struct cgx_get_phy_mod_type_s {
u64 reserved1:9;
u64 mod:1; /* 0=NRZ, 1=PAM4 */
u64 reserved2:54;
};
/* Resp to cmd ID - CGX_CMD_GET_PERSIST_IGNORE */
struct cgx_get_flash_ignore_s {
u64 reserved1:9;
u64 ignore:1;
u64 reserved2:54;
};
union cgx_rsp_sts {
/* Fixed, applicable for all commands/events */
struct cgx_evt_sts_s evt_sts;
/* response to CGX_CMD_LINK_BRINGUP/DOWN/LINK_CHANGE */
struct cgx_lnk_sts_s link_sts;
/* response to CGX_CMD_GET_FW_VER */
struct cgx_ver_s ver;
/* response to CGX_CMD_GET_MAC_ADDR */
struct cgx_mac_addr_s mac_s;
/* response to CGX_CMD_GET_FWD_BASE */
struct sh_fwd_base_s fwd_base_s;
/* response if evt_status = CMD_FAIL */
struct cgx_err_sts_s err;
/* response to CGX_CMD_GET_SUPPORTED_FEC */
struct cgx_fec_types_s supported_fec;
/* response to CGX_CMD_GET_LINK_MODES */
struct cgx_link_modes_s supported_modes;
/* response to CGX_CMD_GET_ADV_LINK_MODES */
struct cgx_link_modes_s adv_modes;
/* response to CGX_CMD_GET_ADV_FEC */
struct cgx_fec_types_s adv_fec;
/* response to CGX_CMD_GET_AN */
struct cgx_get_an_s an;
/* response to CGX_CMD_GET_PHY_MOD_TYPE */
struct cgx_get_phy_mod_type_s phy_mod_type;
/* response to CGX_CMD_GET_PERSIST_IGNORE */
struct cgx_get_flash_ignore_s persist;
#ifdef NT_FW_CONFIG
/* response to CGX_CMD_GET_MKEX_SIZE */
struct cgx_mcam_profile_sz_s prfl_sz;
/* response to CGX_CMD_GET_MKEX_PROFILE */
struct cgx_mcam_profile_addr_s prfl_addr;
#endif
};
union cgx_scratchx0 {
u64 u;
union cgx_rsp_sts s;
};
/* scratchx(1) CSR used for non-secure SW->ATF communication
* This CSR acts as a command register
*/
struct cgx_cmd { /* start from bit 2 */
u64 reserved1:2;
u64 id:6; /* cgx_request_id */
u64 reserved2:56;
};
/* all the below structures share the same memory location of SCRATCHX(1);
* the argument structure corresponding to the command ID needs to be filled in
*/
/* Any command using enable/disable as an argument needs
* to pass the option via this structure.
* Ex: Loopback, HiGig...
*/
struct cgx_ctl_args { /* start from bit 8 */
u64 reserved1:8;
u64 enable:1;
u64 reserved2:55;
};
/* command argument to be passed for cmd ID - CGX_CMD_SET_MTU */
struct cgx_mtu_args {
u64 reserved1:8;
u64 size:16;
u64 reserved2:40;
};
/* command argument to be passed for cmd ID - CGX_CMD_MODE_CHANGE */
struct cgx_mode_change_args {
u64 reserved1:8;
u64 speed:4; /* cgx_link_speed enum */
u64 duplex:1; /* 0 - full duplex, 1 - half duplex */
u64 an:1; /* 0 - disable AN, 1 - enable AN */
u64 port:8; /* device port */
u64 mode:42;
};
/* command argument to be passed for cmd ID - CGX_CMD_LINK_CHANGE */
struct cgx_link_change_args { /* start from bit 8 */
u64 reserved1:8;
u64 link_up:1;
u64 full_duplex:1;
u64 speed:4; /* cgx_link_speed */
u64 reserved2:50;
};
/* command argument to be passed for cmd ID - CGX_CMD_SET_LINK_MODE */
struct cgx_set_mode_args {
u64 reserved1:8;
u64 mode:56;
};
/* command argument to be passed for cmd ID - CGX_CMD_SET_FEC */
struct cgx_set_fec_args {
u64 reserved1:8;
u64 fec:2;
u64 reserved2:54;
};
/* command argument to be passed for cmd ID - CGX_CMD_SET_PHY_MOD_TYPE */
struct cgx_set_phy_mod_args {
u64 reserved1:8;
u64 mod:1; /* 0=NRZ, 1=PAM4 */
u64 reserved2:55;
};
/* command argument to be passed for cmd ID - CGX_CMD_SET_PERSIST_IGNORE */
struct cgx_set_flash_ignore_args {
u64 reserved1:8;
u64 ignore:1;
u64 reserved2:55;
};
/* command argument to be passed for cmd ID - CGX_CMD_SET_MAC_ADDR */
struct cgx_mac_addr_args {
u64 reserved1:8;
u64 addr:48;
u64 pf_id:8;
};
struct cgx_prbs_args {
u64 reserved1:8; /* start from bit 8 */
u64 lane:8;
u64 qlm:8;
u64 stop_on_error:1;
u64 mode:8;
u64 time:31;
};
struct cgx_display_eye_args {
u64 reserved1:8; /* start from bit 8 */
u64 qlm:8;
u64 lane:47;
};
union cgx_cmd_s {
u64 own_status:2; /* cgx_cmd_own */
struct cgx_cmd cmd;
struct cgx_ctl_args cmd_args;
struct cgx_mtu_args mtu_size;
struct cgx_link_change_args lnk_args; /* Input to CGX_CMD_LINK_CHANGE */
struct cgx_set_mode_args mode_args;
struct cgx_mode_change_args mode_change_args;
struct cgx_set_fec_args fec_args;
struct cgx_set_phy_mod_args phy_mod_args;
struct cgx_set_flash_ignore_args persist_args;
struct cgx_mac_addr_args mac_args;
/* any other args for command IDs, e.g. MTU, DMAC filtering control */
struct cgx_prbs_args prbs_args;
struct cgx_display_eye_args dsp_eye_args;
};
union cgx_scratchx1 {
u64 u;
union cgx_cmd_s s;
};
#endif /* __CGX_INTF_H__ */

drivers/net/octeontx2/lmt.h Normal file
@@ -0,0 +1,49 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2018 Marvell International Ltd.
*/
/**
* Atomically adds a signed value to a 64 bit (aligned) memory location,
* and returns previous value.
*
* This version does not perform 'sync' operations to enforce memory
* ordering. It should only be used when there are no memory operation
* ordering constraints. (This should NOT be used for reference counting -
* use the standard version instead.)
*
* @param ptr address in memory to add incr to
* @param incr amount to increment memory location by (signed)
*
* @return Value of memory location before increment
*/
static inline s64 atomic_fetch_and_add64_nosync(s64 *ptr, s64 incr)
{
s64 result;
/* Atomic add with no ordering */
asm volatile("ldadd %x[i], %x[r], [%[b]]"
: [r] "=r" (result), "+m" (*ptr)
: [i] "r" (incr), [b] "r" (ptr)
: "memory");
return result;
}
static inline void lmt_cancel(const struct nix *nix)
{
writeq(0, nix->lmt_base + LMT_LF_LMTCANCEL());
}
static inline u64 *lmt_store_ptr(struct nix *nix)
{
return (u64 *)((u8 *)(nix->lmt_base) +
LMT_LF_LMTLINEX(0));
}
static inline s64 lmt_submit(u64 io_address)
{
s64 result = 0;
asm volatile("ldeor xzr, %x[rf],[%[rs]]"
: [rf] "=r"(result) : [rs] "r"(io_address));
return result;
}
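/*
 * The transmit path pairs these helpers: copy the descriptor into the
 * LMT line, order the stores, then commit with lmt_submit(). A zero
 * result means the LMTST was dropped and must be retried, as
 * nix_lf_xmit() in nix.c does:
 *
 * do {
 *	nix_write_lmt(nix, &tx_dr, num_words);
 *	__iowmb();
 *	result = lmt_submit((u64)(nix->nix_base + NIXX_LF_OP_SENDX(0)));
 * } while (result == 0);
 */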

drivers/net/octeontx2/nix.c

@ -0,0 +1,831 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Marvell International Ltd.
*/
#include <dm.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <memalign.h>
#include <misc.h>
#include <net.h>
#include <pci.h>
#include <watchdog.h>
#include <asm/arch/board.h>
#include <asm/arch/csrs/csrs-lmt.h>
#include <asm/io.h>
#include <asm/types.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/types.h>
#include "nix.h"
#include "lmt.h"
#include "cgx.h"
/**
* NIX needs a lot of memory areas. Rather than handle all the failure cases,
* we'll use a wrapper around alloc that prints an error if a memory
* allocation fails.
*
* @param num_elements
* Number of elements to allocate
* @param elem_size Size of each element
* @param msg Text string to show when allocation fails
*
* @return A valid memory location or NULL on failure
*/
static void *nix_memalloc(int num_elements, size_t elem_size, const char *msg)
{
size_t alloc_size = num_elements * elem_size;
void *base = memalign(CONFIG_SYS_CACHELINE_SIZE, alloc_size);
if (!base)
printf("NIX: Mem alloc failed for %s (%d * %zu = %zu bytes)\n",
msg ? msg : __func__, num_elements, elem_size,
alloc_size);
else
memset(base, 0, alloc_size);
debug("NIX: Memory alloc for %s (%d * %zu = %zu bytes) at %p\n",
msg ? msg : __func__, num_elements, elem_size, alloc_size, base);
return base;
}
int npc_lf_setup(struct nix *nix)
{
int err;
err = npc_lf_admin_setup(nix);
if (err) {
printf("%s: Error setting up npc lf admin\n", __func__);
return err;
}
return 0;
}
static int npa_setup_pool(struct npa *npa, u32 pool_id,
size_t buffer_size, u32 queue_length, void *buffers[])
{
struct {
union npa_lf_aura_op_free0 f0;
union npa_lf_aura_op_free1 f1;
} aura_descr;
int index;
for (index = 0; index < queue_length; index++) {
buffers[index] = memalign(CONFIG_SYS_CACHELINE_SIZE,
buffer_size);
if (!buffers[index]) {
printf("%s: Out of memory %d, size: %zu\n",
__func__, index, buffer_size);
return -ENOMEM;
}
debug("%s: allocating buffer %d, addr %p size: %zu\n",
__func__, index, buffers[index], buffer_size);
/* Add the newly obtained pointer to the pool. 128 bit
* writes only.
*/
aura_descr.f0.s.addr = (u64)buffers[index];
aura_descr.f1.u = 0;
aura_descr.f1.s.aura = pool_id;
st128(npa->npa_base + NPA_LF_AURA_OP_FREE0(),
aura_descr.f0.u, aura_descr.f1.u);
}
return 0;
}
int npa_lf_setup(struct nix *nix)
{
struct rvu_pf *rvu = dev_get_priv(nix->dev);
struct nix_af *nix_af = nix->nix_af;
struct npa *npa;
union npa_af_const npa_af_const;
union npa_aura_s *aura;
union npa_pool_s *pool;
union rvu_func_addr_s block_addr;
int idx;
int stack_page_pointers;
int stack_page_bytes;
int err;
npa = (struct npa *)calloc(1, sizeof(struct npa));
if (!npa) {
printf("%s: out of memory for npa instance\n", __func__);
return -ENOMEM;
}
block_addr.u = 0;
block_addr.s.block = RVU_BLOCK_ADDR_E_NPA;
npa->npa_base = rvu->pf_base + block_addr.u;
npa->npa_af = nix_af->npa_af;
nix->npa = npa;
npa_af_const.u = npa_af_reg_read(npa->npa_af, NPA_AF_CONST());
stack_page_pointers = npa_af_const.s.stack_page_ptrs;
stack_page_bytes = npa_af_const.s.stack_page_bytes;
npa->stack_pages[NPA_POOL_RX] = (RQ_QLEN + stack_page_pointers - 1) /
stack_page_pointers;
npa->stack_pages[NPA_POOL_TX] = (SQ_QLEN + stack_page_pointers - 1) /
stack_page_pointers;
npa->stack_pages[NPA_POOL_SQB] = (SQB_QLEN + stack_page_pointers - 1) /
stack_page_pointers;
npa->pool_stack_pointers = stack_page_pointers;
npa->q_len[NPA_POOL_RX] = RQ_QLEN;
npa->q_len[NPA_POOL_TX] = SQ_QLEN;
npa->q_len[NPA_POOL_SQB] = SQB_QLEN;
npa->buf_size[NPA_POOL_RX] = MAX_MTU + CONFIG_SYS_CACHELINE_SIZE;
npa->buf_size[NPA_POOL_TX] = MAX_MTU + CONFIG_SYS_CACHELINE_SIZE;
npa->buf_size[NPA_POOL_SQB] = nix_af->sqb_size;
npa->aura_ctx = nix_memalloc(NPA_POOL_COUNT,
sizeof(union npa_aura_s),
"aura context");
if (!npa->aura_ctx) {
printf("%s: Out of memory for aura context\n", __func__);
return -ENOMEM;
}
for (idx = 0; idx < NPA_POOL_COUNT; idx++) {
npa->pool_ctx[idx] = nix_memalloc(1,
sizeof(union npa_pool_s),
"pool context");
if (!npa->pool_ctx[idx]) {
printf("%s: Out of memory for pool context\n",
__func__);
return -ENOMEM;
}
npa->pool_stack[idx] = nix_memalloc(npa->stack_pages[idx],
stack_page_bytes,
"pool stack");
if (!npa->pool_stack[idx]) {
printf("%s: Out of memory for pool stack\n", __func__);
return -ENOMEM;
}
}
err = npa_lf_admin_setup(npa, nix->lf, (dma_addr_t)npa->aura_ctx);
if (err) {
printf("%s: Error setting up NPA LF admin for lf %d\n",
__func__, nix->lf);
return err;
}
/* Set up the auras */
for (idx = 0; idx < NPA_POOL_COUNT; idx++) {
aura = npa->aura_ctx + (idx * sizeof(union npa_aura_s));
pool = npa->pool_ctx[idx];
debug("%s aura %p pool %p\n", __func__, aura, pool);
memset(aura, 0, sizeof(union npa_aura_s));
aura->s.fc_ena = 0;
aura->s.pool_addr = (u64)npa->pool_ctx[idx];
debug("%s aura.s.pool_addr %llx pool_addr %p\n", __func__,
aura->s.pool_addr, npa->pool_ctx[idx]);
aura->s.shift = 64 - __builtin_clzll(npa->q_len[idx]) - 8;
aura->s.count = npa->q_len[idx];
aura->s.limit = npa->q_len[idx];
aura->s.ena = 1;
err = npa_attach_aura(nix_af, nix->lf, aura, idx);
if (err)
return err;
memset(pool, 0, sizeof(*pool));
pool->s.fc_ena = 0;
pool->s.nat_align = 1;
pool->s.stack_base = (u64)(npa->pool_stack[idx]);
debug("%s pool.s.stack_base %llx stack_base %p\n", __func__,
pool->s.stack_base, npa->pool_stack[idx]);
pool->s.buf_size =
npa->buf_size[idx] / CONFIG_SYS_CACHELINE_SIZE;
pool->s.stack_max_pages = npa->stack_pages[idx];
pool->s.shift =
64 - __builtin_clzll(npa->pool_stack_pointers) - 8;
pool->s.ptr_start = 0;
pool->s.ptr_end = (1ULL << 40) - 1;
pool->s.ena = 1;
err = npa_attach_pool(nix_af, nix->lf, pool, idx);
if (err)
return err;
}
for (idx = 0; idx < NPA_POOL_COUNT; idx++) {
npa->buffers[idx] = nix_memalloc(npa->q_len[idx],
sizeof(void *),
"buffers");
if (!npa->buffers[idx]) {
printf("%s: Out of memory\n", __func__);
return -ENOMEM;
}
}
for (idx = 0; idx < NPA_POOL_COUNT; idx++) {
err = npa_setup_pool(npa, idx, npa->buf_size[idx],
npa->q_len[idx], npa->buffers[idx]);
if (err) {
printf("%s: Error setting up pool %d\n",
__func__, idx);
return err;
}
}
return 0;
}
int npa_lf_shutdown(struct nix *nix)
{
struct npa *npa = nix->npa;
int err;
int pool;
err = npa_lf_admin_shutdown(nix->nix_af, nix->lf, NPA_POOL_COUNT);
if (err) {
printf("%s: Error %d shutting down NPA LF admin\n",
__func__, err);
return err;
}
free(npa->aura_ctx);
npa->aura_ctx = NULL;
for (pool = 0; pool < NPA_POOL_COUNT; pool++) {
free(npa->pool_ctx[pool]);
npa->pool_ctx[pool] = NULL;
free(npa->pool_stack[pool]);
npa->pool_stack[pool] = NULL;
free(npa->buffers[pool]);
npa->buffers[pool] = NULL;
}
return 0;
}
int nix_lf_setup(struct nix *nix)
{
struct nix_af *nix_af = nix->nix_af;
int idx;
int err = -1;
/* Alloc NIX RQ HW context memory */
nix->rq_ctx_base = nix_memalloc(nix->rq_cnt, nix_af->rq_ctx_sz,
"RQ CTX");
if (!nix->rq_ctx_base)
goto error;
memset(nix->rq_ctx_base, 0, nix_af->rq_ctx_sz);
/* Alloc NIX SQ HW context memory */
nix->sq_ctx_base = nix_memalloc(nix->sq_cnt, nix_af->sq_ctx_sz,
"SQ CTX");
if (!nix->sq_ctx_base)
goto error;
memset(nix->sq_ctx_base, 0, nix_af->sq_ctx_sz);
/* Alloc NIX CQ HW context memory */
nix->cq_ctx_base = nix_memalloc(nix->cq_cnt, nix_af->cq_ctx_sz,
"CQ CTX");
if (!nix->cq_ctx_base)
goto error;
memset(nix->cq_ctx_base, 0, nix_af->cq_ctx_sz * NIX_CQ_COUNT);
/* Alloc NIX CQ Ring memory */
for (idx = 0; idx < NIX_CQ_COUNT; idx++) {
err = qmem_alloc(&nix->cq[idx], CQ_ENTRIES, CQ_ENTRY_SIZE);
if (err)
goto error;
}
/* Alloc memory for Qints HW contexts */
nix->qint_base = nix_memalloc(nix_af->qints, nix_af->qint_ctx_sz,
"Qint CTX");
if (!nix->qint_base)
goto error;
/* Alloc memory for CQints HW contexts */
nix->cint_base = nix_memalloc(nix_af->cints, nix_af->cint_ctx_sz,
"Cint CTX");
if (!nix->cint_base)
goto error;
/* Alloc NIX RSS HW context memory and config the base */
nix->rss_base = nix_memalloc(nix->rss_grps, nix_af->rsse_ctx_sz,
"RSS CTX");
if (!nix->rss_base)
goto error;
err = nix_lf_admin_setup(nix);
if (err) {
printf("%s: Error setting up LF\n", __func__);
goto error;
}
return 0;
error:
if (nix->rq_ctx_base)
free(nix->rq_ctx_base);
nix->rq_ctx_base = NULL;
if (nix->sq_ctx_base)
free(nix->sq_ctx_base);
nix->sq_ctx_base = NULL;
if (nix->cq_ctx_base)
free(nix->cq_ctx_base);
nix->cq_ctx_base = NULL;
for (idx = 0; idx < NIX_CQ_COUNT; idx++)
qmem_free(&nix->cq[idx]);
return err;
}
int nix_lf_shutdown(struct nix *nix)
{
struct nix_af *nix_af = nix->nix_af;
int index;
int err;
err = nix_lf_admin_shutdown(nix_af, nix->lf, nix->cq_cnt,
nix->rq_cnt, nix->sq_cnt);
if (err) {
printf("%s: Error shutting down LF admin\n", __func__);
return err;
}
if (nix->rq_ctx_base)
free(nix->rq_ctx_base);
nix->rq_ctx_base = NULL;
if (nix->sq_ctx_base)
free(nix->sq_ctx_base);
nix->sq_ctx_base = NULL;
if (nix->cq_ctx_base)
free(nix->cq_ctx_base);
nix->cq_ctx_base = NULL;
for (index = 0; index < NIX_CQ_COUNT; index++)
qmem_free(&nix->cq[index]);
debug("%s: nix lf %d reset --\n", __func__, nix->lf);
return 0;
}
struct nix *nix_lf_alloc(struct udevice *dev)
{
union rvu_func_addr_s block_addr;
struct nix *nix;
struct rvu_pf *rvu = dev_get_priv(dev);
struct rvu_af *rvu_af = dev_get_priv(rvu->afdev);
union rvu_pf_func_s pf_func;
int err;
debug("%s(%s )\n", __func__, dev->name);
nix = (struct nix *)calloc(1, sizeof(*nix));
if (!nix) {
printf("%s: Out of memory for nix instance\n", __func__);
return NULL;
}
nix->nix_af = rvu_af->nix_af;
block_addr.u = 0;
block_addr.s.block = RVU_BLOCK_ADDR_E_NIXX(0);
nix->nix_base = rvu->pf_base + block_addr.u;
block_addr.u = 0;
block_addr.s.block = RVU_BLOCK_ADDR_E_NPC;
nix->npc_base = rvu->pf_base + block_addr.u;
block_addr.u = 0;
block_addr.s.block = RVU_BLOCK_ADDR_E_LMT;
nix->lmt_base = rvu->pf_base + block_addr.u;
pf_func.u = 0;
pf_func.s.pf = rvu->pfid;
nix->pf_func = pf_func.u;
nix->lf = rvu->nix_lfid;
nix->pf = rvu->pfid;
nix->dev = dev;
nix->sq_cnt = 1;
nix->rq_cnt = 1;
nix->rss_grps = 1;
nix->cq_cnt = 2;
nix->xqe_sz = NIX_CQE_SIZE_W16;
nix->lmac = nix_get_cgx_lmac(nix->pf);
if (!nix->lmac) {
printf("%s: Error: could not find lmac for pf %d\n",
__func__, nix->pf);
free(nix);
return NULL;
}
nix->lmac->link_num =
NIX_LINK_E_CGXX_LMACX(nix->lmac->cgx->cgx_id,
nix->lmac->lmac_id);
nix->lmac->chan_num =
NIX_CHAN_E_CGXX_LMACX_CHX(nix->lmac->cgx->cgx_id,
nix->lmac->lmac_id, 0);
/* This is rx pkind in 1:1 mapping to NIX_LINK_E */
nix->lmac->pknd = nix->lmac->link_num;
cgx_lmac_set_pkind(nix->lmac, nix->lmac->lmac_id, nix->lmac->pknd);
debug("%s(%s CGX%x LMAC%x)\n", __func__, dev->name,
nix->lmac->cgx->cgx_id, nix->lmac->lmac_id);
debug("%s(%s Link %x Chan %x Pknd %x)\n", __func__, dev->name,
nix->lmac->link_num, nix->lmac->chan_num, nix->lmac->pknd);
err = npa_lf_setup(nix);
if (err)
return NULL;
err = npc_lf_setup(nix);
if (err)
return NULL;
err = nix_lf_setup(nix);
if (err)
return NULL;
return nix;
}
u64 npa_aura_op_alloc(struct npa *npa, u64 aura_id)
{
union npa_lf_aura_op_allocx op_allocx;
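/* A single atomic op both issues the request and collects the result:
 * the value written selects the aura, the value read back carries the
 * allocated buffer address (zero when the pool is empty).
 */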
op_allocx.u = atomic_fetch_and_add64_nosync(npa->npa_base +
NPA_LF_AURA_OP_ALLOCX(0), aura_id);
return op_allocx.s.addr;
}
u64 nix_cq_op_status(struct nix *nix, u64 cq_id)
{
union nixx_lf_cq_op_status op_status;
s64 *reg = nix->nix_base + NIXX_LF_CQ_OP_STATUS();
op_status.u = atomic_fetch_and_add64_nosync(reg, cq_id << 32);
return op_status.u;
}
/* TX */
static inline void nix_write_lmt(struct nix *nix, void *buffer,
int num_words)
{
int i;
u64 *lmt_ptr = lmt_store_ptr(nix);
u64 *ptr = buffer;
debug("%s lmt_ptr %p %p\n", __func__, nix->lmt_base, lmt_ptr);
for (i = 0; i < num_words; i++) {
debug("%s data %llx lmt_ptr %p\n", __func__, ptr[i],
lmt_ptr + i);
lmt_ptr[i] = ptr[i];
}
}
void nix_cqe_tx_pkt_handler(struct nix *nix, void *cqe)
{
union nix_cqe_hdr_s *txcqe = (union nix_cqe_hdr_s *)cqe;
debug("%s: txcqe: %p\n", __func__, txcqe);
if (txcqe->s.cqe_type != NIX_XQE_TYPE_E_SEND) {
printf("%s: Error: Unsupported CQ header type %d\n",
__func__, txcqe->s.cqe_type);
return;
}
nix_pf_reg_write(nix, NIXX_LF_CQ_OP_DOOR(),
(NIX_CQ_TX << 32) | 1);
}
void nix_lf_flush_tx(struct udevice *dev)
{
struct rvu_pf *rvu = dev_get_priv(dev);
struct nix *nix = rvu->nix;
union nixx_lf_cq_op_status op_status;
u32 head, tail;
void *cq_tx_base = nix->cq[NIX_CQ_TX].base;
union nix_cqe_hdr_s *cqe;
/* ack tx cqe entries */
op_status.u = nix_cq_op_status(nix, NIX_CQ_TX);
head = op_status.s.head;
tail = op_status.s.tail;
head &= (nix->cq[NIX_CQ_TX].qsize - 1);
tail &= (nix->cq[NIX_CQ_TX].qsize - 1);
debug("%s cq tx head %d tail %d\n", __func__, head, tail);
while (head != tail) {
cqe = cq_tx_base + head * nix->cq[NIX_CQ_TX].entry_sz;
nix_cqe_tx_pkt_handler(nix, cqe);
op_status.u = nix_cq_op_status(nix, NIX_CQ_TX);
head = op_status.s.head;
tail = op_status.s.tail;
head &= (nix->cq[NIX_CQ_TX].qsize - 1);
tail &= (nix->cq[NIX_CQ_TX].qsize - 1);
debug("%s cq tx head %d tail %d\n", __func__, head, tail);
}
}
int nix_lf_xmit(struct udevice *dev, void *pkt, int pkt_len)
{
struct rvu_pf *rvu = dev_get_priv(dev);
struct nix *nix = rvu->nix;
struct nix_tx_dr tx_dr;
int dr_sz = (sizeof(struct nix_tx_dr) + 15) / 16 - 1;
s64 result;
void *packet;
nix_lf_flush_tx(dev);
memset((void *)&tx_dr, 0, sizeof(struct nix_tx_dr));
/* Copy the TX packet into an NPA buffer */
packet = (void *)npa_aura_op_alloc(nix->npa, NPA_POOL_TX);
if (!packet) {
printf("%s TX buffers unavailable\n", __func__);
return -1;
}
memcpy(packet, pkt, pkt_len);
debug("%s TX buffer %p\n", __func__, packet);
tx_dr.hdr.s.aura = NPA_POOL_TX;
tx_dr.hdr.s.df = 0;
tx_dr.hdr.s.pnc = 1;
tx_dr.hdr.s.sq = 0;
tx_dr.hdr.s.total = pkt_len;
tx_dr.hdr.s.sizem1 = dr_sz - 2; /* FIXME - for now hdr+sg+sg1addr */
debug("%s dr_sz %d\n", __func__, dr_sz);
tx_dr.tx_sg.s.segs = 1;
tx_dr.tx_sg.s.subdc = NIX_SUBDC_E_SG;
tx_dr.tx_sg.s.seg1_size = pkt_len;
tx_dr.tx_sg.s.ld_type = NIX_SENDLDTYPE_E_LDT;
tx_dr.sg1_addr = (dma_addr_t)packet;
#define DEBUG_PKT
#ifdef DEBUG_PKT
debug("TX PKT Data\n");
for (int i = 0; i < pkt_len; i++) {
if (i && (i % 8 == 0))
debug("\n");
debug("%02x ", *((u8 *)pkt + i));
}
debug("\n");
#endif
do {
nix_write_lmt(nix, &tx_dr, (dr_sz - 1) * 2);
__iowmb();
result = lmt_submit((u64)(nix->nix_base +
NIXX_LF_OP_SENDX(0)));
WATCHDOG_RESET();
} while (result == 0);
return 0;
}
/* RX */
void nix_lf_flush_rx(struct udevice *dev)
{
struct rvu_pf *rvu = dev_get_priv(dev);
struct nix *nix = rvu->nix;
union nixx_lf_cq_op_status op_status;
void *cq_rx_base = nix->cq[NIX_CQ_RX].base;
struct nix_rx_dr *rx_dr;
union nix_rx_parse_s *rxparse;
u32 head, tail;
u32 rx_cqe_sz = nix->cq[NIX_CQ_RX].entry_sz;
u64 *seg;
/* flush rx cqe entries */
op_status.u = nix_cq_op_status(nix, NIX_CQ_RX);
head = op_status.s.head;
tail = op_status.s.tail;
head &= (nix->cq[NIX_CQ_RX].qsize - 1);
tail &= (nix->cq[NIX_CQ_RX].qsize - 1);
debug("%s cq rx head %d tail %d\n", __func__, head, tail);
while (head != tail) {
rx_dr = (struct nix_rx_dr *)(cq_rx_base + head * rx_cqe_sz);
rxparse = &rx_dr->rx_parse;
debug("%s: rx parse: %p\n", __func__, rxparse);
debug("%s: rx parse: desc_sizem1 %x pkt_lenm1 %x\n",
__func__, rxparse->s.desc_sizem1, rxparse->s.pkt_lenm1);
seg = (dma_addr_t *)(&rx_dr->rx_sg + 1);
st128(nix->npa->npa_base + NPA_LF_AURA_OP_FREE0(),
seg[0], (1ULL << 63) | NPA_POOL_RX);
debug("%s return %llx to NPA\n", __func__, seg[0]);
nix_pf_reg_write(nix, NIXX_LF_CQ_OP_DOOR(),
(NIX_CQ_RX << 32) | 1);
op_status.u = nix_cq_op_status(nix, NIX_CQ_RX);
head = op_status.s.head;
tail = op_status.s.tail;
head &= (nix->cq[NIX_CQ_RX].qsize - 1);
tail &= (nix->cq[NIX_CQ_RX].qsize - 1);
debug("%s cq rx head %d tail %d\n", __func__, head, tail);
}
}
int nix_lf_free_pkt(struct udevice *dev, uchar *pkt, int pkt_len)
{
struct rvu_pf *rvu = dev_get_priv(dev);
struct nix *nix = rvu->nix;
/* Return rx packet to NPA */
debug("%s return %p to NPA\n", __func__, pkt);
st128(nix->npa->npa_base + NPA_LF_AURA_OP_FREE0(), (u64)pkt,
(1ULL << 63) | NPA_POOL_RX);
nix_pf_reg_write(nix, NIXX_LF_CQ_OP_DOOR(),
(NIX_CQ_RX << 32) | 1);
nix_lf_flush_tx(dev);
return 0;
}
int nix_lf_recv(struct udevice *dev, int flags, uchar **packetp)
{
struct rvu_pf *rvu = dev_get_priv(dev);
struct nix *nix = rvu->nix;
union nixx_lf_cq_op_status op_status;
void *cq_rx_base = nix->cq[NIX_CQ_RX].base;
struct nix_rx_dr *rx_dr;
union nix_rx_parse_s *rxparse;
void *pkt, *cqe;
int pkt_len = 0;
u64 *addr;
u32 head, tail;
/* fetch rx cqe entries */
op_status.u = nix_cq_op_status(nix, NIX_CQ_RX);
head = op_status.s.head;
tail = op_status.s.tail;
head &= (nix->cq[NIX_CQ_RX].qsize - 1);
tail &= (nix->cq[NIX_CQ_RX].qsize - 1);
debug("%s cq rx head %d tail %d\n", __func__, head, tail);
if (head == tail)
return -EAGAIN;
debug("%s: rx_base %p head %d sz %d\n", __func__, cq_rx_base, head,
nix->cq[NIX_CQ_RX].entry_sz);
cqe = cq_rx_base + head * nix->cq[NIX_CQ_RX].entry_sz;
rx_dr = (struct nix_rx_dr *)cqe;
rxparse = &rx_dr->rx_parse;
debug("%s: rx completion: %p\n", __func__, cqe);
debug("%s: rx dr: %p\n", __func__, rx_dr);
debug("%s: rx parse: %p\n", __func__, rxparse);
debug("%s: rx parse: desc_sizem1 %x pkt_lenm1 %x\n",
__func__, rxparse->s.desc_sizem1, rxparse->s.pkt_lenm1);
debug("%s: rx parse: pkind %x chan %x\n",
__func__, rxparse->s.pkind, rxparse->s.chan);
if (rx_dr->hdr.s.cqe_type != NIX_XQE_TYPE_E_RX) {
printf("%s: Error: Unsupported CQ header type in Rx %d\n",
__func__, rx_dr->hdr.s.cqe_type);
return -1;
}
pkt_len = rxparse->s.pkt_lenm1 + 1;
addr = (dma_addr_t *)(&rx_dr->rx_sg + 1);
pkt = (void *)addr[0];
debug("%s: segs: %d (%d@0x%llx, %d@0x%llx, %d@0x%llx)\n", __func__,
rx_dr->rx_sg.s.segs, rx_dr->rx_sg.s.seg1_size, addr[0],
rx_dr->rx_sg.s.seg2_size, addr[1],
rx_dr->rx_sg.s.seg3_size, addr[2]);
if (pkt_len < rx_dr->rx_sg.s.seg1_size + rx_dr->rx_sg.s.seg2_size +
rx_dr->rx_sg.s.seg3_size) {
debug("%s: Error: rx buffer size too small\n", __func__);
return -1;
}
__iowmb();
#define DEBUG_PKT
#ifdef DEBUG_PKT
debug("RX PKT Data\n");
for (int i = 0; i < pkt_len; i++) {
if (i && (i % 8 == 0))
debug("\n");
debug("%02x ", *((u8 *)pkt + i));
}
debug("\n");
#endif
*packetp = (uchar *)pkt;
return pkt_len;
}
int nix_lf_setup_mac(struct udevice *dev)
{
struct rvu_pf *rvu = dev_get_priv(dev);
struct nix *nix = rvu->nix;
struct eth_pdata *pdata = dev_get_platdata(dev);
/* If the lower-level firmware fails to set a proper MAC, the
 * U-Boot framework updates it to a random address. Use this
 * hook to update the MAC address in the CGX LMAC and run the
 * MAC filter setup for the new address.
 */
if (memcmp(nix->lmac->mac_addr, pdata->enetaddr, ARP_HLEN)) {
memcpy(nix->lmac->mac_addr, pdata->enetaddr, ARP_HLEN);
eth_env_set_enetaddr_by_index("eth", rvu->dev->seq,
pdata->enetaddr);
cgx_lmac_mac_filter_setup(nix->lmac);
/* Update user given MAC address to ATF for update
* in sh_fwdata to use in Linux.
*/
cgx_intf_set_macaddr(dev);
debug("%s: lMAC %pM\n", __func__, nix->lmac->mac_addr);
debug("%s: pMAC %pM\n", __func__, pdata->enetaddr);
}
debug("%s: setupMAC %pM\n", __func__, pdata->enetaddr);
return 0;
}
void nix_lf_halt(struct udevice *dev)
{
struct rvu_pf *rvu = dev_get_priv(dev);
struct nix *nix = rvu->nix;
cgx_lmac_rx_tx_enable(nix->lmac, nix->lmac->lmac_id, false);
mdelay(1);
/* Flush tx and rx descriptors */
nix_lf_flush_rx(dev);
nix_lf_flush_tx(dev);
}
int nix_lf_init(struct udevice *dev)
{
struct rvu_pf *rvu = dev_get_priv(dev);
struct nix *nix = rvu->nix;
struct lmac *lmac = nix->lmac;
int ret;
u64 link_sts;
u8 link, speed;
u16 errcode;
printf("Waiting for CGX%d LMAC%d [%s] link status...",
lmac->cgx->cgx_id, lmac->lmac_id,
lmac_type_to_str[lmac->lmac_type]);
if (lmac->init_pend) {
/* Bring up LMAC */
ret = cgx_lmac_link_enable(lmac, lmac->lmac_id,
true, &link_sts);
lmac->init_pend = 0;
} else {
ret = cgx_lmac_link_status(lmac, lmac->lmac_id, &link_sts);
}
if (ret) {
printf(" [Down]\n");
return -1;
}
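/* Decode the link status word returned by firmware */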
link = link_sts & 0x1;
speed = (link_sts >> 2) & 0xf;
errcode = (link_sts >> 6) & 0x2ff;
debug("%s: link %x speed %x errcode %x\n",
__func__, link, speed, errcode);
/* Print link status */
printf(" [%s]\n", link ? lmac_speed_to_str[speed] : "Down");
if (!link)
return -1;
if (!lmac->init_pend)
cgx_lmac_rx_tx_enable(lmac, lmac->lmac_id, true);
return 0;
}
void nix_get_cgx_lmac_id(struct udevice *dev, int *cgxid, int *lmacid)
{
struct rvu_pf *rvu = dev_get_priv(dev);
struct nix *nix = rvu->nix;
struct lmac *lmac = nix->lmac;
*cgxid = lmac->cgx->cgx_id;
*lmacid = lmac->lmac_id;
}
void nix_print_mac_info(struct udevice *dev)
{
struct rvu_pf *rvu = dev_get_priv(dev);
struct nix *nix = rvu->nix;
struct lmac *lmac = nix->lmac;
printf(" CGX%d LMAC%d [%s]", lmac->cgx->cgx_id, lmac->lmac_id,
lmac_type_to_str[lmac->lmac_type]);
}

drivers/net/octeontx2/nix.h

@ -0,0 +1,353 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2018 Marvell International Ltd.
*/
#ifndef __NIX_H__
#define __NIX_H__
#include <asm/arch/csrs/csrs-npa.h>
#include <asm/arch/csrs/csrs-nix.h>
#include "rvu.h"
/** Maximum number of LMACs supported */
#define MAX_LMAC 12
/* NIX RX action operation */
#define NIX_RX_ACTIONOP_DROP (0x0ull)
#define NIX_RX_ACTIONOP_UCAST (0x1ull)
#define NIX_RX_ACTIONOP_UCAST_IPSEC (0x2ull)
#define NIX_RX_ACTIONOP_MCAST (0x3ull)
#define NIX_RX_ACTIONOP_RSS (0x4ull)
/* NIX TX action operation */
#define NIX_TX_ACTIONOP_DROP (0x0ull)
#define NIX_TX_ACTIONOP_UCAST_DEFAULT (0x1ull)
#define NIX_TX_ACTIONOP_UCAST_CHAN (0x2ull)
#define NIX_TX_ACTIONOP_MCAST (0x3ull)
#define NIX_TX_ACTIONOP_DROP_VIOL (0x5ull)
#define NIX_INTF_RX 0
#define NIX_INTF_TX 1
#define NIX_INTF_TYPE_CGX 0
#define NIX_INTF_TYPE_LBK 1
#define NIX_MAX_HW_MTU 9212
#define NIX_MIN_HW_MTU 40
#define MAX_MTU 1536
#define NPA_POOL_COUNT 3
#define NPA_AURA_COUNT(x) (1ULL << ((x) + 6))
#define NPA_POOL_RX 0ULL
#define NPA_POOL_TX 1ULL
#define NPA_POOL_SQB 2ULL
#define RQ_QLEN Q_COUNT(Q_SIZE_1K)
#define SQ_QLEN Q_COUNT(Q_SIZE_1K)
#define SQB_QLEN Q_COUNT(Q_SIZE_16)
#define NIX_CQ_RX 0ULL
#define NIX_CQ_TX 1ULL
#define NIX_CQ_COUNT 2ULL
#define NIX_CQE_SIZE_W16 (16 * sizeof(u64))
#define NIX_CQE_SIZE_W64 (64 * sizeof(u64))
/** Size of aura hardware context */
#define NPA_AURA_HW_CTX_SIZE 48
/** Size of pool hardware context */
#define NPA_POOL_HW_CTX_SIZE 64
#define NPA_DEFAULT_PF_FUNC 0xffff
#define NIX_CHAN_CGX_LMAC_CHX(a, b, c) (0x800 + 0x100 * (a) + 0x10 * (b) + (c))
#define NIX_LINK_CGX_LMAC(a, b) (0 + 4 * (a) + (b))
#define NIX_LINK_LBK(a) (12 + (a))
#define NIX_CHAN_LBK_CHX(a, b) (0 + 0x100 * (a) + (b))
#define MAX_LMAC_PKIND 12
/** Number of Admin queue entries */
#define AQ_RING_SIZE Q_COUNT(Q_SIZE_16)
/** Each completion queue contains 256 entries, see NIX_CQ_CTX_S[qsize] */
#define CQS_QSIZE Q_SIZE_256
#define CQ_ENTRIES Q_COUNT(CQS_QSIZE)
/**
* Each completion queue entry contains 128 bytes, see
* NIXX_AF_LFX_CFG[xqe_size]
*/
#define CQ_ENTRY_SIZE NIX_CQE_SIZE_W16
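/*
 * Worked sizing example (illustrative only): CQ_ENTRIES =
 * Q_COUNT(Q_SIZE_256) = 16 << (2 * 2) = 256 entries and CQ_ENTRY_SIZE =
 * 16 * sizeof(u64) = 128 bytes, so each completion ring occupies
 * 256 * 128 bytes = 32 KiB.
 */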
enum npa_aura_size {
NPA_AURA_SZ_0,
NPA_AURA_SZ_128,
NPA_AURA_SZ_256,
NPA_AURA_SZ_512,
NPA_AURA_SZ_1K,
NPA_AURA_SZ_2K,
NPA_AURA_SZ_4K,
NPA_AURA_SZ_8K,
NPA_AURA_SZ_16K,
NPA_AURA_SZ_32K,
NPA_AURA_SZ_64K,
NPA_AURA_SZ_128K,
NPA_AURA_SZ_256K,
NPA_AURA_SZ_512K,
NPA_AURA_SZ_1M,
NPA_AURA_SZ_MAX,
};
#define NPA_AURA_SIZE_DEFAULT NPA_AURA_SZ_128
/* NIX Transmit schedulers */
enum nix_scheduler {
NIX_TXSCH_LVL_SMQ = 0x0,
NIX_TXSCH_LVL_MDQ = 0x0,
NIX_TXSCH_LVL_TL4 = 0x1,
NIX_TXSCH_LVL_TL3 = 0x2,
NIX_TXSCH_LVL_TL2 = 0x3,
NIX_TXSCH_LVL_TL1 = 0x4,
NIX_TXSCH_LVL_CNT = 0x5,
};
struct cgx;
struct nix_stats {
u64 num_packets;
u64 num_bytes;
};
struct nix;
struct lmac;
struct npa_af {
void __iomem *npa_af_base;
struct admin_queue aq;
u32 aura;
};
struct npa {
struct npa_af *npa_af;
void __iomem *npa_base;
void __iomem *npc_base;
void __iomem *lmt_base;
/** Hardware aura context */
void *aura_ctx;
/** Hardware pool context */
void *pool_ctx[NPA_POOL_COUNT];
void *pool_stack[NPA_POOL_COUNT];
void **buffers[NPA_POOL_COUNT];
u32 pool_stack_pages[NPA_POOL_COUNT];
u32 pool_stack_pointers;
u32 q_len[NPA_POOL_COUNT];
u32 buf_size[NPA_POOL_COUNT];
u32 stack_pages[NPA_POOL_COUNT];
};
struct nix_af {
struct udevice *dev;
struct nix *lmacs[MAX_LMAC];
struct npa_af *npa_af;
void __iomem *nix_af_base;
void __iomem *npc_af_base;
struct admin_queue aq;
u8 num_lmacs;
s8 index;
u8 xqe_size;
u32 sqb_size;
u32 qints;
u32 cints;
u32 sq_ctx_sz;
u32 rq_ctx_sz;
u32 cq_ctx_sz;
u32 rsse_ctx_sz;
u32 cint_ctx_sz;
u32 qint_ctx_sz;
};
struct nix_tx_dr {
union nix_send_hdr_s hdr;
union nix_send_sg_s tx_sg;
dma_addr_t sg1_addr;
dma_addr_t sg2_addr;
dma_addr_t sg3_addr;
u64 in_use;
};
struct nix_rx_dr {
union nix_cqe_hdr_s hdr;
union nix_rx_parse_s rx_parse;
union nix_rx_sg_s rx_sg;
};
struct nix {
struct udevice *dev;
struct eth_device *netdev;
struct nix_af *nix_af;
struct npa *npa;
struct lmac *lmac;
union nix_cint_hw_s *cint_base;
union nix_cq_ctx_s *cq_ctx_base;
union nix_qint_hw_s *qint_base;
union nix_rq_ctx_s *rq_ctx_base;
union nix_rsse_s *rss_base;
union nix_sq_ctx_s *sq_ctx_base;
void *cqe_base;
struct qmem sq;
struct qmem cq[NIX_CQ_COUNT];
struct qmem rq;
struct qmem rss;
struct qmem cq_ints;
struct qmem qints;
char name[16];
void __iomem *nix_base; /** PF reg base */
void __iomem *npc_base;
void __iomem *lmt_base;
struct nix_stats tx_stats;
struct nix_stats rx_stats;
u32 aura;
int pknd;
int lf;
int pf;
u16 pf_func;
u32 rq_cnt; /** receive queues count */
u32 sq_cnt; /** send queues count */
u32 cq_cnt; /** completion queues count */
u16 rss_sz;
u16 sqb_size;
u8 rss_grps;
u8 xqe_sz;
};
struct nix_aq_cq_dis {
union nix_aq_res_s resp ALIGNED;
union nix_cq_ctx_s cq ALIGNED;
union nix_cq_ctx_s mcq ALIGNED;
};
struct nix_aq_rq_dis {
union nix_aq_res_s resp ALIGNED;
union nix_rq_ctx_s rq ALIGNED;
union nix_rq_ctx_s mrq ALIGNED;
};
struct nix_aq_sq_dis {
union nix_aq_res_s resp ALIGNED;
union nix_sq_ctx_s sq ALIGNED;
union nix_sq_ctx_s msq ALIGNED;
};
struct nix_aq_cq_request {
union nix_aq_res_s resp ALIGNED;
union nix_cq_ctx_s cq ALIGNED;
};
struct nix_aq_rq_request {
union nix_aq_res_s resp ALIGNED;
union nix_rq_ctx_s rq ALIGNED;
};
struct nix_aq_sq_request {
union nix_aq_res_s resp ALIGNED;
union nix_sq_ctx_s sq ALIGNED;
};
static inline u64 nix_af_reg_read(struct nix_af *nix_af, u64 offset)
{
u64 val = readq(nix_af->nix_af_base + offset);
debug("%s reg %p val %llx\n", __func__, nix_af->nix_af_base + offset,
val);
return val;
}
static inline void nix_af_reg_write(struct nix_af *nix_af, u64 offset,
u64 val)
{
debug("%s reg %p val %llx\n", __func__, nix_af->nix_af_base + offset,
val);
writeq(val, nix_af->nix_af_base + offset);
}
static inline u64 nix_pf_reg_read(struct nix *nix, u64 offset)
{
u64 val = readq(nix->nix_base + offset);
debug("%s reg %p val %llx\n", __func__, nix->nix_base + offset,
val);
return val;
}
static inline void nix_pf_reg_write(struct nix *nix, u64 offset,
u64 val)
{
debug("%s reg %p val %llx\n", __func__, nix->nix_base + offset,
val);
writeq(val, nix->nix_base + offset);
}
static inline u64 npa_af_reg_read(struct npa_af *npa_af, u64 offset)
{
u64 val = readq(npa_af->npa_af_base + offset);
debug("%s reg %p val %llx\n", __func__, npa_af->npa_af_base + offset,
val);
return val;
}
static inline void npa_af_reg_write(struct npa_af *npa_af, u64 offset,
u64 val)
{
debug("%s reg %p val %llx\n", __func__, npa_af->npa_af_base + offset,
val);
writeq(val, npa_af->npa_af_base + offset);
}
static inline u64 npc_af_reg_read(struct nix_af *nix_af, u64 offset)
{
u64 val = readq(nix_af->npc_af_base + offset);
debug("%s reg %p val %llx\n", __func__, nix_af->npc_af_base + offset,
val);
return val;
}
static inline void npc_af_reg_write(struct nix_af *nix_af, u64 offset,
u64 val)
{
debug("%s reg %p val %llx\n", __func__, nix_af->npc_af_base + offset,
val);
writeq(val, nix_af->npc_af_base + offset);
}
int npa_attach_aura(struct nix_af *nix_af, int lf,
const union npa_aura_s *desc, u32 aura_id);
int npa_attach_pool(struct nix_af *nix_af, int lf,
const union npa_pool_s *desc, u32 pool_id);
int npa_af_setup(struct npa_af *npa_af);
int npa_af_shutdown(struct npa_af *npa_af);
int npa_lf_setup(struct nix *nix);
int npa_lf_shutdown(struct nix *nix);
int npa_lf_admin_setup(struct npa *npa, int lf, dma_addr_t aura_base);
int npa_lf_admin_shutdown(struct nix_af *nix_af, int lf, u32 pool_count);
int npc_lf_admin_setup(struct nix *nix);
int npc_af_shutdown(struct nix_af *nix_af);
int nix_af_setup(struct nix_af *nix_af);
int nix_af_shutdown(struct nix_af *nix_af);
int nix_lf_setup(struct nix *nix);
int nix_lf_shutdown(struct nix *nix);
struct nix *nix_lf_alloc(struct udevice *dev);
int nix_lf_admin_setup(struct nix *nix);
int nix_lf_admin_shutdown(struct nix_af *nix_af, int lf,
u32 cq_count, u32 rq_count, u32 sq_count);
struct rvu_af *get_af(void);
int nix_lf_setup_mac(struct udevice *dev);
int nix_lf_read_rom_mac(struct udevice *dev);
void nix_lf_halt(struct udevice *dev);
int nix_lf_free_pkt(struct udevice *dev, uchar *pkt, int pkt_len);
int nix_lf_recv(struct udevice *dev, int flags, uchar **packetp);
int nix_lf_init(struct udevice *dev);
int nix_lf_xmit(struct udevice *dev, void *pkt, int pkt_len);
#endif /* __NIX_H__ */

File diff suppressed because it is too large

@ -0,0 +1,90 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2018 Marvell International Ltd.
*/
#ifndef __NPC_H__
#define __NPC_H__
#define RSVD_MCAM_ENTRIES_PER_PF 2 /** Ucast and Bcast */
#define RSVD_MCAM_ENTRIES_PER_NIXLF 1 /** Ucast for VFs */
struct npc_kpu_profile_cam {
u8 state;
u8 state_mask;
u16 dp0;
u16 dp0_mask;
u16 dp1;
u16 dp1_mask;
u16 dp2;
u16 dp2_mask;
};
struct npc_kpu_profile_action {
u8 errlev;
u8 errcode;
u8 dp0_offset;
u8 dp1_offset;
u8 dp2_offset;
u8 bypass_count;
u8 parse_done;
u8 next_state;
u8 ptr_advance;
u8 cap_ena;
u8 lid;
u8 ltype;
u8 flags;
u8 offset;
u8 mask;
u8 right;
u8 shift;
};
struct npc_kpu_profile {
int cam_entries;
int action_entries;
struct npc_kpu_profile_cam *cam;
struct npc_kpu_profile_action *action;
};
struct npc_pkind {
struct rsrc_bmap rsrc;
u32 *pfchan_map;
};
struct npc_mcam {
struct rsrc_bmap rsrc;
u16 *pfvf_map;
u16 total_entries; /* Total number of MCAM entries */
u16 entries; /* Total - reserved for NIX LFs */
u8 banks_per_entry; /* Number of keywords in key */
u8 keysize;
u8 banks; /* Number of MCAM banks */
u16 banksize; /* Number of MCAM entries in each bank */
u16 counters; /* Number of match counters */
u16 nixlf_offset;
u16 pf_offset;
};
struct nix_af_handle;
struct nix_handle;
struct rvu_hwinfo;
struct npc_af {
struct nix_af_handle *nix_af;
struct npc_pkind pkind;
void __iomem *npc_af_base;
u8 npc_kpus; /** Number of parser units */
struct npc_mcam mcam;
struct rvu_block block;
struct rvu_hwinfo *hw;
};
struct npc {
struct npc_af *npc_af;
void __iomem *npc_base;
struct nix_handle *nix;
};
#endif /* __NPC_H__ */

drivers/net/octeontx2/rvu.h

@ -0,0 +1,119 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2018 Marvell International Ltd.
*/
#ifndef __RVU_H__
#define __RVU_H__
#include <asm/arch/csrs/csrs-rvu.h>
#define ALIGNED __aligned(CONFIG_SYS_CACHELINE_SIZE)
#define Q_SIZE_16 0ULL /* 16 entries */
#define Q_SIZE_64 1ULL /* 64 entries */
#define Q_SIZE_256 2ULL
#define Q_SIZE_1K 3ULL
#define Q_SIZE_4K 4ULL
#define Q_SIZE_16K 5ULL
#define Q_SIZE_64K 6ULL
#define Q_SIZE_256K 7ULL
#define Q_SIZE_1M 8ULL /* Million entries */
#define Q_SIZE_MIN Q_SIZE_16
#define Q_SIZE_MAX Q_SIZE_1M
#define Q_COUNT(x) (16ULL << (2 * (x)))
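/* e.g. Q_COUNT(Q_SIZE_1K) = 16ULL << (2 * 3) = 1024 entries */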
#define Q_SIZE(x, n) ((ilog2(x) - (n)) / 2)
/* Admin queue info */
/* Since we intend to add only one instruction at a time,
 * keep the queue size at its minimum.
 */
#define AQ_SIZE Q_SIZE_16
/* HW head & tail pointer mask */
#define AQ_PTR_MASK 0xFFFFF
struct qmem {
void *base;
dma_addr_t iova;
size_t alloc_sz;
u32 qsize;
u8 entry_sz;
};
struct admin_queue {
struct qmem inst;
struct qmem res;
};
struct rvu_af {
struct udevice *dev;
void __iomem *af_base;
struct nix_af *nix_af;
};
struct rvu_pf {
struct udevice *dev;
struct udevice *afdev;
void __iomem *pf_base;
struct nix *nix;
u8 pfid;
int nix_lfid;
int npa_lfid;
};
/**
* Store 128 bit value
*
* @param[out] dest pointer to destination address
* @param val0 first 64 bits to write
* @param val1 second 64 bits to write
*/
static inline void st128(void *dest, u64 val0, u64 val1)
{
__asm__ __volatile__("stp %x[x0], %x[x1], [%[pm]]" :
: [x0]"r"(val0), [x1]"r"(val1), [pm]"r"(dest)
: "memory");
}
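/*
 * Typical use (mirrors nix_lf_free_pkt() in nix.c): return a buffer to
 * an NPA aura with a single 16-byte store, the address in the first
 * word and the aura selector in the second:
 *
 * st128(npa_base + NPA_LF_AURA_OP_FREE0(), buf_addr,
 *	 (1ULL << 63) | NPA_POOL_RX);
 */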
/**
* Load 128 bit value
*
* @param[in] source pointer to 128 bits of data to load
* @param[out] val0 first 64 bits of data
* @param[out] val1 second 64 bits of data
*/
static inline void ld128(const u64 *src, u64 *val0, u64 *val1)
{
__asm__ __volatile__ ("ldp %x[x0], %x[x1], [%[pm]]"
: [x0]"=r"(*val0), [x1]"=r"(*val1)
: [pm]"r"(src));
}
void qmem_free(struct qmem *q);
int qmem_alloc(struct qmem *q, u32 qsize, size_t entry_sz);
/**
* Allocates an admin queue for instructions and results
*
* @param aq admin queue to allocate for
* @param qsize Number of entries in the queue
* @param inst_size Size of each instruction
* @param res_size Size of each result
*
* @return -ENOMEM on error, 0 on success
*/
int rvu_aq_alloc(struct admin_queue *aq, unsigned int qsize,
size_t inst_size, size_t res_size);
/**
* Frees an admin queue
*
* @param aq Admin queue to free
*/
void rvu_aq_free(struct admin_queue *aq);
void rvu_get_lfid_for_pf(int pf, int *nixid, int *npaid);
#endif /* __RVU_H__ */

@ -0,0 +1,171 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Marvell International Ltd.
*/
#include <dm.h>
#include <errno.h>
#include <malloc.h>
#include <misc.h>
#include <net.h>
#include <pci_ids.h>
#include <linux/list.h>
#include <asm/io.h>
#include <asm/arch/board.h>
#include <asm/arch/csrs/csrs-npa.h>
#include "nix.h"
struct udevice *rvu_af_dev;
inline struct rvu_af *get_af(void)
{
return rvu_af_dev ? dev_get_priv(rvu_af_dev) : NULL;
}
void rvu_get_lfid_for_pf(int pf, int *nixid, int *npaid)
{
union nixx_af_rvu_lf_cfg_debug nix_lf_dbg;
union npa_af_rvu_lf_cfg_debug npa_lf_dbg;
union rvu_pf_func_s pf_func;
struct rvu_af *af = dev_get_priv(rvu_af_dev);
struct nix_af *nix_af = af->nix_af;
pf_func.u = 0;
pf_func.s.pf = pf;
nix_lf_dbg.u = 0;
nix_lf_dbg.s.pf_func = pf_func.u & 0xFFFF;
nix_lf_dbg.s.exec = 1;
nix_af_reg_write(nix_af, NIXX_AF_RVU_LF_CFG_DEBUG(),
nix_lf_dbg.u);
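/* Hardware clears EXEC once the lookup result is valid */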
do {
nix_lf_dbg.u = nix_af_reg_read(nix_af,
NIXX_AF_RVU_LF_CFG_DEBUG());
} while (nix_lf_dbg.s.exec);
if (nix_lf_dbg.s.lf_valid)
*nixid = nix_lf_dbg.s.lf;
debug("%s: nix lf_valid %d lf %d nixid %d\n", __func__,
nix_lf_dbg.s.lf_valid, nix_lf_dbg.s.lf, *nixid);
npa_lf_dbg.u = 0;
npa_lf_dbg.s.pf_func = pf_func.u & 0xFFFF;
npa_lf_dbg.s.exec = 1;
npa_af_reg_write(nix_af->npa_af, NPA_AF_RVU_LF_CFG_DEBUG(),
npa_lf_dbg.u);
do {
npa_lf_dbg.u = npa_af_reg_read(nix_af->npa_af,
NPA_AF_RVU_LF_CFG_DEBUG());
} while (npa_lf_dbg.s.exec);
if (npa_lf_dbg.s.lf_valid)
*npaid = npa_lf_dbg.s.lf;
debug("%s: npa lf_valid %d lf %d npaid %d\n", __func__,
npa_lf_dbg.s.lf_valid, npa_lf_dbg.s.lf, *npaid);
}
struct nix_af *rvu_af_init(struct rvu_af *rvu_af)
{
struct nix_af *nix_af;
union rvu_af_addr_s block_addr;
int err;
nix_af = (struct nix_af *)calloc(1, sizeof(struct nix_af));
if (!nix_af) {
printf("%s: out of memory\n", __func__);
goto error;
}
nix_af->dev = rvu_af->dev;
block_addr.u = 0;
block_addr.s.block = RVU_BLOCK_ADDR_E_NIXX(0);
nix_af->nix_af_base = rvu_af->af_base + block_addr.u;
nix_af->npa_af = (struct npa_af *)calloc(1, sizeof(struct npa_af));
if (!nix_af->npa_af) {
printf("%s: out of memory\n", __func__);
goto error;
}
block_addr.u = 0;
block_addr.s.block = RVU_BLOCK_ADDR_E_NPA;
nix_af->npa_af->npa_af_base = rvu_af->af_base + block_addr.u;
block_addr.u = 0;
block_addr.s.block = RVU_BLOCK_ADDR_E_NPC;
nix_af->npc_af_base = rvu_af->af_base + block_addr.u;
debug("%s: Setting up npa admin\n", __func__);
err = npa_af_setup(nix_af->npa_af);
if (err) {
printf("%s: Error %d setting up NPA admin\n", __func__, err);
goto error;
}
debug("%s: Setting up nix af\n", __func__);
err = nix_af_setup(nix_af);
if (err) {
printf("%s: Error %d setting up NIX admin\n", __func__, err);
goto error;
}
debug("%s: nix_af: %p\n", __func__, nix_af);
return nix_af;
error:
if (nix_af) {
free(nix_af->npa_af);
free(nix_af);
}
return NULL;
}
int rvu_af_probe(struct udevice *dev)
{
struct rvu_af *af_ptr = dev_get_priv(dev);
af_ptr->af_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
PCI_REGION_MEM);
debug("%s RVU AF BAR %p\n", __func__, af_ptr->af_base);
af_ptr->dev = dev;
rvu_af_dev = dev;
af_ptr->nix_af = rvu_af_init(af_ptr);
if (!af_ptr->nix_af) {
printf("%s: Error: could not initialize NIX AF\n", __func__);
return -1;
}
debug("%s: Done\n", __func__);
return 0;
}
int rvu_af_remove(struct udevice *dev)
{
struct rvu_af *rvu_af = dev_get_priv(dev);
nix_af_shutdown(rvu_af->nix_af);
npa_af_shutdown(rvu_af->nix_af->npa_af);
npc_af_shutdown(rvu_af->nix_af);
debug("%s: rvu af down --\n", __func__);
return 0;
}
U_BOOT_DRIVER(rvu_af) = {
.name = "rvu_af",
.id = UCLASS_MISC,
.probe = rvu_af_probe,
.remove = rvu_af_remove,
.priv_auto_alloc_size = sizeof(struct rvu_af),
};
static struct pci_device_id rvu_af_supported[] = {
{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_RVU_AF) },
{}
};
U_BOOT_PCI_DEVICE(rvu_af, rvu_af_supported);

@ -0,0 +1,71 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Marvell International Ltd.
*/
#include <dm.h>
#include <errno.h>
#include <malloc.h>
#include <misc.h>
#include <net.h>
#include <asm/io.h>
#include "rvu.h"
int qmem_alloc(struct qmem *q, u32 qsize, size_t entry_sz)
{
q->base = memalign(CONFIG_SYS_CACHELINE_SIZE, qsize * entry_sz);
if (!q->base)
return -ENOMEM;
q->entry_sz = entry_sz;
q->qsize = qsize;
q->alloc_sz = (size_t)qsize * entry_sz;
q->iova = (dma_addr_t)(q->base);
debug("NIX: qmem alloc for (%d * %d = %ld bytes) at %p\n",
q->qsize, q->entry_sz, q->alloc_sz, q->base);
return 0;
}
void qmem_free(struct qmem *q)
{
if (q->base)
free(q->base);
memset(q, 0, sizeof(*q));
}
/**
* Allocates an admin queue for instructions and results
*
* @param aq admin queue to allocate for
* @param qsize Number of entries in the queue
* @param inst_size Size of each instruction
* @param res_size Size of each result
*
* @return -ENOMEM on error, 0 on success
*/
int rvu_aq_alloc(struct admin_queue *aq, unsigned int qsize,
size_t inst_size, size_t res_size)
{
int err;
err = qmem_alloc(&aq->inst, qsize, inst_size);
if (err)
return err;
err = qmem_alloc(&aq->res, qsize, res_size);
if (err)
qmem_free(&aq->inst);
return err;
}
/**
* Frees an admin queue
*
* @param aq Admin queue to free
*/
void rvu_aq_free(struct admin_queue *aq)
{
qmem_free(&aq->inst);
qmem_free(&aq->res);
memset(aq, 0, sizeof(*aq));
}

@ -0,0 +1,116 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Marvell International Ltd.
*/
#include <dm.h>
#include <errno.h>
#include <malloc.h>
#include <misc.h>
#include <net.h>
#include <pci_ids.h>
#include <asm/io.h>
#include <asm/types.h>
#include <asm/arch/board.h>
#include "cgx.h"
#include "nix.h"
extern struct udevice *rvu_af_dev;
int rvu_pf_init(struct rvu_pf *rvu)
{
struct nix *nix;
struct eth_pdata *pdata = dev_get_platdata(rvu->dev);
debug("%s: Allocating nix lf\n", __func__);
nix = nix_lf_alloc(rvu->dev);
if (!nix) {
printf("%s: Error allocating lf for pf %d\n",
__func__, rvu->pfid);
return -1;
}
rvu->nix = nix;
/* to make post_probe happy */
if (is_valid_ethaddr(nix->lmac->mac_addr)) {
memcpy(pdata->enetaddr, nix->lmac->mac_addr, ARP_HLEN);
eth_env_set_enetaddr_by_index("eth", rvu->dev->seq,
pdata->enetaddr);
}
return 0;
}
static const struct eth_ops nix_eth_ops = {
.start = nix_lf_init,
.send = nix_lf_xmit,
.recv = nix_lf_recv,
.free_pkt = nix_lf_free_pkt,
.stop = nix_lf_halt,
.write_hwaddr = nix_lf_setup_mac,
};
int rvu_pf_probe(struct udevice *dev)
{
struct rvu_pf *rvu = dev_get_priv(dev);
int err;
char name[16];
debug("%s: name: %s\n", __func__, dev->name);
rvu->pf_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_2, PCI_REGION_MEM);
rvu->pfid = dev->seq + 1; /* RVU PFs start from 1 */
rvu->dev = dev;
if (!rvu_af_dev) {
printf("%s: Error: Could not find RVU AF device\n",
__func__);
return -1;
}
rvu->afdev = rvu_af_dev;
debug("RVU PF %u BAR2 %p\n", rvu->pfid, rvu->pf_base);
rvu_get_lfid_for_pf(rvu->pfid, &rvu->nix_lfid, &rvu->npa_lfid);
err = rvu_pf_init(rvu);
if (err)
printf("%s: Error %d adding nix\n", __func__, err);
/*
* modify device name to include index/sequence number,
* for better readability, this is 1:1 mapping with eth0/1/2.. names.
*/
sprintf(name, "rvu_pf#%d", dev->seq);
device_set_name(dev, name);
debug("%s: name: %s\n", __func__, dev->name);
return err;
}
int rvu_pf_remove(struct udevice *dev)
{
struct rvu_pf *rvu = dev_get_priv(dev);
nix_lf_shutdown(rvu->nix);
npa_lf_shutdown(rvu->nix);
debug("%s: rvu pf%d down --\n", __func__, rvu->pfid);
return 0;
}
U_BOOT_DRIVER(rvu_pf) = {
.name = "rvu_pf",
.id = UCLASS_ETH,
.probe = rvu_pf_probe,
.remove = rvu_pf_remove,
.ops = &nix_eth_ops,
.priv_auto_alloc_size = sizeof(struct rvu_pf),
.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};
static struct pci_device_id rvu_pf_supported[] = {
{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_RVU_PF) },
{}
};
U_BOOT_PCI_DEVICE(rvu_pf, rvu_pf_supported);

@ -15,6 +15,7 @@
#include <asm/io.h>
#include <dm/device_compat.h>
#include <linux/bitops.h>
#include <asm/gpio.h>
DECLARE_GLOBAL_DATA_PTR;
@ -27,6 +28,7 @@ DECLARE_GLOBAL_DATA_PTR;
#define MVEBU_SPI_A3700_SPI_EN_0 BIT(16)
#define MVEBU_SPI_A3700_CLK_PRESCALE_MASK 0x1f
#define MAX_CS_COUNT 4
/* SPI registers */
struct spi_reg {
@ -39,16 +41,23 @@ struct spi_reg {
struct mvebu_spi_platdata {
struct spi_reg *spireg;
struct clk clk;
struct gpio_desc cs_gpios[MAX_CS_COUNT];
};
static void spi_cs_activate(struct spi_reg *reg, int cs)
static void spi_cs_activate(struct mvebu_spi_platdata *plat, int cs)
{
setbits_le32(&reg->ctrl, MVEBU_SPI_A3700_SPI_EN_0 << cs);
if (CONFIG_IS_ENABLED(DM_GPIO) && dm_gpio_is_valid(&plat->cs_gpios[cs]))
dm_gpio_set_value(&plat->cs_gpios[cs], 1);
else
setbits_le32(&plat->spireg->ctrl, MVEBU_SPI_A3700_SPI_EN_0 << cs);
}
static void spi_cs_deactivate(struct spi_reg *reg, int cs)
static void spi_cs_deactivate(struct mvebu_spi_platdata *plat, int cs)
{
clrbits_le32(&reg->ctrl, MVEBU_SPI_A3700_SPI_EN_0 << cs);
if (CONFIG_IS_ENABLED(DM_GPIO) && dm_gpio_is_valid(&plat->cs_gpios[cs]))
dm_gpio_set_value(&plat->cs_gpios[cs], 0);
else
clrbits_le32(&plat->spireg->ctrl, MVEBU_SPI_A3700_SPI_EN_0 << cs);
}
/**
@ -150,7 +159,7 @@ static int mvebu_spi_xfer(struct udevice *dev, unsigned int bitlen,
/* Activate CS */
if (flags & SPI_XFER_BEGIN) {
debug("SPI: activate cs.\n");
spi_cs_activate(reg, spi_chip_select(dev));
spi_cs_activate(plat, spi_chip_select(dev));
}
/* Send and/or receive */
@ -169,7 +178,7 @@ static int mvebu_spi_xfer(struct udevice *dev, unsigned int bitlen,
return ret;
debug("SPI: deactivate cs.\n");
spi_cs_deactivate(reg, spi_chip_select(dev));
spi_cs_deactivate(plat, spi_chip_select(dev));
}
return 0;
@ -247,6 +256,26 @@ static int mvebu_spi_probe(struct udevice *bus)
writel(data, &reg->cfg);
/* Set up any chip-select GPIOs described in the device tree */
if (CONFIG_IS_ENABLED(DM_GPIO) && gpio_get_list_count(bus, "cs-gpios") > 0) {
int i;
for (i = 0; i < ARRAY_SIZE(plat->cs_gpios); i++) {
ret = gpio_request_by_name(bus, "cs-gpios", i, &plat->cs_gpios[i], 0);
if (ret < 0 || !dm_gpio_is_valid(&plat->cs_gpios[i])) {
/* Use the native CS function for this line */
continue;
}
ret = dm_gpio_set_dir_flags(&plat->cs_gpios[i],
GPIOD_IS_OUT | GPIOD_ACTIVE_LOW);
if (ret) {
dev_err(bus, "Failed to configure CS GPIO %d\n", i);
return ret;
}
}
}
return 0;
}
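/*
 * A hypothetical device-tree fragment exercising the new path (pin
 * numbers invented; the property follows the generic SPI bindings).
 * Entry 0 keeps the native chip-select, entry 1 drives a GPIO as CS1:
 *
 * &spi0 {
 *	cs-gpios = <0>, <&gpionb 14 GPIO_ACTIVE_LOW>;
 * };
 */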

@ -17,8 +17,6 @@
#define CONFIG_SYS_BOOTM_LEN SZ_64M /* Increase max gunzip size */
/* auto boot */
#define CONFIG_SYS_BAUDRATE_TABLE { 9600, 19200, 38400, 57600, \
115200, 230400, 460800, 921600 }
@ -57,11 +55,8 @@
/*
* SPI Flash configuration
*/
#define CONFIG_MTD_PARTITIONS /* required for UBI partition support */
/* Environment in SPI NOR flash */
/*
* Ethernet Driver configuration
*/
@ -70,8 +65,6 @@
#define CONFIG_USB_MAX_CONTROLLER_COUNT (3 + 3)
/* USB ethernet */
/*
* SATA/SCSI/AHCI configuration
*/