author     Marcin Wojtas <mw@semihalf.com>              2017-12-08 15:57:31 +0100
committer  Ard Biesheuvel <ard.biesheuvel@linaro.org>   2017-12-08 15:21:34 +0000
commit     a9ac0c46818954873134960d6bb304c743869327 (patch)
tree       8b15278ad8bd631ed4f65daee577b7b36384fcde /Silicon/Marvell/Drivers/Net
parent     993deafa1fd81b260ae28fff3db851c6b0aa9d74 (diff)
Marvell: Reorganize file structure
In edk2-platforms it is expected to provide a separation between SoC and board files in the 'Silicon' and 'Platform' directories accordingly. This patch aligns the Marvell code with this requirement, with no functional changes in the actual source files other than those required by the modified paths.

Rename the supported board's files to the proper Armada70x0Db name. Also rename the 'Armada' directory to 'Armada7k8k' in order to properly refer to the SoC family and prevent confusion in the future, when new Armada machines are added.

On this occasion, add the ARM copyright that was wrongly missing from the dsc.inc file.

Contributed-under: TianoCore Contribution Agreement 1.1
Signed-off-by: Marcin Wojtas <mw@semihalf.com>
Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Diffstat (limited to 'Silicon/Marvell/Drivers/Net')
-rw-r--r--   Silicon/Marvell/Drivers/Net/MvMdioDxe/MvMdioDxe.c        252
-rw-r--r--   Silicon/Marvell/Drivers/Net/MvMdioDxe/MvMdioDxe.h         57
-rw-r--r--   Silicon/Marvell/Drivers/Net/MvMdioDxe/MvMdioDxe.inf       66
-rw-r--r--   Silicon/Marvell/Drivers/Net/Phy/MvPhyDxe/MvPhyDxe.c      460
-rw-r--r--   Silicon/Marvell/Drivers/Net/Phy/MvPhyDxe/MvPhyDxe.h      100
-rw-r--r--   Silicon/Marvell/Drivers/Net/Phy/MvPhyDxe/MvPhyDxe.inf     73
-rw-r--r--   Silicon/Marvell/Drivers/Net/Pp2Dxe/Mvpp2Lib.c           5023
-rw-r--r--   Silicon/Marvell/Drivers/Net/Pp2Dxe/Mvpp2Lib.h            762
-rw-r--r--   Silicon/Marvell/Drivers/Net/Pp2Dxe/Mvpp2LibHw.h         2015
-rw-r--r--   Silicon/Marvell/Drivers/Net/Pp2Dxe/Pp2Dxe.c             1396
-rw-r--r--   Silicon/Marvell/Drivers/Net/Pp2Dxe/Pp2Dxe.h              622
-rw-r--r--   Silicon/Marvell/Drivers/Net/Pp2Dxe/Pp2Dxe.inf             84
12 files changed, 10910 insertions, 0 deletions
diff --git a/Silicon/Marvell/Drivers/Net/MvMdioDxe/MvMdioDxe.c b/Silicon/Marvell/Drivers/Net/MvMdioDxe/MvMdioDxe.c
new file mode 100644
index 0000000000..12aabad0f3
--- /dev/null
+++ b/Silicon/Marvell/Drivers/Net/MvMdioDxe/MvMdioDxe.c
@@ -0,0 +1,252 @@
+/********************************************************************************
+Copyright (C) 2016 Marvell International Ltd.
+
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include <Protocol/DriverBinding.h>
+#include <Protocol/Mdio.h>
+
+#include <Library/BaseLib.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/DebugLib.h>
+#include <Library/IoLib.h>
+#include <Library/MemoryAllocationLib.h>
+#include <Library/PcdLib.h>
+#include <Library/UefiBootServicesTableLib.h>
+#include <Library/UefiLib.h>
+
+#include "MvMdioDxe.h"
+
+DECLARE_A7K8K_MDIO_TEMPLATE;
+
+STATIC
+EFI_STATUS
+MdioCheckParam (
+ INTN PhyAddr,
+ INTN RegOff
+ )
+{
+ if (PhyAddr > MVEBU_PHY_ADDR_MASK) {
+ DEBUG((DEBUG_ERROR, "Invalid PHY address %d\n", PhyAddr));
+ return EFI_INVALID_PARAMETER;
+ }
+
+ if (RegOff > MVEBU_PHY_REG_MASK) {
+ DEBUG((DEBUG_ERROR, "Invalid register offset %d\n", RegOff));
+ return EFI_INVALID_PARAMETER;
+ }
+ return EFI_SUCCESS;
+}
+
+STATIC
+EFI_STATUS
+MdioWaitReady (
+ UINT32 MdioBase
+ )
+{
+ UINT32 Timeout = MVEBU_SMI_TIMEOUT;
+ UINT32 MdioReg;
+
+ /* wait till the SMI is not busy */
+ do {
+ /* read smi register */
+ MdioReg = MmioRead32(MdioBase);
+ if (Timeout-- == 0) {
+ DEBUG((DEBUG_ERROR, "SMI busy Timeout\n"));
+ return EFI_TIMEOUT;
+ }
+ } while (MdioReg & MVEBU_SMI_BUSY);
+
+ return EFI_SUCCESS;
+}
+
+STATIC
+EFI_STATUS
+MdioWaitValid (
+ UINT32 MdioBase
+ )
+{
+ UINT32 Timeout = MVEBU_SMI_TIMEOUT;
+ UINT32 MdioReg;
+
+ /* wait till read value is ready */
+ do {
+ /* read smi register */
+ MdioReg = MmioRead32 (MdioBase);
+ if (Timeout-- == 0) {
+ DEBUG((DEBUG_ERROR, "SMI read ready time-out\n"));
+ return EFI_TIMEOUT;
+ }
+ } while (!(MdioReg & MVEBU_SMI_READ_VALID));
+
+ return EFI_SUCCESS;
+}
+
+STATIC
+EFI_STATUS
+MdioOperation (
+ IN CONST MARVELL_MDIO_PROTOCOL *This,
+ IN UINT32 PhyAddr,
+ IN UINT32 MdioIndex,
+ IN UINT32 RegOff,
+ IN BOOLEAN Write,
+ IN OUT UINT32 *Data
+ )
+{
+ UINT32 MdioBase = This->BaseAddresses[MdioIndex];
+ UINT32 MdioReg;
+ EFI_STATUS Status;
+
+ Status = MdioCheckParam (PhyAddr, RegOff);
+ if (EFI_ERROR(Status)) {
+ DEBUG((DEBUG_ERROR, "MdioDxe: wrong parameters\n"));
+ return Status;
+ }
+
+ /* wait till the SMI is not busy */
+ Status = MdioWaitReady (MdioBase);
+ if (EFI_ERROR(Status)) {
+ DEBUG((DEBUG_ERROR, "MdioDxe: MdioWaitReady error\n"));
+ return Status;
+ }
+
+ /* fill the phy addr and reg offset and write opcode and data */
+ MdioReg = (PhyAddr << MVEBU_SMI_DEV_ADDR_OFFS)
+ | (RegOff << MVEBU_SMI_REG_ADDR_OFFS);
+ if (Write) {
+ MdioReg &= ~MVEBU_SMI_OPCODE_READ;
+ MdioReg |= (*Data << MVEBU_SMI_DATA_OFFS);
+ } else {
+ MdioReg |= MVEBU_SMI_OPCODE_READ;
+ }
+
+ /* write the smi register */
+ MdioRegWrite32 (MdioReg, MdioBase);
+
+ /* make sure that the write transaction is over */
+ Status = Write ? MdioWaitReady (MdioBase) : MdioWaitValid (MdioBase);
+ if (EFI_ERROR(Status)) {
+ DEBUG((DEBUG_ERROR, "MdioDxe: MdioWaitReady error\n"));
+ return Status;
+ }
+
+ if (!Write) {
+ *Data = MmioRead32 (MdioBase) & MVEBU_SMI_DATA_MASK;
+ }
+
+ return EFI_SUCCESS;
+}
+
+STATIC
+EFI_STATUS
+MvMdioRead (
+ IN CONST MARVELL_MDIO_PROTOCOL *This,
+ IN UINT32 PhyAddr,
+ IN UINT32 MdioIndex,
+ IN UINT32 RegOff,
+ IN UINT32 *Data
+ )
+{
+ EFI_STATUS Status;
+
+ Status = MdioOperation (
+ This,
+ PhyAddr,
+ MdioIndex,
+ RegOff,
+ FALSE,
+ Data
+ );
+
+ return Status;
+}
+
+EFI_STATUS
+MvMdioWrite (
+ IN CONST MARVELL_MDIO_PROTOCOL *This,
+ IN UINT32 PhyAddr,
+ IN UINT32 MdioIndex,
+ IN UINT32 RegOff,
+ IN UINT32 Data
+ )
+{
+ return MdioOperation (
+ This,
+ PhyAddr,
+ MdioIndex,
+ RegOff,
+ TRUE,
+ &Data
+ );
+}
+
+EFI_STATUS
+EFIAPI
+MvMdioDxeInitialise (
+ IN EFI_HANDLE ImageHandle,
+ IN EFI_SYSTEM_TABLE *SystemTable
+ )
+{
+ MVHW_MDIO_DESC *Desc = &mA7k8kMdioDescTemplate;
+ UINT8 Index;
+ MARVELL_MDIO_PROTOCOL *Mdio;
+ EFI_STATUS Status;
+ EFI_HANDLE Handle = NULL;
+
+ Mdio = AllocateZeroPool (sizeof (MARVELL_MDIO_PROTOCOL));
+ if (Mdio == NULL) {
+ DEBUG ((DEBUG_ERROR, "MdioDxe: Protocol allocation failed\n"));
+ return EFI_OUT_OF_RESOURCES;
+ }
+
+ /* Obtain base addresses of all possible controllers */
+ for (Index = 0; Index < Desc->MdioDevCount; Index++) {
+ Mdio->BaseAddresses[Index] = Desc->MdioBaseAddresses[Index];
+ }
+
+ Mdio->ControllerCount = Desc->MdioDevCount;
+ Mdio->Read = MvMdioRead;
+ Mdio->Write = MvMdioWrite;
+
+ Status = gBS->InstallMultipleProtocolInterfaces (
+ &Handle,
+ &gMarvellMdioProtocolGuid, Mdio,
+ NULL
+ );
+
+ if (EFI_ERROR(Status)) {
+ DEBUG((DEBUG_ERROR, "Failed to install interfaces\n"));
+ return Status;
+ }
+
+ return EFI_SUCCESS;
+}
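
The driver above only installs gMarvellMdioProtocolGuid; any consumer reaches it through the boot services. A minimal sketch of such a consumer is shown below (not part of this patch; the function name and the chosen controller index, PHY address and register offset are illustrative assumptions):

#include <Uefi.h>
#include <Library/DebugLib.h>
#include <Library/UefiBootServicesTableLib.h>
#include <Protocol/Mdio.h>

STATIC
EFI_STATUS
ExampleReadPhyRegister (      /* hypothetical consumer, not part of this patch */
  VOID
  )
{
  MARVELL_MDIO_PROTOCOL  *Mdio;
  EFI_STATUS             Status;
  UINT32                 Value;

  /* Locate the protocol installed by MvMdioDxeInitialise */
  Status = gBS->LocateProtocol (&gMarvellMdioProtocolGuid, NULL, (VOID **)&Mdio);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  /* Read register 0x1 of the PHY at SMI address 0x0 on MDIO controller 0 */
  Status = Mdio->Read (Mdio, 0x0, 0, 0x1, &Value);
  if (!EFI_ERROR (Status)) {
    DEBUG ((DEBUG_INFO, "PHY register value: 0x%x\n", Value));
  }

  return Status;
}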
diff --git a/Silicon/Marvell/Drivers/Net/MvMdioDxe/MvMdioDxe.h b/Silicon/Marvell/Drivers/Net/MvMdioDxe/MvMdioDxe.h
new file mode 100644
index 0000000000..b41a1e6fba
--- /dev/null
+++ b/Silicon/Marvell/Drivers/Net/MvMdioDxe/MvMdioDxe.h
@@ -0,0 +1,57 @@
+/********************************************************************************
+Copyright (C) 2016 Marvell International Ltd.
+
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __MDIO_DXE_H__
+#define __MDIO_DXE_H__
+
+#include <Uefi.h>
+
+#define MVEBU_SMI_TIMEOUT 10000
+
+/* SMI register fields */
+#define MVEBU_SMI_DATA_OFFS 0 /* Data */
+#define MVEBU_SMI_DATA_MASK (0xffff << MVEBU_SMI_DATA_OFFS)
+#define MVEBU_SMI_DEV_ADDR_OFFS 16 /* PHY device address */
+#define MVEBU_SMI_REG_ADDR_OFFS 21 /* PHY device reg addr*/
+#define MVEBU_SMI_OPCODE_OFFS 26 /* Write/Read opcode */
+#define MVEBU_SMI_OPCODE_READ (1 << MVEBU_SMI_OPCODE_OFFS)
+#define MVEBU_SMI_READ_VALID (1 << 27) /* Read Valid */
+#define MVEBU_SMI_BUSY (1 << 28) /* Busy */
+
+#define MVEBU_PHY_REG_MASK 0x1f
+#define MVEBU_PHY_ADDR_MASK 0x1f
+
+#define MdioRegWrite32(x, y) MmioWrite32((y), (x))
+
+#endif // __MDIO_DXE_H__
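
The field definitions above describe how a single 32-bit SMI command word is assembled before it is written with MdioRegWrite32. Below is a minimal sketch of composing a read command, assuming only the macros defined in this header (the helper name is hypothetical; MdioOperation performs the equivalent steps):

/*
 * Hypothetical helper illustrating the SMI command word layout;
 * not part of the driver.
 */
STATIC
UINT32
ExampleComposeSmiRead (
  IN UINT32 PhyAddr,   /* 0 .. MVEBU_PHY_ADDR_MASK */
  IN UINT32 RegOff     /* 0 .. MVEBU_PHY_REG_MASK */
  )
{
  /*
   * Bits [15:0] carry the data field (unused for a read request), bits
   * [20:16] the PHY (SMI device) address, bits [25:21] the register offset
   * and bit 26 the read opcode. After writing this word, the caller polls
   * for MVEBU_SMI_READ_VALID and masks the result with MVEBU_SMI_DATA_MASK.
   */
  return (PhyAddr << MVEBU_SMI_DEV_ADDR_OFFS) |
         (RegOff << MVEBU_SMI_REG_ADDR_OFFS) |
         MVEBU_SMI_OPCODE_READ;
}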
diff --git a/Silicon/Marvell/Drivers/Net/MvMdioDxe/MvMdioDxe.inf b/Silicon/Marvell/Drivers/Net/MvMdioDxe/MvMdioDxe.inf
new file mode 100644
index 0000000000..c070785e6e
--- /dev/null
+++ b/Silicon/Marvell/Drivers/Net/MvMdioDxe/MvMdioDxe.inf
@@ -0,0 +1,66 @@
+# Copyright (C) 2016 Marvell International Ltd.
+#
+# Marvell BSD License Option
+#
+# If you received this File from Marvell, you may opt to use, redistribute and/or
+# modify this File under the following licensing terms.
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# * Neither the name of Marvell nor the names of its contributors may be
+# used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+[Defines]
+ INF_VERSION = 0x00010005
+ BASE_NAME = MdioDxe
+ FILE_GUID = 59fc3843-d8d4-40ba-ae07-38967138509c
+ MODULE_TYPE = DXE_DRIVER
+ VERSION_STRING = 1.0
+ ENTRY_POINT = MvMdioDxeInitialise
+
+[Sources.common]
+ MvMdioDxe.c
+ MvMdioDxe.h
+
+[Packages]
+ ArmPkg/ArmPkg.dec
+ ArmPlatformPkg/ArmPlatformPkg.dec
+ MdeModulePkg/MdeModulePkg.dec
+ MdePkg/MdePkg.dec
+ Silicon/Marvell/Marvell.dec
+
+[LibraryClasses]
+ BaseLib
+ BaseMemoryLib
+ DebugLib
+ IoLib
+ PcdLib
+ UefiBootServicesTableLib
+ UefiDriverEntryPoint
+ UefiLib
+
+[Protocols]
+ gMarvellMdioProtocolGuid
+
+[Depex]
+ TRUE
diff --git a/Silicon/Marvell/Drivers/Net/Phy/MvPhyDxe/MvPhyDxe.c b/Silicon/Marvell/Drivers/Net/Phy/MvPhyDxe/MvPhyDxe.c
new file mode 100644
index 0000000000..dd2edaec36
--- /dev/null
+++ b/Silicon/Marvell/Drivers/Net/Phy/MvPhyDxe/MvPhyDxe.c
@@ -0,0 +1,460 @@
+/********************************************************************************
+Copyright (C) 2016 Marvell International Ltd.
+
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include <Protocol/DriverBinding.h>
+#include <Protocol/Mdio.h>
+#include <Protocol/MvPhy.h>
+
+#include <Library/BaseLib.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/DebugLib.h>
+#include <Library/IoLib.h>
+#include <Library/MemoryAllocationLib.h>
+#include <Library/MvHwDescLib.h>
+#include <Library/PcdLib.h>
+#include <Library/UefiBootServicesTableLib.h>
+#include <Library/UefiLib.h>
+
+#include "MvPhyDxe.h"
+
+#define TIMEOUT 500
+
+STATIC MARVELL_MDIO_PROTOCOL *Mdio;
+
+//
+// Table with available Mdio controllers
+//
+STATIC UINT8 * CONST MdioDeviceTable = PcdGetPtr (PcdMdioControllersEnabled);
+//
+// Table with PHY to Mdio controller mappings
+//
+STATIC UINT8 * CONST Phy2MdioController = PcdGetPtr (PcdPhy2MdioController);
+//
+// Table with PHYs' SMI addresses
+//
+STATIC UINT8 * CONST PhySmiAddresses = PcdGetPtr (PcdPhySmiAddresses);
+
+STATIC MV_PHY_DEVICE MvPhyDevices[] = {
+ { MV_PHY_DEVICE_1512, MvPhyInit1512 },
+ { 0, NULL }
+};
+
+EFI_STATUS
+MvPhyStatus (
+ IN CONST MARVELL_PHY_PROTOCOL *This,
+ IN PHY_DEVICE *PhyDev
+ );
+
+EFI_STATUS
+MvPhyReset (
+ IN PHY_DEVICE *PhyDev
+ )
+{
+ UINT32 Reg = 0;
+ INTN timeout = TIMEOUT;
+
+ Mdio->Read (Mdio, PhyDev->Addr, PhyDev->MdioIndex, MII_BMCR, &Reg);
+ Reg |= BMCR_RESET;
+ Mdio->Write (Mdio, PhyDev->Addr, PhyDev->MdioIndex, MII_BMCR, Reg);
+
+ while ((Reg & BMCR_RESET) && timeout--) {
+ Mdio->Read (Mdio, PhyDev->Addr, PhyDev->MdioIndex, MII_BMCR, &Reg);
+ gBS->Stall(1000);
+ }
+
+ if (Reg & BMCR_RESET) {
+ DEBUG((DEBUG_ERROR, "PHY reset timed out\n"));
+ return EFI_TIMEOUT;
+ }
+
+ return EFI_SUCCESS;
+}
+
+/* Marvell 88E1111S */
+EFI_STATUS
+MvPhyM88e1111sConfig (
+ IN PHY_DEVICE *PhyDev
+ )
+{
+ UINT32 Reg;
+
+ if ((PhyDev->Connection == PHY_CONNECTION_RGMII) ||
+ (PhyDev->Connection == PHY_CONNECTION_RGMII_ID) ||
+ (PhyDev->Connection == PHY_CONNECTION_RGMII_RXID) ||
+ (PhyDev->Connection == PHY_CONNECTION_RGMII_TXID)) {
+ Mdio->Read (Mdio, PhyDev->Addr, PhyDev->MdioIndex, MIIM_88E1111_PHY_EXT_CR, &Reg);
+
+ if ((PhyDev->Connection == PHY_CONNECTION_RGMII) ||
+ (PhyDev->Connection == PHY_CONNECTION_RGMII_ID)) {
+ Reg |= (MIIM_88E1111_RX_DELAY | MIIM_88E1111_TX_DELAY);
+ } else if (PhyDev->Connection == PHY_CONNECTION_RGMII_RXID) {
+ Reg &= ~MIIM_88E1111_TX_DELAY;
+ Reg |= MIIM_88E1111_RX_DELAY;
+ } else if (PhyDev->Connection == PHY_CONNECTION_RGMII_TXID) {
+ Reg &= ~MIIM_88E1111_RX_DELAY;
+ Reg |= MIIM_88E1111_TX_DELAY;
+ }
+
+ Mdio->Write (Mdio, PhyDev->Addr, PhyDev->MdioIndex, MIIM_88E1111_PHY_EXT_CR, Reg);
+
+ Mdio->Read (Mdio, PhyDev->Addr, PhyDev->MdioIndex, MIIM_88E1111_PHY_EXT_SR, &Reg);
+
+ Reg &= ~(MIIM_88E1111_HWCFG_MODE_MASK);
+
+ if (Reg & MIIM_88E1111_HWCFG_FIBER_COPPER_RES)
+ Reg |= MIIM_88E1111_HWCFG_MODE_FIBER_RGMII;
+ else
+ Reg |= MIIM_88E1111_HWCFG_MODE_COPPER_RGMII;
+
+ Mdio->Write (Mdio, PhyDev->Addr, PhyDev->MdioIndex, MIIM_88E1111_PHY_EXT_SR, Reg);
+ }
+
+ if (PhyDev->Connection == PHY_CONNECTION_SGMII) {
+ Mdio->Read (Mdio, PhyDev->Addr, PhyDev->MdioIndex, MIIM_88E1111_PHY_EXT_SR, &Reg);
+
+ Reg &= ~(MIIM_88E1111_HWCFG_MODE_MASK);
+ Reg |= MIIM_88E1111_HWCFG_MODE_SGMII_NO_CLK;
+ Reg |= MIIM_88E1111_HWCFG_FIBER_COPPER_AUTO;
+
+ Mdio->Write (Mdio, PhyDev->Addr, PhyDev->MdioIndex, MIIM_88E1111_PHY_EXT_SR, Reg);
+ }
+
+ if (PhyDev->Connection == PHY_CONNECTION_RTBI) {
+ Mdio->Read (Mdio, PhyDev->Addr, PhyDev->MdioIndex, MIIM_88E1111_PHY_EXT_CR, &Reg);
+ Reg |= (MIIM_88E1111_RX_DELAY | MIIM_88E1111_TX_DELAY);
+ Mdio->Write (Mdio, PhyDev->Addr, PhyDev->MdioIndex, MIIM_88E1111_PHY_EXT_CR, Reg);
+
+ Mdio->Read (Mdio, PhyDev->Addr, PhyDev->MdioIndex, MIIM_88E1111_PHY_EXT_SR, &Reg);
+ Reg &= ~(MIIM_88E1111_HWCFG_MODE_MASK |
+ MIIM_88E1111_HWCFG_FIBER_COPPER_RES);
+ Reg |= 0x7 | MIIM_88E1111_HWCFG_FIBER_COPPER_AUTO;
+ Mdio->Write (Mdio, PhyDev->Addr, PhyDev->MdioIndex, MIIM_88E1111_PHY_EXT_SR, Reg);
+
+ /* Soft reset */
+ MvPhyReset (PhyDev);
+
+ Mdio->Read (Mdio, PhyDev->Addr, PhyDev->MdioIndex, MIIM_88E1111_PHY_EXT_SR, &Reg);
+ Reg &= ~(MIIM_88E1111_HWCFG_MODE_MASK |
+ MIIM_88E1111_HWCFG_FIBER_COPPER_RES);
+ Reg |= MIIM_88E1111_HWCFG_MODE_COPPER_RTBI |
+ MIIM_88E1111_HWCFG_FIBER_COPPER_AUTO;
+ Mdio->Write (Mdio, PhyDev->Addr, PhyDev->MdioIndex, MIIM_88E1111_PHY_EXT_SR, Reg);
+ }
+
+ Mdio->Read (Mdio, PhyDev->Addr, PhyDev->MdioIndex, MII_BMCR, &Reg);
+ Reg |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ Reg &= ~BMCR_ISOLATE;
+ Mdio->Write (Mdio, PhyDev->Addr, PhyDev->MdioIndex, MII_BMCR, Reg);
+
+ /* Soft reset */
+ MvPhyReset (PhyDev);
+
+ MvPhyReset (PhyDev);
+
+ return EFI_SUCCESS;
+}
+
+EFI_STATUS
+MvPhyParseStatus (
+ IN PHY_DEVICE *PhyDev
+ )
+{
+ UINT32 Data;
+ UINT32 Speed;
+
+ Mdio->Read (Mdio, PhyDev->Addr, PhyDev->MdioIndex, MIIM_88E1xxx_PHY_STATUS, &Data);
+
+ if ((Data & MIIM_88E1xxx_PHYSTAT_LINK) &&
+ !(Data & MIIM_88E1xxx_PHYSTAT_SPDDONE)) {
+ INTN i = 0;
+
+ DEBUG((DEBUG_ERROR,"MvPhyDxe: Waiting for PHY realtime link"));
+ while (!(Data & MIIM_88E1xxx_PHYSTAT_SPDDONE)) {
+ if (i > PHY_AUTONEGOTIATE_TIMEOUT) {
+ DEBUG((DEBUG_ERROR," TIMEOUT !\n"));
+ PhyDev->LinkUp = FALSE;
+ break;
+ }
+
+ if ((i++ % 1000) == 0)
+ DEBUG((DEBUG_ERROR, "."));
+ gBS->Stall(1000);
+ Mdio->Read (Mdio, PhyDev->Addr, PhyDev->MdioIndex, MIIM_88E1xxx_PHY_STATUS, &Data);
+ }
+ DEBUG((DEBUG_ERROR," done\n"));
+ gBS->Stall(500000);
+ } else {
+ if (Data & MIIM_88E1xxx_PHYSTAT_LINK) {
+ DEBUG((DEBUG_ERROR, "MvPhyDxe: link up, "));
+ PhyDev->LinkUp = TRUE;
+ } else {
+ DEBUG((DEBUG_ERROR, "MvPhyDxe: link down, "));
+ PhyDev->LinkUp = FALSE;
+ }
+ }
+
+ if (Data & MIIM_88E1xxx_PHYSTAT_DUPLEX) {
+ DEBUG((DEBUG_ERROR, "full duplex, "));
+ PhyDev->FullDuplex = TRUE;
+ } else {
+ DEBUG((DEBUG_ERROR, "half duplex, "));
+ PhyDev->FullDuplex = FALSE;
+ }
+
+ Speed = Data & MIIM_88E1xxx_PHYSTAT_SPEED;
+
+ switch (Speed) {
+ case MIIM_88E1xxx_PHYSTAT_GBIT:
+ DEBUG((DEBUG_ERROR, "speed 1000\n"));
+ PhyDev->Speed = SPEED_1000;
+ break;
+ case MIIM_88E1xxx_PHYSTAT_100:
+ DEBUG((DEBUG_ERROR, "speed 100\n"));
+ PhyDev->Speed = SPEED_100;
+ break;
+ default:
+ DEBUG((DEBUG_ERROR, "speed 10\n"));
+ PhyDev->Speed = SPEED_10;
+ break;
+ }
+
+ return EFI_SUCCESS;
+}
+
+STATIC
+VOID
+MvPhy1512WriteBits (
+ IN PHY_DEVICE *PhyDev,
+ IN UINT8 RegNum,
+ IN UINT16 Offset,
+ IN UINT16 Len,
+ IN UINT16 Data)
+{
+ UINT32 Reg, Mask;
+
+ if ((Len + Offset) >= 16)
+ Mask = 0 - (1 << Offset);
+ else
+ Mask = (1 << (Len + Offset)) - (1 << Offset);
+
+ Mdio->Read (Mdio, PhyDev->Addr, PhyDev->MdioIndex, RegNum, &Reg);
+
+ Reg &= ~Mask;
+ Reg |= Data << Offset;
+
+ Mdio->Write (Mdio, PhyDev->Addr, PhyDev->MdioIndex, RegNum, Reg);
+}
+
+STATIC
+EFI_STATUS
+MvPhyInit1512 (
+ IN CONST MARVELL_PHY_PROTOCOL *Snp,
+ IN OUT PHY_DEVICE *PhyDev
+ )
+{
+ UINT32 Data;
+ INTN i;
+
+ if (PhyDev->Connection == PHY_CONNECTION_SGMII) {
+ /* Select page 0xff and update configuration registers according to
+ * Marvell Release Notes - Alaska 88E1510/88E1518/88E1512 Rev A0,
+ * Errata Section 3.1 - needed in SGMII mode.
+ */
+ Mdio->Write (Mdio, PhyDev->Addr, PhyDev->MdioIndex, 22, 0x00ff);
+ Mdio->Write (Mdio, PhyDev->Addr, PhyDev->MdioIndex, 17, 0x214B);
+ Mdio->Write (Mdio, PhyDev->Addr, PhyDev->MdioIndex, 16, 0x2144);
+ Mdio->Write (Mdio, PhyDev->Addr, PhyDev->MdioIndex, 17, 0x0C28);
+ Mdio->Write (Mdio, PhyDev->Addr, PhyDev->MdioIndex, 16, 0x2146);
+ Mdio->Write (Mdio, PhyDev->Addr, PhyDev->MdioIndex, 17, 0xB233);
+ Mdio->Write (Mdio, PhyDev->Addr, PhyDev->MdioIndex, 16, 0x214D);
+ Mdio->Write (Mdio, PhyDev->Addr, PhyDev->MdioIndex, 17, 0xCC0C);
+ Mdio->Write (Mdio, PhyDev->Addr, PhyDev->MdioIndex, 16, 0x2159);
+
+ /* Reset page selection and select page 0x12 */
+ Mdio->Write (Mdio, PhyDev->Addr, PhyDev->MdioIndex, 22, 0x0000);
+ Mdio->Write (Mdio, PhyDev->Addr, PhyDev->MdioIndex, 22, 0x0012);
+
+ /* Write HWCFG_MODE = SGMII to Copper */
+ MvPhy1512WriteBits(PhyDev, 20, 0, 3, 1);
+
+ /* Phy reset - necessary after changing mode */
+ MvPhy1512WriteBits(PhyDev, 20, 15, 1, 1);
+
+ /* Reset page selection */
+ Mdio->Write (Mdio, PhyDev->Addr, PhyDev->MdioIndex, 22, 0x0000);
+ gBS->Stall(100);
+ }
+
+ MvPhyM88e1111sConfig (PhyDev);
+
+ /* autonegotiation on startup is not always required */
+ if (!PcdGetBool (PcdPhyStartupAutoneg))
+ return EFI_SUCCESS;
+
+ Mdio->Read (Mdio, PhyDev->Addr, PhyDev->MdioIndex, MII_BMSR, &Data);
+
+ if ((Data & BMSR_ANEGCAPABLE) && !(Data & BMSR_ANEGCOMPLETE)) {
+
+ DEBUG((DEBUG_ERROR, "MvPhyDxe: Waiting for PHY auto negotiation... "));
+ for (i = 0; !(Data & BMSR_ANEGCOMPLETE); i++) {
+ if (i > PHY_AUTONEGOTIATE_TIMEOUT) {
+ DEBUG((DEBUG_ERROR, "timeout\n"));
+ PhyDev->LinkUp = FALSE;
+ return EFI_TIMEOUT;
+ }
+
+ gBS->Stall(1000); /* 1 ms */
+ Mdio->Read (Mdio, PhyDev->Addr, PhyDev->MdioIndex, MII_BMSR, &Data);
+ }
+ PhyDev->LinkUp = TRUE;
+ DEBUG((DEBUG_INFO, "MvPhyDxe: link up\n"));
+ } else {
+ Mdio->Read (Mdio, PhyDev->Addr, PhyDev->MdioIndex, MII_BMSR, &Data);
+
+ if (Data & BMSR_LSTATUS) {
+ PhyDev->LinkUp = TRUE;
+ DEBUG((DEBUG_INFO, "MvPhyDxe: link up\n"));
+ } else {
+ PhyDev->LinkUp = FALSE;
+ DEBUG((DEBUG_INFO, "MvPhyDxe: link down\n"));
+ }
+ }
+ MvPhyParseStatus (PhyDev);
+
+ return EFI_SUCCESS;
+}
+
+EFI_STATUS
+MvPhyInit (
+ IN CONST MARVELL_PHY_PROTOCOL *Snp,
+ IN UINT32 PhyIndex,
+ IN PHY_CONNECTION PhyConnection,
+ IN OUT PHY_DEVICE **OutPhyDev
+ )
+{
+ EFI_STATUS Status;
+ PHY_DEVICE *PhyDev;
+ UINT8 *DeviceIds;
+ UINT8 MdioIndex;
+ INTN i;
+
+ Status = gBS->LocateProtocol (
+ &gMarvellMdioProtocolGuid,
+ NULL,
+ (VOID **) &Mdio
+ );
+ if (EFI_ERROR(Status))
+ return Status;
+
+ MdioIndex = Phy2MdioController[PhyIndex];
+
+ /* Verify correctness of PHY <-> MDIO assignment */
+ if (!MVHW_DEV_ENABLED (Mdio, MdioIndex) || MdioIndex >= Mdio->ControllerCount) {
+ DEBUG ((DEBUG_ERROR, "MvPhyDxe: Incorrect Mdio controller assignment for PHY#%d", PhyIndex));
+ return EFI_INVALID_PARAMETER;
+ }
+
+ /* perform setup common for all PHYs */
+ PhyDev = AllocateZeroPool (sizeof (PHY_DEVICE));
+ PhyDev->Addr = PhySmiAddresses[PhyIndex];
+ PhyDev->Connection = PhyConnection;
+ DEBUG((DEBUG_INFO, "MvPhyDxe: PhyAddr is %d, connection %d\n",
+ PhyDev->Addr, PhyConnection));
+ *OutPhyDev = PhyDev;
+
+ DeviceIds = PcdGetPtr (PcdPhyDeviceIds);
+ for (i = 0; i < PcdGetSize (PcdPhyDeviceIds); i++) {
+ /* find MvPhyDevices fitting entry */
+ if (MvPhyDevices[i].DevId == DeviceIds[i]) {
+ ASSERT (MvPhyDevices[i].DevInit != NULL);
+ /* proceed with PHY-specific initialization */
+ return MvPhyDevices[i].DevInit (Snp, PhyDev);
+ }
+ }
+
+ /* if we are here, no matching DevId was found */
+ Status = EFI_INVALID_PARAMETER;
+ FreePool (PhyDev);
+ return Status;
+}
+
+EFI_STATUS
+MvPhyStatus (
+ IN CONST MARVELL_PHY_PROTOCOL *This,
+ IN PHY_DEVICE *PhyDev
+ )
+{
+ UINT32 Data;
+
+ Mdio->Read (Mdio, PhyDev->Addr, PhyDev->MdioIndex, MII_BMSR, &Data);
+ Mdio->Read (Mdio, PhyDev->Addr, PhyDev->MdioIndex, MII_BMSR, &Data);
+
+ if ((Data & BMSR_LSTATUS) == 0) {
+ PhyDev->LinkUp = FALSE;
+ } else {
+ PhyDev->LinkUp = TRUE;
+ }
+
+ return EFI_SUCCESS;
+}
+
+EFI_STATUS
+EFIAPI
+MvPhyDxeInitialise (
+ IN EFI_HANDLE ImageHandle,
+ IN EFI_SYSTEM_TABLE *SystemTable
+ )
+{
+ MARVELL_PHY_PROTOCOL *Phy;
+ EFI_STATUS Status;
+ EFI_HANDLE Handle = NULL;
+
+ Phy = AllocateZeroPool (sizeof (MARVELL_PHY_PROTOCOL));
+ Phy->Status = MvPhyStatus;
+ Phy->Init = MvPhyInit;
+
+ Status = gBS->InstallMultipleProtocolInterfaces (
+ &Handle,
+ &gMarvellPhyProtocolGuid, Phy,
+ NULL
+ );
+
+ if (EFI_ERROR(Status)) {
+ DEBUG((DEBUG_ERROR, "Failed to install interfaces\n"));
+ return Status;
+ }
+ DEBUG((DEBUG_ERROR, "Succesfully installed protocol interfaces\n"));
+
+ return EFI_SUCCESS;
+}
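
As with the MDIO driver, only the protocol is installed here; a network driver is expected to locate gMarvellPhyProtocolGuid and call Init followed by Status. A minimal, hypothetical consumer sketch (the function name, PHY index and connection type are assumptions for illustration):

#include <Uefi.h>
#include <Library/UefiBootServicesTableLib.h>
#include <Protocol/MvPhy.h>

STATIC
EFI_STATUS
ExampleBringUpPhy (                             /* hypothetical consumer */
  IN CONST MARVELL_PHY_PROTOCOL  *CallerSnp     /* passed through to Init */
  )
{
  MARVELL_PHY_PROTOCOL  *Phy;
  PHY_DEVICE            *PhyDev;
  EFI_STATUS            Status;

  /* Locate the protocol installed by MvPhyDxeInitialise */
  Status = gBS->LocateProtocol (&gMarvellPhyProtocolGuid, NULL, (VOID **)&Phy);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  /* Initialize PHY #0, assumed here to be wired as SGMII */
  Status = Phy->Init (CallerSnp, 0, PHY_CONNECTION_SGMII, &PhyDev);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  /* Refresh the link state stored in PhyDev */
  return Phy->Status (Phy, PhyDev);
}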
diff --git a/Silicon/Marvell/Drivers/Net/Phy/MvPhyDxe/MvPhyDxe.h b/Silicon/Marvell/Drivers/Net/Phy/MvPhyDxe/MvPhyDxe.h
new file mode 100644
index 0000000000..66974bba4b
--- /dev/null
+++ b/Silicon/Marvell/Drivers/Net/Phy/MvPhyDxe/MvPhyDxe.h
@@ -0,0 +1,100 @@
+/********************************************************************************
+Copyright (C) 2016 Marvell International Ltd.
+
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#ifndef __MV_PHY_DXE_H__
+#define __MV_PHY_DXE_H__
+
+#define MII_BMCR 0x00 /* Basic mode control Register */
+#define MII_BMSR 0x01 /* Basic mode status Register */
+
+/* BMCR */
+#define BMCR_ANRESTART 0x0200 /* 1 = Restart autonegotiation */
+#define BMCR_ISOLATE 0x0400 /* 0 = Isolate PHY */
+#define BMCR_ANENABLE 0x1000 /* 1 = Enable autonegotiation */
+#define BMCR_RESET 0x8000 /* 1 = Reset the PHY */
+
+/* BMSR */
+#define BMSR_LSTATUS 0x0004 /* 1 = Link up */
+#define BMSR_ANEGCAPABLE 0x0008 /* 1 = Able to perform auto-neg */
+#define BMSR_ANEGCOMPLETE 0x0020 /* 1 = Auto-neg complete */
+
+#define PHY_AUTONEGOTIATE_TIMEOUT 5000
+
+/* 88E1011 PHY Status Register */
+#define MIIM_88E1xxx_PHY_STATUS 0x11
+#define MIIM_88E1xxx_PHYSTAT_SPEED 0xc000
+#define MIIM_88E1xxx_PHYSTAT_GBIT 0x8000
+#define MIIM_88E1xxx_PHYSTAT_100 0x4000
+#define MIIM_88E1xxx_PHYSTAT_DUPLEX 0x2000
+#define MIIM_88E1xxx_PHYSTAT_SPDDONE 0x0800
+#define MIIM_88E1xxx_PHYSTAT_LINK 0x0400
+
+/* 88E1111 Extended PHY Specific Control Register */
+#define MIIM_88E1111_PHY_EXT_CR 0x14
+#define MIIM_88E1111_RX_DELAY 0x80
+#define MIIM_88E1111_TX_DELAY 0x02
+
+/* 88E1111 Extended PHY Specific Status Register */
+#define MIIM_88E1111_PHY_EXT_SR 0x1b
+#define MIIM_88E1111_HWCFG_MODE_MASK 0xf
+#define MIIM_88E1111_HWCFG_MODE_COPPER_RGMII 0xb
+#define MIIM_88E1111_HWCFG_MODE_FIBER_RGMII 0x3
+#define MIIM_88E1111_HWCFG_MODE_SGMII_NO_CLK 0x4
+#define MIIM_88E1111_HWCFG_MODE_COPPER_RTBI 0x9
+#define MIIM_88E1111_HWCFG_FIBER_COPPER_AUTO 0x8000
+#define MIIM_88E1111_HWCFG_FIBER_COPPER_RES 0x2000
+
+typedef enum {
+ MV_PHY_DEVICE_1512
+} MV_PHY_DEVICE_ID;
+
+typedef
+EFI_STATUS
+(*MV_PHY_DEVICE_INIT) (
+ IN CONST MARVELL_PHY_PROTOCOL *Snp,
+ IN OUT PHY_DEVICE *PhyDev
+ );
+
+typedef struct {
+ MV_PHY_DEVICE_ID DevId;
+ MV_PHY_DEVICE_INIT DevInit;
+} MV_PHY_DEVICE;
+
+STATIC
+EFI_STATUS
+MvPhyInit1512 (
+ IN CONST MARVELL_PHY_PROTOCOL *Snp,
+ IN OUT PHY_DEVICE *PhyDev
+ );
+
+#endif
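
The MIIM_88E1xxx_PHY_STATUS definitions above pack link, duplex and a two-bit speed field into a single register, which MvPhyParseStatus decodes. A minimal sketch of that decoding, assuming only the macros defined here (the helper name and the DEBUG output are illustrative):

#include <Uefi.h>
#include <Library/DebugLib.h>

STATIC
VOID
ExampleDecodePhyStatus (
  IN UINT32 Data    /* value read from MIIM_88E1xxx_PHY_STATUS */
  )
{
  UINT32 Speed;

  switch (Data & MIIM_88E1xxx_PHYSTAT_SPEED) {
  case MIIM_88E1xxx_PHYSTAT_GBIT:
    Speed = 1000;
    break;
  case MIIM_88E1xxx_PHYSTAT_100:
    Speed = 100;
    break;
  default:
    Speed = 10;
    break;
  }

  DEBUG ((DEBUG_INFO, "link %a, %a duplex, speed %d\n",
    (Data & MIIM_88E1xxx_PHYSTAT_LINK) ? "up" : "down",
    (Data & MIIM_88E1xxx_PHYSTAT_DUPLEX) ? "full" : "half",
    Speed));
}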
diff --git a/Silicon/Marvell/Drivers/Net/Phy/MvPhyDxe/MvPhyDxe.inf b/Silicon/Marvell/Drivers/Net/Phy/MvPhyDxe/MvPhyDxe.inf
new file mode 100644
index 0000000000..fe0f55478b
--- /dev/null
+++ b/Silicon/Marvell/Drivers/Net/Phy/MvPhyDxe/MvPhyDxe.inf
@@ -0,0 +1,73 @@
+# Copyright (C) 2016 Marvell International Ltd.
+#
+# Marvell BSD License Option
+#
+# If you received this File from Marvell, you may opt to use, redistribute and/or
+# modify this File under the following licensing terms.
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# * Neither the name of Marvell nor the names of its contributors may be
+# used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+[Defines]
+ INF_VERSION = 0x00010005
+ BASE_NAME = MvPhyDxe
+ FILE_GUID = 5aac3843-d8d4-40ba-ae07-38967138509c
+ MODULE_TYPE = DXE_DRIVER
+ VERSION_STRING = 1.0
+ ENTRY_POINT = MvPhyDxeInitialise
+
+[Sources.common]
+ MvPhyDxe.c
+
+[Packages]
+ ArmPkg/ArmPkg.dec
+ ArmPlatformPkg/ArmPlatformPkg.dec
+ MdeModulePkg/MdeModulePkg.dec
+ MdePkg/MdePkg.dec
+ Silicon/Marvell/Marvell.dec
+
+[LibraryClasses]
+ BaseLib
+ BaseMemoryLib
+ DebugLib
+ IoLib
+ PcdLib
+ UefiBootServicesTableLib
+ UefiDriverEntryPoint
+ UefiLib
+
+[Protocols]
+ gMarvellMdioProtocolGuid
+ gMarvellPhyProtocolGuid
+
+[Pcd]
+ gMarvellTokenSpaceGuid.PcdMdioControllersEnabled
+ gMarvellTokenSpaceGuid.PcdPhy2MdioController
+ gMarvellTokenSpaceGuid.PcdPhyDeviceIds
+ gMarvellTokenSpaceGuid.PcdPhySmiAddresses
+ gMarvellTokenSpaceGuid.PcdPhyStartupAutoneg
+
+[Depex]
+ TRUE
diff --git a/Silicon/Marvell/Drivers/Net/Pp2Dxe/Mvpp2Lib.c b/Silicon/Marvell/Drivers/Net/Pp2Dxe/Mvpp2Lib.c
new file mode 100644
index 0000000000..0c9f00c04a
--- /dev/null
+++ b/Silicon/Marvell/Drivers/Net/Pp2Dxe/Mvpp2Lib.c
@@ -0,0 +1,5023 @@
+/********************************************************************************
+Copyright (C) 2016 Marvell International Ltd.
+
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "Mvpp2Lib.h"
+#include "Mvpp2LibHw.h"
+#include "Pp2Dxe.h"
+
+/* Parser configuration routines */
+
+/* Update parser Tcam and Sram hw entries */
+STATIC
+INT32
+Mvpp2PrsHwWrite (
+ IN MVPP2_SHARED *Priv,
+ IN OUT MVPP2_PRS_ENTRY *Pe
+ )
+{
+ INT32 i;
+
+ if (Pe->Index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) {
+ return MVPP2_EINVAL;
+ }
+
+ /* Clear entry invalidation bit */
+ Pe->Tcam.Word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
+
+ /* Write Tcam Index - indirect access */
+ Mvpp2Write (Priv, MVPP2_PRS_TCAM_IDX_REG, Pe->Index);
+ for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) {
+ Mvpp2Write (Priv, MVPP2_PRS_TCAM_DATA_REG(i), Pe->Tcam.Word[i]);
+ }
+
+ /* Write Sram Index - indirect access */
+ Mvpp2Write (Priv, MVPP2_PRS_SRAM_IDX_REG, Pe->Index);
+ for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) {
+ Mvpp2Write (Priv, MVPP2_PRS_SRAM_DATA_REG(i), Pe->Sram.Word[i]);
+ }
+
+ return 0;
+}
+
+/* Read Tcam entry from hw */
+STATIC
+INT32
+Mvpp2PrsHwRead (
+ IN MVPP2_SHARED *Priv,
+ IN OUT MVPP2_PRS_ENTRY *Pe
+ )
+{
+ INT32 i;
+
+ if (Pe->Index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) {
+ return MVPP2_EINVAL;
+ }
+
+ /* Write Tcam Index - indirect access */
+ Mvpp2Write (Priv, MVPP2_PRS_TCAM_IDX_REG, Pe->Index);
+
+ Pe->Tcam.Word[MVPP2_PRS_TCAM_INV_WORD] =
+ Mvpp2Read (Priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
+ if (Pe->Tcam.Word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK) {
+ return MVPP2_PRS_TCAM_ENTRY_INVALID;
+ }
+
+ for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) {
+ Pe->Tcam.Word[i] = Mvpp2Read (Priv, MVPP2_PRS_TCAM_DATA_REG(i));
+ }
+
+ /* Write Sram Index - indirect access */
+ Mvpp2Write (Priv, MVPP2_PRS_SRAM_IDX_REG, Pe->Index);
+ for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) {
+ Pe->Sram.Word[i] = Mvpp2Read (Priv, MVPP2_PRS_SRAM_DATA_REG(i));
+ }
+
+ return 0;
+}
+
+/* Invalidate Tcam hw entry */
+STATIC
+VOID
+Mvpp2PrsHwInv (
+ IN MVPP2_SHARED *Priv,
+ IN INT32 Index
+ )
+{
+ /* Write Index - indirect access */
+ Mvpp2Write (Priv, MVPP2_PRS_TCAM_IDX_REG, Index);
+ Mvpp2Write (Priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
+ MVPP2_PRS_TCAM_INV_MASK);
+}
+
+/* Enable shadow table entry and set its lookup ID */
+STATIC
+VOID
+Mvpp2PrsShadowSet (
+ IN MVPP2_SHARED *Priv,
+ IN INT32 Index,
+ IN INT32 Lu
+ )
+{
+ Priv->PrsShadow[Index].Valid = TRUE;
+ Priv->PrsShadow[Index].Lu = Lu;
+}
+
+/* Update Ri fields in shadow table entry */
+STATIC
+VOID
+Mvpp2PrsShadowRiSet (
+ IN MVPP2_SHARED *Priv,
+ IN INT32 Index,
+ IN UINT32 Ri,
+ IN UINT32 RiMask
+ )
+{
+ Priv->PrsShadow[Index].RiMask = RiMask;
+ Priv->PrsShadow[Index].Ri = Ri;
+}
+
+/* Update lookup field in Tcam sw entry */
+STATIC
+VOID
+Mvpp2PrsTcamLuSet (
+ IN OUT MVPP2_PRS_ENTRY *Pe,
+ IN UINT32 Lu
+ )
+{
+ INT32 EnableOff = MVPP2_PRS_TCAM_EN_OFFS (MVPP2_PRS_TCAM_LU_BYTE);
+
+ Pe->Tcam.Byte[MVPP2_PRS_TCAM_LU_BYTE] = Lu;
+ Pe->Tcam.Byte[EnableOff] = MVPP2_PRS_LU_MASK;
+}
+
+/* Update Mask for single Port in Tcam sw entry */
+STATIC
+VOID
+Mvpp2PrsTcamPortSet (
+ IN OUT MVPP2_PRS_ENTRY *Pe,
+ IN UINT32 PortId,
+ IN BOOLEAN Add
+ )
+{
+ INT32 EnableOff = MVPP2_PRS_TCAM_EN_OFFS (MVPP2_PRS_TCAM_PORT_BYTE);
+
+ if (Add) {
+ Pe->Tcam.Byte[EnableOff] &= ~(1 << PortId);
+ } else {
+ Pe->Tcam.Byte[EnableOff] |= 1 << PortId;
+ }
+}
+
+/* Update Port map in Tcam sw entry */
+STATIC
+VOID
+Mvpp2PrsTcamPortMapSet (
+ IN OUT MVPP2_PRS_ENTRY *Pe,
+ IN UINT32 PortMask
+ )
+{
+ INT32 EnableOff = MVPP2_PRS_TCAM_EN_OFFS (MVPP2_PRS_TCAM_PORT_BYTE);
+ UINT8 Mask = MVPP2_PRS_PORT_MASK;
+
+ Pe->Tcam.Byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
+ Pe->Tcam.Byte[EnableOff] &= ~Mask;
+ Pe->Tcam.Byte[EnableOff] |= ~PortMask & MVPP2_PRS_PORT_MASK;
+}
+
+/* Obtain Port map from Tcam sw entry */
+STATIC
+UINT32
+Mvpp2PrsTcamPortMapGet (
+ IN MVPP2_PRS_ENTRY *Pe
+ )
+{
+ INT32 EnableOff = MVPP2_PRS_TCAM_EN_OFFS (MVPP2_PRS_TCAM_PORT_BYTE);
+
+ return ~(Pe->Tcam.Byte[EnableOff]) & MVPP2_PRS_PORT_MASK;
+}
+
+/* Set Byte of data and its enable bits in Tcam sw entry */
+STATIC
+VOID
+Mvpp2PrsTcamDataByteSet (
+ IN OUT MVPP2_PRS_ENTRY *Pe,
+ IN UINT32 Offs,
+ IN UINT8 Byte,
+ IN UINT8 Enable
+ )
+{
+ Pe->Tcam.Byte[MVPP2_PRS_TCAM_DATA_BYTE(Offs)] = Byte;
+ Pe->Tcam.Byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(Offs)] = Enable;
+}
+
+/* Get Byte of data and its enable bits from Tcam sw entry */
+STATIC
+VOID
+Mvpp2PrsTcamDataByteGet (
+ IN MVPP2_PRS_ENTRY *Pe,
+ IN UINT32 Offs,
+ OUT UINT8 *Byte,
+ OUT UINT8 *Enable
+ )
+{
+ *Byte = Pe->Tcam.Byte[MVPP2_PRS_TCAM_DATA_BYTE(Offs)];
+ *Enable = Pe->Tcam.Byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(Offs)];
+}
+
+/* Compare Tcam data bytes with a pattern */
+STATIC
+BOOLEAN
+Mvpp2PrsTcamDataCmp (
+ IN MVPP2_PRS_ENTRY *Pe,
+ IN INT32 Offset,
+ IN UINT16 Data
+ )
+{
+ INT32 ByteOffset = MVPP2_PRS_TCAM_DATA_BYTE(Offset);
+ UINT16 TcamData;
+
+ TcamData = (Pe->Tcam.Byte[ByteOffset + 1] << 8) | Pe->Tcam.Byte[ByteOffset];
+ if (TcamData != Data) {
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+/* Update ai bits in Tcam sw entry */
+STATIC
+VOID
+Mvpp2PrsTcamAiUpdate (
+ IN OUT MVPP2_PRS_ENTRY *Pe,
+ IN UINT32 Bits,
+ IN UINT32 Enable
+ )
+{
+ INT32 i, AiIdx = MVPP2_PRS_TCAM_AI_BYTE;
+
+ for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
+
+ if (!(Enable & BIT (i))) {
+ continue;
+ }
+
+ if (Bits & BIT (i)) {
+ Pe->Tcam.Byte[AiIdx] |= 1 << i;
+ } else {
+ Pe->Tcam.Byte[AiIdx] &= ~(1 << i);
+ }
+ }
+
+ Pe->Tcam.Byte[MVPP2_PRS_TCAM_EN_OFFS (AiIdx)] |= Enable;
+}
+
+/* Get ai bits from Tcam sw entry */
+STATIC
+INT32
+Mvpp2PrsTcamAiGet (
+ IN MVPP2_PRS_ENTRY *Pe
+ )
+{
+ return Pe->Tcam.Byte[MVPP2_PRS_TCAM_AI_BYTE];
+}
+
+/* Get word of data and its enable bits from Tcam sw entry */
+STATIC
+VOID
+Mvpp2PrsTcamDataWordGet (
+ IN MVPP2_PRS_ENTRY *Pe,
+ IN UINT32 DataOffset,
+ OUT UINT32 *Word,
+ OUT UINT32 *Enable
+ )
+{
+ INT32 Index, Position;
+ UINT8 Byte, Mask;
+
+ for (Index = 0; Index < 4; Index++) {
+ Position = (DataOffset * sizeof (INT32)) + Index;
+ Mvpp2PrsTcamDataByteGet (Pe, Position, &Byte, &Mask);
+ ((UINT8 *)Word)[Index] = Byte;
+ ((UINT8 *)Enable)[Index] = Mask;
+ }
+}
+
+/* Set ethertype in Tcam sw entry */
+STATIC
+VOID
+Mvpp2PrsMatchEtype (
+ IN OUT MVPP2_PRS_ENTRY *Pe,
+ IN INT32 Offset,
+ IN UINT16 EtherType
+ )
+{
+ Mvpp2PrsTcamDataByteSet (Pe, Offset + 0, EtherType >> 8, 0xff);
+ Mvpp2PrsTcamDataByteSet (Pe, Offset + 1, EtherType & 0xff, 0xff);
+}
+
+/* Set bits in Sram sw entry */
+STATIC
+VOID
+Mvpp2PrsSramBitsSet (
+ IN OUT MVPP2_PRS_ENTRY *Pe,
+ IN INT32 BitNum,
+ IN INT32 Val
+ )
+{
+ Pe->Sram.Byte[MVPP2_BIT_TO_BYTE(BitNum)] |= (Val << (BitNum % 8));
+}
+
+/* Clear bits in Sram sw entry */
+STATIC
+VOID
+Mvpp2PrsSramBitsClear (
+ IN OUT MVPP2_PRS_ENTRY *Pe,
+ IN INT32 BitNum,
+ IN INT32 Val
+ )
+{
+ Pe->Sram.Byte[MVPP2_BIT_TO_BYTE(BitNum)] &= ~(Val << (BitNum % 8));
+}
+
+/* Update Ri bits in Sram sw entry */
+STATIC
+VOID
+Mvpp2PrsSramRiUpdate (
+ IN OUT MVPP2_PRS_ENTRY *Pe,
+ IN UINT32 bits,
+ IN UINT32 Mask
+ )
+{
+ UINT32 i;
+
+ for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
+ INT32 RiOff = MVPP2_PRS_SRAM_RI_OFFS;
+
+ if (!(Mask & BIT (i))) {
+ continue;
+ }
+
+ if (bits & BIT (i)) {
+ Mvpp2PrsSramBitsSet (Pe, RiOff + i, 1);
+ } else {
+ Mvpp2PrsSramBitsClear (Pe, RiOff + i, 1);
+ }
+
+ Mvpp2PrsSramBitsSet (Pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
+ }
+}
+
+/* Obtain Ri bits from Sram sw entry */
+STATIC
+INT32
+Mvpp2PrsSramRiGet (
+ IN MVPP2_PRS_ENTRY *Pe
+ )
+{
+ return Pe->Sram.Word[MVPP2_PRS_SRAM_RI_WORD];
+}
+
+/* Update ai bits in Sram sw entry */
+STATIC
+VOID
+Mvpp2PrsSramAiUpdate (
+ IN OUT MVPP2_PRS_ENTRY *Pe,
+ IN UINT32 Bits,
+ UINT32 Mask
+ )
+{
+ UINT32 i;
+ INT32 AiOff = MVPP2_PRS_SRAM_AI_OFFS;
+
+ for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
+
+ if (!(Mask & BIT (i))) {
+ continue;
+ }
+
+ if (Bits & BIT (i)) {
+ Mvpp2PrsSramBitsSet (Pe, AiOff + i, 1);
+ } else {
+ Mvpp2PrsSramBitsClear (Pe, AiOff + i, 1);
+ }
+
+ Mvpp2PrsSramBitsSet (Pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
+ }
+}
+
+/* Read ai bits from Sram sw entry */
+STATIC
+INT32
+Mvpp2PrsSramAiGet (
+ IN MVPP2_PRS_ENTRY *Pe
+ )
+{
+ UINT8 bits;
+ INT32 AiOff = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
+ INT32 AiEnOff = AiOff + 1;
+ INT32 AiShift = MVPP2_PRS_SRAM_AI_OFFS % 8;
+
+ bits = (Pe->Sram.Byte[AiOff] >> AiShift) |
+ (Pe->Sram.Byte[AiEnOff] << (8 - AiShift));
+
+ return bits;
+}
+
+/*
+ * In Sram sw entry set lookup ID field of the
+ * Tcam key to be used in the next lookup iteration
+ */
+STATIC
+VOID
+Mvpp2PrsSramNextLuSet (
+ IN OUT MVPP2_PRS_ENTRY *Pe,
+ IN UINT32 Lu
+ )
+{
+ INT32 SramNextOff = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
+
+ Mvpp2PrsSramBitsClear (Pe, SramNextOff, MVPP2_PRS_SRAM_NEXT_LU_MASK);
+ Mvpp2PrsSramBitsSet (Pe, SramNextOff, Lu);
+}
+
+/*
+ * In the Sram sw entry set sign and value of the next lookup Offset
+ * and the Offset value generated to the classifier
+ */
+STATIC
+VOID
+Mvpp2PrsSramShiftSet (
+ IN OUT MVPP2_PRS_ENTRY *Pe,
+ IN INT32 Shift,
+ IN UINT32 Op
+ )
+{
+ /* Set sign */
+ if (Shift < 0) {
+ Mvpp2PrsSramBitsSet (Pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
+ Shift = -Shift;
+ } else {
+ Mvpp2PrsSramBitsClear (Pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
+ }
+
+ /* Set value */
+ Pe->Sram.Byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] = (UINT8)Shift;
+
+ /* Reset and set operation */
+ Mvpp2PrsSramBitsClear (Pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
+ Mvpp2PrsSramBitsSet (Pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, Op);
+
+ /* Set base Offset as current */
+ Mvpp2PrsSramBitsClear (Pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
+}
+
+/*
+ * In the Sram sw entry set sign and value of the user defined offset
+ * generated for the classifier
+ */
+STATIC
+VOID
+Mvpp2PrsSramOffsetSet (
+ IN OUT MVPP2_PRS_ENTRY *Pe,
+ IN UINT32 Type,
+ IN INT32 Offset,
+ IN UINT32 Op
+ )
+{
+ UINT8 UdfByte = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS + MVPP2_PRS_SRAM_UDF_BITS);
+ UINT8 UdfByteOffset = (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8));
+ UINT8 OpSelUdfByte = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + MVPP2_PRS_SRAM_OP_SEL_UDF_BITS);
+ UINT8 OpSelUdfByteOffset = (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8));
+
+ /* Set sign */
+ if (Offset < 0) {
+ Mvpp2PrsSramBitsSet (Pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
+ Offset = -Offset;
+ } else {
+ Mvpp2PrsSramBitsClear (Pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
+ }
+
+ /* Set value */
+ Mvpp2PrsSramBitsClear (Pe, MVPP2_PRS_SRAM_UDF_OFFS, MVPP2_PRS_SRAM_UDF_MASK);
+ Mvpp2PrsSramBitsSet (Pe, MVPP2_PRS_SRAM_UDF_OFFS, Offset);
+
+ Pe->Sram.Byte[UdfByte] &= ~(MVPP2_PRS_SRAM_UDF_MASK >> UdfByteOffset);
+ Pe->Sram.Byte[UdfByte] |= (Offset >> UdfByteOffset);
+
+ /* Set Offset Type */
+ Mvpp2PrsSramBitsClear (Pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, MVPP2_PRS_SRAM_UDF_TYPE_MASK);
+ Mvpp2PrsSramBitsSet (Pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, Type);
+
+ /* Set Offset operation */
+ Mvpp2PrsSramBitsClear (Pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
+ Mvpp2PrsSramBitsSet (Pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, Op);
+
+ Pe->Sram.Byte[OpSelUdfByte] &= ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >> OpSelUdfByteOffset);
+ Pe->Sram.Byte[OpSelUdfByte] |= (Op >> OpSelUdfByteOffset);
+
+ /* Set base Offset as current */
+ Mvpp2PrsSramBitsClear (Pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
+}
+
+/* Find parser Flow entry */
+STATIC
+MVPP2_PRS_ENTRY *
+Mvpp2PrsFlowFind (
+ IN MVPP2_SHARED *Priv,
+ IN INT32 Flow
+ )
+{
+ MVPP2_PRS_ENTRY *Pe;
+ INT32 Tid;
+ UINT32 Word, Enable;
+
+ Pe = Mvpp2Alloc (sizeof (*Pe));
+ if (Pe == NULL) {
+ return NULL;
+ }
+
+ Mvpp2PrsTcamLuSet (Pe, MVPP2_PRS_LU_FLOWS);
+
+ /* Go through all the entries with MVPP2_PRS_LU_FLOWS */
+ for (Tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; Tid >= 0; Tid--) {
+ UINT8 Bits;
+
+ if (!Priv->PrsShadow[Tid].Valid || Priv->PrsShadow[Tid].Lu != MVPP2_PRS_LU_FLOWS) {
+ continue;
+ }
+
+ Pe->Index = Tid;
+ Mvpp2PrsHwRead (Priv, Pe);
+
+ /*
+ * Check result info, because there may be
+ * several TCAM lines that generate the same Flow
+ */
+ Mvpp2PrsTcamDataWordGet (Pe, 0, &Word, &Enable);
+ if ((Word != 0) || (Enable != 0)) {
+ continue;
+ }
+
+ Bits = Mvpp2PrsSramAiGet (Pe);
+
+ /* Sram stores the classification lookup ID in AI bits [5:0] */
+ if ((Bits & MVPP2_PRS_FLOW_ID_MASK) == Flow) {
+ return Pe;
+ }
+ }
+
+ Mvpp2Free (Pe);
+
+ return NULL;
+}
+
+/* Return first free Tcam Index, seeking from start to end */
+STATIC
+INT32
+Mvpp2PrsTcamFirstFree (
+ IN MVPP2_SHARED *Priv,
+ IN UINT8 Start,
+ IN UINT8 End
+ )
+{
+ INT32 Tid;
+
+ if (Start > End) {
+ Mvpp2SwapVariables (Start, End);
+ }
+
+ if (End >= MVPP2_PRS_TCAM_SRAM_SIZE) {
+ End = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
+ }
+
+ for (Tid = Start; Tid <= End; Tid++) {
+ if (!Priv->PrsShadow[Tid].Valid) {
+ return Tid;
+ }
+ }
+
+ return MVPP2_EINVAL;
+}
+
+/* Enable/disable dropping of all MAC DAs */
+STATIC
+VOID
+Mvpp2PrsMacDropAllSet (
+ IN MVPP2_SHARED *Priv,
+ IN INT32 PortId,
+ IN BOOLEAN Add
+ )
+{
+ MVPP2_PRS_ENTRY Pe;
+
+ if (Priv->PrsShadow[MVPP2_PE_DROP_ALL].Valid) {
+ /* Entry exists - update PortId only */
+ Pe.Index = MVPP2_PE_DROP_ALL;
+ Mvpp2PrsHwRead (Priv, &Pe);
+ } else {
+ /* Entry doesn't exist - create new */
+ Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY));
+ Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_MAC);
+ Pe.Index = MVPP2_PE_DROP_ALL;
+
+ /* Non-promiscuous mode for all Ports - DROP unknown packets */
+ Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_DROP_MASK, MVPP2_PRS_RI_DROP_MASK);
+
+ Mvpp2PrsSramBitsSet (&Pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_FLOWS);
+
+ /* Update shadow table */
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_MAC);
+
+ /* Mask all Ports */
+ Mvpp2PrsTcamPortMapSet (&Pe, 0);
+ }
+
+ /* Update PortId Mask */
+ Mvpp2PrsTcamPortSet (&Pe, PortId, Add);
+
+ Mvpp2PrsHwWrite (Priv, &Pe);
+}
+
+/* Set port to promiscuous mode */
+VOID
+Mvpp2PrsMacPromiscSet (
+ IN MVPP2_SHARED *Priv,
+ IN INT32 PortId,
+ IN BOOLEAN Add
+ )
+{
+ MVPP2_PRS_ENTRY Pe;
+
+ /* Promiscuous mode - Accept unknown packets */
+
+ if (Priv->PrsShadow[MVPP2_PE_MAC_PROMISCUOUS].Valid) {
+ /* Entry exists - update port only */
+ Pe.Index = MVPP2_PE_MAC_PROMISCUOUS;
+ Mvpp2PrsHwRead (Priv, &Pe);
+ } else {
+ /* Entry doesn't exist - create new */
+ Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY));
+ Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_MAC);
+ Pe.Index = MVPP2_PE_MAC_PROMISCUOUS;
+
+ /* Continue - set next lookup */
+ Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_DSA);
+
+ /* Set result info bits */
+ Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L2_UCAST, MVPP2_PRS_RI_L2_CAST_MASK);
+
+ /* Shift to ethertype - skip the length of 2 MAC addresses */
+ Mvpp2PrsSramShiftSet (&Pe, 2 * MV_ETH_ALEN, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Mask all Ports */
+ Mvpp2PrsTcamPortMapSet (&Pe, 0);
+
+ /* Update shadow table */
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_MAC);
+ }
+
+ /* Update port Mask */
+ Mvpp2PrsTcamPortSet (&Pe, PortId, Add);
+
+ Mvpp2PrsHwWrite (Priv, &Pe);
+}
+
+/* Accept multicast */
+VOID
+Mvpp2PrsMacMultiSet (
+ IN MVPP2_SHARED *Priv,
+ IN INT32 PortId,
+ IN INT32 Index,
+ IN BOOLEAN Add
+ )
+{
+ MVPP2_PRS_ENTRY Pe;
+ UINT8 DaMc;
+
+ /*
+ * Ethernet multicast Address first Byte is
+ * 0x01 for IPv4 and 0x33 for IPv6
+ */
+ DaMc = (Index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
+
+ if (Priv->PrsShadow[Index].Valid) {
+ /* Entry exists - update port only */
+ Pe.Index = Index;
+ Mvpp2PrsHwRead (Priv, &Pe);
+ } else {
+ /* Entry doesn't exist - create new */
+ Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY));
+ Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_MAC);
+ Pe.Index = Index;
+
+ /* Continue - set next lookup */
+ Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_DSA);
+
+ /* Set result info bits */
+ Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L2_MCAST, MVPP2_PRS_RI_L2_CAST_MASK);
+
+ /* Update Tcam entry data first Byte */
+ Mvpp2PrsTcamDataByteSet (&Pe, 0, DaMc, 0xff);
+
+ /* Shift to ethertype */
+ Mvpp2PrsSramShiftSet (&Pe, 2 * MV_ETH_ALEN, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Mask all ports */
+ Mvpp2PrsTcamPortMapSet (&Pe, 0);
+
+ /* Update shadow table */
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_MAC);
+ }
+
+ /* Update port Mask */
+ Mvpp2PrsTcamPortSet (&Pe, PortId, Add);
+
+ Mvpp2PrsHwWrite (Priv, &Pe);
+}
+
+/* Set entry for dsa packets */
+STATIC
+VOID
+Mvpp2PrsDsaTagSet (
+ IN MVPP2_SHARED *Priv,
+ IN INT32 PortId,
+ IN BOOLEAN Add,
+ IN BOOLEAN Tagged,
+ IN BOOLEAN Extend
+ )
+{
+ MVPP2_PRS_ENTRY Pe;
+ INT32 Tid, Shift;
+
+ if (Extend) {
+ Tid = Tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
+ Shift = 8;
+ } else {
+ Tid = Tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
+ Shift = 4;
+ }
+
+ if (Priv->PrsShadow[Tid].Valid) {
+ /* Entry exists - update port only */
+ Pe.Index = Tid;
+ Mvpp2PrsHwRead (Priv, &Pe);
+ } else {
+ /* Entry doesn't exist - create new */
+ Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY));
+ Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_DSA);
+ Pe.Index = Tid;
+
+ /* Shift 4 bytes for a DSA tag or 8 bytes in case of an EDSA tag */
+ Mvpp2PrsSramShiftSet (&Pe, Shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Update shadow table */
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_DSA);
+
+ if (Tagged) {
+ /* Set Tagged bit in DSA tag */
+ Mvpp2PrsTcamDataByteSet (&Pe, 0, MVPP2_PRS_TCAM_DSA_TAGGED_BIT, MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
+
+ /* Clear all ai bits for next iteration */
+ Mvpp2PrsSramAiUpdate (&Pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+
+ /* If packet is Tagged continue check vlans */
+ Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_VLAN);
+ } else {
+ /* Set result info bits to 'no vlans' */
+ Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_VLAN_NONE, MVPP2_PRS_RI_VLAN_MASK);
+ Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_L2);
+ }
+
+ /* Mask all Ports */
+ Mvpp2PrsTcamPortMapSet (&Pe, 0);
+ }
+
+ /* Update port Mask */
+ Mvpp2PrsTcamPortSet (&Pe, PortId, Add);
+
+ Mvpp2PrsHwWrite (Priv, &Pe);
+}
+
+/* Set entry for dsa ethertype */
+STATIC
+VOID
+Mvpp2PrsDsaTagEthertypeSet (
+ IN MVPP2_SHARED *Priv,
+ IN INT32 PortId,
+ IN BOOLEAN Add,
+ IN BOOLEAN Tagged,
+ IN BOOLEAN Extend
+ )
+{
+ MVPP2_PRS_ENTRY Pe;
+ INT32 Tid, Shift, PortMask;
+
+ if (Extend) {
+ Tid = Tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED : MVPP2_PE_ETYPE_EDSA_UNTAGGED;
+ PortMask = 0;
+ Shift = 8;
+ } else {
+ Tid = Tagged ? MVPP2_PE_ETYPE_DSA_TAGGED : MVPP2_PE_ETYPE_DSA_UNTAGGED;
+ PortMask = MVPP2_PRS_PORT_MASK;
+ Shift = 4;
+ }
+
+ if (Priv->PrsShadow[Tid].Valid) {
+ /* Entry exists - update PortId only */
+ Pe.Index = Tid;
+ Mvpp2PrsHwRead (Priv, &Pe);
+ } else {
+ /* Entry doesn't exist - create new */
+ Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY));
+ Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_DSA);
+ Pe.Index = Tid;
+
+ /*
+ * Set ethertype at offset 0 for DSA and
+ * clear it at offset 2 - obtained from Marvell.
+ */
+ Mvpp2PrsMatchEtype (&Pe, 0, MV_ETH_P_EDSA);
+ Mvpp2PrsMatchEtype (&Pe, 2, 0);
+
+ Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_DSA_MASK,
+ MVPP2_PRS_RI_DSA_MASK);
+
+ /* Shift ethertype + 2 Byte reserved + tag */
+ Mvpp2PrsSramShiftSet (&Pe, 2 + MVPP2_ETH_TYPE_LEN + Shift,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Update shadow table */
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_DSA);
+
+ if (Tagged) {
+ /* Set Tagged bit in DSA tag */
+ Mvpp2PrsTcamDataByteSet (
+ &Pe,
+ MVPP2_ETH_TYPE_LEN + 2 + 3,
+ MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
+ MVPP2_PRS_TCAM_DSA_TAGGED_BIT
+ );
+
+ /* Clear all ai bits for next iteration */
+ Mvpp2PrsSramAiUpdate (&Pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+
+ /* If packet is Tagged continue check vlans */
+ Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_VLAN);
+ } else {
+ /* Set result info bits to 'no vlans' */
+ Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_VLAN_NONE, MVPP2_PRS_RI_VLAN_MASK);
+ Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_L2);
+ }
+
+ /* Mask/unmask all ports, depending on dsa type */
+ Mvpp2PrsTcamPortMapSet (&Pe, PortMask);
+ }
+
+ /* Update port Mask */
+ Mvpp2PrsTcamPortSet (&Pe, PortId, Add);
+
+ Mvpp2PrsHwWrite (Priv, &Pe);
+}
+
+/* Search for existing single/triple vlan entry */
+STATIC
+MVPP2_PRS_ENTRY *
+Mvpp2PrsVlanFind (
+ IN MVPP2_SHARED *Priv,
+ IN UINT16 Tpid,
+ IN INT32 Ai
+ )
+{
+ MVPP2_PRS_ENTRY *Pe;
+ INT32 Tid;
+
+ Pe = Mvpp2Alloc (sizeof (*Pe));
+ if (Pe == NULL) {
+ return NULL;
+ }
+
+ Mvpp2PrsTcamLuSet (Pe, MVPP2_PRS_LU_VLAN);
+
+ /* Go through all the entries with MVPP2_PRS_LU_VLAN */
+ for (Tid = MVPP2_PE_FIRST_FREE_TID; Tid <= MVPP2_PE_LAST_FREE_TID; Tid++) {
+ UINT32 RiBits, AiBits;
+ BOOLEAN match;
+
+ if (!Priv->PrsShadow[Tid].Valid || Priv->PrsShadow[Tid].Lu != MVPP2_PRS_LU_VLAN) {
+ continue;
+ }
+
+ Pe->Index = Tid;
+
+ Mvpp2PrsHwRead (Priv, Pe);
+ match = Mvpp2PrsTcamDataCmp (Pe, 0, Mvpp2SwapBytes16 (Tpid));
+ if (!match) {
+ continue;
+ }
+
+ /* Get vlan type */
+ RiBits = Mvpp2PrsSramRiGet (Pe);
+ RiBits &= MVPP2_PRS_RI_VLAN_MASK;
+
+ /* Get current Ai value from Tcam */
+ AiBits = Mvpp2PrsTcamAiGet (Pe);
+
+ /* Clear double vlan bit */
+ AiBits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
+
+ if (Ai != AiBits) {
+ continue;
+ }
+
+ if (RiBits == MVPP2_PRS_RI_VLAN_SINGLE || RiBits == MVPP2_PRS_RI_VLAN_TRIPLE) {
+ return Pe;
+ }
+ }
+
+ Mvpp2Free (Pe);
+
+ return NULL;
+}
+
+/* Add/update single/triple vlan entry */
+INT32
+Mvpp2PrsVlanAdd (
+ IN MVPP2_SHARED *Priv,
+ IN UINT16 Tpid,
+ IN INT32 Ai,
+ IN UINT32 PortMap
+ )
+{
+ MVPP2_PRS_ENTRY *Pe;
+ INT32 TidAux, Tid;
+ INT32 Ret = 0;
+
+ Pe = Mvpp2PrsVlanFind (Priv, Tpid, Ai);
+
+ if (!Pe) {
+ /* Create new Tcam entry */
+ Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID);
+ if (Tid < 0) {
+ return Tid;
+ }
+
+ Pe = Mvpp2Alloc (sizeof (*Pe));
+ if (Pe == NULL) {
+ return MVPP2_ENOMEM;
+ }
+
+ /* Get last double vlan Tid */
+ for (TidAux = MVPP2_PE_LAST_FREE_TID; TidAux >= MVPP2_PE_FIRST_FREE_TID; TidAux--) {
+ UINT32 RiBits;
+
+ if (!Priv->PrsShadow[TidAux].Valid || Priv->PrsShadow[TidAux].Lu != MVPP2_PRS_LU_VLAN) {
+ continue;
+ }
+
+ Pe->Index = TidAux;
+ Mvpp2PrsHwRead (Priv, Pe);
+ RiBits = Mvpp2PrsSramRiGet (Pe);
+ if ((RiBits & MVPP2_PRS_RI_VLAN_MASK) == MVPP2_PRS_RI_VLAN_DOUBLE) {
+ break;
+ }
+ }
+
+ if (Tid <= TidAux) {
+ Ret = MVPP2_EINVAL;
+ goto error;
+ }
+
+ Mvpp2Memset (Pe, 0, sizeof (MVPP2_PRS_ENTRY));
+ Mvpp2PrsTcamLuSet (Pe, MVPP2_PRS_LU_VLAN);
+ Pe->Index = Tid;
+
+ /* Set VLAN type's offset to 0 bytes - obtained from Marvell */
+ Mvpp2PrsMatchEtype (Pe, 0, Tpid);
+
+ Mvpp2PrsSramNextLuSet (Pe, MVPP2_PRS_LU_L2);
+
+ /* Shift 4 bytes - skip 1 vlan tag */
+ Mvpp2PrsSramShiftSet (Pe, MVPP2_VLAN_TAG_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Clear all Ai bits for next iteration */
+ Mvpp2PrsSramAiUpdate (Pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+
+ if (Ai == MVPP2_PRS_SINGLE_VLAN_AI) {
+ Mvpp2PrsSramRiUpdate (Pe, MVPP2_PRS_RI_VLAN_SINGLE, MVPP2_PRS_RI_VLAN_MASK);
+ } else {
+ Ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
+ Mvpp2PrsSramRiUpdate (Pe, MVPP2_PRS_RI_VLAN_TRIPLE, MVPP2_PRS_RI_VLAN_MASK);
+ }
+
+ Mvpp2PrsTcamAiUpdate (Pe, Ai, MVPP2_PRS_SRAM_AI_MASK);
+
+ Mvpp2PrsShadowSet (Priv, Pe->Index, MVPP2_PRS_LU_VLAN);
+ }
+
+ /* Update Ports' Mask */
+ Mvpp2PrsTcamPortMapSet (Pe, PortMap);
+ Mvpp2PrsHwWrite (Priv, Pe);
+
+error:
+ Mvpp2Free (Pe);
+
+ return Ret;
+}
+
+/* Get first free double vlan ai number */
+INT32
+Mvpp2PrsDoubleVlanAiFreeGet (
+ IN MVPP2_SHARED *Priv
+ )
+{
+ INT32 i;
+
+ for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
+ if (!Priv->PrsDoubleVlans[i]) {
+ return i;
+ }
+ }
+
+ return MVPP2_EINVAL;
+}
+
+/* Search for existing double vlan entry */
+MVPP2_PRS_ENTRY *
+Mvpp2PrsDoubleVlanFind (
+ IN MVPP2_SHARED *Priv,
+ IN UINT16 Tpid1,
+ IN UINT16 Tpid2
+ )
+{
+ MVPP2_PRS_ENTRY *Pe;
+ INT32 Tid;
+
+ Pe = Mvpp2Alloc (sizeof (*Pe));
+ if (Pe == NULL) {
+ return NULL;
+ }
+
+ Mvpp2PrsTcamLuSet (Pe, MVPP2_PRS_LU_VLAN);
+
+ /* Go through all the entries with MVPP2_PRS_LU_VLAN */
+ for (Tid = MVPP2_PE_FIRST_FREE_TID; Tid <= MVPP2_PE_LAST_FREE_TID; Tid++) {
+ UINT32 RiMask;
+ BOOLEAN match;
+
+ if (!Priv->PrsShadow[Tid].Valid || Priv->PrsShadow[Tid].Lu != MVPP2_PRS_LU_VLAN) {
+ continue;
+ }
+
+ Pe->Index = Tid;
+ Mvpp2PrsHwRead (Priv, Pe);
+
+ match = Mvpp2PrsTcamDataCmp (Pe, 0, Mvpp2SwapBytes16 (Tpid1)) &&
+ Mvpp2PrsTcamDataCmp (Pe, 4, Mvpp2SwapBytes16 (Tpid2));
+
+ if (!match) {
+ continue;
+ }
+
+ RiMask = Mvpp2PrsSramRiGet (Pe) & MVPP2_PRS_RI_VLAN_MASK;
+ if (RiMask == MVPP2_PRS_RI_VLAN_DOUBLE) {
+ return Pe;
+ }
+ }
+
+ Mvpp2Free (Pe);
+
+ return NULL;
+}
+
+/* Add or update double vlan entry */
+INT32
+Mvpp2PrsDoubleVlanAdd (
+ IN MVPP2_SHARED *Priv,
+ IN UINT16 Tpid1,
+ IN UINT16 Tpid2,
+ IN UINT32 PortMap
+ )
+{
+ MVPP2_PRS_ENTRY *Pe;
+ INT32 TidAux, Tid, Ai, Ret = 0;
+
+ Pe = Mvpp2PrsDoubleVlanFind (Priv, Tpid1, Tpid2);
+
+ if (!Pe) {
+ /* Create new Tcam entry */
+ Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID);
+ if (Tid < 0) {
+ return Tid;
+ }
+
+ Pe = Mvpp2Alloc (sizeof (*Pe));
+ if (Pe == NULL) {
+ return MVPP2_ENOMEM;
+ }
+
+ /* Set Ai value for new double vlan entry */
+ Ai = Mvpp2PrsDoubleVlanAiFreeGet (Priv);
+ if (Ai < 0) {
+ Ret = Ai;
+ goto error;
+ }
+
+ /* Get first single/triple vlan Tid */
+ for (TidAux = MVPP2_PE_FIRST_FREE_TID; TidAux <= MVPP2_PE_LAST_FREE_TID; TidAux++) {
+ UINT32 RiBits;
+
+ if (!Priv->PrsShadow[TidAux].Valid || Priv->PrsShadow[TidAux].Lu != MVPP2_PRS_LU_VLAN) {
+ continue;
+ }
+
+ Pe->Index = TidAux;
+ Mvpp2PrsHwRead (Priv, Pe);
+ RiBits = Mvpp2PrsSramRiGet (Pe);
+ RiBits &= MVPP2_PRS_RI_VLAN_MASK;
+
+ if (RiBits == MVPP2_PRS_RI_VLAN_SINGLE || RiBits == MVPP2_PRS_RI_VLAN_TRIPLE) {
+ break;
+ }
+ }
+
+ if (Tid >= TidAux) {
+ Ret = MVPP2_ERANGE;
+ goto error;
+ }
+
+ Mvpp2Memset (Pe, 0, sizeof (MVPP2_PRS_ENTRY));
+ Mvpp2PrsTcamLuSet (Pe, MVPP2_PRS_LU_VLAN);
+ Pe->Index = Tid;
+
+ Priv->PrsDoubleVlans[Ai] = TRUE;
+
+ /* Set both VLAN types' offsets to 0 and 4 bytes - obtained from Marvell */
+ Mvpp2PrsMatchEtype (Pe, 0, Tpid1);
+ Mvpp2PrsMatchEtype (Pe, 4, Tpid2);
+
+ Mvpp2PrsSramNextLuSet (Pe, MVPP2_PRS_LU_VLAN);
+
+ /* Shift 8 bytes - skip 2 vlan tags */
+ Mvpp2PrsSramShiftSet (Pe, 2 * MVPP2_VLAN_TAG_LEN, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ Mvpp2PrsSramRiUpdate (Pe, MVPP2_PRS_RI_VLAN_DOUBLE, MVPP2_PRS_RI_VLAN_MASK);
+ Mvpp2PrsSramAiUpdate (Pe, Ai | MVPP2_PRS_DBL_VLAN_AI_BIT, MVPP2_PRS_SRAM_AI_MASK);
+
+ Mvpp2PrsShadowSet (Priv, Pe->Index, MVPP2_PRS_LU_VLAN);
+ }
+
+ /* Update Ports' Mask */
+ Mvpp2PrsTcamPortMapSet (Pe, PortMap);
+ Mvpp2PrsHwWrite (Priv, Pe);
+
+error:
+ Mvpp2Free (Pe);
+ return Ret;
+}
+
+/* IPv4 header parsing for fragmentation and L4 Offset */
+STATIC
+INT32
+Mvpp2PrsIp4Proto (
+ IN MVPP2_SHARED *Priv,
+ IN UINT16 Proto,
+ IN UINT32 Ri,
+ IN UINT32 RiMask
+ )
+{
+ MVPP2_PRS_ENTRY Pe;
+ INT32 Tid;
+
+ if ((Proto != MV_IPPR_TCP) && (Proto != MV_IPPR_UDP) && (Proto != MV_IPPR_IGMP)) {
+ return MVPP2_EINVAL;
+ }
+
+ /* Fragmented packet */
+ Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID);
+ if (Tid < 0) {
+ return Tid;
+ }
+
+ Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY));
+ Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_IP4);
+ Pe.Index = Tid;
+
+ /* Set next Lu to IPv4 - 12 bytes shift */
+ Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_IP4);
+ Mvpp2PrsSramShiftSet (&Pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Set L4 Offset to the IPv4 header size minus 4 bytes, relative to the current position */
+ Mvpp2PrsSramOffsetSet (
+ &Pe,
+ MVPP2_PRS_SRAM_UDF_TYPE_L4,
+ sizeof (Mvpp2Iphdr) - 4,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD
+ );
+
+ Mvpp2PrsSramAiUpdate (&Pe, MVPP2_PRS_IPV4_DIP_AI_BIT, MVPP2_PRS_IPV4_DIP_AI_BIT);
+ Mvpp2PrsSramRiUpdate (&Pe, Ri | MVPP2_PRS_RI_IP_FRAG_MASK, RiMask | MVPP2_PRS_RI_IP_FRAG_MASK);
+
+ Mvpp2PrsTcamDataByteSet (&Pe, 5, Proto, MVPP2_PRS_TCAM_PROTO_MASK);
+ Mvpp2PrsTcamAiUpdate (&Pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+
+ /* Unmask all Ports */
+ Mvpp2PrsTcamPortMapSet (&Pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_IP4);
+ Mvpp2PrsHwWrite (Priv, &Pe);
+
+ /* Not fragmented packet */
+ Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID);
+ if (Tid < 0) {
+ return Tid;
+ }
+
+ Pe.Index = Tid;
+
+ /* Clear Ri before updating */
+ Pe.Sram.Word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+ Pe.Sram.Word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+ Mvpp2PrsSramRiUpdate (&Pe, Ri, RiMask);
+
+ Mvpp2PrsTcamDataByteSet (&Pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
+ Mvpp2PrsTcamDataByteSet (&Pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
+
+ /* Update shadow table and hw entry */
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_IP4);
+ Mvpp2PrsHwWrite (Priv, &Pe);
+
+ return 0;
+}
+
+/* IPv4 L3 multicast or broadcast */
+STATIC
+INT32
+Mvpp2PrsIp4Cast (
+ IN MVPP2_SHARED *Priv,
+ IN UINT16 L3Cast
+ )
+{
+ MVPP2_PRS_ENTRY Pe;
+ INT32 Mask, Tid;
+
+ Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID);
+ if (Tid < 0) {
+ return Tid;
+ }
+
+ Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY));
+ Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_IP4);
+ Pe.Index = Tid;
+
+ switch (L3Cast) {
+ case MVPP2_PRS_L3_MULTI_CAST:
+ Mvpp2PrsTcamDataByteSet (&Pe, 0, MVPP2_PRS_IPV4_MC, MVPP2_PRS_IPV4_MC_MASK);
+ Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L3_MCAST, MVPP2_PRS_RI_L3_ADDR_MASK);
+ break;
+ case MVPP2_PRS_L3_BROAD_CAST:
+ Mask = MVPP2_PRS_IPV4_BC_MASK;
+ Mvpp2PrsTcamDataByteSet (&Pe, 0, Mask, Mask);
+ Mvpp2PrsTcamDataByteSet (&Pe, 1, Mask, Mask);
+ Mvpp2PrsTcamDataByteSet (&Pe, 2, Mask, Mask);
+ Mvpp2PrsTcamDataByteSet (&Pe, 3, Mask, Mask);
+ Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L3_BCAST, MVPP2_PRS_RI_L3_ADDR_MASK);
+ break;
+ default:
+ return MVPP2_EINVAL;
+ }
+
+ /* Finished: go to Flowid generation */
+ Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_FLOWS);
+ Mvpp2PrsSramBitsSet (&Pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+
+ Mvpp2PrsTcamAiUpdate (&Pe, MVPP2_PRS_IPV4_DIP_AI_BIT, MVPP2_PRS_IPV4_DIP_AI_BIT);
+
+ /* Unmask all Ports */
+ Mvpp2PrsTcamPortMapSet (&Pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_IP4);
+ Mvpp2PrsHwWrite (Priv, &Pe);
+
+ return 0;
+}
+
+/* Set entries for protocols over IPv6 */
+STATIC
+INT32
+Mvpp2PrsIp6Proto (
+ IN MVPP2_SHARED *Priv,
+ IN UINT16 Proto,
+ IN UINT32 Ri,
+ IN UINT32 RiMask
+ )
+{
+ MVPP2_PRS_ENTRY Pe;
+ INT32 Tid;
+
+ if ((Proto != MV_IPPR_TCP) && (Proto != MV_IPPR_UDP) &&
+ (Proto != MV_IPPR_ICMPV6) && (Proto != MV_IPPR_IPIP))
+ {
+ return MVPP2_EINVAL;
+ }
+
+ Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID);
+ if (Tid < 0) {
+ return Tid;
+ }
+
+ Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY));
+ Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_IP6);
+ Pe.Index = Tid;
+
+ /* Finished: go to Flowid generation */
+ Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_FLOWS);
+ Mvpp2PrsSramBitsSet (&Pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ Mvpp2PrsSramRiUpdate (&Pe, Ri, RiMask);
+
+ /* Set L4 Offset to the IPv6 header size minus 6 bytes, relative to the current position */
+ Mvpp2PrsSramOffsetSet (
+ &Pe,
+ MVPP2_PRS_SRAM_UDF_TYPE_L4,
+ sizeof (Mvpp2Ipv6hdr) - 6,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD
+ );
+
+ Mvpp2PrsTcamDataByteSet (&Pe, 0, Proto, MVPP2_PRS_TCAM_PROTO_MASK);
+ Mvpp2PrsTcamAiUpdate (&Pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+
+ /* Unmask all Ports */
+ Mvpp2PrsTcamPortMapSet (&Pe, MVPP2_PRS_PORT_MASK);
+
+ /* Write HW */
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_IP6);
+ Mvpp2PrsHwWrite (Priv, &Pe);
+
+ return 0;
+}
+
+/* IPv6 L3 multicast entry */
+STATIC
+INT32
+Mvpp2PrsIp6Cast (
+ IN MVPP2_SHARED *Priv,
+ IN UINT16 L3Cast
+ )
+{
+ MVPP2_PRS_ENTRY Pe;
+ INT32 Tid;
+
+ if (L3Cast != MVPP2_PRS_L3_MULTI_CAST) {
+ return MVPP2_EINVAL;
+ }
+
+ Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID);
+ if (Tid < 0) {
+ return Tid;
+ }
+
+ Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY));
+ Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_IP6);
+ Pe.Index = Tid;
+
+ /* Finished: go to Flowid generation */
+ Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_IP6);
+ Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L3_MCAST, MVPP2_PRS_RI_L3_ADDR_MASK);
+ Mvpp2PrsSramAiUpdate (&Pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+
+ /* Shift back to IPv6 by 18 bytes - byte count provided by Marvell */
+ Mvpp2PrsSramShiftSet (&Pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ Mvpp2PrsTcamDataByteSet (&Pe, 0, MVPP2_PRS_IPV6_MC, MVPP2_PRS_IPV6_MC_MASK);
+ Mvpp2PrsTcamAiUpdate (&Pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+
+ /* Unmask all Ports */
+ Mvpp2PrsTcamPortMapSet (&Pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_IP6);
+ Mvpp2PrsHwWrite (Priv, &Pe);
+
+ return 0;
+}
+
+/* Parser per-Port initialization */
+STATIC
+VOID
+Mvpp2PrsHwPortInit (
+ IN MVPP2_SHARED *Priv,
+ IN INT32 PortId,
+ IN INT32 LuFirst,
+ IN INT32 LuMax,
+ IN INT32 Offset
+ )
+{
+ UINT32 Val;
+
+ /* Set lookup ID */
+ Val = Mvpp2Read (Priv, MVPP2_PRS_INIT_LOOKUP_REG);
+ Val &= ~MVPP2_PRS_PORT_LU_MASK (PortId);
+ Val |= MVPP2_PRS_PORT_LU_VAL (PortId, LuFirst);
+ Mvpp2Write (Priv, MVPP2_PRS_INIT_LOOKUP_REG, Val);
+
+ /* Set maximum number of loops for packet received from PortId */
+ Val = Mvpp2Read (Priv, MVPP2_PRS_MAX_LOOP_REG(PortId));
+ Val &= ~MVPP2_PRS_MAX_LOOP_MASK (PortId);
+ Val |= MVPP2_PRS_MAX_LOOP_VAL (PortId, LuMax);
+ Mvpp2Write (Priv, MVPP2_PRS_MAX_LOOP_REG(PortId), Val);
+
+ /*
+ * Set initial Offset for packet header extraction for the first
+ * searching loop
+ */
+ Val = Mvpp2Read (Priv, MVPP2_PRS_INIT_OFFS_REG(PortId));
+ Val &= ~MVPP2_PRS_INIT_OFF_MASK (PortId);
+ Val |= MVPP2_PRS_INIT_OFF_VAL (PortId, Offset);
+ Mvpp2Write (Priv, MVPP2_PRS_INIT_OFFS_REG(PortId), Val);
+}
+
+/* Default Flow entries initialization for all Ports */
+STATIC
+VOID
+Mvpp2PrsDefFlowInit (
+ IN MVPP2_SHARED *Priv
+ )
+{
+ MVPP2_PRS_ENTRY Pe;
+ INT32 PortId;
+
+ for (PortId = 0; PortId < MVPP2_MAX_PORTS; PortId++) {
+ Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY));
+ Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_FLOWS);
+ Pe.Index = MVPP2_PE_FIRST_DEFAULT_FLOW - PortId;
+
+ /* Mask all Ports */
+ Mvpp2PrsTcamPortMapSet (&Pe, 0);
+
+ /* Set Flow ID */
+ Mvpp2PrsSramAiUpdate (&Pe, PortId, MVPP2_PRS_FLOW_ID_MASK);
+ Mvpp2PrsSramBitsSet (&Pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
+
+ /* Update shadow table and hw entry */
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_FLOWS);
+ Mvpp2PrsHwWrite (Priv, &Pe);
+ }
+}
+
+/* Set default entry for Marvell Header field */
+STATIC
+VOID
+Mvpp2PrsMhInit (
+ IN MVPP2_SHARED *Priv
+ )
+{
+ MVPP2_PRS_ENTRY Pe;
+
+ Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY));
+
+ Pe.Index = MVPP2_PE_MH_DEFAULT;
+ Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_MH);
+ Mvpp2PrsSramShiftSet (&Pe, MVPP2_MH_SIZE, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_MAC);
+
+ /* Unmask all Ports */
+ Mvpp2PrsTcamPortMapSet (&Pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_MH);
+ Mvpp2PrsHwWrite (Priv, &Pe);
+}
+
+/*
+ * Set default entries (placeholders) for promiscuous, non-promiscuous and
+ * multicast MAC Addresses
+ */
+STATIC
+VOID
+Mvpp2PrsMacInit (
+ IN MVPP2_SHARED *Priv
+ )
+{
+ MVPP2_PRS_ENTRY Pe;
+
+ Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY));
+
+ /* Non-promiscuous mode for all Ports - DROP unknown packets */
+ Pe.Index = MVPP2_PE_MAC_NON_PROMISCUOUS;
+ Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_MAC);
+
+ Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_DROP_MASK, MVPP2_PRS_RI_DROP_MASK);
+ Mvpp2PrsSramBitsSet (&Pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_FLOWS);
+
+ /* Unmask all Ports */
+ Mvpp2PrsTcamPortMapSet (&Pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_MAC);
+ Mvpp2PrsHwWrite (Priv, &Pe);
+
+ /* Place holders only - no Ports */
+ Mvpp2PrsMacDropAllSet (Priv, 0, FALSE);
+ Mvpp2PrsMacPromiscSet (Priv, 0, FALSE);
+ Mvpp2PrsMacMultiSet (Priv, MVPP2_PE_MAC_MC_ALL, 0, FALSE);
+ Mvpp2PrsMacMultiSet (Priv, MVPP2_PE_MAC_MC_IP6, 0, FALSE);
+}
+
+/* Set default entries for various types of dsa packets */
+STATIC
+VOID
+Mvpp2PrsDsaInit (
+ IN MVPP2_SHARED *Priv
+ )
+{
+ MVPP2_PRS_ENTRY Pe;
+
+ /* Non-tagged EDSA entry - placeholder */
+ Mvpp2PrsDsaTagSet (Priv, 0, FALSE, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+
+ /* Tagged EDSA entry - placeholder */
+ Mvpp2PrsDsaTagSet (Priv, 0, FALSE, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+
+ /* Non-tagged DSA entry - placeholder */
+ Mvpp2PrsDsaTagSet (Priv, 0, FALSE, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+
+ /* Tagged DSA entry - placeholder */
+ Mvpp2PrsDsaTagSet (Priv, 0, FALSE, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+
+ /* Non-tagged EDSA ethertype entry - placeholder */
+ Mvpp2PrsDsaTagEthertypeSet (Priv, 0, FALSE, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+
+ /* Tagged EDSA ethertype entry - placeholder */
+ Mvpp2PrsDsaTagEthertypeSet (Priv, 0, FALSE, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+
+ /* Non-tagged DSA ethertype entry */
+ Mvpp2PrsDsaTagEthertypeSet (Priv, 0, TRUE, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+
+ /* Tagged DSA ethertype entry */
+ Mvpp2PrsDsaTagEthertypeSet (Priv, 0, TRUE, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+
+ /* Set default entry, in case a DSA or EDSA tag is not found */
+ Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY));
+ Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_DSA);
+ Pe.Index = MVPP2_PE_DSA_DEFAULT;
+ Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_VLAN);
+
+ /* Shift 0 bytes */
+ Mvpp2PrsSramShiftSet (&Pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_MAC);
+
+ /* Clear all Sram ai bits for next iteration */
+ Mvpp2PrsSramAiUpdate (&Pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+
+ /* Unmask all Ports */
+ Mvpp2PrsTcamPortMapSet (&Pe, MVPP2_PRS_PORT_MASK);
+
+ Mvpp2PrsHwWrite (Priv, &Pe);
+}
+
+/* Match basic ethertypes */
+STATIC
+INT32
+Mvpp2PrsEtypeInit (
+ IN MVPP2_SHARED *Priv
+ )
+{
+ MVPP2_PRS_ENTRY Pe;
+ INT32 Tid;
+
+ /* Ethertype: PPPoE */
+ Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID);
+ if (Tid < 0) {
+ return Tid;
+ }
+
+ Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY));
+ Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_L2);
+ Pe.Index = Tid;
+
+ /* Set PPPoE type offset to 0 - obtained from Marvell */
+ Mvpp2PrsMatchEtype (&Pe, 0, MV_ETH_P_PPP_SES);
+
+ Mvpp2PrsSramShiftSet (&Pe, MVPP2_PPPOE_HDR_SIZE, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_PPPOE);
+ Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_PPPOE_MASK, MVPP2_PRS_RI_PPPOE_MASK);
+
+ /* Update shadow table and hw entry */
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_L2);
+ Priv->PrsShadow[Pe.Index].Udf = MVPP2_PRS_UDF_L2_DEF;
+ Priv->PrsShadow[Pe.Index].Finish = FALSE;
+ Mvpp2PrsShadowRiSet (Priv, Pe.Index, MVPP2_PRS_RI_PPPOE_MASK, MVPP2_PRS_RI_PPPOE_MASK);
+ Mvpp2PrsHwWrite (Priv, &Pe);
+
+ /* Ethertype: ARP */
+ Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID);
+ if (Tid < 0) {
+ return Tid;
+ }
+
+ Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY));
+ Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_L2);
+ Pe.Index = Tid;
+
+ /* Set ARP type offset to 0 - obtained from Marvell */
+ Mvpp2PrsMatchEtype (&Pe, 0, MV_ETH_P_ARP);
+
+ /* Generate Flow in the next iteration */
+ Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_FLOWS);
+ Mvpp2PrsSramBitsSet (&Pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L3_ARP, MVPP2_PRS_RI_L3_PROTO_MASK);
+
+ /* Set L3 Offset */
+ Mvpp2PrsSramOffsetSet (&Pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* Update shadow table and hw entry */
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_L2);
+ Priv->PrsShadow[Pe.Index].Udf = MVPP2_PRS_UDF_L2_DEF;
+ Priv->PrsShadow[Pe.Index].Finish = TRUE;
+ Mvpp2PrsShadowRiSet (Priv, Pe.Index, MVPP2_PRS_RI_L3_ARP, MVPP2_PRS_RI_L3_PROTO_MASK);
+ Mvpp2PrsHwWrite (Priv, &Pe);
+
+ /* Ethertype: LBTD */
+ Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID);
+ if (Tid < 0) {
+ return Tid;
+ }
+
+ Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY));
+ Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_L2);
+ Pe.Index = Tid;
+
+ /* Set LBTD type offset to 0 - obtained from Marvell */
+ Mvpp2PrsMatchEtype (&Pe, 0, MVPP2_IP_LBDT_TYPE);
+
+ /* Generate Flow in the next iteration */
+ Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_FLOWS);
+ Mvpp2PrsSramBitsSet (&Pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ Mvpp2PrsSramRiUpdate (
+ &Pe,
+ MVPP2_PRS_RI_CPU_CODE_RX_SPEC | MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+ MVPP2_PRS_RI_CPU_CODE_MASK | MVPP2_PRS_RI_UDF3_MASK
+ );
+
+ /* Set L3 Offset */
+ Mvpp2PrsSramOffsetSet (&Pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* Update shadow table and hw entry */
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_L2);
+ Priv->PrsShadow[Pe.Index].Udf = MVPP2_PRS_UDF_L2_DEF;
+ Priv->PrsShadow[Pe.Index].Finish = TRUE;
+ Mvpp2PrsShadowRiSet (
+ Priv,
+ Pe.Index,
+ MVPP2_PRS_RI_CPU_CODE_RX_SPEC | MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+ MVPP2_PRS_RI_CPU_CODE_MASK | MVPP2_PRS_RI_UDF3_MASK
+ );
+
+ Mvpp2PrsHwWrite (Priv, &Pe);
+
+ /* Ethertype: IPv4 without options */
+ Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID);
+ if (Tid < 0) {
+ return Tid;
+ }
+
+ Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY));
+ Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_L2);
+ Pe.Index = Tid;
+
+ /* Set IPv4 type offset to 0 - obtained from Marvell */
+ Mvpp2PrsMatchEtype (&Pe, 0, MV_ETH_P_IP);
+ Mvpp2PrsTcamDataByteSet (
+ &Pe,
+ MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
+ MVPP2_PRS_IPV4_HEAD_MASK | MVPP2_PRS_IPV4_IHL_MASK
+ );
+
+ Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_IP4);
+ Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L3_IP4, MVPP2_PRS_RI_L3_PROTO_MASK);
+
+ /* Skip EthType + 4 bytes of IP header */
+ Mvpp2PrsSramShiftSet (&Pe, MVPP2_ETH_TYPE_LEN + 4, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Set L3 Offset */
+ Mvpp2PrsSramOffsetSet (&Pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* Update shadow table and hw entry */
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_L2);
+ Priv->PrsShadow[Pe.Index].Udf = MVPP2_PRS_UDF_L2_DEF;
+ Priv->PrsShadow[Pe.Index].Finish = FALSE;
+ Mvpp2PrsShadowRiSet (Priv, Pe.Index, MVPP2_PRS_RI_L3_IP4, MVPP2_PRS_RI_L3_PROTO_MASK);
+ Mvpp2PrsHwWrite (Priv, &Pe);
+
+ /* Ethertype: IPv4 with options */
+ Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID);
+ if (Tid < 0) {
+ return Tid;
+ }
+
+ Pe.Index = Tid;
+
+ /* Clear Tcam data before updating */
+ Pe.Tcam.Byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
+ Pe.Tcam.Byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
+
+ Mvpp2PrsTcamDataByteSet (&Pe, MVPP2_ETH_TYPE_LEN, MVPP2_PRS_IPV4_HEAD, MVPP2_PRS_IPV4_HEAD_MASK);
+
+ /* Clear Ri before updating */
+ Pe.Sram.Word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+ Pe.Sram.Word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+ Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L3_IP4_OPT, MVPP2_PRS_RI_L3_PROTO_MASK);
+
+ /* Update shadow table and hw entry */
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_L2);
+ Priv->PrsShadow[Pe.Index].Udf = MVPP2_PRS_UDF_L2_DEF;
+ Priv->PrsShadow[Pe.Index].Finish = FALSE;
+ Mvpp2PrsShadowRiSet (Priv, Pe.Index, MVPP2_PRS_RI_L3_IP4_OPT, MVPP2_PRS_RI_L3_PROTO_MASK);
+ Mvpp2PrsHwWrite (Priv, &Pe);
+
+ /* Ethertype: IPv6 without options */
+ Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID);
+ if (Tid < 0) {
+ return Tid;
+ }
+
+ Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY));
+ Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_L2);
+ Pe.Index = Tid;
+
+ /* Set IPv6 type offset to 0 - obtained from Marvell */
+ Mvpp2PrsMatchEtype (&Pe, 0, MV_ETH_P_IPV6);
+
+ /* Skip DIP of IPV6 header - value provided by Marvell */
+ Mvpp2PrsSramShiftSet (&Pe, MVPP2_ETH_TYPE_LEN + 8 + MVPP2_MAX_L3_ADDR_SIZE, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_IP6);
+ Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L3_IP6, MVPP2_PRS_RI_L3_PROTO_MASK);
+
+ /* Set L3 Offset */
+ Mvpp2PrsSramOffsetSet (&Pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* Update shadow table and hw entry */
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_L2);
+ Priv->PrsShadow[Pe.Index].Udf = MVPP2_PRS_UDF_L2_DEF;
+ Priv->PrsShadow[Pe.Index].Finish = FALSE;
+ Mvpp2PrsShadowRiSet (Priv, Pe.Index, MVPP2_PRS_RI_L3_IP6, MVPP2_PRS_RI_L3_PROTO_MASK);
+ Mvpp2PrsHwWrite (Priv, &Pe);
+
+ /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
+ Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY));
+ Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_L2);
+ Pe.Index = MVPP2_PE_ETH_TYPE_UN;
+
+ /* Unmask all Ports */
+ Mvpp2PrsTcamPortMapSet (&Pe, MVPP2_PRS_PORT_MASK);
+
+ /* Generate Flow in the next iteration */
+ Mvpp2PrsSramBitsSet (&Pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_FLOWS);
+ Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L3_UN, MVPP2_PRS_RI_L3_PROTO_MASK);
+
+ /* Set L3 Offset even if it's an unknown L3 */
+ Mvpp2PrsSramOffsetSet (&Pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* Update shadow table and hw entry */
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_L2);
+ Priv->PrsShadow[Pe.Index].Udf = MVPP2_PRS_UDF_L2_DEF;
+ Priv->PrsShadow[Pe.Index].Finish = TRUE;
+ Mvpp2PrsShadowRiSet (Priv, Pe.Index, MVPP2_PRS_RI_L3_UN, MVPP2_PRS_RI_L3_PROTO_MASK);
+ Mvpp2PrsHwWrite (Priv, &Pe);
+
+ return 0;
+}
+
+/*
+ * Configure vlan entries and detect up to 2 successive VLAN tags.
+ * Possible options:
+ * 0x8100, 0x88A8
+ * 0x8100, 0x8100
+ * 0x88A8
+ * 0x8100
+ */
+STATIC
+INT32
+Mvpp2PrsVlanInit (
+ IN MVPP2_SHARED *Priv
+ )
+{
+ MVPP2_PRS_ENTRY Pe;
+ INT32 Err;
+
+ /* Double VLAN: 0x8100, 0x88A8 */
+ Err = Mvpp2PrsDoubleVlanAdd (Priv, MV_ETH_P_8021Q, MV_ETH_P_8021AD, MVPP2_PRS_PORT_MASK);
+ if (Err != 0) {
+ return Err;
+ }
+
+ /* Double VLAN: 0x8100, 0x8100 */
+ Err = Mvpp2PrsDoubleVlanAdd (Priv, MV_ETH_P_8021Q, MV_ETH_P_8021Q, MVPP2_PRS_PORT_MASK);
+ if (Err != 0) {
+ return Err;
+ }
+
+ /* Single VLAN: 0x88a8 */
+ Err = Mvpp2PrsVlanAdd (Priv, MV_ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI, MVPP2_PRS_PORT_MASK);
+ if (Err != 0) {
+ return Err;
+ }
+
+ /* Single VLAN: 0x8100 */
+ Err = Mvpp2PrsVlanAdd (Priv, MV_ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI, MVPP2_PRS_PORT_MASK);
+ if (Err != 0) {
+ return Err;
+ }
+
+ /* Set default double vlan entry */
+ Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY));
+ Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_VLAN);
+ Pe.Index = MVPP2_PE_VLAN_DBL;
+
+ Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_L2);
+
+ /* Clear ai for next iterations */
+ Mvpp2PrsSramAiUpdate (&Pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+ Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_VLAN_DOUBLE, MVPP2_PRS_RI_VLAN_MASK);
+
+ Mvpp2PrsTcamAiUpdate (&Pe, MVPP2_PRS_DBL_VLAN_AI_BIT, MVPP2_PRS_DBL_VLAN_AI_BIT);
+
+ /* Unmask all Ports */
+ Mvpp2PrsTcamPortMapSet (&Pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_VLAN);
+ Mvpp2PrsHwWrite (Priv, &Pe);
+
+ /* Set default vlan none entry */
+ Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY));
+ Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_VLAN);
+ Pe.Index = MVPP2_PE_VLAN_NONE;
+
+ Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_L2);
+ Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_VLAN_NONE, MVPP2_PRS_RI_VLAN_MASK);
+
+ /* Unmask all Ports */
+ Mvpp2PrsTcamPortMapSet (&Pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_VLAN);
+ Mvpp2PrsHwWrite (Priv, &Pe);
+
+ return 0;
+}
+
+/* Set entries for PPPoE ethertype */
+STATIC
+INT32
+Mvpp2PrsPppoeInit (
+ IN MVPP2_SHARED *Priv
+ )
+{
+ MVPP2_PRS_ENTRY Pe;
+ INT32 Tid;
+
+ /* IPv4 over PPPoE with options */
+ Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID);
+ if (Tid < 0) {
+ return Tid;
+ }
+
+ Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY));
+ Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_PPPOE);
+ Pe.Index = Tid;
+
+ /* Set IPv4 over PPPoE type offset to 0 - obtained from Marvell */
+ Mvpp2PrsMatchEtype (&Pe, 0, MV_PPP_IP);
+
+ Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_IP4);
+ Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L3_IP4_OPT, MVPP2_PRS_RI_L3_PROTO_MASK);
+
+ /* Skip EthType + 4 bytes of IP header */
+ Mvpp2PrsSramShiftSet (&Pe, MVPP2_ETH_TYPE_LEN + 4, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Set L3 Offset */
+ Mvpp2PrsSramOffsetSet (&Pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* Update shadow table and hw entry */
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_PPPOE);
+ Mvpp2PrsHwWrite (Priv, &Pe);
+
+ /* IPv4 over PPPoE without options */
+ Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID);
+ if (Tid < 0) {
+ return Tid;
+ }
+
+ Pe.Index = Tid;
+
+ Mvpp2PrsTcamDataByteSet (
+ &Pe,
+ MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
+ MVPP2_PRS_IPV4_HEAD_MASK | MVPP2_PRS_IPV4_IHL_MASK
+ );
+
+ /* Clear Ri before updating */
+ Pe.Sram.Word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+ Pe.Sram.Word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+ Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L3_IP4, MVPP2_PRS_RI_L3_PROTO_MASK);
+
+ /* Update shadow table and hw entry */
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_PPPOE);
+ Mvpp2PrsHwWrite (Priv, &Pe);
+
+ /* IPv6 over PPPoE */
+ Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID);
+ if (Tid < 0) {
+ return Tid;
+ }
+
+ Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY));
+ Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_PPPOE);
+ Pe.Index = Tid;
+
+ /* Set IPv6 over PPPoE type offset to 0 - obtained from Marvell */
+ Mvpp2PrsMatchEtype (&Pe, 0, MV_PPP_IPV6);
+
+ Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_IP6);
+ Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L3_IP6, MVPP2_PRS_RI_L3_PROTO_MASK);
+
+ /* Skip EthType + 4 bytes of IPv6 header */
+ Mvpp2PrsSramShiftSet (&Pe, MVPP2_ETH_TYPE_LEN + 4, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Set L3 Offset */
+ Mvpp2PrsSramOffsetSet (&Pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* Update shadow table and hw entry */
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_PPPOE);
+ Mvpp2PrsHwWrite (Priv, &Pe);
+
+ /* Non-IP over PPPoE */
+ Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID);
+ if (Tid < 0) {
+ return Tid;
+ }
+
+ Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY));
+ Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_PPPOE);
+ Pe.Index = Tid;
+
+ Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L3_UN, MVPP2_PRS_RI_L3_PROTO_MASK);
+
+ /* Finished: go to Flowid generation */
+ Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_FLOWS);
+ Mvpp2PrsSramBitsSet (&Pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+
+ /* Set L3 Offset even if it's unknown L3 */
+ Mvpp2PrsSramOffsetSet (&Pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+ /* Update shadow table and hw entry */
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_PPPOE);
+ Mvpp2PrsHwWrite (Priv, &Pe);
+
+ return 0;
+}
+
+/* Initialize entries for IPv4 */
+STATIC
+INT32
+Mvpp2PrsIp4Init (
+ IN MVPP2_SHARED *Priv
+ )
+{
+ MVPP2_PRS_ENTRY Pe;
+ INT32 Err;
+
+ /* Set entries for TCP, UDP and IGMP over IPv4 */
+ Err = Mvpp2PrsIp4Proto (Priv, MV_IPPR_TCP, MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_RI_L4_PROTO_MASK);
+ if (Err != 0) {
+ return Err;
+ }
+
+ Err = Mvpp2PrsIp4Proto (Priv, MV_IPPR_UDP, MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_RI_L4_PROTO_MASK);
+ if (Err != 0) {
+ return Err;
+ }
+
+ Err = Mvpp2PrsIp4Proto (
+ Priv,
+ MV_IPPR_IGMP,
+ MVPP2_PRS_RI_CPU_CODE_RX_SPEC | MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+ MVPP2_PRS_RI_CPU_CODE_MASK | MVPP2_PRS_RI_UDF3_MASK
+ );
+
+ if (Err != 0) {
+ return Err;
+ }
+
+ /* IPv4 Broadcast */
+ Err = Mvpp2PrsIp4Cast (Priv, MVPP2_PRS_L3_BROAD_CAST);
+ if (Err != 0) {
+ return Err;
+ }
+
+ /* IPv4 Multicast */
+ Err = Mvpp2PrsIp4Cast (Priv, MVPP2_PRS_L3_MULTI_CAST);
+ if (Err != 0) {
+ return Err;
+ }
+
+ /* Default IPv4 entry for unknown protocols */
+ Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY));
+ Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_IP4);
+ Pe.Index = MVPP2_PE_IP4_PROTO_UN;
+
+ /* Set next Lu to IPv4 and shift by 12 bytes - obtained from Marvell */
+ Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_IP4);
+ Mvpp2PrsSramShiftSet (&Pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Set L4 Offset to the IPv4 header size minus 4 bytes, relative to the current position */
+ Mvpp2PrsSramOffsetSet (
+ &Pe,
+ MVPP2_PRS_SRAM_UDF_TYPE_L4,
+ sizeof (Mvpp2Iphdr) - 4,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD
+ );
+
+ Mvpp2PrsSramAiUpdate (&Pe, MVPP2_PRS_IPV4_DIP_AI_BIT, MVPP2_PRS_IPV4_DIP_AI_BIT);
+ Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L4_OTHER, MVPP2_PRS_RI_L4_PROTO_MASK);
+
+ Mvpp2PrsTcamAiUpdate (&Pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+
+ /* Unmask all Ports */
+ Mvpp2PrsTcamPortMapSet (&Pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_IP4);
+ Mvpp2PrsHwWrite (Priv, &Pe);
+
+ /* Default IPv4 entry for unicast Address */
+ Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY));
+ Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_IP4);
+ Pe.Index = MVPP2_PE_IP4_ADDR_UN;
+
+ /* Finished: go to Flowid generation */
+ Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_FLOWS);
+ Mvpp2PrsSramBitsSet (&Pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L3_UCAST, MVPP2_PRS_RI_L3_ADDR_MASK);
+
+ Mvpp2PrsTcamAiUpdate (&Pe, MVPP2_PRS_IPV4_DIP_AI_BIT, MVPP2_PRS_IPV4_DIP_AI_BIT);
+
+ /* Unmask all Ports */
+ Mvpp2PrsTcamPortMapSet (&Pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_IP4);
+ Mvpp2PrsHwWrite (Priv, &Pe);
+
+ return 0;
+}
+
+/* Initialize entries for IPv6 */
+STATIC
+INT32
+Mvpp2PrsIp6Init (
+ IN MVPP2_SHARED *Priv
+ )
+{
+ MVPP2_PRS_ENTRY Pe;
+ INT32 Tid, Err;
+
+ /* Set entries for TCP, UDP and ICMP over IPv6 */
+ Err = Mvpp2PrsIp6Proto (Priv, MV_IPPR_TCP, MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_RI_L4_PROTO_MASK);
+ if (Err != 0) {
+ return Err;
+ }
+
+ Err = Mvpp2PrsIp6Proto (Priv, MV_IPPR_UDP, MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_RI_L4_PROTO_MASK);
+ if (Err != 0) {
+ return Err;
+ }
+
+ Err = Mvpp2PrsIp6Proto (
+ Priv,
+ MV_IPPR_ICMPV6,
+ MVPP2_PRS_RI_CPU_CODE_RX_SPEC | MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+ MVPP2_PRS_RI_CPU_CODE_MASK | MVPP2_PRS_RI_UDF3_MASK
+ );
+
+ if (Err != 0) {
+ return Err;
+ }
+
+ /*
+ * IPv4 is the last header. This is a similar case to 6-TCP or 17-UDP.
+ * Result Info: UDF7=1, DS lite
+ */
+ Err = Mvpp2PrsIp6Proto (Priv, MV_IPPR_IPIP, MVPP2_PRS_RI_UDF7_IP6_LITE, MVPP2_PRS_RI_UDF7_MASK);
+ if (Err != 0) {
+ return Err;
+ }
+
+ /* IPv6 multicast */
+ Err = Mvpp2PrsIp6Cast (Priv, MVPP2_PRS_L3_MULTI_CAST);
+ if (Err != 0) {
+ return Err;
+ }
+
+ /* Entry for checking hop limit */
+ Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID);
+ if (Tid < 0) {
+ return Tid;
+ }
+
+ Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY));
+ Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_IP6);
+ Pe.Index = Tid;
+
+ /* Finished: go to Flowid generation */
+ Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_FLOWS);
+ Mvpp2PrsSramBitsSet (&Pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ Mvpp2PrsSramRiUpdate (
+ &Pe,
+ MVPP2_PRS_RI_L3_UN | MVPP2_PRS_RI_DROP_MASK,
+ MVPP2_PRS_RI_L3_PROTO_MASK | MVPP2_PRS_RI_DROP_MASK
+ );
+
+ Mvpp2PrsTcamDataByteSet (&Pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
+ Mvpp2PrsTcamAiUpdate (&Pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+
+ /* Update shadow table and hw entry */
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_IP4);
+ Mvpp2PrsHwWrite (Priv, &Pe);
+
+ /* Default IPv6 entry for unknown protocols */
+ Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY));
+ Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_IP6);
+ Pe.Index = MVPP2_PE_IP6_PROTO_UN;
+
+ /* Finished: go to Flowid generation */
+ Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_FLOWS);
+ Mvpp2PrsSramBitsSet (&Pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L4_OTHER, MVPP2_PRS_RI_L4_PROTO_MASK);
+
+ /* Set L4 Offset to the IPv6 header size minus 6 bytes, relative to the current position */
+ Mvpp2PrsSramOffsetSet (
+ &Pe,
+ MVPP2_PRS_SRAM_UDF_TYPE_L4,
+ sizeof (Mvpp2Ipv6hdr) - 6,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD
+ );
+
+ Mvpp2PrsTcamAiUpdate (&Pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+
+ /* Unmask all Ports */
+ Mvpp2PrsTcamPortMapSet (&Pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_IP4);
+ Mvpp2PrsHwWrite (Priv, &Pe);
+
+ /* Default IPv6 entry for unknown ext protocols */
+ Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY));
+ Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_IP6);
+ Pe.Index = MVPP2_PE_IP6_EXT_PROTO_UN;
+
+ /* Finished: go to Flowid generation */
+ Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_FLOWS);
+ Mvpp2PrsSramBitsSet (&Pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L4_OTHER, MVPP2_PRS_RI_L4_PROTO_MASK);
+
+ Mvpp2PrsTcamAiUpdate (&Pe, MVPP2_PRS_IPV6_EXT_AI_BIT, MVPP2_PRS_IPV6_EXT_AI_BIT);
+
+ /* Unmask all Ports */
+ Mvpp2PrsTcamPortMapSet (&Pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_IP4);
+ Mvpp2PrsHwWrite (Priv, &Pe);
+
+ /* Default IPv6 entry for unicast Address */
+ Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY));
+ Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_IP6);
+ Pe.Index = MVPP2_PE_IP6_ADDR_UN;
+
+ /* Finished: go to IPv6 again */
+ Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_IP6);
+ Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L3_UCAST, MVPP2_PRS_RI_L3_ADDR_MASK);
+ Mvpp2PrsSramAiUpdate (&Pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+
+ /* Shift back to IPv6 by 18 bytes - byte count provided by Marvell */
+ Mvpp2PrsSramShiftSet (&Pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ Mvpp2PrsTcamAiUpdate (&Pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+
+ /* Unmask all Ports */
+ Mvpp2PrsTcamPortMapSet (&Pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_IP6);
+ Mvpp2PrsHwWrite (Priv, &Pe);
+
+ return 0;
+}
+
+/* Parser default initialization */
+INT32
+Mvpp2PrsDefaultInit (
+ IN MVPP2_SHARED *Priv
+ )
+{
+ INT32 Err, Index, i;
+
+ /* Enable Tcam table */
+ Mvpp2Write (Priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
+
+ /* Clear all Tcam and Sram entries */
+ for (Index = 0; Index < MVPP2_PRS_TCAM_SRAM_SIZE; Index++) {
+ Mvpp2Write (Priv, MVPP2_PRS_TCAM_IDX_REG, Index);
+ for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) {
+ Mvpp2Write (Priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
+ }
+
+ Mvpp2Write (Priv, MVPP2_PRS_SRAM_IDX_REG, Index);
+ for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) {
+ Mvpp2Write (Priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
+ }
+ }
+
+ /* Invalidate all Tcam entries */
+ for (Index = 0; Index < MVPP2_PRS_TCAM_SRAM_SIZE; Index++) {
+ Mvpp2PrsHwInv (Priv, Index);
+ }
+
+ /* Always start from lookup = 0 */
+ for (Index = 0; Index < MVPP2_MAX_PORTS; Index++) {
+ Mvpp2PrsHwPortInit (Priv, Index, MVPP2_PRS_LU_MH, MVPP2_PRS_PORT_LU_MAX, 0);
+ }
+
+ Mvpp2PrsDefFlowInit (Priv);
+
+ Mvpp2PrsMhInit (Priv);
+
+ Mvpp2PrsMacInit (Priv);
+
+ Mvpp2PrsDsaInit (Priv);
+
+ Err = Mvpp2PrsEtypeInit (Priv);
+ if (Err != 0) {
+ return Err;
+ }
+
+ Err = Mvpp2PrsVlanInit (Priv);
+ if (Err != 0) {
+ return Err;
+ }
+
+ Err = Mvpp2PrsPppoeInit (Priv);
+ if (Err != 0) {
+ return Err;
+ }
+
+ Err = Mvpp2PrsIp6Init (Priv);
+ if (Err != 0) {
+ return Err;
+ }
+
+ Err = Mvpp2PrsIp4Init (Priv);
+ if (Err != 0) {
+ return Err;
+ }
+
+ return 0;
+}
+
+/* Compare MAC DA with Tcam entry data */
+STATIC
+BOOLEAN
+Mvpp2PrsMacRangeEquals (
+ IN MVPP2_PRS_ENTRY *Pe,
+ IN const UINT8 *Da,
+ IN UINT8 *Mask
+ )
+{
+ UINT8 TcamByte, TcamMask;
+ INT32 Index;
+
+ for (Index = 0; Index < MV_ETH_ALEN; Index++) {
+ Mvpp2PrsTcamDataByteGet (Pe, Index, &TcamByte, &TcamMask);
+ if (TcamMask != Mask[Index]) {
+ return FALSE;
+ }
+
+ if ((TcamMask & TcamByte) != (Da[Index] & Mask[Index])) {
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
+
+/* Find Tcam entry with matched pair <MAC DA, Port> */
+STATIC
+MVPP2_PRS_ENTRY *
+Mvpp2PrsMacDaRangeFind (
+ IN MVPP2_SHARED *Priv,
+ IN INT32 Pmap,
+ IN const UINT8 *Da,
+ IN UINT8 *Mask,
+ IN INT32 UdfType
+ )
+{
+ MVPP2_PRS_ENTRY *Pe;
+ INT32 Tid;
+
+ Pe = Mvpp2Alloc (sizeof (*Pe));
+ if (Pe == NULL) {
+ return NULL;
+ }
+ Mvpp2PrsTcamLuSet (Pe, MVPP2_PRS_LU_MAC);
+
+ /* Go through all the entries with MVPP2_PRS_LU_MAC */
+ for (Tid = MVPP2_PE_FIRST_FREE_TID; Tid <= MVPP2_PE_LAST_FREE_TID; Tid++) {
+ UINT32 EntryPmap;
+
+ if (!Priv->PrsShadow[Tid].Valid ||
+ (Priv->PrsShadow[Tid].Lu != MVPP2_PRS_LU_MAC) ||
+ (Priv->PrsShadow[Tid].Udf != UdfType))
+ {
+ continue;
+ }
+
+ Pe->Index = Tid;
+ Mvpp2PrsHwRead (Priv, Pe);
+ EntryPmap = Mvpp2PrsTcamPortMapGet (Pe);
+
+ if (Mvpp2PrsMacRangeEquals (Pe, Da, Mask) && EntryPmap == Pmap) {
+ return Pe;
+ }
+ }
+
+ Mvpp2Free (Pe);
+
+ return NULL;
+}
+
+/* Update parser's mac Da entry */
+INT32
+Mvpp2PrsMacDaAccept (
+ IN MVPP2_SHARED *Priv,
+ IN INT32 PortId,
+ IN const UINT8 *Da,
+ IN BOOLEAN Add
+ )
+{
+ MVPP2_PRS_ENTRY *Pe;
+ UINT32 Pmap, Len, Ri;
+ UINT8 Mask[MV_ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ INT32 Tid;
+
+ /* Scan TCAM and see if an entry with this <MAC DA, PortId> already exists */
+ Pe = Mvpp2PrsMacDaRangeFind (Priv, (1 << PortId), Da, Mask, MVPP2_PRS_UDF_MAC_DEF);
+
+ /* No such entry */
+ if (Pe == NULL) {
+ if (!Add) {
+ return 0;
+ }
+
+ /* Create new TCAM entry */
+ /* Find first range mac entry */
+ for (Tid = MVPP2_PE_FIRST_FREE_TID; Tid <= MVPP2_PE_LAST_FREE_TID; Tid++) {
+ if (Priv->PrsShadow[Tid].Valid &&
+ (Priv->PrsShadow[Tid].Lu == MVPP2_PRS_LU_MAC) &&
+ (Priv->PrsShadow[Tid].Udf == MVPP2_PRS_UDF_MAC_RANGE))
+ {
+ break;
+ }
+ }
+
+ /* Go through all the entries from first to last */
+ Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, Tid - 1);
+ if (Tid < 0) {
+ return Tid;
+ }
+
+ Pe = Mvpp2Alloc (sizeof (*Pe));
+ if (Pe == NULL) {
+ return -1;
+ }
+
+ Mvpp2PrsTcamLuSet (Pe, MVPP2_PRS_LU_MAC);
+ Pe->Index = Tid;
+
+ /* Mask all Ports */
+ Mvpp2PrsTcamPortMapSet (Pe, 0);
+ }
+
+ /* Update PortId Mask */
+ Mvpp2PrsTcamPortSet (Pe, PortId, Add);
+
+ /* Invalidate the entry if no Ports are left enabled */
+ Pmap = Mvpp2PrsTcamPortMapGet (Pe);
+ if (Pmap == 0) {
+ if (Add) {
+ Mvpp2Free (Pe);
+ return -1;
+ }
+
+ Mvpp2PrsHwInv (Priv, Pe->Index);
+ Priv->PrsShadow[Pe->Index].Valid = FALSE;
+
+ Mvpp2Free (Pe);
+
+ return 0;
+ }
+
+ /* Continue - set next lookup */
+ Mvpp2PrsSramNextLuSet (Pe, MVPP2_PRS_LU_DSA);
+
+ /* Set match on DA */
+ Len = MV_ETH_ALEN;
+ while (Len--) {
+ Mvpp2PrsTcamDataByteSet (Pe, Len, Da[Len], 0xff);
+ }
+
+ /* Set result info bits */
+ if (Mvpp2IsBroadcastEtherAddr (Da)) {
+ Ri = MVPP2_PRS_RI_L2_BCAST;
+ } else if (Mvpp2IsMulticastEtherAddr (Da)) {
+ Ri = MVPP2_PRS_RI_L2_MCAST;
+ } else {
+ Ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
+ }
+
+ Mvpp2PrsSramRiUpdate (Pe, Ri, MVPP2_PRS_RI_L2_CAST_MASK | MVPP2_PRS_RI_MAC_ME_MASK);
+ Mvpp2PrsShadowRiSet (Priv, Pe->Index, Ri, MVPP2_PRS_RI_L2_CAST_MASK | MVPP2_PRS_RI_MAC_ME_MASK);
+
+ /* Shift to ethertype */
+ Mvpp2PrsSramShiftSet (Pe, 2 * MV_ETH_ALEN, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Update shadow table and hw entry */
+ Priv->PrsShadow[Pe->Index].Udf = MVPP2_PRS_UDF_MAC_DEF;
+ Mvpp2PrsShadowSet (Priv, Pe->Index, MVPP2_PRS_LU_MAC);
+ Mvpp2PrsHwWrite (Priv, Pe);
+
+ Mvpp2Free (Pe);
+
+ return 0;
+}
+
+/* Delete all of the Port's simple (non-range) multicast entries */
+VOID
+Mvpp2PrsMcastDelAll (
+ IN MVPP2_SHARED *Priv,
+ IN INT32 PortId
+ )
+{
+ MVPP2_PRS_ENTRY Pe;
+ INT32 Index, Tid;
+
+ for (Tid = MVPP2_PE_FIRST_FREE_TID; Tid <= MVPP2_PE_LAST_FREE_TID; Tid++) {
+ UINT8 Da[MV_ETH_ALEN], DaMask[MV_ETH_ALEN];
+
+ if (!Priv->PrsShadow[Tid].Valid ||
+ (Priv->PrsShadow[Tid].Lu != MVPP2_PRS_LU_MAC) ||
+ (Priv->PrsShadow[Tid].Udf != MVPP2_PRS_UDF_MAC_DEF))
+ {
+ continue;
+ }
+
+ /* Only simple mac entries */
+ Pe.Index = Tid;
+ Mvpp2PrsHwRead (Priv, &Pe);
+
+ /* Read mac Addr from entry */
+ for (Index = 0; Index < MV_ETH_ALEN; Index++) {
+ Mvpp2PrsTcamDataByteGet (&Pe, Index, &Da[Index], &DaMask[Index]);
+ }
+
+ if (Mvpp2IsMulticastEtherAddr (Da) && !Mvpp2IsBroadcastEtherAddr (Da)) {
+ /* Delete this entry */
+ Mvpp2PrsMacDaAccept (Priv, PortId, Da, FALSE);
+ }
+ }
+}
+
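+/* Set the Port's DSA/EDSA tag parsing mode by updating the relevant parser entries */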
+INT32
+Mvpp2PrsTagModeSet (
+ IN MVPP2_SHARED *Priv,
+ IN INT32 PortId,
+ IN INT32 Type
+ )
+{
+ switch (Type) {
+ case MVPP2_TAG_TYPE_EDSA:
+ /* Add PortId to EDSA entries */
+ Mvpp2PrsDsaTagSet (Priv, PortId, TRUE, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+ Mvpp2PrsDsaTagSet (Priv, PortId, TRUE, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+ /* Remove PortId from DSA entries */
+ Mvpp2PrsDsaTagSet (Priv, PortId, FALSE, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+ Mvpp2PrsDsaTagSet (Priv, PortId, FALSE, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+ break;
+ case MVPP2_TAG_TYPE_DSA:
+ /* Add PortId to DSA entries */
+ Mvpp2PrsDsaTagSet (Priv, PortId, TRUE, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+ Mvpp2PrsDsaTagSet (Priv, PortId, TRUE, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+
+ /* Remove PortId from EDSA entries */
+ Mvpp2PrsDsaTagSet (Priv, PortId, FALSE, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+ Mvpp2PrsDsaTagSet (Priv, PortId, FALSE, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+ break;
+ case MVPP2_TAG_TYPE_MH:
+ case MVPP2_TAG_TYPE_NONE:
+ /* Remove PortId from EDSA and DSA entries */
+ Mvpp2PrsDsaTagSet (Priv, PortId, FALSE, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+ Mvpp2PrsDsaTagSet (Priv, PortId, FALSE, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+ Mvpp2PrsDsaTagSet (Priv, PortId, FALSE, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+ Mvpp2PrsDsaTagSet (Priv, PortId, FALSE, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+ break;
+ default:
+ if ((Type < 0) || (Type > MVPP2_TAG_TYPE_EDSA)) {
+ return MVPP2_EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/* Set prs Flow for the Port */
+INT32
+Mvpp2PrsDefFlow (
+ IN PP2DXE_PORT *Port
+ )
+{
+ MVPP2_PRS_ENTRY *Pe;
+ INT32 Tid;
+
+ Pe = Mvpp2PrsFlowFind (Port->Priv, Port->Id);
+
+ /* No such entry exists */
+ if (Pe == NULL) {
+ /* Go through all the entries from last to first */
+ Tid = Mvpp2PrsTcamFirstFree (Port->Priv, MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID);
+ if (Tid < 0) {
+ return Tid;
+ }
+
+ Pe = Mvpp2Alloc (sizeof (*Pe));
+ if (Pe == NULL) {
+ return MVPP2_ENOMEM;
+ }
+
+ Mvpp2PrsTcamLuSet (Pe, MVPP2_PRS_LU_FLOWS);
+ Pe->Index = Tid;
+
+ /* Set Flow ID */
+ Mvpp2PrsSramAiUpdate (Pe, Port->Id, MVPP2_PRS_FLOW_ID_MASK);
+ Mvpp2PrsSramBitsSet (Pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
+
+ /* Update shadow table */
+ Mvpp2PrsShadowSet (Port->Priv, Pe->Index, MVPP2_PRS_LU_FLOWS);
+ }
+
+ Mvpp2PrsTcamPortMapSet (Pe, (1 << Port->Id));
+ Mvpp2PrsHwWrite (Port->Priv, Pe);
+ Mvpp2Free (Pe);
+
+ return 0;
+}
+
+/* Classifier configuration routines */
+
+/* Update classification Flow table registers */
+STATIC
+VOID
+Mvpp2ClsFlowWrite (
+ IN MVPP2_SHARED *Priv,
+ IN MVPP2_CLS_FLOW_ENTRY *Fe
+ )
+{
+ Mvpp2Write (Priv, MVPP2_CLS_FLOW_INDEX_REG, Fe->Index);
+ Mvpp2Write (Priv, MVPP2_CLS_FLOW_TBL0_REG, Fe->Data[0]);
+ Mvpp2Write (Priv, MVPP2_CLS_FLOW_TBL1_REG, Fe->Data[1]);
+ Mvpp2Write (Priv, MVPP2_CLS_FLOW_TBL2_REG, Fe->Data[2]);
+}
+
+/* Update classification lookup table register */
+VOID
+Mvpp2ClsLookupWrite (
+ IN MVPP2_SHARED *Priv,
+ IN OUT MVPP2_CLS_LOOKUP_ENTRY *Le
+ )
+{
+ UINT32 Val;
+
+ Val = (Le->Way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | Le->Lkpid;
+ Mvpp2Write (Priv, MVPP2_CLS_LKP_INDEX_REG, Val);
+ Mvpp2Write (Priv, MVPP2_CLS_LKP_TBL_REG, Le->Data);
+}
+
+/* Classifier default initialization */
+VOID
+Mvpp2ClsInit (
+ IN MVPP2_SHARED *Priv
+ )
+{
+ MVPP2_CLS_LOOKUP_ENTRY Le;
+ MVPP2_CLS_FLOW_ENTRY Fe;
+ INT32 Index;
+
+ /* Enable classifier */
+ Mvpp2Write (Priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
+
+ /* Clear classifier Flow table */
+ Mvpp2Memset (&Fe.Data, 0, MVPP2_CLS_FLOWS_TBL_DATA_WORDS);
+ for (Index = 0; Index < MVPP2_CLS_FLOWS_TBL_SIZE; Index++) {
+ Fe.Index = Index;
+ Mvpp2ClsFlowWrite (Priv, &Fe);
+ }
+
+ /* Clear classifier lookup table */
+ Le.Data = 0;
+ for (Index = 0; Index < MVPP2_CLS_LKP_TBL_SIZE; Index++) {
+ Le.Lkpid = Index;
+ Le.Way = 0;
+ Mvpp2ClsLookupWrite (Priv, &Le);
+
+ Le.Way = 1;
+ Mvpp2ClsLookupWrite (Priv, &Le);
+ }
+}
+
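+/* Configure the classifier lookup table entry for the Port */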
+VOID
+Mvpp2ClsPortConfig (
+ IN PP2DXE_PORT *Port
+ )
+{
+ MVPP2_CLS_LOOKUP_ENTRY Le;
+ UINT32 Val;
+
+ /* Set way for the Port */
+ Val = Mvpp2Read (Port->Priv, MVPP2_CLS_PORT_WAY_REG);
+ Val &= ~MVPP2_CLS_PORT_WAY_MASK (Port->Id);
+ Mvpp2Write (Port->Priv, MVPP2_CLS_PORT_WAY_REG, Val);
+
+ /*
+ * Pick the entry to be accessed in lookup ID decoding table
+ * according to the way and lkpid.
+ */
+ Le.Lkpid = Port->Id;
+ Le.Way = 0;
+ Le.Data = 0;
+
+ /* Set initial CPU Queue for receiving packets */
+ Le.Data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
+ Le.Data |= Port->FirstRxq;
+
+ /* Disable classification engines */
+ Le.Data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
+
+ /* Update lookup ID table entry */
+ Mvpp2ClsLookupWrite (Port->Priv, &Le);
+}
+
+/* Set CPU Queue number for oversize packets */
+VOID
+Mvpp2ClsOversizeRxqSet (
+ IN PP2DXE_PORT *Port
+ )
+{
+ Mvpp2Write (
+ Port->Priv,
+ MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(Port->Id),
+ Port->FirstRxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK
+ );
+}
+
+/* BM helper routines */
+
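+/* Set the BM Pool's base address and Size in HW */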
+VOID
+Mvpp2BmPoolHwCreate (
+ IN MVPP2_SHARED *Priv,
+ IN MVPP2_BMS_POOL *BmPool,
+ IN INT32 Size
+ )
+{
+ BmPool->Size = Size;
+
+ Mvpp2Write (Priv, MVPP2_BM_POOL_BASE_REG(BmPool->Id), Lower32Bits (BmPool->PhysAddr));
+ Mvpp2Write (Priv, MVPP22_BM_POOL_BASE_HIGH_REG, (Upper32Bits (BmPool->PhysAddr) & MVPP22_BM_POOL_BASE_HIGH_MASK));
+ Mvpp2Write (Priv, MVPP2_BM_POOL_SIZE_REG(BmPool->Id), BmPool->Size);
+}
+
+/* Set Pool buffer Size */
+VOID
+Mvpp2BmPoolBufsizeSet (
+ IN MVPP2_SHARED *Priv,
+ IN MVPP2_BMS_POOL *BmPool,
+ IN INT32 BufSize
+ )
+{
+ UINT32 Val;
+
+ BmPool->BufSize = BufSize;
+
+ Val = MVPP2_ALIGN (BufSize, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
+ Mvpp2Write (Priv, MVPP2_POOL_BUF_SIZE_REG(BmPool->Id), Val);
+}
+
+VOID
+Mvpp2BmStop (
+ IN MVPP2_SHARED *Priv,
+ IN INT32 Pool
+ )
+{
+ UINT32 Val, i;
+
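+ /* Drain remaining buffers from the Pool before stopping it */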
+ for (i = 0; i < MVPP2_BM_SIZE; i++) {
+ Mvpp2Read (Priv, MVPP2_BM_PHY_ALLOC_REG(Pool));
+ }
+
+ Val = Mvpp2Read (Priv, MVPP2_BM_POOL_CTRL_REG(Pool));
+ Val |= MVPP2_BM_STOP_MASK;
+ Mvpp2Write (Priv, MVPP2_BM_POOL_CTRL_REG(Pool), Val);
+}
+
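+/* Mask and clear the BM Pool's interrupts */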
+VOID
+Mvpp2BmIrqClear (
+ IN MVPP2_SHARED *Priv,
+ IN INT32 Pool
+ )
+{
+ /* Mask BM all interrupts */
+ Mvpp2Write (Priv, MVPP2_BM_INTR_MASK_REG(Pool), 0);
+
+ /* Clear BM cause register */
+ Mvpp2Write (Priv, MVPP2_BM_INTR_CAUSE_REG(Pool), 0);
+}
+
+/* Attach long Pool to Rxq */
+VOID
+Mvpp2RxqLongPoolSet (
+ IN PP2DXE_PORT *Port,
+ IN INT32 Lrxq,
+ IN INT32 LongPool
+ )
+{
+ UINT32 Val;
+ INT32 Prxq;
+
+ /* Get Queue physical ID */
+ Prxq = Port->Rxqs[Lrxq].Id;
+
+ Val = Mvpp2Read (Port->Priv, MVPP2_RXQ_CONFIG_REG(Prxq));
+ Val &= ~MVPP2_RXQ_POOL_LONG_MASK;
+ Val |= ((LongPool << MVPP2_RXQ_POOL_LONG_OFFS) & MVPP2_RXQ_POOL_LONG_MASK);
+
+ Mvpp2Write (Port->Priv, MVPP2_RXQ_CONFIG_REG(Prxq), Val);
+}
+
+/* Attach short Pool to Rxq */
+VOID
+Mvpp2RxqShortPoolSet (
+ IN PP2DXE_PORT *Port,
+ IN INT32 Lrxq,
+ IN INT32 ShortPool
+ )
+{
+ UINT32 Val;
+ INT32 Prxq;
+
+ /* Get Queue physical ID */
+ Prxq = Port->Rxqs[Lrxq].Id;
+
+ Val = Mvpp2Read (Port->Priv, MVPP2_RXQ_CONFIG_REG(Prxq));
+ Val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
+ Val |= ((ShortPool << MVPP2_RXQ_POOL_SHORT_OFFS) & MVPP2_RXQ_POOL_SHORT_MASK);
+
+ Mvpp2Write (Port->Priv, MVPP2_RXQ_CONFIG_REG(Prxq), Val);
+}
+
+/* Release multicast buffer */
+VOID
+Mvpp2BmPoolMcPut (
+ IN PP2DXE_PORT *Port,
+ IN INT32 Pool,
+ IN UINT32 BufPhysAddr,
+ IN UINT32 BufVirtAddr,
+ IN INT32 McId
+ )
+{
+ UINT32 Val = 0;
+
+ Val |= (McId & MVPP2_BM_MC_ID_MASK);
+ Mvpp2Write (Port->Priv, MVPP2_BM_MC_RLS_REG, Val);
+
+ Mvpp2BmPoolPut (Port->Priv, Pool, BufPhysAddr | MVPP2_BM_PHY_RLS_MC_BUFF_MASK, BufVirtAddr);
+}
+
+/* Refill BM Pool */
+VOID
+Mvpp2PoolRefill (
+ IN PP2DXE_PORT *Port,
+ IN UINT32 Bm,
+ IN UINT32 PhysAddr,
+ IN UINT32 cookie
+ )
+{
+ INT32 Pool = Mvpp2BmCookiePoolGet (Bm);
+
+ Mvpp2BmPoolPut (Port->Priv, Pool, PhysAddr, cookie);
+}
+
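+/* Start or stop the BM Pool */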
+INTN
+Mvpp2BmPoolCtrl (
+ IN MVPP2_SHARED *Priv,
+ IN INTN Pool,
+ IN enum Mvpp2Command Cmd
+ )
+{
+ UINT32 RegVal = 0;
+ RegVal = Mvpp2Read (Priv, MVPP2_BM_POOL_CTRL_REG(Pool));
+
+ switch (Cmd) {
+ case MVPP2_START:
+ RegVal |= MVPP2_BM_START_MASK;
+ break;
+
+ case MVPP2_STOP:
+ RegVal |= MVPP2_BM_STOP_MASK;
+ break;
+
+ default:
+ return -1;
+ }
+ Mvpp2Write (Priv, MVPP2_BM_POOL_CTRL_REG(Pool), RegVal);
+
+ return 0;
+}
+
+/* Mask the current CPU's Rx/Tx interrupts */
+VOID
+Mvpp2InterruptsMask (
+ IN VOID *arg
+ )
+{
+ PP2DXE_PORT *Port = arg;
+
+ Mvpp2Write (Port->Priv, MVPP2_ISR_RX_TX_MASK_REG(Port->Id), 0);
+}
+
+/* Unmask the current CPU's Rx/Tx interrupts */
+VOID
+Mvpp2InterruptsUnmask (
+ IN VOID *arg
+ )
+{
+ PP2DXE_PORT *Port = arg;
+
+ Mvpp2Write (
+ Port->Priv,
+ MVPP2_ISR_RX_TX_MASK_REG(Port->Id),
+ (MVPP2_CAUSE_MISC_SUM_MASK | MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK)
+ );
+}
+
+/* MAC configuration routines */
+
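+/* Configure the GMAC interface mode according to the Port's PHY interface */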
+STATIC
+VOID
+Mvpp2PortMiiSet (
+ IN PP2DXE_PORT *Port
+ )
+{
+ UINT32 Val;
+
+ Val = Mvpp2GmacRead (Port, MVPP2_GMAC_CTRL_2_REG);
+
+ switch (Port->PhyInterface) {
+ case MV_MODE_SGMII:
+ Val |= MVPP2_GMAC_INBAND_AN_MASK;
+ break;
+ case MV_MODE_RGMII:
+ Val |= MVPP2_GMAC_PORT_RGMII_MASK;
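+ /* Fall through - RGMII also requires the PCS to be disabled */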
+ default:
+ Val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
+ }
+
+ Mvpp2GmacWrite (Port, MVPP2_GMAC_CTRL_2_REG, Val);
+}
+
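+/* Enable Flow Control advertisement in GMAC autonegotiation */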
+STATIC
+VOID
+Mvpp2PortFcAdvEnable (
+ IN PP2DXE_PORT *Port
+ )
+{
+ UINT32 Val;
+
+ Val = Mvpp2GmacRead (Port, MVPP2_GMAC_AUTONEG_CONFIG);
+ Val |= MVPP2_GMAC_FC_ADV_EN;
+ Mvpp2GmacWrite (Port, MVPP2_GMAC_AUTONEG_CONFIG, Val);
+}
+
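+/* Enable the GMAC Port and its MIB counters */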
+VOID
+Mvpp2PortEnable (
+ IN PP2DXE_PORT *Port
+ )
+{
+ UINT32 Val;
+
+ Val = Mvpp2GmacRead (Port, MVPP2_GMAC_CTRL_0_REG);
+ Val |= MVPP2_GMAC_PORT_EN_MASK;
+ Val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
+ Mvpp2GmacWrite (Port, MVPP2_GMAC_CTRL_0_REG, Val);
+}
+
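+/* Disable the GMAC Port */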
+VOID
+Mvpp2PortDisable (
+ IN PP2DXE_PORT *Port
+ )
+{
+ UINT32 Val;
+
+ Val = Mvpp2GmacRead (Port, MVPP2_GMAC_CTRL_0_REG);
+ Val &= ~(MVPP2_GMAC_PORT_EN_MASK);
+ Mvpp2GmacWrite (Port, MVPP2_GMAC_CTRL_0_REG, Val);
+}
+
+/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
+STATIC
+VOID
+Mvpp2PortPeriodicXonDisable (
+ IN PP2DXE_PORT *Port
+ )
+{
+ UINT32 Val;
+
+ Val = Mvpp2GmacRead (Port, MVPP2_GMAC_CTRL_1_REG) & ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
+ Mvpp2GmacWrite (Port, MVPP2_GMAC_CTRL_1_REG, Val);
+}
+
+/* Release the GMAC Port from reset */
+STATIC
+VOID
+Mvpp2PortReset (
+ IN PP2DXE_PORT *Port
+ )
+{
+ UINT32 Val;
+
+ Val = Mvpp2GmacRead (Port, MVPP2_GMAC_CTRL_2_REG) & ~MVPP2_GMAC_PORT_RESET_MASK;
+ Mvpp2GmacWrite (Port, MVPP2_GMAC_CTRL_2_REG, Val);
+
+ while (Mvpp2GmacRead (Port, MVPP2_GMAC_CTRL_2_REG) & MVPP2_GMAC_PORT_RESET_MASK) {
+ continue;
+ }
+}
+
+/* Set defaults to the MVPP2 Port */
+VOID
+Mvpp2DefaultsSet (
+ IN PP2DXE_PORT *Port
+ )
+{
+ INT32 TxPortNum, Val, Queue, pTxq;
+
+ /* Disable Legacy WRR, Disable EJP, Release from Reset */
+ TxPortNum = Mvpp2EgressPort (Port);
+ Mvpp2Write (Port->Priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, TxPortNum);
+ Mvpp2Write (Port->Priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
+
+ /* Close bandwidth for all Queues */
+ for (Queue = 0; Queue < MVPP2_MAX_TXQ; Queue++) {
+ pTxq = Mvpp2TxqPhys (Port->Id, Queue);
+ Mvpp2Write (Port->Priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(pTxq), 0);
+ }
+
+ /* Set refill period to 1 Usec, refill tokens and bucket Size to maximum */
+ Mvpp2Write (Port->Priv, MVPP2_TXP_SCHED_PERIOD_REG, Port->Priv->Tclk / MVPP2_USEC_PER_SEC);
+ Val = Mvpp2Read (Port->Priv, MVPP2_TXP_SCHED_REFILL_REG);
+ Val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
+ Val |= MVPP2_TXP_REFILL_PERIOD_MASK (1);
+ Val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
+ Mvpp2Write (Port->Priv, MVPP2_TXP_SCHED_REFILL_REG, Val);
+ Val = MVPP2_TXP_TOKEN_SIZE_MAX;
+ Mvpp2Write (Port->Priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, Val);
+
+ /* Set MaximumLowLatencyPacketSize value to 256 */
+ Mvpp2Write (
+ Port->Priv,
+ MVPP2_RX_CTRL_REG(Port->Id),
+ MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK | MVPP2_RX_LOW_LATENCY_PKT_SIZE (256)
+ );
+
+ /* Mask all interrupts to all present cpus */
+ Mvpp2InterruptsDisable (Port, 0x1);
+}
+
+/* Enable/disable receiving packets */
+VOID
+Mvpp2IngressEnable (
+ IN PP2DXE_PORT *Port
+ )
+{
+ UINT32 Val;
+ INT32 Lrxq, Queue;
+
+ for (Lrxq = 0; Lrxq < RxqNumber; Lrxq++) {
+ Queue = Port->Rxqs[Lrxq].Id;
+ Val = Mvpp2Read (Port->Priv, MVPP2_RXQ_CONFIG_REG(Queue));
+ Val &= ~MVPP2_RXQ_DISABLE_MASK;
+ Mvpp2Write (Port->Priv, MVPP2_RXQ_CONFIG_REG(Queue), Val);
+ }
+}
+
+VOID
+Mvpp2IngressDisable (
+ IN PP2DXE_PORT *Port
+ )
+{
+ UINT32 Val;
+ INT32 Lrxq, Queue;
+
+ for (Lrxq = 0; Lrxq < RxqNumber; Lrxq++) {
+ Queue = Port->Rxqs[Lrxq].Id;
+ Val = Mvpp2Read (Port->Priv, MVPP2_RXQ_CONFIG_REG(Queue));
+ Val |= MVPP2_RXQ_DISABLE_MASK;
+ Mvpp2Write (Port->Priv, MVPP2_RXQ_CONFIG_REG(Queue), Val);
+ }
+}
+
+/* Enable transmit via physical egress Queue - HW starts to take descriptors from DRAM */
+VOID
+Mvpp2EgressEnable (
+ IN PP2DXE_PORT *Port
+ )
+{
+ UINT32 qmap;
+ INT32 Queue;
+ INT32 TxPortNum = Mvpp2EgressPort (Port);
+
+ /* Enable all initialized TXs. */
+ qmap = 0;
+ for (Queue = 0; Queue < TxqNumber; Queue++) {
+ MVPP2_TX_QUEUE *Txq = &Port->Txqs[Queue];
+
+ if (Txq->Descs != NULL) {
+ qmap |= (1 << Queue);
+ }
+ }
+
+ Mvpp2Write (Port->Priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, TxPortNum);
+ Mvpp2Write (Port->Priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
+}
+
+/* Disable transmit via physical egress Queue - HW doesn't take descriptors from DRAM */
+VOID
+Mvpp2EgressDisable (
+ IN PP2DXE_PORT *Port
+ )
+{
+ UINT32 RegData;
+ INT32 Delay;
+ INT32 TxPortNum = Mvpp2EgressPort (Port);
+
+ /* Issue stop command for active channels only */
+ Mvpp2Write (Port->Priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, TxPortNum);
+ RegData = (Mvpp2Read (Port->Priv, MVPP2_TXP_SCHED_Q_CMD_REG)) & MVPP2_TXP_SCHED_ENQ_MASK;
+ if (RegData != 0) {
+ Mvpp2Write (Port->Priv, MVPP2_TXP_SCHED_Q_CMD_REG, (RegData << MVPP2_TXP_SCHED_DISQ_OFFSET));
+ }
+
+ /* Wait for all Tx activity to terminate. */
+ Delay = 0;
+ do {
+ if (Delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
+ Mvpp2Printf ("Tx stop timed out, status=0x%08x\n", RegData);
+ break;
+ }
+ Mvpp2Mdelay (1);
+ Delay++;
+
+    /* Check the Port TX Command register to verify that all Tx Queues are stopped */
+ RegData = Mvpp2Read (Port->Priv, MVPP2_TXP_SCHED_Q_CMD_REG);
+ } while (RegData & MVPP2_TXP_SCHED_ENQ_MASK);
+}
+
+/* Rx descriptors helper methods */
+
+/* Set rx Queue Offset */
+STATIC
+VOID
+Mvpp2RxqOffsetSet (
+ IN PP2DXE_PORT *Port,
+ IN INT32 Prxq,
+ IN INT32 Offset
+ )
+{
+ UINT32 Val;
+
+ /* Convert Offset from bytes to units of 32 bytes */
+ Offset = Offset >> 5;
+
+ /* Clear previous value */
+ Val = Mvpp2Read (Port->Priv, MVPP2_RXQ_CONFIG_REG(Prxq));
+ Val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
+
+ /* Update packet Offset in received buffer */
+ Val |= ((Offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) & MVPP2_RXQ_PACKET_OFFSET_MASK);
+ Mvpp2Write (Port->Priv, MVPP2_RXQ_CONFIG_REG(Prxq), Val);
+}
+
+/* Obtain BM cookie information from descriptor */
+UINT32
+Mvpp2BmCookieBuild (
+ IN MVPP2_RX_DESC *RxDesc,
+ IN INT32 Cpu
+ )
+{
+ INT32 Pool;
+ UINT32 ret;
+
+ Pool = (RxDesc->status & MVPP2_RXD_BM_POOL_ID_MASK) >> MVPP2_RXD_BM_POOL_ID_OFFS;
+
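+  /* Pack the Pool Id and the Cpu number into separate byte fields of the BM Cookie */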
+ ret = ((Pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) | ((Cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
+
+ return ret;
+}
+
+/* Tx descriptors helper methods */
+
+INT32
+Mvpp2TxqDrainSet (
+ IN PP2DXE_PORT *Port,
+ IN INT32 Txq,
+ IN BOOLEAN En
+ )
+{
+ UINT32 RegVal;
+ INT32 pTxq = Mvpp2TxqPhys (Port->Id, Txq);
+
+ Mvpp2Write (Port->Priv, MVPP2_TXQ_NUM_REG, pTxq);
+ RegVal = Mvpp2Read (Port->Priv, MVPP2_TXQ_PREF_BUF_REG);
+
+ if (En) {
+ RegVal |= MVPP2_TXQ_DRAIN_EN_MASK;
+ } else {
+ RegVal &= ~MVPP2_TXQ_DRAIN_EN_MASK;
+ }
+
+ Mvpp2Write (Port->Priv, MVPP2_TXQ_PREF_BUF_REG, RegVal);
+
+ return 0;
+}
+
+/* Get number of Tx descriptors waiting to be transmitted by HW */
+INT32
+Mvpp2TxqPendDescNumGet (
+ IN PP2DXE_PORT *Port,
+ IN MVPP2_TX_QUEUE *Txq
+ )
+{
+ UINT32 Val;
+
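+  /* Select the Txq for indirect access, then read its pending descriptor counter */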
+ Mvpp2Write (Port->Priv, MVPP2_TXQ_NUM_REG, Txq->Id);
+ Val = Mvpp2Read (Port->Priv, MVPP2_TXQ_PENDING_REG);
+
+ return Val & MVPP2_TXQ_PENDING_MASK;
+}
+
+/* Get number of occupied aggregated Tx descriptors */
+UINT32
+Mvpp2AggrTxqPendDescNumGet (
+ IN MVPP2_SHARED *Priv,
+ IN INT32 Cpu
+ )
+{
+ UINT32 RegVal;
+
+ RegVal = Mvpp2Read (Priv, MVPP2_AGGR_TXQ_STATUS_REG(Cpu));
+
+ return RegVal & MVPP2_AGGR_TXQ_PENDING_MASK;
+}
+
+/* Get pointer to next Tx descriptor to be processed (send) by HW */
+MVPP2_TX_DESC *
+Mvpp2TxqNextDescGet (
+ MVPP2_TX_QUEUE *Txq
+ )
+{
+ INT32 TxDesc = Txq->NextDescToProc;
+
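+  /* Advance the ring index - MVPP2_QUEUE_NEXT_DESC wraps around at the end of the Queue */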
+ Txq->NextDescToProc = MVPP2_QUEUE_NEXT_DESC (Txq, TxDesc);
+
+ return Txq->Descs + TxDesc;
+}
+
+/* Update HW with the number of aggregated Tx descriptors to be sent */
+VOID
+Mvpp2AggrTxqPendDescAdd (
+ IN PP2DXE_PORT *Port,
+ IN INT32 Pending
+ )
+{
+  /* Aggregated access - the relevant TXQ number is written in the TX descriptor */
+ Mvpp2Write (Port->Priv, MVPP2_AGGR_TXQ_UPDATE_REG, Pending);
+}
+
+/*
+ * Check if there are enough free descriptors in the aggregated Txq.
+ * If not, update the number of occupied descriptors and repeat the check.
+ */
+INT32
+Mvpp2AggrDescNumCheck (
+ IN MVPP2_SHARED *Priv,
+ IN MVPP2_TX_QUEUE *AggrTxq,
+ IN INT32 Num,
+ IN INT32 Cpu
+ )
+{
+ UINT32 Val;
+
+ if ((AggrTxq->count + Num) > AggrTxq->Size) {
+    /* Update the number of occupied aggregated Tx descriptors */
+ Val = Mvpp2Read (Priv, MVPP2_AGGR_TXQ_STATUS_REG(Cpu));
+ AggrTxq->count = Val & MVPP2_AGGR_TXQ_PENDING_MASK;
+ }
+
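+  /* Re-check with the refreshed count - fail only if descriptors are still unavailable */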
+ if ((AggrTxq->count + Num) > AggrTxq->Size) {
+ return MVPP2_ENOMEM;
+ }
+
+ return 0;
+}
+
+/* Reserved Tx descriptors allocation request */
+INT32
+Mvpp2TxqAllocReservedDesc (
+ IN MVPP2_SHARED *Priv,
+ IN MVPP2_TX_QUEUE *Txq,
+ IN INT32 Num
+ )
+{
+ UINT32 Val;
+
+ Val = (Txq->Id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | Num;
+ Mvpp2Write (Priv, MVPP2_TXQ_RSVD_REQ_REG, Val);
+
+ Val = Mvpp2Read (Priv, MVPP2_TXQ_RSVD_RSLT_REG);
+
+ return Val & MVPP2_TXQ_RSVD_RSLT_MASK;
+}
+
+/*
+ * Release the last allocated Tx descriptor. Useful to handle DMA
+ * mapping failures in the Tx path.
+ */
+VOID
+Mvpp2TxqDescPut (
+ IN MVPP2_TX_QUEUE *Txq
+ )
+{
+ if (Txq->NextDescToProc == 0) {
+ Txq->NextDescToProc = Txq->LastDesc - 1;
+ } else {
+ Txq->NextDescToProc--;
+ }
+}
+
+/* Set Tx descriptors fields relevant for CSUM calculation */
+UINT32
+Mvpp2TxqDescCsum (
+ IN INT32 L3Offs,
+ IN INT32 L3Proto,
+ IN INT32 IpHdrLen,
+ IN INT32 L4Proto
+ )
+{
+ UINT32 command;
+
+ /*
+ * Fields: L3_Offset, IP_hdrlen, L3_type, G_IPV4Chk,
+ * G_L4_chk, L4_type required only for checksum calculation
+ */
+ command = (L3Offs << MVPP2_TXD_L3_OFF_SHIFT);
+ command |= (IpHdrLen << MVPP2_TXD_IP_HLEN_SHIFT);
+ command |= MVPP2_TXD_IP_CSUM_DISABLE;
+
+ if (L3Proto == Mvpp2SwapBytes16 (MV_ETH_P_IP)) {
+ command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
+ command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
+ } else {
+ command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */
+ }
+
+ if (L4Proto == MV_IPPR_TCP) {
+ command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */
+ command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
+ } else if (L4Proto == MV_IPPR_UDP) {
+ command |= MVPP2_TXD_L4_UDP; /* enable UDP */
+ command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
+ } else {
+ command |= MVPP2_TXD_L4_CSUM_NOT;
+ }
+
+ return command;
+}
+
+/* Clear counter of sent packets */
+VOID
+Mvpp2TxqSentCounterClear (
+ IN OUT VOID *arg
+ )
+{
+ PP2DXE_PORT *Port = arg;
+ INT32 Queue;
+
+ for (Queue = 0; Queue < TxqNumber; Queue++) {
+ INT32 Id = Port->Txqs[Queue].Id;
+
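+    /* The sent descriptor counter is clear-on-read, so a dummy read resets it */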
+ Mvpp2Read (Port->Priv, MVPP2_TXQ_SENT_REG(Id));
+ }
+}
+
+/* Change maximum receive Size of the Port */
+VOID
+Mvpp2GmacMaxRxSizeSet (
+ IN PP2DXE_PORT *Port
+ )
+{
+ UINT32 Val;
+
+ Val = Mvpp2GmacRead (Port, MVPP2_GMAC_CTRL_0_REG);
+ Val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
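+  /* The Max Rx Size field is programmed in 2-byte units, excluding the Marvell header */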
+ Val |= (((Port->PktSize - MVPP2_MH_SIZE) / 2) << MVPP2_GMAC_MAX_RX_SIZE_OFFS);
+ Mvpp2GmacWrite (Port, MVPP2_GMAC_CTRL_0_REG, Val);
+}
+
+/* Set max sizes for Tx Queues */
+VOID
+Mvpp2TxpMaxTxSizeSet (
+ IN PP2DXE_PORT *Port
+ )
+{
+ UINT32 Val, Size, mtu;
+ INT32 Txq, TxPortNum;
+
+ mtu = Port->PktSize * 8;
+ if (mtu > MVPP2_TXP_MTU_MAX) {
+ mtu = MVPP2_TXP_MTU_MAX;
+ }
+
+  /* Workaround for wrong Token bucket update: set the MTU value to 3 * the real MTU value */
+ mtu = 3 * mtu;
+
+  /* Indirect access to registers */
+ TxPortNum = Mvpp2EgressPort (Port);
+ Mvpp2Write (Port->Priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, TxPortNum);
+
+ /* Set MTU */
+ Val = Mvpp2Read (Port->Priv, MVPP2_TXP_SCHED_MTU_REG);
+ Val &= ~MVPP2_TXP_MTU_MAX;
+ Val |= mtu;
+ Mvpp2Write (Port->Priv, MVPP2_TXP_SCHED_MTU_REG, Val);
+
+  /* TXP token Size and all TXQs token Sizes must be larger than the MTU */
+ Val = Mvpp2Read (Port->Priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
+ Size = Val & MVPP2_TXP_TOKEN_SIZE_MAX;
+ if (Size < mtu) {
+ Size = mtu;
+ Val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
+ Val |= Size;
+ Mvpp2Write (Port->Priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, Val);
+ }
+
+ for (Txq = 0; Txq < TxqNumber; Txq++) {
+ Val = Mvpp2Read (Port->Priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(Txq));
+ Size = Val & MVPP2_TXQ_TOKEN_SIZE_MAX;
+
+ if (Size < mtu) {
+ Size = mtu;
+ Val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
+ Val |= Size;
+ Mvpp2Write (Port->Priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(Txq), Val);
+ }
+ }
+}
+
+/*
+ * Set the number of packets that will be received before Rx interrupt
+ * will be generated by HW.
+ */
+VOID
+Mvpp2RxPktsCoalSet (
+ IN PP2DXE_PORT *Port,
+ IN OUT MVPP2_RX_QUEUE *Rxq,
+ IN UINT32 Pkts
+ )
+{
+ UINT32 Val;
+
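+  /* Select the Rxq for indirect access and program the occupied-descriptor threshold */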
+ Val = (Pkts & MVPP2_OCCUPIED_THRESH_MASK);
+ Mvpp2Write (Port->Priv, MVPP2_RXQ_NUM_REG, Rxq->Id);
+ Mvpp2Write (Port->Priv, MVPP2_RXQ_THRESH_REG, Val);
+
+ Rxq->PktsCoal = Pkts;
+}
+
+/* Set the time Delay in Usec before the Rx interrupt */
+VOID
+Mvpp2RxTimeCoalSet (
+ IN PP2DXE_PORT *Port,
+ IN OUT MVPP2_RX_QUEUE *Rxq,
+ IN UINT32 Usec
+ )
+{
+ UINT32 Val;
+
+ Val = (Port->Priv->Tclk / MVPP2_USEC_PER_SEC) * Usec;
+ Mvpp2Write (Port->Priv, MVPP2_ISR_RX_THRESHOLD_REG(Rxq->Id), Val);
+
+ Rxq->TimeCoal = Usec;
+}
+
+/* Rx/Tx Queue initialization/cleanup methods */
+
+/* Configure RXQ's */
+VOID
+Mvpp2RxqHwInit (
+ IN PP2DXE_PORT *Port,
+ IN OUT MVPP2_RX_QUEUE *Rxq
+ )
+{
+ Rxq->LastDesc = Rxq->Size - 1;
+
+ /* Zero occupied and non-occupied counters - direct access */
+ Mvpp2Write (Port->Priv, MVPP2_RXQ_STATUS_REG(Rxq->Id), 0);
+
+ /* Set Rx descriptors Queue starting Address - indirect access */
+ Mvpp2Write (Port->Priv, MVPP2_RXQ_NUM_REG, Rxq->Id);
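+  /* The descriptor base Address is programmed shifted right by MVPP22_DESC_ADDR_SHIFT */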
+ Mvpp2Write (Port->Priv, MVPP2_RXQ_DESC_ADDR_REG, Rxq->DescsPhys >> MVPP22_DESC_ADDR_SHIFT);
+ Mvpp2Write (Port->Priv, MVPP2_RXQ_DESC_SIZE_REG, Rxq->Size);
+ Mvpp2Write (Port->Priv, MVPP2_RXQ_INDEX_REG, 0);
+
+ /* Set Offset */
+ Mvpp2RxqOffsetSet (Port, Rxq->Id, MVPP2_RXQ_OFFSET);
+
+ /* Set coalescing pkts and time */
+ Mvpp2RxPktsCoalSet (Port, Rxq, MVPP2_RX_COAL_PKTS);
+ Mvpp2RxTimeCoalSet (Port, Rxq, Rxq->TimeCoal);
+
+ /* Add number of descriptors ready for receiving packets */
+ Mvpp2RxqStatusUpdate (Port, Rxq->Id, 0, Rxq->Size);
+}
+
+/* Push packets received by the RXQ to BM Pool */
+VOID
+Mvpp2RxqDropPkts (
+ IN PP2DXE_PORT *Port,
+ IN OUT MVPP2_RX_QUEUE *Rxq,
+ IN INT32 Cpu
+ )
+{
+ INT32 RxReceived;
+
+ RxReceived = Mvpp2RxqReceived (Port, Rxq->Id);
+ if (!RxReceived) {
+ return;
+ }
+
+ Mvpp2RxqStatusUpdate (Port, Rxq->Id, RxReceived, RxReceived);
+}
+
+VOID
+Mvpp2RxqHwDeinit (
+ IN PP2DXE_PORT *Port,
+ IN OUT MVPP2_RX_QUEUE *Rxq
+ )
+{
+ Rxq->Descs = NULL;
+ Rxq->LastDesc = 0;
+ Rxq->NextDescToProc = 0;
+ Rxq->DescsPhys = 0;
+
+ /*
+ * Clear Rx descriptors Queue starting Address and Size;
+ * free descriptor number
+ */
+ Mvpp2Write (Port->Priv, MVPP2_RXQ_STATUS_REG(Rxq->Id), 0);
+ Mvpp2Write (Port->Priv, MVPP2_RXQ_NUM_REG, Rxq->Id);
+ Mvpp2Write (Port->Priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
+ Mvpp2Write (Port->Priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
+}
+
+/* Configure TXQ's */
+VOID
+Mvpp2TxqHwInit (
+ IN PP2DXE_PORT *Port,
+ IN OUT MVPP2_TX_QUEUE *Txq
+ )
+{
+ INT32 Desc, DescPerTxq, TxPortNum;
+ UINT32 Val;
+
+ Txq->LastDesc = Txq->Size - 1;
+
+ /* Set Tx descriptors Queue starting Address - indirect access */
+ Mvpp2Write (Port->Priv, MVPP2_TXQ_NUM_REG, Txq->Id);
+ Mvpp2Write (Port->Priv, MVPP2_TXQ_DESC_ADDR_REG, Txq->DescsPhys);
+ Mvpp2Write (Port->Priv, MVPP2_TXQ_DESC_SIZE_REG, Txq->Size & MVPP2_TXQ_DESC_SIZE_MASK);
+ Mvpp2Write (Port->Priv, MVPP2_TXQ_INDEX_REG, 0);
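+  /* Clear any reserved descriptors left for this Txq */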
+ Mvpp2Write (Port->Priv, MVPP2_TXQ_RSVD_CLR_REG, Txq->Id << MVPP2_TXQ_RSVD_CLR_OFFSET);
+ Val = Mvpp2Read (Port->Priv, MVPP2_TXQ_PENDING_REG);
+ Val &= ~MVPP2_TXQ_PENDING_MASK;
+ Mvpp2Write (Port->Priv, MVPP2_TXQ_PENDING_REG, Val);
+
+ /*
+ * Calculate base Address in prefetch buffer. We reserve 16 descriptors
+ * for each existing TXQ.
+ * TCONTS for PON Port must be continuous from 0 to MVPP2_MAX_TCONT
+   * GBE Ports are assumed to be continuous from 0 to MVPP2_MAX_PORTS
+ */
+ DescPerTxq = 16;
+ Desc = (Port->Id * MVPP2_MAX_TXQ * DescPerTxq) + (Txq->LogId * DescPerTxq);
+
+ Mvpp2Write (
+ Port->Priv,
+ MVPP2_TXQ_PREF_BUF_REG,
+ MVPP2_PREF_BUF_PTR (Desc) | MVPP2_PREF_BUF_SIZE_16 | MVPP2_PREF_BUF_THRESH (DescPerTxq/2)
+ );
+
+ /* WRR / EJP configuration - indirect access */
+ TxPortNum = Mvpp2EgressPort (Port);
+ Mvpp2Write (Port->Priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, TxPortNum);
+
+ Val = Mvpp2Read (Port->Priv, MVPP2_TXQ_SCHED_REFILL_REG(Txq->LogId));
+ Val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
+ Val |= MVPP2_TXQ_REFILL_PERIOD_MASK (1);
+ Val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
+ Mvpp2Write (Port->Priv, MVPP2_TXQ_SCHED_REFILL_REG(Txq->LogId), Val);
+
+ Val = MVPP2_TXQ_TOKEN_SIZE_MAX;
+ Mvpp2Write (Port->Priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(Txq->LogId), Val);
+}
+
+VOID
+Mvpp2TxqHwDeinit (
+ IN PP2DXE_PORT *Port,
+ IN OUT MVPP2_TX_QUEUE *Txq
+ )
+{
+ Txq->Descs = NULL;
+ Txq->LastDesc = 0;
+ Txq->NextDescToProc = 0;
+ Txq->DescsPhys = 0;
+
+ /* Set minimum bandwidth for disabled TXQs */
+ Mvpp2Write (Port->Priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(Txq->Id), 0);
+
+ /* Set Tx descriptors Queue starting Address and Size */
+ Mvpp2Write (Port->Priv, MVPP2_TXQ_NUM_REG, Txq->Id);
+ Mvpp2Write (Port->Priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
+ Mvpp2Write (Port->Priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
+}
+
+/* Allocate and initialize descriptors for aggr TXQ */
+VOID
+Mvpp2AggrTxqHwInit (
+ IN OUT MVPP2_TX_QUEUE *AggrTxq,
+ IN INT32 DescNum,
+ IN INT32 Cpu,
+ IN MVPP2_SHARED *Priv
+ )
+{
+ AggrTxq->LastDesc = AggrTxq->Size - 1;
+
+  /* Workaround for lack of Aggr TXQ Reset - resume from the current HW index */
+ AggrTxq->NextDescToProc = Mvpp2Read (Priv, MVPP2_AGGR_TXQ_INDEX_REG(Cpu));
+
+ /* Set Tx descriptors Queue starting Address (indirect access) */
+ Mvpp2Write (Priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(Cpu), AggrTxq->DescsPhys >> MVPP22_DESC_ADDR_SHIFT);
+ Mvpp2Write (Priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(Cpu), DescNum & MVPP2_AGGR_TXQ_DESC_SIZE_MASK);
+}
+
+/* Power up the GMAC: configure MII mode, Xon and Flow Control, then release the Port Reset */
+VOID
+Mvpp2PortPowerUp (
+ IN PP2DXE_PORT *Port
+ )
+{
+ Mvpp2PortMiiSet (Port);
+ Mvpp2PortPeriodicXonDisable (Port);
+ Mvpp2PortFcAdvEnable (Port);
+ Mvpp2PortReset (Port);
+}
+
+/* Initialize Rx FIFO's */
+VOID
+Mvpp2RxFifoInit (
+ IN MVPP2_SHARED *Priv
+ )
+{
+ INT32 PortId;
+
+ for (PortId = 0; PortId < MVPP2_MAX_PORTS; PortId++) {
+ Mvpp2Write (Priv, MVPP2_RX_DATA_FIFO_SIZE_REG(PortId), MVPP2_RX_FIFO_PORT_DATA_SIZE);
+ Mvpp2Write (Priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(PortId), MVPP2_RX_FIFO_PORT_ATTR_SIZE);
+ }
+
+ Mvpp2Write (Priv, MVPP2_RX_MIN_PKT_SIZE_REG, MVPP2_RX_FIFO_PORT_MIN_PKT);
+ Mvpp2Write (Priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
+}
+
+VOID
+MvGop110NetcActivePort (
+ IN PP2DXE_PORT *Port,
+ IN UINT32 PortId,
+ IN UINT32 Val
+ )
+{
+ UINT32 RegVal;
+
+ RegVal = Mvpp2Rfu1Read (Port->Priv, MV_NETCOMP_PORTS_CONTROL_1);
+ RegVal &= ~(NETC_PORTS_ACTIVE_MASK (PortId));
+
+ Val <<= NETC_PORTS_ACTIVE_OFFSET (PortId);
+ Val &= NETC_PORTS_ACTIVE_MASK (PortId);
+
+ RegVal |= Val;
+
+ Mvpp2Rfu1Write (Port->Priv, MV_NETCOMP_PORTS_CONTROL_1, RegVal);
+}
+
+STATIC
+VOID
+MvGop110NetcXauiEnable (
+ IN PP2DXE_PORT *Port,
+ IN UINT32 PortId,
+ IN UINT32 Val
+ )
+{
+ UINT32 RegVal;
+
+ RegVal = Mvpp2Rfu1Read (Port->Priv, SD1_CONTROL_1_REG);
+ RegVal &= ~SD1_CONTROL_XAUI_EN_MASK;
+
+ Val <<= SD1_CONTROL_XAUI_EN_OFFSET;
+ Val &= SD1_CONTROL_XAUI_EN_MASK;
+
+ RegVal |= Val;
+
+ Mvpp2Rfu1Write (Port->Priv, SD1_CONTROL_1_REG, RegVal);
+}
+
+STATIC
+VOID
+MvGop110NetcRxaui0Enable (
+ IN PP2DXE_PORT *Port,
+ IN UINT32 PortId,
+ IN UINT32 Val
+ )
+{
+ UINT32 RegVal;
+
+ RegVal = Mvpp2Rfu1Read (Port->Priv, SD1_CONTROL_1_REG);
+ RegVal &= ~SD1_CONTROL_RXAUI0_L23_EN_MASK;
+
+ Val <<= SD1_CONTROL_RXAUI0_L23_EN_OFFSET;
+ Val &= SD1_CONTROL_RXAUI0_L23_EN_MASK;
+
+ RegVal |= Val;
+
+ Mvpp2Rfu1Write (Port->Priv, SD1_CONTROL_1_REG, RegVal);
+}
+
+STATIC
+VOID
+MvGop110NetcRxaui1Enable (
+ IN PP2DXE_PORT *Port,
+ IN UINT32 PortId,
+ IN UINT32 Val
+ )
+{
+ UINT32 RegVal;
+
+ RegVal = Mvpp2Rfu1Read (Port->Priv, SD1_CONTROL_1_REG);
+ RegVal &= ~SD1_CONTROL_RXAUI1_L45_EN_MASK;
+
+ Val <<= SD1_CONTROL_RXAUI1_L45_EN_OFFSET;
+ Val &= SD1_CONTROL_RXAUI1_L45_EN_MASK;
+
+ RegVal |= Val;
+
+ Mvpp2Rfu1Write (Port->Priv, SD1_CONTROL_1_REG, RegVal);
+}
+
+STATIC
+VOID
+MvGop110NetcMiiMode (
+ IN PP2DXE_PORT *Port,
+ IN UINT32 PortId,
+ IN UINT32 Val
+ )
+{
+ UINT32 RegVal;
+
+ RegVal = Mvpp2Rfu1Read (Port->Priv, MV_NETCOMP_CONTROL_0);
+ RegVal &= ~NETC_GBE_PORT1_MII_MODE_MASK;
+
+ Val <<= NETC_GBE_PORT1_MII_MODE_OFFSET;
+ Val &= NETC_GBE_PORT1_MII_MODE_MASK;
+
+ RegVal |= Val;
+
+ Mvpp2Rfu1Write (Port->Priv, MV_NETCOMP_CONTROL_0, RegVal);
+}
+
+STATIC
+VOID
+MvGop110NetcGopReset (
+ IN PP2DXE_PORT *Port,
+ IN UINT32 Val
+ )
+{
+ UINT32 RegVal;
+
+ RegVal = Mvpp2Rfu1Read (Port->Priv, MV_GOP_SOFT_RESET_1_REG);
+ RegVal &= ~NETC_GOP_SOFT_RESET_MASK;
+
+ Val <<= NETC_GOP_SOFT_RESET_OFFSET;
+ Val &= NETC_GOP_SOFT_RESET_MASK;
+
+ RegVal |= Val;
+
+ Mvpp2Rfu1Write (Port->Priv, MV_GOP_SOFT_RESET_1_REG, RegVal);
+}
+
+STATIC
+VOID
+MvGop110NetcGopClockLogicSet (
+ IN PP2DXE_PORT *Port,
+ IN UINT32 Val
+ )
+{
+ UINT32 RegVal;
+
+ RegVal = Mvpp2Rfu1Read (Port->Priv, MV_NETCOMP_PORTS_CONTROL_0);
+ RegVal &= ~NETC_CLK_DIV_PHASE_MASK;
+
+ Val <<= NETC_CLK_DIV_PHASE_OFFSET;
+ Val &= NETC_CLK_DIV_PHASE_MASK;
+
+ RegVal |= Val;
+
+ Mvpp2Rfu1Write (Port->Priv, MV_NETCOMP_PORTS_CONTROL_0, RegVal);
+}
+
+STATIC
+VOID
+MvGop110NetcPortRfReset (
+ IN PP2DXE_PORT *Port,
+ IN UINT32 PortId,
+ IN UINT32 Val
+ )
+{
+ UINT32 RegVal;
+
+ RegVal = Mvpp2Rfu1Read (Port->Priv, MV_NETCOMP_PORTS_CONTROL_1);
+ RegVal &= ~(NETC_PORT_GIG_RF_RESET_MASK (PortId));
+
+ Val <<= NETC_PORT_GIG_RF_RESET_OFFSET (PortId);
+ Val &= NETC_PORT_GIG_RF_RESET_MASK (PortId);
+
+ RegVal |= Val;
+
+ Mvpp2Rfu1Write (Port->Priv, MV_NETCOMP_PORTS_CONTROL_1, RegVal);
+}
+
+STATIC
+VOID
+MvGop110NetcGbeSgmiiModeSelect (
+ IN PP2DXE_PORT *Port,
+ IN UINT32 PortId,
+ IN UINT32 Val
+ )
+{
+ UINT32 RegVal, Mask, Offset;
+
+ if (PortId == 2) {
+ Mask = NETC_GBE_PORT0_SGMII_MODE_MASK;
+ Offset = NETC_GBE_PORT0_SGMII_MODE_OFFSET;
+ } else {
+ Mask = NETC_GBE_PORT1_SGMII_MODE_MASK;
+ Offset = NETC_GBE_PORT1_SGMII_MODE_OFFSET;
+ }
+ RegVal = Mvpp2Rfu1Read (Port->Priv, MV_NETCOMP_CONTROL_0);
+ RegVal &= ~Mask;
+
+ Val <<= Offset;
+ Val &= Mask;
+
+ RegVal |= Val;
+
+ Mvpp2Rfu1Write (Port->Priv, MV_NETCOMP_CONTROL_0, RegVal);
+}
+
+STATIC
+VOID
+MvGop110NetcBusWidthSelect (
+ IN PP2DXE_PORT *Port,
+ IN UINT32 Val
+ )
+{
+ UINT32 RegVal;
+
+ RegVal = Mvpp2Rfu1Read (Port->Priv, MV_NETCOMP_PORTS_CONTROL_0);
+ RegVal &= ~NETC_BUS_WIDTH_SELECT_MASK;
+
+ Val <<= NETC_BUS_WIDTH_SELECT_OFFSET;
+ Val &= NETC_BUS_WIDTH_SELECT_MASK;
+
+ RegVal |= Val;
+
+ Mvpp2Rfu1Write (Port->Priv, MV_NETCOMP_PORTS_CONTROL_0, RegVal);
+}
+
+STATIC
+VOID
+MvGop110NetcSampleStagesTiming (
+ IN PP2DXE_PORT *Port,
+ IN UINT32 Val
+ )
+{
+ UINT32 RegVal;
+
+ RegVal = Mvpp2Rfu1Read (Port->Priv, MV_NETCOMP_PORTS_CONTROL_0);
+ RegVal &= ~NETC_GIG_RX_DATA_SAMPLE_MASK;
+
+ Val <<= NETC_GIG_RX_DATA_SAMPLE_OFFSET;
+ Val &= NETC_GIG_RX_DATA_SAMPLE_MASK;
+
+ RegVal |= Val;
+
+ Mvpp2Rfu1Write (Port->Priv, MV_NETCOMP_PORTS_CONTROL_0, RegVal);
+}
+
+STATIC
+VOID
+MvGop110NetcMacToXgmii (
+ IN PP2DXE_PORT *Port,
+ IN UINT32 PortId,
+ IN enum MvNetcPhase Phase
+ )
+{
+ switch (Phase) {
+ case MV_NETC_FIRST_PHASE:
+
+ /* Set Bus Width to HB mode = 1 */
+ MvGop110NetcBusWidthSelect (Port, 0x1);
+
+ /* Select RGMII mode */
+ MvGop110NetcGbeSgmiiModeSelect (Port, PortId, MV_NETC_GBE_XMII);
+ break;
+ case MV_NETC_SECOND_PHASE:
+
+ /* De-assert the relevant PortId HB Reset */
+ MvGop110NetcPortRfReset (Port, PortId, 0x1);
+ break;
+ }
+}
+
+STATIC
+VOID
+MvGop110NetcMacToSgmii (
+ IN PP2DXE_PORT *Port,
+ IN UINT32 PortId,
+ IN enum MvNetcPhase Phase
+ )
+{
+ switch (Phase) {
+ case MV_NETC_FIRST_PHASE:
+
+ /* Set Bus Width to HB mode = 1 */
+ MvGop110NetcBusWidthSelect (Port, 1);
+
+ /* Select SGMII mode */
+ if (PortId >= 1) {
+ MvGop110NetcGbeSgmiiModeSelect (Port, PortId, MV_NETC_GBE_SGMII);
+ }
+
+ /* Configure the sample stages */
+ MvGop110NetcSampleStagesTiming (Port, 0);
+ break;
+ case MV_NETC_SECOND_PHASE:
+
+ /* De-assert the relevant PortId HB Reset */
+ MvGop110NetcPortRfReset (Port, PortId, 1);
+ break;
+ }
+}
+
+STATIC
+VOID
+MvGop110NetcMacToRxaui (
+ IN PP2DXE_PORT *Port,
+ IN UINT32 PortId,
+ IN enum MvNetcPhase Phase,
+ IN enum MvNetcLanes Lanes
+ )
+{
+  /* Currently only RXAUI0 is supported */
+  if (PortId != 0) {
+    return;
+  }
+
+ switch (Phase) {
+ case MV_NETC_FIRST_PHASE:
+
+ /* RXAUI Serdes/s Clock alignment */
+ if (Lanes == MV_NETC_LANE_23) {
+ MvGop110NetcRxaui0Enable (Port, PortId, 1);
+ } else {
+ MvGop110NetcRxaui1Enable (Port, PortId, 1);
+ }
+ break;
+ case MV_NETC_SECOND_PHASE:
+
+ /* De-assert the relevant PortId HB Reset */
+ MvGop110NetcPortRfReset (Port, PortId, 1);
+ break;
+ }
+}
+
+STATIC
+VOID
+MvGop110NetcMacToXaui (
+ IN PP2DXE_PORT *Port,
+ IN UINT32 PortId,
+ IN enum MvNetcPhase Phase
+ )
+{
+ switch (Phase) {
+ case MV_NETC_FIRST_PHASE:
+
+    /* XAUI Serdes/s Clock alignment */
+ MvGop110NetcXauiEnable (Port, PortId, 1);
+ break;
+ case MV_NETC_SECOND_PHASE:
+
+ /* De-assert the relevant PortId HB Reset */
+ MvGop110NetcPortRfReset (Port, PortId, 1);
+ break;
+ }
+}
+
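+/*
+ * Configure the NetComplex according to the requested Port modes:
+ * mode selection is done in the first Phase, Reset de-assertion
+ * and clock setup in the second Phase.
+ */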
+INT32
+MvGop110NetcInit (
+ IN PP2DXE_PORT *Port,
+ IN UINT32 NetCompConfig,
+ IN enum MvNetcPhase Phase
+ )
+{
+ UINT32 c = NetCompConfig;
+
+ if (c & MV_NETC_GE_MAC0_RXAUI_L23) {
+ MvGop110NetcMacToRxaui (Port, 0, Phase, MV_NETC_LANE_23);
+ }
+
+ if (c & MV_NETC_GE_MAC0_RXAUI_L45) {
+ MvGop110NetcMacToRxaui (Port, 0, Phase, MV_NETC_LANE_45);
+ }
+
+ if (c & MV_NETC_GE_MAC0_XAUI) {
+ MvGop110NetcMacToXaui (Port, 0, Phase);
+ }
+
+ if (c & MV_NETC_GE_MAC2_SGMII) {
+ MvGop110NetcMacToSgmii (Port, 2, Phase);
+ } else {
+ MvGop110NetcMacToXgmii (Port, 2, Phase);
+ }
+
+ if (c & MV_NETC_GE_MAC3_SGMII) {
+ MvGop110NetcMacToSgmii (Port, 3, Phase);
+ } else {
+ MvGop110NetcMacToXgmii (Port, 3, Phase);
+ if (c & MV_NETC_GE_MAC3_RGMII) {
+ MvGop110NetcMiiMode (Port, 3, MV_NETC_GBE_RGMII);
+ } else {
+ MvGop110NetcMiiMode (Port, 3, MV_NETC_GBE_MII);
+ }
+ }
+
+ /* Activate gop Ports 0, 2, 3 */
+ MvGop110NetcActivePort (Port, 0, 1);
+ MvGop110NetcActivePort (Port, 2, 1);
+ MvGop110NetcActivePort (Port, 3, 1);
+
+ if (Phase == MV_NETC_SECOND_PHASE) {
+
+ /* Enable the GOP internal clock logic */
+ MvGop110NetcGopClockLogicSet (Port, 1);
+
+ /* De-assert GOP unit Reset */
+ MvGop110NetcGopReset (Port, 1);
+ }
+
+ return 0;
+}
+
+UINT32
+MvpPp2xGop110NetcCfgCreate (
+ IN PP2DXE_PORT *Port
+ )
+{
+ UINT32 Val = 0;
+
+ if (Port->GopIndex == 0) {
+ if (Port->PhyInterface == MV_MODE_XAUI) {
+ Val |= MV_NETC_GE_MAC0_XAUI;
+ } else if (Port->PhyInterface == MV_MODE_RXAUI) {
+ Val |= MV_NETC_GE_MAC0_RXAUI_L23;
+ }
+ }
+
+ if (Port->GopIndex == 2) {
+ if (Port->PhyInterface == MV_MODE_SGMII) {
+ Val |= MV_NETC_GE_MAC2_SGMII;
+ }
+ }
+
+ if (Port->GopIndex == 3) {
+ if (Port->PhyInterface == MV_MODE_SGMII) {
+ Val |= MV_NETC_GE_MAC3_SGMII;
+ } else if (Port->PhyInterface == MV_MODE_RGMII) {
+ Val |= MV_NETC_GE_MAC3_RGMII;
+ }
+ }
+
+ return Val;
+}
+
+/*
+ * Initialize physical Port. Configure the Port mode and
+ * all its elements accordingly.
+ */
+INT32
+MvGop110PortInit (
+ IN PP2DXE_PORT *Port
+ )
+{
+
+ switch (Port->PhyInterface) {
+ case MV_MODE_RGMII:
+ MvGop110GmacReset (Port, RESET);
+
+ /* Configure PCS */
+ MvGop110GpcsModeCfg (Port, FALSE);
+ MvGop110BypassClkCfg (Port, TRUE);
+
+ /* Configure MAC */
+ MvGop110GmacModeCfg (Port);
+
+ /* PCS unreset */
+ MvGop110GpcsReset (Port, UNRESET);
+
+ /* MAC unreset */
+ MvGop110GmacReset (Port, UNRESET);
+ break;
+ case MV_MODE_SGMII:
+ case MV_MODE_QSGMII:
+
+ /* Configure PCS */
+ MvGop110GpcsModeCfg (Port, TRUE);
+
+ /* Configure MAC */
+ MvGop110GmacModeCfg (Port);
+
+ /* Select proper MAC mode */
+ MvGop110Xlg2GigMacCfg (Port);
+
+ /* PCS unreset */
+ MvGop110GpcsReset (Port, UNRESET);
+
+ /* MAC unreset */
+ MvGop110GmacReset (Port, UNRESET);
+ break;
+ case MV_MODE_SFI:
+ /* Configure PCS */
+ MvGopXpcsModeCfg (Port, MVPP2_SFI_LANE_COUNT);
+
+ MvGopMpcsModeCfg (Port);
+
+ /* Configure MAC */
+ MvGopXlgMacModeCfg (Port);
+
+ /* PCS unreset */
+ MvGopXpcsUnreset (Port);
+
+ /* MAC unreset */
+ MvGopXlgMacUnreset (Port);
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Set the MAC to Reset or exit from Reset */
+INT32
+MvGop110GmacReset (
+ IN PP2DXE_PORT *Port,
+ IN enum MvReset ResetCmd
+ )
+{
+ UINT32 RegAddr;
+ UINT32 Val;
+
+ RegAddr = MVPP2_PORT_CTRL2_REG;
+
+ Val = MvGop110GmacRead (Port, RegAddr);
+
+ if (ResetCmd == RESET) {
+ Val |= MVPP2_PORT_CTRL2_PORTMACRESET_MASK;
+ } else {
+ Val &= ~MVPP2_PORT_CTRL2_PORTMACRESET_MASK;
+ }
+
+ MvGop110GmacWrite (Port, RegAddr, Val);
+
+ return 0;
+}
+
+/* Enable/Disable Port to work with Gig PCS */
+INT32
+MvGop110GpcsModeCfg (
+ IN PP2DXE_PORT *Port,
+ BOOLEAN En
+ )
+{
+ UINT32 Val;
+
+ Val = MvGop110GmacRead (Port, MVPP2_PORT_CTRL2_REG);
+
+ if (En) {
+ Val |= MVPP2_PORT_CTRL2_PCS_EN_MASK;
+ } else {
+ Val &= ~MVPP2_PORT_CTRL2_PCS_EN_MASK;
+ }
+
+ MvGop110GmacWrite (Port, MVPP2_PORT_CTRL2_REG, Val);
+
+ return 0;
+}
+
+INT32
+MvGop110BypassClkCfg (
+ IN PP2DXE_PORT *Port,
+ IN BOOLEAN En
+ )
+{
+ UINT32 Val;
+
+ Val = MvGop110GmacRead (Port, MVPP2_PORT_CTRL2_REG);
+
+ if (En) {
+ Val |= MVPP2_PORT_CTRL2_CLK_125_BYPS_EN_MASK;
+ } else {
+ Val &= ~MVPP2_PORT_CTRL2_CLK_125_BYPS_EN_MASK;
+ }
+
+ MvGop110GmacWrite (Port, MVPP2_PORT_CTRL2_REG, Val);
+
+ return 0;
+}
+
+INT32
+MvGop110GpcsReset (
+ IN PP2DXE_PORT *Port,
+ IN enum MvReset ResetCmd
+ )
+{
+ UINT32 RegData;
+
+ RegData = MvGop110GmacRead (Port, MVPP2_PORT_CTRL2_REG);
+
+ if (ResetCmd == RESET) {
+ U32_SET_FIELD (
+ RegData,
+ MVPP2_PORT_CTRL2_SGMII_MODE_MASK,
+ 0
+ );
+
+ } else {
+ U32_SET_FIELD (
+ RegData,
+ MVPP2_PORT_CTRL2_SGMII_MODE_MASK,
+ 1 << MVPP2_PORT_CTRL2_SGMII_MODE_OFFS
+ );
+
+ }
+
+ MvGop110GmacWrite (Port, MVPP2_PORT_CTRL2_REG, RegData);
+
+ return 0;
+}
+
+VOID
+MvGop110Xlg2GigMacCfg (
+ IN PP2DXE_PORT *Port
+ )
+{
+ UINT32 RegVal;
+
+ /* Relevant only for MAC0 (XLG0 and GMAC0) */
+ if (Port->GopIndex > 0) {
+ return;
+ }
+
+ /* Configure 1Gig MAC mode */
+ RegVal = Mvpp2XlgRead (Port, MV_XLG_PORT_MAC_CTRL3_REG);
+ U32_SET_FIELD (
+ RegVal,
+ MV_XLG_MAC_CTRL3_MACMODESELECT_MASK,
+ (0 << MV_XLG_MAC_CTRL3_MACMODESELECT_OFFS)
+ );
+
+ Mvpp2XlgWrite (Port, MV_XLG_PORT_MAC_CTRL3_REG, RegVal);
+}
+
+/* Set the internal mux's to the required MAC in the GOP */
+INT32
+MvGop110GmacModeCfg (
+ IN PP2DXE_PORT *Port
+ )
+{
+ UINT32 RegAddr;
+ UINT32 Val;
+
+ /* Set TX FIFO thresholds */
+ switch (Port->PhyInterface) {
+ case MV_MODE_SGMII:
+ if (Port->Speed == MV_PORT_SPEED_2500) {
+ MvGop110GmacSgmii25Cfg (Port);
+ } else {
+ MvGop110GmacSgmiiCfg (Port);
+ }
+ break;
+ case MV_MODE_RGMII:
+ MvGop110GmacRgmiiCfg (Port);
+ break;
+ case MV_MODE_QSGMII:
+ MvGop110GmacQsgmiiCfg (Port);
+ break;
+ default:
+ return -1;
+ }
+
+  /* Jumbo frame support - 0x1400 * 2 = 0x2800 bytes */
+ Val = MvGop110GmacRead (Port, MVPP2_PORT_CTRL0_REG);
+ U32_SET_FIELD (
+ Val,
+ MVPP2_PORT_CTRL0_FRAMESIZELIMIT_MASK,
+ (0x1400 << MVPP2_PORT_CTRL0_FRAMESIZELIMIT_OFFS)
+ );
+
+ MvGop110GmacWrite (Port, MVPP2_PORT_CTRL0_REG, Val);
+
+ /* PeriodicXonEn disable */
+ RegAddr = MVPP2_PORT_CTRL1_REG;
+ Val = MvGop110GmacRead (Port, RegAddr);
+ Val &= ~MVPP2_PORT_CTRL1_EN_PERIODIC_FC_XON_MASK;
+ MvGop110GmacWrite (Port, RegAddr, Val);
+
+ /* Mask all Ports interrupts */
+ MvGop110GmacPortLinkEventMask (Port);
+
+#if MV_PP2x_INTERRUPT
+ /* Unmask link change interrupt */
+ Val = MvGop110GmacRead (Port, MVPP2_INTERRUPT_MASK_REG);
+ Val |= MVPP2_INTERRUPT_CAUSE_LINK_CHANGE_MASK;
+ Val |= 1; /* Unmask summary bit */
+ MvGop110GmacWrite (Port, MVPP2_INTERRUPT_MASK_REG, Val);
+#endif
+
+ return 0;
+}
+
+VOID
+MvGop110GmacRgmiiCfg (
+ IN PP2DXE_PORT *Port
+ )
+{
+ UINT32 Val, thresh, an;
+
+  /* Configure minimal level of the Tx FIFO before the lower part starts to read a packet */
+ thresh = MV_RGMII_TX_FIFO_MIN_TH;
+ Val = MvGop110GmacRead (Port, MVPP2_PORT_FIFO_CFG_1_REG);
+ U32_SET_FIELD (
+ Val,
+ MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_MASK,
+ (thresh << MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_OFFS)
+ );
+
+ MvGop110GmacWrite (Port, MVPP2_PORT_FIFO_CFG_1_REG, Val);
+
+ /* Disable bypass of sync module */
+ Val = MvGop110GmacRead (Port, MVPP2_PORT_CTRL4_REG);
+ Val |= MVPP2_PORT_CTRL4_SYNC_BYPASS_MASK;
+
+ /* Configure DP clock select according to mode */
+ Val &= ~MVPP2_PORT_CTRL4_DP_CLK_SEL_MASK;
+ Val |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
+ Val |= MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL_MASK;
+ MvGop110GmacWrite (Port, MVPP2_PORT_CTRL4_REG, Val);
+
+ Val = MvGop110GmacRead (Port, MVPP2_PORT_CTRL2_REG);
+ Val &= ~MVPP2_PORT_CTRL2_DIS_PADING_OFFS;
+ MvGop110GmacWrite (Port, MVPP2_PORT_CTRL2_REG, Val);
+
+ /* Configure GIG MAC to SGMII mode */
+ Val = MvGop110GmacRead (Port, MVPP2_PORT_CTRL0_REG);
+ Val &= ~MVPP2_PORT_CTRL0_PORTTYPE_MASK;
+ MvGop110GmacWrite (Port, MVPP2_PORT_CTRL0_REG, Val);
+
+  /* Configure AN 0xb8e8 */
+ an = MVPP2_PORT_AUTO_NEG_CFG_AN_BYPASS_EN_MASK |
+ MVPP2_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK |
+ MVPP2_PORT_AUTO_NEG_CFG_EN_FC_AN_MASK |
+ MVPP2_PORT_AUTO_NEG_CFG_EN_FDX_AN_MASK |
+ MVPP2_PORT_AUTO_NEG_CFG_CHOOSE_SAMPLE_TX_CONFIG_MASK;
+ MvGop110GmacWrite (Port, MVPP2_PORT_AUTO_NEG_CFG_REG, an);
+}
+
+VOID
+MvGop110GmacSgmii25Cfg (
+ IN PP2DXE_PORT *Port
+ )
+{
+ UINT32 Val, thresh, an;
+
+ /*
+ * Configure minimal level of the Tx FIFO before
+ * the lower part starts to read a packet.
+ */
+ thresh = MV_SGMII2_5_TX_FIFO_MIN_TH;
+ Val = MvGop110GmacRead (Port, MVPP2_PORT_FIFO_CFG_1_REG);
+ U32_SET_FIELD (
+ Val,
+ MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_MASK,
+ (thresh << MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_OFFS)
+ );
+
+ MvGop110GmacWrite (Port, MVPP2_PORT_FIFO_CFG_1_REG, Val);
+
+ /* Disable bypass of sync module */
+ Val = MvGop110GmacRead (Port, MVPP2_PORT_CTRL4_REG);
+ Val |= MVPP2_PORT_CTRL4_SYNC_BYPASS_MASK;
+
+ /* Configure DP clock select according to mode */
+ Val |= MVPP2_PORT_CTRL4_DP_CLK_SEL_MASK;
+
+ /* Configure QSGMII bypass according to mode */
+ Val |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
+ MvGop110GmacWrite (Port, MVPP2_PORT_CTRL4_REG, Val);
+
+ Val = MvGop110GmacRead (Port, MVPP2_PORT_CTRL2_REG);
+ Val |= MVPP2_PORT_CTRL2_DIS_PADING_OFFS;
+ MvGop110GmacWrite (Port, MVPP2_PORT_CTRL2_REG, Val);
+
+ /* Configure GIG MAC to 1000Base-X mode connected to a fiber transceiver */
+ Val = MvGop110GmacRead (Port, MVPP2_PORT_CTRL0_REG);
+ Val |= MVPP2_PORT_CTRL0_PORTTYPE_MASK;
+ MvGop110GmacWrite (Port, MVPP2_PORT_CTRL0_REG, Val);
+
+  /* Configure AN 0x9268 */
+ an = MVPP2_PORT_AUTO_NEG_CFG_EN_PCS_AN_MASK |
+ MVPP2_PORT_AUTO_NEG_CFG_AN_BYPASS_EN_MASK |
+ MVPP2_PORT_AUTO_NEG_CFG_SET_MII_SPEED_MASK |
+ MVPP2_PORT_AUTO_NEG_CFG_SET_GMII_SPEED_MASK |
+ MVPP2_PORT_AUTO_NEG_CFG_ADV_PAUSE_MASK |
+ MVPP2_PORT_AUTO_NEG_CFG_SET_FULL_DX_MASK |
+ MVPP2_PORT_AUTO_NEG_CFG_CHOOSE_SAMPLE_TX_CONFIG_MASK;
+ MvGop110GmacWrite (Port, MVPP2_PORT_AUTO_NEG_CFG_REG, an);
+}
+
+VOID
+MvGop110GmacSgmiiCfg (
+ IN PP2DXE_PORT *Port
+ )
+{
+ UINT32 Val, thresh, an;
+
+ /*
+ * Configure minimal level of the Tx FIFO before
+ * the lower part starts to read a packet.
+ */
+ thresh = MV_SGMII_TX_FIFO_MIN_TH;
+ Val = MvGop110GmacRead (Port, MVPP2_PORT_FIFO_CFG_1_REG);
+ U32_SET_FIELD (Val, MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_MASK,
+ (thresh << MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_OFFS));
+ MvGop110GmacWrite (Port, MVPP2_PORT_FIFO_CFG_1_REG, Val);
+
+ /* Disable bypass of sync module */
+ Val = MvGop110GmacRead (Port, MVPP2_PORT_CTRL4_REG);
+ Val |= MVPP2_PORT_CTRL4_SYNC_BYPASS_MASK;
+
+ /* Configure DP clock select according to mode */
+ Val &= ~MVPP2_PORT_CTRL4_DP_CLK_SEL_MASK;
+
+ /* Configure QSGMII bypass according to mode */
+ Val |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
+ MvGop110GmacWrite (Port, MVPP2_PORT_CTRL4_REG, Val);
+
+ Val = MvGop110GmacRead (Port, MVPP2_PORT_CTRL2_REG);
+ Val |= MVPP2_PORT_CTRL2_DIS_PADING_OFFS;
+ MvGop110GmacWrite (Port, MVPP2_PORT_CTRL2_REG, Val);
+
+ /* Configure GIG MAC to SGMII mode */
+ Val = MvGop110GmacRead (Port, MVPP2_PORT_CTRL0_REG);
+ Val &= ~MVPP2_PORT_CTRL0_PORTTYPE_MASK;
+ MvGop110GmacWrite (Port, MVPP2_PORT_CTRL0_REG, Val);
+
+ /* Configure AN */
+ an = MVPP2_PORT_AUTO_NEG_CFG_EN_PCS_AN_MASK |
+ MVPP2_PORT_AUTO_NEG_CFG_AN_BYPASS_EN_MASK |
+ MVPP2_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK |
+ MVPP2_PORT_AUTO_NEG_CFG_EN_FC_AN_MASK |
+ MVPP2_PORT_AUTO_NEG_CFG_EN_FDX_AN_MASK |
+ MVPP2_PORT_AUTO_NEG_CFG_CHOOSE_SAMPLE_TX_CONFIG_MASK;
+ MvGop110GmacWrite (Port, MVPP2_PORT_AUTO_NEG_CFG_REG, an);
+}
+
+VOID
+MvGop110GmacQsgmiiCfg (
+ IN PP2DXE_PORT *Port
+ )
+{
+ UINT32 Val, thresh, an;
+
+ /*
+ * Configure minimal level of the Tx FIFO before
+ * the lower part starts to read a packet.
+ */
+ thresh = MV_SGMII_TX_FIFO_MIN_TH;
+ Val = MvGop110GmacRead (Port, MVPP2_PORT_FIFO_CFG_1_REG);
+ U32_SET_FIELD (
+ Val,
+ MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_MASK,
+ (thresh << MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_OFFS)
+ );
+
+ MvGop110GmacWrite (Port, MVPP2_PORT_FIFO_CFG_1_REG, Val);
+
+ /* Disable bypass of sync module */
+ Val = MvGop110GmacRead (Port, MVPP2_PORT_CTRL4_REG);
+ Val |= MVPP2_PORT_CTRL4_SYNC_BYPASS_MASK;
+
+ /* Configure DP clock select according to mode */
+ Val &= ~MVPP2_PORT_CTRL4_DP_CLK_SEL_MASK;
+ Val &= ~MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL_MASK;
+
+ /* Configure QSGMII bypass according to mode */
+ Val &= ~MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
+ MvGop110GmacWrite (Port, MVPP2_PORT_CTRL4_REG, Val);
+
+ Val = MvGop110GmacRead (Port, MVPP2_PORT_CTRL2_REG);
+ Val &= ~MVPP2_PORT_CTRL2_DIS_PADING_OFFS;
+ MvGop110GmacWrite (Port, MVPP2_PORT_CTRL2_REG, Val);
+
+ /* Configure GIG MAC to SGMII mode */
+ Val = MvGop110GmacRead (Port, MVPP2_PORT_CTRL0_REG);
+ Val &= ~MVPP2_PORT_CTRL0_PORTTYPE_MASK;
+ MvGop110GmacWrite (Port, MVPP2_PORT_CTRL0_REG, Val);
+
+ /* Configure AN 0xB8EC */
+ an = MVPP2_PORT_AUTO_NEG_CFG_EN_PCS_AN_MASK |
+ MVPP2_PORT_AUTO_NEG_CFG_AN_BYPASS_EN_MASK |
+ MVPP2_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK |
+ MVPP2_PORT_AUTO_NEG_CFG_EN_FC_AN_MASK |
+ MVPP2_PORT_AUTO_NEG_CFG_EN_FDX_AN_MASK |
+ MVPP2_PORT_AUTO_NEG_CFG_CHOOSE_SAMPLE_TX_CONFIG_MASK;
+ MvGop110GmacWrite (Port, MVPP2_PORT_AUTO_NEG_CFG_REG, an);
+}
+
+INT32
+Mvpp2SmiPhyAddrCfg (
+ IN PP2DXE_PORT *Port,
+ IN INT32 PortId,
+ IN INT32 Addr
+ )
+{
+ Mvpp2SmiWrite (Port->Priv, MV_SMI_PHY_ADDRESS_REG(PortId), Addr);
+
+ return 0;
+}
+
+/* Set the internal mux's to the required PCS */
+EFI_STATUS
+MvGopXpcsModeCfg (
+ IN PP2DXE_PORT *Port,
+ IN INT32 NumOfLanes
+ )
+{
+ UINT8 LaneCoeff;
+
+ switch (NumOfLanes) {
+ case 1:
+ case 2:
+ case 4:
+    LaneCoeff = NumOfLanes >> 1;
+    break;
+  default:
+ return EFI_INVALID_PARAMETER;
+ }
+
+ /* Configure XG MAC mode */
+ MmioAndThenOr32 (Port->Priv->XpcsBase + MVPP22_XPCS_GLOBAL_CFG_0_REG,
+ ~(MVPP22_XPCS_PCSMODE_MASK | MVPP22_XPCS_LANEACTIVE_MASK),
+ LaneCoeff << MVPP22_XPCS_LANEACTIVE_OFFS);
+
+ return EFI_SUCCESS;
+}
+
+VOID
+MvGopMpcsModeCfg (
+ IN PP2DXE_PORT *Port
+ )
+{
+ /* Configure MPCS40G COMMON CONTROL */
+ MmioAnd32 (Port->Priv->MpcsBase + MVPP22_MPCS40G_COMMON_CONTROL,
+ ~MVPP22_MPCS_FORWARD_ERROR_CORRECTION_MASK);
+
+ /* Configure MPCS CLOCK RESET */
+ MmioAndThenOr32 (Port->Priv->MpcsBase + MVPP22_MPCS_CLOCK_RESET,
+ ~(MVPP22_MPCS_CLK_DIVISION_RATIO_MASK | MVPP22_MPCS_CLK_DIV_PHASE_SET_MASK),
+ MVPP22_MPCS_CLK_DIVISION_RATIO_DEFAULT | MVPP22_MPCS_MAC_CLK_RESET_MASK |
+ MVPP22_MPCS_RX_SD_CLK_RESET_MASK | MVPP22_MPCS_TX_SD_CLK_RESET_MASK);
+}
+
+/* Set the internal mux's to the required MAC in the GOP */
+VOID
+MvGopXlgMacModeCfg (
+ IN PP2DXE_PORT *Port
+ )
+{
+ /* Configure 10G MAC mode */
+ MmioOr32 (Port->XlgBase + MV_XLG_PORT_MAC_CTRL0_REG, MV_XLG_MAC_CTRL0_RXFCEN_MASK);
+
+ MmioAndThenOr32 (Port->XlgBase + MV_XLG_PORT_MAC_CTRL3_REG,
+ ~MV_XLG_MAC_CTRL3_MACMODESELECT_MASK,
+ MV_XLG_MAC_CTRL3_MACMODESELECT_10G);
+
+ MmioAndThenOr32 (Port->XlgBase + MV_XLG_PORT_MAC_CTRL4_REG,
+ ~(MV_XLG_MAC_CTRL4_MAC_MODE_DMA_1G_MASK | MV_XLG_MAC_CTRL4_EN_IDLE_CHECK_FOR_LINK_MASK),
+ MV_XLG_MAC_CTRL4_FORWARD_PFC_EN_MASK | MV_XLG_MAC_CTRL4_FORWARD_802_3X_FC_EN_MASK);
+
+ /* Configure frame size limit */
+ MmioAndThenOr32 (Port->XlgBase + MV_XLG_PORT_MAC_CTRL1_REG,
+ ~MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_MASK,
+ MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_DEFAULT);
+
+ /* Mask all port's external interrupts */
+ MvGop110XlgPortLinkEventMask (Port);
+
+ /* Unmask link change interrupt - enable automatic status update */
+ MmioOr32 (Port->XlgBase + MV_XLG_INTERRUPT_MASK_REG,
+ MV_XLG_INTERRUPT_LINK_CHANGE_MASK | MV_XLG_SUMMARY_INTERRUPT_MASK);
+}
+
+/* Set PCS to exit from reset */
+VOID
+MvGopXpcsUnreset (
+ IN PP2DXE_PORT *Port
+ )
+{
+ MmioOr32 (Port->Priv->XpcsBase + MVPP22_XPCS_GLOBAL_CFG_0_REG, MVPP22_XPCS_PCSRESET);
+}
+
+/* Set the MAC to exit from reset */
+VOID
+MvGopXlgMacUnreset (
+ IN PP2DXE_PORT *Port
+ )
+{
+ MmioOr32 (Port->XlgBase + MV_XLG_PORT_MAC_CTRL0_REG, MV_XLG_MAC_CTRL0_MACRESETN_MASK);
+}
+
+BOOLEAN
+MvGop110XlgLinkStatusGet (
+ IN PP2DXE_PORT *Port
+ )
+{
+ return MmioRead32 (Port->XlgBase + MV_XLG_MAC_PORT_STATUS_REG) & MV_XLG_MAC_PORT_STATUS_LINKSTATUS_MASK;
+}
+
+BOOLEAN
+MvGop110PortIsLinkUp (
+ IN PP2DXE_PORT *Port
+ )
+{
+ switch (Port->PhyInterface) {
+ case MV_MODE_RGMII:
+ case MV_MODE_SGMII:
+ case MV_MODE_QSGMII:
+ return MvGop110GmacLinkStatusGet (Port);
+ case MV_MODE_SFI:
+ return MvGop110XlgLinkStatusGet (Port);
+ case MV_MODE_XAUI:
+ case MV_MODE_RXAUI:
+ return FALSE;
+ default:
+ return FALSE;
+ }
+}
+
+/* Get MAC link status */
+BOOLEAN
+MvGop110GmacLinkStatusGet (
+ IN PP2DXE_PORT *Port
+ )
+{
+ UINT32 RegAddr;
+ UINT32 Val;
+
+ RegAddr = MVPP2_PORT_STATUS0_REG;
+
+ Val = MvGop110GmacRead (Port, RegAddr);
+
+ return (Val & 1) ? TRUE : FALSE;
+}
+
+STATIC
+VOID
+MvGop110XlgPortEnable (
+ IN PP2DXE_PORT *Port
+ )
+{
+ /* Enable port and MIB counters update */
+ MmioAndThenOr32 (Port->XlgBase + MV_XLG_PORT_MAC_CTRL0_REG,
+ ~MV_XLG_MAC_CTRL0_MIBCNTDIS_MASK,
+ MV_XLG_MAC_CTRL0_PORTEN_MASK);
+}
+
+STATIC
+VOID
+MvGop110XlgPortDisable (
+ IN PP2DXE_PORT *Port
+ )
+{
+ /* Mask all port's external interrupts */
+ MvGop110XlgPortLinkEventMask (Port);
+
+ MmioAnd32 (Port->XlgBase + MV_XLG_PORT_MAC_CTRL0_REG, ~MV_XLG_MAC_CTRL0_PORTEN_MASK);
+}
+
+VOID
+MvGop110PortDisable (
+ IN PP2DXE_PORT *Port
+ )
+{
+ switch (Port->PhyInterface) {
+ case MV_MODE_RGMII:
+ case MV_MODE_SGMII:
+ case MV_MODE_QSGMII:
+ MvGop110GmacPortDisable (Port);
+ break;
+ case MV_MODE_XAUI:
+ case MV_MODE_RXAUI:
+ case MV_MODE_SFI:
+ MvGop110XlgPortDisable (Port);
+ break;
+ default:
+ return;
+ }
+}
+
+VOID
+MvGop110PortEnable (
+ IN PP2DXE_PORT *Port
+ )
+{
+ switch (Port->PhyInterface) {
+ case MV_MODE_RGMII:
+ case MV_MODE_SGMII:
+ case MV_MODE_QSGMII:
+ MvGop110GmacPortEnable (Port);
+ break;
+ case MV_MODE_XAUI:
+ case MV_MODE_RXAUI:
+ case MV_MODE_SFI:
+ MvGop110XlgPortEnable (Port);
+ break;
+ default:
+ return;
+ }
+}
+
+/* Enable Port and MIB counters */
+VOID
+MvGop110GmacPortEnable (
+ IN PP2DXE_PORT *Port
+ )
+{
+ UINT32 RegVal;
+
+ RegVal = MvGop110GmacRead (Port, MVPP2_PORT_CTRL0_REG);
+ RegVal |= MVPP2_PORT_CTRL0_PORTEN_MASK;
+ RegVal |= MVPP2_PORT_CTRL0_COUNT_EN_MASK;
+
+ MvGop110GmacWrite (Port, MVPP2_PORT_CTRL0_REG, RegVal);
+}
+
+/* Disable Port */
+VOID
+MvGop110GmacPortDisable (
+ IN PP2DXE_PORT *Port
+ )
+{
+ UINT32 RegVal;
+
+ /* Mask all Ports interrupts */
+ MvGop110GmacPortLinkEventMask (Port);
+
+ RegVal = MvGop110GmacRead (Port, MVPP2_PORT_CTRL0_REG);
+ RegVal &= ~MVPP2_PORT_CTRL0_PORTEN_MASK;
+
+ MvGop110GmacWrite (Port, MVPP2_PORT_CTRL0_REG, RegVal);
+}
+
+VOID
+MvGop110GmacPortLinkEventMask (
+ IN PP2DXE_PORT *Port
+ )
+{
+ UINT32 RegVal;
+
+ RegVal = MvGop110GmacRead (Port, MV_GMAC_INTERRUPT_SUM_MASK_REG);
+ RegVal &= ~MV_GMAC_INTERRUPT_SUM_CAUSE_LINK_CHANGE_MASK;
+ MvGop110GmacWrite (Port, MV_GMAC_INTERRUPT_SUM_MASK_REG, RegVal);
+}
+
+VOID
+MvGop110XlgPortLinkEventMask (
+ IN PP2DXE_PORT *Port
+ )
+{
+ MmioAnd32 (Port->XlgBase + MV_XLG_EXTERNAL_INTERRUPT_MASK_REG,
+ ~MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_MASK);
+}
+
+INT32
+MvGop110PortEventsMask (
+ IN PP2DXE_PORT *Port
+ )
+{
+
+ switch (Port->PhyInterface) {
+ case MV_MODE_RGMII:
+ case MV_MODE_SGMII:
+ case MV_MODE_QSGMII:
+ MvGop110GmacPortLinkEventMask (Port);
+ break;
+ case MV_MODE_XAUI:
+ case MV_MODE_RXAUI:
+ case MV_MODE_SFI:
+ MvGop110XlgPortLinkEventMask (Port);
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Sets "Force Link Pass" and clears "Do Not Force Link Fail" bits.
+ * This function should only be called when the port is disabled.
+ */
+VOID
+MvGop110GmacForceLinkUp (
+ IN PP2DXE_PORT *Port
+ )
+{
+ UINT32 RegVal;
+
+ RegVal = MvGop110GmacRead (Port, MVPP2_PORT_AUTO_NEG_CFG_REG);
+
+ RegVal |= MVPP2_PORT_AUTO_NEG_CFG_FORCE_LINK_UP_MASK;
+ RegVal &= ~MVPP2_PORT_AUTO_NEG_CFG_FORCE_LINK_DOWN_MASK;
+
+ MvGop110GmacWrite (Port, MVPP2_PORT_AUTO_NEG_CFG_REG, RegVal);
+}
+
+INT32
+MvGop110FlCfg (
+ IN PP2DXE_PORT *Port
+ )
+{
+ switch (Port->PhyInterface) {
+ case MV_MODE_RGMII:
+ case MV_MODE_SGMII:
+ case MV_MODE_QSGMII:
+ /* Disable AN */
+ MvGop110SpeedDuplexSet (Port, Port->Speed, MV_PORT_DUPLEX_FULL);
+ break;
+ case MV_MODE_XAUI:
+ case MV_MODE_RXAUI:
+ case MV_MODE_SFI:
+ return 0;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Set Port Speed and Duplex */
+INT32
+MvGop110SpeedDuplexSet (
+ IN PP2DXE_PORT *Port,
+ IN INT32 Speed,
+ IN enum MvPortDuplex Duplex
+ )
+{
+ switch (Port->PhyInterface) {
+ case MV_MODE_RGMII:
+ case MV_MODE_SGMII:
+ case MV_MODE_QSGMII:
+ MvGop110GmacSpeedDuplexSet (Port, Speed, Duplex);
+ break;
+ case MV_MODE_XAUI:
+ case MV_MODE_RXAUI:
+ case MV_MODE_SFI:
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Sets Port Speed to Auto Negotiation / 1000 / 100 / 10 Mbps.
+ * Sets Port Duplex to Auto Negotiation / Full / Half Duplex.
+ */
+INT32
+MvGop110GmacSpeedDuplexSet (
+ IN PP2DXE_PORT *Port,
+ IN INT32 Speed,
+ IN enum MvPortDuplex Duplex
+ )
+{
+ UINT32 RegVal;
+
+ RegVal = Mvpp2GmacRead (Port, MVPP2_PORT_AUTO_NEG_CFG_REG);
+
+ switch (Speed) {
+ case MV_PORT_SPEED_2500:
+ case MV_PORT_SPEED_1000:
+ RegVal &= ~MVPP2_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK;
+ RegVal |= MVPP2_PORT_AUTO_NEG_CFG_SET_GMII_SPEED_MASK;
+ /* The 100/10 bit doesn't matter in this case */
+ break;
+ case MV_PORT_SPEED_100:
+ RegVal &= ~MVPP2_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK;
+ RegVal &= ~MVPP2_PORT_AUTO_NEG_CFG_SET_GMII_SPEED_MASK;
+ RegVal |= MVPP2_PORT_AUTO_NEG_CFG_SET_MII_SPEED_MASK;
+ break;
+ case MV_PORT_SPEED_10:
+ RegVal &= ~MVPP2_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK;
+ RegVal &= ~MVPP2_PORT_AUTO_NEG_CFG_SET_GMII_SPEED_MASK;
+ RegVal &= ~MVPP2_PORT_AUTO_NEG_CFG_SET_MII_SPEED_MASK;
+ break;
+ default:
+ return MVPP2_EINVAL;
+ }
+
+ switch (Duplex) {
+ case MV_PORT_DUPLEX_AN:
+ RegVal |= MVPP2_PORT_AUTO_NEG_CFG_EN_FDX_AN_MASK;
+ /* The other bits don't matter in this case */
+ break;
+ case MV_PORT_DUPLEX_HALF:
+ RegVal &= ~MVPP2_PORT_AUTO_NEG_CFG_EN_FDX_AN_MASK;
+ RegVal &= ~MVPP2_PORT_AUTO_NEG_CFG_SET_FULL_DX_MASK;
+ break;
+ case MV_PORT_DUPLEX_FULL:
+ RegVal &= ~MVPP2_PORT_AUTO_NEG_CFG_EN_FDX_AN_MASK;
+ RegVal |= MVPP2_PORT_AUTO_NEG_CFG_SET_FULL_DX_MASK;
+ break;
+ default:
+ return MVPP2_EINVAL;
+ }
+
+ Mvpp2GmacWrite (Port, MVPP2_PORT_AUTO_NEG_CFG_REG, RegVal);
+
+ return 0;
+}
+
+VOID
+Mvpp2AxiConfig (
+ IN MVPP2_SHARED *Priv
+ )
+{
+  /* Configure AXI Read & Write to Normal and Snoop mode */
+ Mvpp2Write (Priv, MVPP22_AXI_BM_WR_ATTR_REG, MVPP22_AXI_ATTR_SNOOP_CNTRL_BIT);
+ Mvpp2Write (Priv, MVPP22_AXI_BM_RD_ATTR_REG, MVPP22_AXI_ATTR_SNOOP_CNTRL_BIT);
+ Mvpp2Write (Priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, MVPP22_AXI_ATTR_SNOOP_CNTRL_BIT);
+ Mvpp2Write (Priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, MVPP22_AXI_ATTR_SNOOP_CNTRL_BIT);
+ Mvpp2Write (Priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, MVPP22_AXI_ATTR_SNOOP_CNTRL_BIT);
+ Mvpp2Write (Priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, MVPP22_AXI_ATTR_SNOOP_CNTRL_BIT);
+ Mvpp2Write (Priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, MVPP22_AXI_ATTR_SNOOP_CNTRL_BIT);
+ Mvpp2Write (Priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, MVPP22_AXI_ATTR_SNOOP_CNTRL_BIT);
+}
+
+/* Cleanup Tx Ports */
+VOID
+Mvpp2TxpClean (
+ IN PP2DXE_PORT *Port,
+ IN INT32 Txp,
+ IN MVPP2_TX_QUEUE *Txq
+ )
+{
+ INT32 Delay, Pending;
+ UINT32 RegVal;
+
+ Mvpp2Write (Port->Priv, MVPP2_TXQ_NUM_REG, Txq->Id);
+ RegVal = Mvpp2Read (Port->Priv, MVPP2_TXQ_PREF_BUF_REG);
+ RegVal |= MVPP2_TXQ_DRAIN_EN_MASK;
+ Mvpp2Write (Port->Priv, MVPP2_TXQ_PREF_BUF_REG, RegVal);
+
+ /*
+ * The Queue has been stopped so wait for all packets
+ * to be transmitted.
+ */
+ Delay = 0;
+ do {
+ if (Delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
+ Mvpp2Printf ("Port %d: cleaning Queue %d timed out\n", Port->Id, Txq->LogId);
+ break;
+ }
+ Mvpp2Mdelay (1);
+ Delay++;
+
+ Pending = Mvpp2TxqPendDescNumGet (Port, Txq);
+ } while (Pending);
+
+ RegVal &= ~MVPP2_TXQ_DRAIN_EN_MASK;
+ Mvpp2Write (Port->Priv, MVPP2_TXQ_PREF_BUF_REG, RegVal);
+}
+
+/* Cleanup all Tx Queues */
+VOID
+Mvpp2CleanupTxqs (
+ IN PP2DXE_PORT *Port
+ )
+{
+ MVPP2_TX_QUEUE *Txq;
+ INT32 Txp, Queue;
+ UINT32 RegVal;
+
+ RegVal = Mvpp2Read (Port->Priv, MVPP2_TX_PORT_FLUSH_REG);
+
+ /* Reset Tx Ports and delete Tx Queues */
+ for (Txp = 0; Txp < Port->TxpNum; Txp++) {
+ RegVal |= MVPP2_TX_PORT_FLUSH_MASK (Port->Id);
+ Mvpp2Write (Port->Priv, MVPP2_TX_PORT_FLUSH_REG, RegVal);
+
+ for (Queue = 0; Queue < TxqNumber; Queue++) {
+ Txq = &Port->Txqs[Txp * TxqNumber + Queue];
+ Mvpp2TxpClean (Port, Txp, Txq);
+ Mvpp2TxqHwDeinit (Port, Txq);
+ }
+
+ RegVal &= ~MVPP2_TX_PORT_FLUSH_MASK (Port->Id);
+ Mvpp2Write (Port->Priv, MVPP2_TX_PORT_FLUSH_REG, RegVal);
+ }
+}
+
+/* Cleanup all Rx Queues */
+VOID
+Mvpp2CleanupRxqs (
+ IN PP2DXE_PORT *Port
+ )
+{
+ INT32 Queue;
+
+ for (Queue = 0; Queue < RxqNumber; Queue++) {
+ Mvpp2RxqHwDeinit (Port, &Port->Rxqs[Queue]);
+ }
+}
diff --git a/Silicon/Marvell/Drivers/Net/Pp2Dxe/Mvpp2Lib.h b/Silicon/Marvell/Drivers/Net/Pp2Dxe/Mvpp2Lib.h
new file mode 100644
index 0000000000..3dc9ecdd20
--- /dev/null
+++ b/Silicon/Marvell/Drivers/Net/Pp2Dxe/Mvpp2Lib.h
@@ -0,0 +1,762 @@
+/********************************************************************************
+Copyright (C) 2016 Marvell International Ltd.
+
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __MVPP2_LIB_H__
+#define __MVPP2_LIB_H__
+
+#include "Mvpp2LibHw.h"
+#include "Pp2Dxe.h"
+
+/* Number of RXQs used by a single Port */
+STATIC INT32 RxqNumber = 1;
+/* Number of TXQs used by a single Port */
+STATIC INT32 TxqNumber = 1;
+
+VOID
+Mvpp2PrsMacPromiscSet (
+ IN MVPP2_SHARED *Priv,
+ IN INT32 PortId,
+ IN BOOLEAN Add
+ );
+
+VOID
+Mvpp2PrsMacMultiSet (
+ IN MVPP2_SHARED *Priv,
+ IN INT32 PortId,
+ IN INT32 Index,
+ IN BOOLEAN Add
+ );
+
+INT32
+Mvpp2PrsDefaultInit (
+ IN MVPP2_SHARED *Priv
+ );
+
+INT32
+Mvpp2PrsMacDaAccept (
+ IN MVPP2_SHARED *Priv,
+ IN INT32 PortId,
+ IN const UINT8 *Da,
+ IN BOOLEAN Add
+ );
+
+VOID
+Mvpp2PrsMcastDelAll (
+ IN MVPP2_SHARED *Priv,
+ IN INT32 PortId
+ );
+
+INT32
+Mvpp2PrsTagModeSet (
+ IN MVPP2_SHARED *Priv,
+ IN INT32 PortId,
+ IN INT32 type
+ );
+
+INT32
+Mvpp2PrsDefFlow (
+ IN PP2DXE_PORT *Port
+ );
+
+VOID
+Mvpp2ClsInit (
+ IN MVPP2_SHARED *Priv
+ );
+
+VOID
+Mvpp2ClsPortConfig (
+ IN PP2DXE_PORT *Port
+ );
+
+VOID
+Mvpp2ClsOversizeRxqSet (
+ IN PP2DXE_PORT *Port
+ );
+
+VOID
+Mvpp2BmPoolHwCreate (
+ IN MVPP2_SHARED *Priv,
+ IN MVPP2_BMS_POOL *BmPool,
+ IN INT32 Size
+ );
+
+VOID
+Mvpp2BmPoolBufsizeSet (
+ IN MVPP2_SHARED *Priv,
+ IN MVPP2_BMS_POOL *BmPool,
+ IN INT32 BufSize
+ );
+
+VOID
+Mvpp2BmStop (
+ IN MVPP2_SHARED *Priv,
+ IN INT32 Pool
+ );
+
+VOID
+Mvpp2BmIrqClear (
+ IN MVPP2_SHARED *Priv,
+ IN INT32 Pool
+ );
+
+VOID
+Mvpp2RxqLongPoolSet (
+ IN PP2DXE_PORT *Port,
+ IN INT32 Lrxq,
+ IN INT32 LongPool
+ );
+
+VOID
+Mvpp2RxqShortPoolSet (
+ IN PP2DXE_PORT *Port,
+ IN INT32 Lrxq,
+ IN INT32 ShortPool
+ );
+
+VOID
+Mvpp2BmPoolMcPut (
+ IN PP2DXE_PORT *Port,
+ IN INT32 Pool,
+ IN UINT32 BufPhysAddr,
+ IN UINT32 BufVirtAddr,
+ IN INT32 McId
+ );
+
+VOID
+Mvpp2PoolRefill (
+ IN PP2DXE_PORT *Port,
+ IN UINT32 Bm,
+ IN UINT32 PhysAddr,
+ IN UINT32 Cookie
+ );
+
+INTN
+Mvpp2BmPoolCtrl (
+ IN MVPP2_SHARED *Priv,
+ IN INTN Pool,
+ IN enum Mvpp2Command cmd
+ );
+
+VOID
+Mvpp2InterruptsMask (
+ IN VOID *arg
+ );
+
+VOID
+Mvpp2InterruptsUnmask (
+ IN VOID *arg
+ );
+
+VOID
+Mvpp2PortEnable (
+ IN PP2DXE_PORT *Port
+ );
+
+VOID
+Mvpp2PortDisable (
+ IN PP2DXE_PORT *Port
+ );
+
+VOID
+Mvpp2DefaultsSet (
+ IN PP2DXE_PORT *Port
+ );
+
+VOID
+Mvpp2IngressEnable (
+ IN PP2DXE_PORT *Port
+ );
+
+VOID
+Mvpp2IngressDisable (
+ IN PP2DXE_PORT *Port
+ );
+
+VOID
+Mvpp2EgressEnable (
+ IN PP2DXE_PORT *Port
+ );
+
+VOID
+Mvpp2EgressDisable (
+ IN PP2DXE_PORT *Port
+ );
+
+UINT32
+Mvpp2BmCookieBuild (
+ IN MVPP2_RX_DESC *RxDesc,
+ IN INT32 Cpu
+ );
+
+INT32
+Mvpp2TxqDrainSet (
+ IN PP2DXE_PORT *Port,
+ IN INT32 Txq,
+ IN BOOLEAN En
+ );
+
+INT32
+Mvpp2TxqPendDescNumGet (
+ IN PP2DXE_PORT *Port,
+ IN MVPP2_TX_QUEUE *Txq
+ );
+
+UINT32
+Mvpp2AggrTxqPendDescNumGet (
+ IN MVPP2_SHARED *Priv,
+ IN INT32 Cpu
+ );
+
+MVPP2_TX_DESC *
+Mvpp2TxqNextDescGet (
+ MVPP2_TX_QUEUE *Txq
+ );
+
+VOID
+Mvpp2AggrTxqPendDescAdd (
+ IN PP2DXE_PORT *Port,
+ IN INT32 Pending
+ );
+
+INT32
+Mvpp2AggrDescNumCheck (
+ IN MVPP2_SHARED *Priv,
+ IN MVPP2_TX_QUEUE *AggrTxq,
+ IN INT32 Num,
+ IN INT32 Cpu
+ );
+
+INT32
+Mvpp2TxqAllocReservedDesc (
+ IN MVPP2_SHARED *Priv,
+ IN MVPP2_TX_QUEUE *Txq,
+ IN INT32 Num
+ );
+
+VOID
+Mvpp2TxqDescPut (
+ IN MVPP2_TX_QUEUE *Txq
+ );
+
+UINT32
+Mvpp2TxqDescCsum (
+ IN INT32 L3Offs,
+ IN INT32 L3Proto,
+ IN INT32 IpHdrLen,
+ IN INT32 L4Proto
+ );
+
+VOID
+Mvpp2TxqSentCounterClear (
+ IN OUT VOID *arg
+ );
+
+VOID
+Mvpp2GmacMaxRxSizeSet (
+ IN PP2DXE_PORT *Port
+ );
+
+VOID
+Mvpp2TxpMaxTxSizeSet (
+ IN PP2DXE_PORT *Port
+ );
+
+VOID
+Mvpp2RxPktsCoalSet (
+ IN PP2DXE_PORT *Port,
+ IN OUT MVPP2_RX_QUEUE *Rxq,
+ IN UINT32 Pkts
+ );
+
+VOID
+Mvpp2RxTimeCoalSet (
+ IN PP2DXE_PORT *Port,
+ IN OUT MVPP2_RX_QUEUE *Rxq,
+ IN UINT32 Usec
+ );
+
+VOID
+Mvpp2AggrTxqHwInit (
+ IN OUT MVPP2_TX_QUEUE *AggrTxq,
+ IN INT32 DescNum,
+ IN INT32 Cpu,
+ IN MVPP2_SHARED *Priv
+ );
+
+VOID
+Mvpp2RxqHwInit (
+ IN PP2DXE_PORT *Port,
+ IN OUT MVPP2_RX_QUEUE *Rxq
+ );
+
+VOID
+Mvpp2RxqDropPkts (
+ IN PP2DXE_PORT *Port,
+ IN OUT MVPP2_RX_QUEUE *Rxq,
+ IN INT32 Cpu
+ );
+
+VOID
+Mvpp2TxqHwInit (
+ IN PP2DXE_PORT *Port,
+ IN OUT MVPP2_TX_QUEUE *Txq
+ );
+
+VOID
+Mvpp2TxqHwDeinit (
+ IN PP2DXE_PORT *Port,
+ IN OUT MVPP2_TX_QUEUE *Txq
+ );
+
+VOID
+Mvpp2PortPowerUp (
+ IN PP2DXE_PORT *Port
+ );
+
+VOID
+Mvpp2RxFifoInit (
+ IN MVPP2_SHARED *Priv
+ );
+
+VOID
+Mvpp2RxqHwDeinit (
+ IN PP2DXE_PORT *Port,
+ IN OUT MVPP2_RX_QUEUE *Rxq
+ );
+
+INT32
+MvGop110NetcInit (
+ IN PP2DXE_PORT *Port,
+ IN UINT32 NetCompConfig,
+ IN enum MvNetcPhase phase
+ );
+
+UINT32
+MvpPp2xGop110NetcCfgCreate (
+ IN PP2DXE_PORT *Port
+ );
+
+INT32
+MvGop110PortInit (
+ IN PP2DXE_PORT *Port
+ );
+
+INT32
+MvGop110GmacReset (
+ IN PP2DXE_PORT *Port,
+ IN enum MvReset ResetCmd
+ );
+
+INT32
+MvGop110GpcsModeCfg (
+ IN PP2DXE_PORT *Port,
+ BOOLEAN En
+ );
+
+INT32
+MvGop110BypassClkCfg (
+ IN PP2DXE_PORT *Port,
+ IN BOOLEAN En
+ );
+
+INT32
+MvGop110GpcsReset (
+ IN PP2DXE_PORT *Port,
+ IN enum MvReset ResetCmd
+ );
+
+VOID
+MvGop110Xlg2GigMacCfg (
+ IN PP2DXE_PORT *Port
+ );
+
+INT32
+MvGop110GmacModeCfg (
+ IN PP2DXE_PORT *Port
+ );
+
+VOID
+MvGop110GmacRgmiiCfg (
+ IN PP2DXE_PORT *Port
+ );
+
+VOID
+MvGop110GmacSgmii25Cfg (
+ IN PP2DXE_PORT *Port
+ );
+
+VOID
+MvGop110GmacSgmiiCfg (
+ IN PP2DXE_PORT *Port
+ );
+
+VOID
+MvGop110GmacQsgmiiCfg (
+ IN PP2DXE_PORT *Port
+ );
+
+INT32
+Mvpp2SmiPhyAddrCfg (
+ IN PP2DXE_PORT *Port,
+ IN INT32 PortId,
+ IN INT32 Addr
+ );
+
+EFI_STATUS
+MvGopXpcsModeCfg (
+ IN PP2DXE_PORT *Port,
+ IN INT32 NumOfLanes
+ );
+
+VOID
+MvGopMpcsModeCfg (
+ IN PP2DXE_PORT *Port
+ );
+
+VOID
+MvGopXlgMacModeCfg (
+ IN PP2DXE_PORT *Port
+ );
+
+VOID
+MvGopXpcsUnreset (
+ IN PP2DXE_PORT *Port
+ );
+
+VOID
+MvGopXlgMacUnreset (
+ IN PP2DXE_PORT *Port
+ );
+
+BOOLEAN
+MvGop110PortIsLinkUp (
+ IN PP2DXE_PORT *Port
+ );
+
+BOOLEAN
+MvGop110GmacLinkStatusGet (
+ IN PP2DXE_PORT *Port
+ );
+
+VOID
+MvGop110PortDisable (
+ IN PP2DXE_PORT *Port
+ );
+
+VOID
+MvGop110PortEnable (
+ IN PP2DXE_PORT *Port
+ );
+
+VOID
+MvGop110GmacPortEnable (
+ IN PP2DXE_PORT *Port
+ );
+
+VOID
+MvGop110GmacPortDisable (
+ IN PP2DXE_PORT *Port
+ );
+
+VOID
+MvGop110GmacPortLinkEventMask (
+ IN PP2DXE_PORT *Port
+ );
+
+INT32
+MvGop110PortEventsMask (
+ IN PP2DXE_PORT *Port
+ );
+
+VOID
+MvGop110XlgPortLinkEventMask (
+ IN PP2DXE_PORT *Port
+ );
+
+VOID
+MvGop110GmacForceLinkUp (
+ IN PP2DXE_PORT *Port
+ );
+
+INT32
+MvGop110FlCfg (
+ IN PP2DXE_PORT *Port
+ );
+
+INT32
+MvGop110SpeedDuplexSet (
+ IN PP2DXE_PORT *Port,
+ IN INT32 Speed,
+ IN enum MvPortDuplex Duplex
+ );
+
+INT32
+MvGop110GmacSpeedDuplexSet (
+ IN PP2DXE_PORT *Port,
+ IN INT32 Speed,
+ IN enum MvPortDuplex Duplex
+ );
+
+VOID
+Mvpp2AxiConfig (
+ IN MVPP2_SHARED *Priv
+ );
+
+VOID
+Mvpp2TxpClean (
+ IN PP2DXE_PORT *Port,
+ IN INT32 Txp,
+ IN MVPP2_TX_QUEUE *Txq
+ );
+
+VOID
+Mvpp2CleanupTxqs (
+ IN PP2DXE_PORT *Port
+ );
+
+VOID
+Mvpp2CleanupRxqs (
+ IN PP2DXE_PORT *Port
+ );
+
+/* Get number of physical egress Port */
+STATIC
+inline
+INT32
+Mvpp2EgressPort (
+ IN PP2DXE_PORT *Port
+ )
+{
+ return MVPP2_MAX_TCONT + Port->Id;
+}
+
+/* Get number of physical TXQ */
+STATIC
+inline
+INT32
+Mvpp2TxqPhys (
+ IN INT32 PortId,
+ IN INT32 Txq
+ )
+{
+ return (MVPP2_MAX_TCONT + PortId) * MVPP2_MAX_TXQ + Txq;
+}
+
+/* Set Pool number in a BM Cookie */
+STATIC
+inline
+UINT32
+Mvpp2BmCookiePoolSet (
+ IN UINT32 Cookie,
+ IN INT32 Pool
+ )
+{
+ UINT32 Bm;
+
+ Bm = Cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
+ Bm |= ((Pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
+
+ return Bm;
+}
+
+/* Get Pool number from a BM Cookie */
+STATIC
+inline
+INT32
+Mvpp2BmCookiePoolGet (
+ IN UINT32 Cookie
+ )
+{
+ return (Cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
+}
+
+/* Release buffer to BM */
+STATIC
+inline
+VOID
+Mvpp2BmPoolPut (
+ IN MVPP2_SHARED *Priv,
+ IN INT32 Pool,
+ IN UINT64 BufPhysAddr,
+ IN UINT64 BufVirtAddr
+ )
+{
+ UINT32 Val = 0;
+
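+  /*
+   * Program the high Address bits and the virtual Address first -
+   * the physical Address register is written last to complete the release.
+   */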
+ Val = (Upper32Bits(BufVirtAddr) & MVPP22_ADDR_HIGH_MASK) << MVPP22_BM_VIRT_HIGH_RLS_OFFST;
+ Val |= (Upper32Bits(BufPhysAddr) & MVPP22_ADDR_HIGH_MASK) << MVPP22_BM_PHY_HIGH_RLS_OFFSET;
+ Mvpp2Write(Priv, MVPP22_BM_PHY_VIRT_HIGH_RLS_REG, Val);
+ Mvpp2Write(Priv, MVPP2_BM_VIRT_RLS_REG, (UINT32)BufVirtAddr);
+ Mvpp2Write(Priv, MVPP2_BM_PHY_RLS_REG(Pool), (UINT32)BufPhysAddr);
+}
+
+STATIC
+inline
+VOID
+Mvpp2InterruptsEnable (
+ IN PP2DXE_PORT *Port,
+ IN INT32 CpuMask
+ )
+{
+ Mvpp2Write(Port->Priv, MVPP2_ISR_ENABLE_REG(Port->Id), MVPP2_ISR_ENABLE_INTERRUPT(CpuMask));
+}
+
+STATIC
+inline
+VOID
+Mvpp2InterruptsDisable (
+ IN PP2DXE_PORT *Port,
+ IN INT32 CpuMask
+ )
+{
+ Mvpp2Write(Port->Priv, MVPP2_ISR_ENABLE_REG(Port->Id), MVPP2_ISR_DISABLE_INTERRUPT(CpuMask));
+}
+
+/* Get number of Rx descriptors occupied by received packets */
+STATIC
+inline
+INT32
+Mvpp2RxqReceived (
+ IN PP2DXE_PORT *Port,
+ IN INT32 RxqId
+ )
+{
+ UINT32 Val = Mvpp2Read(Port->Priv, MVPP2_RXQ_STATUS_REG(RxqId));
+
+ return Val & MVPP2_RXQ_OCCUPIED_MASK;
+}
+
+/*
+ * Update Rx Queue status with the number of occupied and available
+ * Rx descriptor slots.
+ */
+STATIC
+inline
+VOID
+Mvpp2RxqStatusUpdate (
+ IN PP2DXE_PORT *Port,
+ IN INT32 RxqId,
+ IN INT32 UsedCount,
+ IN INT32 FreeCount
+ )
+{
+ /*
+   * Decrement the number of used descriptors and increment
+   * the number of free descriptors.
+ */
+ UINT32 Val = UsedCount | (FreeCount << MVPP2_RXQ_NUM_NEW_OFFSET);
+
+ Mvpp2Write(Port->Priv, MVPP2_RXQ_STATUS_UPDATE_REG(RxqId), Val);
+}
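The single register write above packs both counts into one 32-bit value: the number of processed descriptors in the low 16 bits and the number of newly freed descriptors in the high 16 bits (MVPP2_RXQ_NUM_NEW_OFFSET, defined as 16 in Mvpp2LibHw.h below). A minimal sketch of the packing:

#include <assert.h>
#include <stdint.h>

#define RXQ_NUM_NEW_OFFSET 16   /* MVPP2_RXQ_NUM_NEW_OFFSET */

int main (void)
{
  uint32_t UsedCount = 4;
  uint32_t FreeCount = 4;
  uint32_t Val = UsedCount | (FreeCount << RXQ_NUM_NEW_OFFSET);

  assert ((Val & 0xFFFF) == UsedCount);               /* processed descriptors */
  assert ((Val >> RXQ_NUM_NEW_OFFSET) == FreeCount);  /* newly available descriptors */
  return 0;
}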
+
+/* Get pointer to next RX descriptor to be processed by SW */
+STATIC
+inline
+MVPP2_RX_DESC *
+Mvpp2RxqNextDescGet (
+ IN MVPP2_RX_QUEUE *Rxq
+ )
+{
+ INT32 RxDesc = Rxq->NextDescToProc;
+
+ Rxq->NextDescToProc = MVPP2_QUEUE_NEXT_DESC(Rxq, RxDesc);
+ Mvpp2Prefetch(Rxq->Descs + Rxq->NextDescToProc);
+ return Rxq->Descs + RxDesc;
+}
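Mvpp2RxqNextDescGet advances the software index with MVPP2_QUEUE_NEXT_DESC (defined in Mvpp2LibHw.h below), which simply wraps back to 0 once the last descriptor index is reached. An equivalent standalone sketch of the ring advance:

#include <assert.h>

/* Mirrors MVPP2_QUEUE_NEXT_DESC: indices run 0..LastDesc and then wrap to 0 */
static int NextDesc (int Index, int LastDesc)
{
  return (Index < LastDesc) ? (Index + 1) : 0;
}

int main (void)
{
  int LastDesc = 63;                       /* e.g. a 64-entry descriptor ring */
  assert (NextDesc (10, LastDesc) == 11);  /* normal advance */
  assert (NextDesc (63, LastDesc) == 0);   /* wrap-around at the end */
  return 0;
}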
+
+/*
+ * Get number of sent descriptors and decrement counter.
+ * The number of sent descriptors is returned.
+ * Per-CPU access
+ */
+STATIC
+inline
+INT32
+Mvpp2TxqSentDescProc (
+ IN PP2DXE_PORT *Port,
+ IN MVPP2_TX_QUEUE *Txq
+ )
+{
+ UINT32 Val;
+
+ /* Reading status reg resets transmitted descriptor counter */
+#ifdef MVPP2V1
+ Val = Mvpp2Read(Port->Priv, MVPP2_TXQ_SENT_REG(Txq->Id));
+#else
+ Val = Mvpp2Read(Port->Priv, MVPP22_TXQ_SENT_REG(Txq->Id));
+#endif
+
+ return (Val & MVPP2_TRANSMITTED_COUNT_MASK) >> MVPP2_TRANSMITTED_COUNT_OFFSET;
+}
+
+STATIC
+inline
+MVPP2_RX_QUEUE *
+Mvpp2GetRxQueue (
+ IN PP2DXE_PORT *Port,
+ IN UINT32 Cause
+ )
+{
+ INT32 Queue = Mvpp2Fls(Cause) - 1;
+
+ return &Port->Rxqs[Queue];
+}
+
+STATIC
+inline
+MVPP2_TX_QUEUE *
+Mvpp2GetTxQueue (
+ IN PP2DXE_PORT *Port,
+ IN UINT32 Cause
+ )
+{
+ INT32 Queue = Mvpp2Fls(Cause) - 1;
+
+ return &Port->Txqs[Queue];
+}
+
+STATIC
+inline
+VOID
+Mvpp2x2TxdescPhysAddrSet (
+ IN DmaAddrT PhysAddr,
+ IN MVPP2_TX_DESC *TxDesc
+ )
+{
+ UINT64 *BufPhysAddrP = &TxDesc->BufPhysAddrHwCmd2;
+
+ *BufPhysAddrP &= ~(MVPP22_ADDR_MASK);
+ *BufPhysAddrP |= PhysAddr & MVPP22_ADDR_MASK;
+}
+#endif /* __MVPP2_LIB_H__ */
diff --git a/Silicon/Marvell/Drivers/Net/Pp2Dxe/Mvpp2LibHw.h b/Silicon/Marvell/Drivers/Net/Pp2Dxe/Mvpp2LibHw.h
new file mode 100644
index 0000000000..0ebf9367bb
--- /dev/null
+++ b/Silicon/Marvell/Drivers/Net/Pp2Dxe/Mvpp2LibHw.h
@@ -0,0 +1,2015 @@
+/********************************************************************************
+Copyright (C) 2016 Marvell International Ltd.
+
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __MVPP2_LIB_HW__
+#define __MVPP2_LIB_HW__
+
+#ifndef BIT
+#define BIT(nr) (1 << (nr))
+#endif
+
+/* PP2v2 registers offsets */
+#define MVPP22_SMI_OFFSET 0x12a200
+#define MVPP22_MPCS_OFFSET 0x130000
+#define MVPP22_XPCS_OFFSET 0x130400
+#define MVPP22_GMAC_OFFSET 0x130e00
+#define MVPP22_GMAC_REG_SIZE 0x1000
+#define MVPP22_XLG_OFFSET 0x130f00
+#define MVPP22_XLG_REG_SIZE 0x1000
+#define MVPP22_RFU1_OFFSET 0x441000
+
+/* RX Fifo Registers */
+#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
+#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
+#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
+#define MVPP2_RX_FIFO_INIT_REG 0x64
+
+/* RX DMA Top Registers */
+#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
+#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16)
+#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31)
+#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
+#define MVPP2_POOL_BUF_SIZE_OFFSET 5
+#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
+#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
+#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
+#define MVPP2_RXQ_POOL_SHORT_OFFS 20
+#define MVPP2_RXQ_POOL_SHORT_MASK 0x700000
+#define MVPP2_RXQ_POOL_LONG_OFFS 24
+#define MVPP2_RXQ_POOL_LONG_MASK 0x7000000
+#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
+#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
+#define MVPP2_RXQ_DISABLE_MASK BIT(31)
+
+/* Parser Registers */
+#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
+#define MVPP2_PRS_PORT_LU_MAX 0xf
+#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4))
+#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4))
+#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4))
+#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8))
+#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8))
+#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4))
+#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8))
+#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8))
+#define MVPP2_PRS_TCAM_IDX_REG 0x1100
+#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4)
+#define MVPP2_PRS_TCAM_INV_MASK BIT(31)
+#define MVPP2_PRS_SRAM_IDX_REG 0x1200
+#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
+#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
+#define MVPP2_PRS_TCAM_EN_MASK BIT(0)
+
+/* Classifier Registers */
+#define MVPP2_CLS_MODE_REG 0x1800
+#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
+#define MVPP2_CLS_PORT_WAY_REG 0x1810
+#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port))
+#define MVPP2_CLS_LKP_INDEX_REG 0x1814
+#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
+#define MVPP2_CLS_LKP_TBL_REG 0x1818
+#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
+#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
+#define MVPP2_CLS_FLOW_INDEX_REG 0x1820
+#define MVPP2_CLS_FLOW_TBL0_REG 0x1824
+#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
+#define MVPP2_CLS_FLOW_TBL2_REG 0x182c
+#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
+#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
+#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
+#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4))
+#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
+#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))
+
+/* Descriptor Manager Top Registers */
+#define MVPP2_RXQ_NUM_REG 0x2040
+#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
+#define MVPP2_RXQ_DESC_SIZE_REG 0x2048
+#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
+#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
+#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0
+#define MVPP2_RXQ_NUM_NEW_OFFSET 16
+#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
+#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff
+#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16
+#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000
+#define MVPP2_RXQ_THRESH_REG 0x204c
+#define MVPP2_OCCUPIED_THRESH_OFFSET 0
+#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff
+#define MVPP2_RXQ_INDEX_REG 0x2050
+#define MVPP2_TXQ_NUM_REG 0x2080
+#define MVPP2_TXQ_DESC_ADDR_REG 0x2084
+#define MVPP22_TXQ_DESC_ADDR_HIGH_REG 0x20a8
+#define MVPP22_TXQ_DESC_ADDR_HIGH_MASK 0xff
+#define MVPP2_TXQ_DESC_SIZE_REG 0x2088
+#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
+#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
+#define MVPP2_TXQ_THRESH_REG 0x2094
+#define MVPP2_TRANSMITTED_THRESH_OFFSET 16
+#define MVPP2_TRANSMITTED_THRESH_MASK 0x3fff0000
+#define MVPP2_TXQ_INDEX_REG 0x2098
+#define MVPP2_TXQ_PREF_BUF_REG 0x209c
+#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
+#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13))
+#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14))
+#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17)
+#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31)
+#define MVPP2_TXQ_PENDING_REG 0x20a0
+#define MVPP2_TXQ_PENDING_MASK 0x3fff
+#define MVPP2_TXQ_INT_STATUS_REG 0x20a4
+#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq))
+#define MVPP22_TXQ_SENT_REG(txq) (0x3e00 + 4 * ((txq) - 128))
+#define MVPP2_TRANSMITTED_COUNT_OFFSET 16
+#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000
+#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0
+#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16
+#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4
+#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff
+#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
+#define MVPP2_TXQ_RSVD_CLR_OFFSET 16
+#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
+#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
+#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
+#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
+#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff
+#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))
+
+/* MBUS bridge registers */
+#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2))
+#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2))
+#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
+#define MVPP2_BASE_ADDR_ENABLE 0x4060
+
+/* Interrupt Cause and Mask registers */
+#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
+#define MVPP2_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq))
+#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
+#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
+#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
+#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
+#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
+#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
+#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
+#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25)
+#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26)
+#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29)
+#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30)
+#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31)
+#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port))
+#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc
+#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
+#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
+#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
+#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0
+
+/* Buffer Manager registers */
+#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
+#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80
+#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
+#define MVPP2_BM_POOL_SIZE_MASK 0xfff0
+#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
+#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0
+#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
+#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0
+#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
+#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
+#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
+#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
+#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
+#define MVPP2_BM_START_MASK BIT(0)
+#define MVPP2_BM_STOP_MASK BIT(1)
+#define MVPP2_BM_STATE_MASK BIT(4)
+#define MVPP2_BM_LOW_THRESH_OFFS 8
+#define MVPP2_BM_LOW_THRESH_MASK 0x7f00
+#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << MVPP2_BM_LOW_THRESH_OFFS)
+#define MVPP2_BM_HIGH_THRESH_OFFS 16
+#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
+#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << MVPP2_BM_HIGH_THRESH_OFFS)
+#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
+#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
+#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
+#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2)
+#define MVPP2_BM_BPPE_FULL_MASK BIT(3)
+#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4)
+#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
+#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
+#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
+#define MVPP2_BM_VIRT_ALLOC_REG 0x6440
+#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
+#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
+#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
+#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
+#define MVPP2_BM_VIRT_RLS_REG 0x64c0
+#define MVPP2_BM_MC_RLS_REG 0x64c4
+#define MVPP2_BM_MC_ID_MASK 0xfff
+#define MVPP2_BM_FORCE_RELEASE_MASK BIT(12)
+
+#define MVPP22_BM_PHY_VIRT_HIGH_ALLOC_REG 0x6444
+#define MVPP22_BM_PHY_HIGH_ALLOC_OFFSET 0
+#define MVPP22_BM_VIRT_HIGH_ALLOC_OFFSET 8
+#define MVPP22_BM_VIRT_HIGH_ALLOC_MASK 0xff00
+
+#define MVPP22_BM_PHY_VIRT_HIGH_RLS_REG 0x64c4
+
+#define MVPP22_BM_PHY_HIGH_RLS_OFFSET 0
+#define MVPP22_BM_VIRT_HIGH_RLS_OFFST 8
+
+#define MVPP22_BM_POOL_BASE_HIGH_REG 0x6310
+#define MVPP22_BM_POOL_BASE_HIGH_MASK 0xff
+#define MVPP2_BM_PRIO_CTRL_REG 0x6800
+
+/* TX Scheduler registers */
+#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
+#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
+#define MVPP2_TXP_SCHED_ENQ_MASK 0xff
+#define MVPP2_TXP_SCHED_DISQ_OFFSET 8
+#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
+#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
+#define MVPP2_TXP_SCHED_MTU_REG 0x801c
+#define MVPP2_TXP_MTU_MAX 0x7FFFF
+#define MVPP2_TXP_SCHED_REFILL_REG 0x8020
+#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff
+#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000
+#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20)
+#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024
+#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff
+#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
+#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff
+#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000
+#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20)
+#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
+#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff
+#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
+#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff
+
+/* TX general registers */
+#define MVPP2_TX_SNOOP_REG 0x8800
+#define MVPP2_TX_PORT_FLUSH_REG 0x8810
+#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port))
+
+/* LMS registers */
+#define MVPP2_SRC_ADDR_MIDDLE 0x24
+#define MVPP2_SRC_ADDR_HIGH 0x28
+#define MVPP2_PHY_AN_CFG0_REG 0x34
+#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
+#define MVPP2_MIB_COUNTERS_BASE(port) (0x1000 + ((port) >> 1) * 0x400 + (port) * 0x400)
+#define MVPP2_MIB_LATE_COLLISION 0x7c
+#define MVPP2_ISR_SUM_MASK_REG 0x220c
+#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
+#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27
+
+/* Per-port registers */
+#define MVPP2_GMAC_CTRL_0_REG 0x0
+#define MVPP2_GMAC_PORT_EN_MASK BIT(0)
+#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2
+#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc
+#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15)
+#define MVPP2_GMAC_CTRL_1_REG 0x4
+#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1)
+#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5)
+#define MVPP2_GMAC_PCS_LB_EN_BIT 6
+#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6)
+#define MVPP2_GMAC_SA_LOW_OFFS 7
+#define MVPP2_GMAC_CTRL_2_REG 0x8
+#define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
+#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
+#define MVPP2_GMAC_PORT_RGMII_MASK BIT(4)
+#define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
+#define MVPP2_GMAC_AUTONEG_CONFIG 0xc
+#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0)
+#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
+#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
+#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
+#define MVPP2_GMAC_AN_SPEED_EN BIT(7)
+#define MVPP2_GMAC_FC_ADV_EN BIT(9)
+#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12)
+#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
+#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
+#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
+#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
+#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
+
+/* Port Interrupts */
+#define MV_GMAC_INTERRUPT_CAUSE_REG (0x0020)
+#define MV_GMAC_INTERRUPT_MASK_REG (0x0024)
+#define MV_GMAC_INTERRUPT_CAUSE_LINK_CHANGE_OFFS 1
+#define MV_GMAC_INTERRUPT_CAUSE_LINK_CHANGE_MASK (0x1 << MV_GMAC_INTERRUPT_CAUSE_LINK_CHANGE_OFFS)
+
+/* Port Interrupt Summary */
+#define MV_GMAC_INTERRUPT_SUM_CAUSE_REG (0x00A0)
+#define MV_GMAC_INTERRUPT_SUM_MASK_REG (0x00A4)
+#define MV_GMAC_INTERRUPT_SUM_CAUSE_LINK_CHANGE_OFFS 1
+#define MV_GMAC_INTERRUPT_SUM_CAUSE_LINK_CHANGE_MASK (0x1 << MV_GMAC_INTERRUPT_SUM_CAUSE_LINK_CHANGE_OFFS)
+
+/* Port Mac Control0 */
+#define MVPP2_PORT_CTRL0_REG (0x0000)
+#define MVPP2_PORT_CTRL0_PORTEN_OFFS 0
+#define MVPP2_PORT_CTRL0_PORTEN_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL0_PORTEN_OFFS)
+
+#define MVPP2_PORT_CTRL0_PORTTYPE_OFFS 1
+#define MVPP2_PORT_CTRL0_PORTTYPE_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL0_PORTTYPE_OFFS)
+
+#define MVPP2_PORT_CTRL0_FRAMESIZELIMIT_OFFS 2
+#define MVPP2_PORT_CTRL0_FRAMESIZELIMIT_MASK \
+ (0x00001fff << MVPP2_PORT_CTRL0_FRAMESIZELIMIT_OFFS)
+
+#define MVPP2_PORT_CTRL0_COUNT_EN_OFFS 15
+#define MVPP2_PORT_CTRL0_COUNT_EN_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL0_COUNT_EN_OFFS)
+
+/* Port Mac Control1 */
+#define MVPP2_PORT_CTRL1_REG (0x0004)
+#define MVPP2_PORT_CTRL1_EN_RX_CRC_CHECK_OFFS 0
+#define MVPP2_PORT_CTRL1_EN_RX_CRC_CHECK_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL1_EN_RX_CRC_CHECK_OFFS)
+
+#define MVPP2_PORT_CTRL1_EN_PERIODIC_FC_XON_OFFS 1
+#define MVPP2_PORT_CTRL1_EN_PERIODIC_FC_XON_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL1_EN_PERIODIC_FC_XON_OFFS)
+
+#define MVPP2_PORT_CTRL1_MGMII_MODE_OFFS 2
+#define MVPP2_PORT_CTRL1_MGMII_MODE_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL1_MGMII_MODE_OFFS)
+
+#define MVPP2_PORT_CTRL1_PFC_CASCADE_PORT_ENABLE_OFFS 3
+#define MVPP2_PORT_CTRL1_PFC_CASCADE_PORT_ENABLE_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL1_PFC_CASCADE_PORT_ENABLE_OFFS)
+
+#define MVPP2_PORT_CTRL1_DIS_EXCESSIVE_COL_OFFS 4
+#define MVPP2_PORT_CTRL1_DIS_EXCESSIVE_COL_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL1_DIS_EXCESSIVE_COL_OFFS)
+
+#define MVPP2_PORT_CTRL1_GMII_LOOPBACK_OFFS 5
+#define MVPP2_PORT_CTRL1_GMII_LOOPBACK_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL1_GMII_LOOPBACK_OFFS)
+
+#define MVPP2_PORT_CTRL1_PCS_LOOPBACK_OFFS 6
+#define MVPP2_PORT_CTRL1_PCS_LOOPBACK_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL1_PCS_LOOPBACK_OFFS)
+
+#define MVPP2_PORT_CTRL1_FC_SA_ADDR_LO_OFFS 7
+#define MVPP2_PORT_CTRL1_FC_SA_ADDR_LO_MASK \
+ (0x000000ff << MVPP2_PORT_CTRL1_FC_SA_ADDR_LO_OFFS)
+
+#define MVPP2_PORT_CTRL1_EN_SHORT_PREAMBLE_OFFS 15
+#define MVPP2_PORT_CTRL1_EN_SHORT_PREAMBLE_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL1_EN_SHORT_PREAMBLE_OFFS)
+
+/* Port Mac Control2 */
+#define MVPP2_PORT_CTRL2_REG (0x0008)
+#define MVPP2_PORT_CTRL2_SGMII_MODE_OFFS 0
+#define MVPP2_PORT_CTRL2_SGMII_MODE_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL2_SGMII_MODE_OFFS)
+
+#define MVPP2_PORT_CTRL2_FC_MODE_OFFS 1
+#define MVPP2_PORT_CTRL2_FC_MODE_MASK \
+ (0x00000003 << MVPP2_PORT_CTRL2_FC_MODE_OFFS)
+
+#define MVPP2_PORT_CTRL2_PCS_EN_OFFS 3
+#define MVPP2_PORT_CTRL2_PCS_EN_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL2_PCS_EN_OFFS)
+
+#define MVPP2_PORT_CTRL2_RGMII_MODE_OFFS 4
+#define MVPP2_PORT_CTRL2_RGMII_MODE_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL2_RGMII_MODE_OFFS)
+
+#define MVPP2_PORT_CTRL2_DIS_PADING_OFFS 5
+#define MVPP2_PORT_CTRL2_DIS_PADING_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL2_DIS_PADING_OFFS)
+
+#define MVPP2_PORT_CTRL2_PORTMACRESET_OFFS 6
+#define MVPP2_PORT_CTRL2_PORTMACRESET_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL2_PORTMACRESET_OFFS)
+
+#define MVPP2_PORT_CTRL2_TX_DRAIN_OFFS 7
+#define MVPP2_PORT_CTRL2_TX_DRAIN_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL2_TX_DRAIN_OFFS)
+
+#define MVPP2_PORT_CTRL2_EN_MII_ODD_PRE_OFFS 8
+#define MVPP2_PORT_CTRL2_EN_MII_ODD_PRE_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL2_EN_MII_ODD_PRE_OFFS)
+
+#define MVPP2_PORT_CTRL2_CLK_125_BYPS_EN_OFFS 9
+#define MVPP2_PORT_CTRL2_CLK_125_BYPS_EN_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL2_CLK_125_BYPS_EN_OFFS)
+
+#define MVPP2_PORT_CTRL2_PRBS_CHECK_EN_OFFS 10
+#define MVPP2_PORT_CTRL2_PRBS_CHECK_EN_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL2_PRBS_CHECK_EN_OFFS)
+
+#define MVPP2_PORT_CTRL2_PRBS_GEN_EN_OFFS 11
+#define MVPP2_PORT_CTRL2_PRBS_GEN_EN_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL2_PRBS_GEN_EN_OFFS)
+
+#define MVPP2_PORT_CTRL2_SELECT_DATA_TO_TX_OFFS 12
+#define MVPP2_PORT_CTRL2_SELECT_DATA_TO_TX_MASK \
+ (0x00000003 << MVPP2_PORT_CTRL2_SELECT_DATA_TO_TX_OFFS)
+
+#define MVPP2_PORT_CTRL2_EN_COL_ON_BP_OFFS 14
+#define MVPP2_PORT_CTRL2_EN_COL_ON_BP_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL2_EN_COL_ON_BP_OFFS)
+
+#define MVPP2_PORT_CTRL2_EARLY_REJECT_MODE_OFFS 15
+#define MVPP2_PORT_CTRL2_EARLY_REJECT_MODE_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL2_EARLY_REJECT_MODE_OFFS)
+
+/* Port Auto-negotiation Configuration */
+#define MVPP2_PORT_AUTO_NEG_CFG_REG (0x000c)
+#define MVPP2_PORT_AUTO_NEG_CFG_FORCE_LINK_DOWN_OFFS 0
+#define MVPP2_PORT_AUTO_NEG_CFG_FORCE_LINK_DOWN_MASK \
+ (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_FORCE_LINK_DOWN_OFFS)
+
+#define MVPP2_PORT_AUTO_NEG_CFG_FORCE_LINK_UP_OFFS 1
+#define MVPP2_PORT_AUTO_NEG_CFG_FORCE_LINK_UP_MASK \
+ (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_FORCE_LINK_UP_OFFS)
+
+#define MVPP2_PORT_AUTO_NEG_CFG_EN_PCS_AN_OFFS 2
+#define MVPP2_PORT_AUTO_NEG_CFG_EN_PCS_AN_MASK \
+ (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_EN_PCS_AN_OFFS)
+
+#define MVPP2_PORT_AUTO_NEG_CFG_AN_BYPASS_EN_OFFS 3
+#define MVPP2_PORT_AUTO_NEG_CFG_AN_BYPASS_EN_MASK \
+ (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_AN_BYPASS_EN_OFFS)
+
+#define MVPP2_PORT_AUTO_NEG_CFG_INBAND_RESTARTAN_OFFS 4
+#define MVPP2_PORT_AUTO_NEG_CFG_INBAND_RESTARTAN_MASK \
+ (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_INBAND_RESTARTAN_OFFS)
+
+#define MVPP2_PORT_AUTO_NEG_CFG_SET_MII_SPEED_OFFS 5
+#define MVPP2_PORT_AUTO_NEG_CFG_SET_MII_SPEED_MASK \
+ (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_SET_MII_SPEED_OFFS)
+
+#define MVPP2_PORT_AUTO_NEG_CFG_SET_GMII_SPEED_OFFS 6
+#define MVPP2_PORT_AUTO_NEG_CFG_SET_GMII_SPEED_MASK \
+ (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_SET_GMII_SPEED_OFFS)
+
+#define MVPP2_PORT_AUTO_NEG_CFG_EN_AN_SPEED_OFFS 7
+#define MVPP2_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK \
+ (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_EN_AN_SPEED_OFFS)
+
+#define MVPP2_PORT_AUTO_NEG_CFG_ADV_PAUSE_OFFS 9
+#define MVPP2_PORT_AUTO_NEG_CFG_ADV_PAUSE_MASK \
+ (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_ADV_PAUSE_OFFS)
+
+#define MVPP2_PORT_AUTO_NEG_CFG_ADV_ASM_PAUSE_OFFS 10
+#define MVPP2_PORT_AUTO_NEG_CFG_ADV_ASM_PAUSE_MASK \
+ (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_ADV_ASM_PAUSE_OFFS)
+
+#define MVPP2_PORT_AUTO_NEG_CFG_EN_FC_AN_OFFS 11
+#define MVPP2_PORT_AUTO_NEG_CFG_EN_FC_AN_MASK \
+ (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_EN_FC_AN_OFFS)
+
+#define MVPP2_PORT_AUTO_NEG_CFG_SET_FULL_DX_OFFS 12
+#define MVPP2_PORT_AUTO_NEG_CFG_SET_FULL_DX_MASK \
+ (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_SET_FULL_DX_OFFS)
+
+#define MVPP2_PORT_AUTO_NEG_CFG_EN_FDX_AN_OFFS 13
+#define MVPP2_PORT_AUTO_NEG_CFG_EN_FDX_AN_MASK \
+ (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_EN_FDX_AN_OFFS)
+
+#define MVPP2_PORT_AUTO_NEG_CFG_PHY_MODE_OFFS 14
+#define MVPP2_PORT_AUTO_NEG_CFG_PHY_MODE_MASK \
+ (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_PHY_MODE_OFFS)
+
+#define MVPP2_PORT_AUTO_NEG_CFG_CHOOSE_SAMPLE_TX_CONFIG_OFFS 15
+#define MVPP2_PORT_AUTO_NEG_CFG_CHOOSE_SAMPLE_TX_CONFIG_MASK \
+ (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_CHOOSE_SAMPLE_TX_CONFIG_OFFS)
+
+/* Port Status0 */
+#define MVPP2_PORT_STATUS0_REG (0x0010)
+#define MVPP2_PORT_STATUS0_LINKUP_OFFS 0
+#define MVPP2_PORT_STATUS0_LINKUP_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS0_LINKUP_OFFS)
+
+#define MVPP2_PORT_STATUS0_GMIISPEED_OFFS 1
+#define MVPP2_PORT_STATUS0_GMIISPEED_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS0_GMIISPEED_OFFS)
+
+#define MVPP2_PORT_STATUS0_MIISPEED_OFFS 2
+#define MVPP2_PORT_STATUS0_MIISPEED_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS0_MIISPEED_OFFS)
+
+#define MVPP2_PORT_STATUS0_FULLDX_OFFS 3
+#define MVPP2_PORT_STATUS0_FULLDX_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS0_FULLDX_OFFS)
+
+#define MVPP2_PORT_STATUS0_RXFCEN_OFFS 4
+#define MVPP2_PORT_STATUS0_RXFCEN_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS0_RXFCEN_OFFS)
+
+#define MVPP2_PORT_STATUS0_TXFCEN_OFFS 5
+#define MVPP2_PORT_STATUS0_TXFCEN_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS0_TXFCEN_OFFS)
+
+#define MVPP2_PORT_STATUS0_PORTRXPAUSE_OFFS 6
+#define MVPP2_PORT_STATUS0_PORTRXPAUSE_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS0_PORTRXPAUSE_OFFS)
+
+#define MVPP2_PORT_STATUS0_PORTTXPAUSE_OFFS 7
+#define MVPP2_PORT_STATUS0_PORTTXPAUSE_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS0_PORTTXPAUSE_OFFS)
+
+#define MVPP2_PORT_STATUS0_PORTIS_DOINGPRESSURE_OFFS 8
+#define MVPP2_PORT_STATUS0_PORTIS_DOINGPRESSURE_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS0_PORTIS_DOINGPRESSURE_OFFS)
+
+#define MVPP2_PORT_STATUS0_PORTBUFFULL_OFFS 9
+#define MVPP2_PORT_STATUS0_PORTBUFFULL_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS0_PORTBUFFULL_OFFS)
+
+#define MVPP2_PORT_STATUS0_SYNCFAIL10MS_OFFS 10
+#define MVPP2_PORT_STATUS0_SYNCFAIL10MS_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS0_SYNCFAIL10MS_OFFS)
+
+#define MVPP2_PORT_STATUS0_ANDONE_OFFS 11
+#define MVPP2_PORT_STATUS0_ANDONE_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS0_ANDONE_OFFS)
+
+#define MVPP2_PORT_STATUS0_INBAND_AUTONEG_BYPASSACT_OFFS 12
+#define MVPP2_PORT_STATUS0_INBAND_AUTONEG_BYPASSACT_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS0_INBAND_AUTONEG_BYPASSACT_OFFS)
+
+#define MVPP2_PORT_STATUS0_SERDESPLL_LOCKED_OFFS 13
+#define MVPP2_PORT_STATUS0_SERDESPLL_LOCKED_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS0_SERDESPLL_LOCKED_OFFS)
+
+#define MVPP2_PORT_STATUS0_SYNCOK_OFFS 14
+#define MVPP2_PORT_STATUS0_SYNCOK_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS0_SYNCOK_OFFS)
+
+#define MVPP2_PORT_STATUS0_SQUELCHNOT_DETECTED_OFFS 15
+#define MVPP2_PORT_STATUS0_SQUELCHNOT_DETECTED_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS0_SQUELCHNOT_DETECTED_OFFS)
+
+/* Port Serial Parameters Configuration */
+#define MVPP2_PORT_SERIAL_PARAM_CFG_REG (0x0014)
+#define MVPP2_PORT_SERIAL_PARAM_CFG_UNIDIRECTIONAL_ENABLE_OFFS 0
+#define MVPP2_PORT_SERIAL_PARAM_CFG_UNIDIRECTIONAL_ENABLE_MASK \
+ (0x00000001 << MVPP2_PORT_SERIAL_PARAM_CFG_UNIDIRECTIONAL_ENABLE_OFFS)
+
+#define MVPP2_PORT_SERIAL_PARAM_CFG_RETRANSMIT_COLLISION_DOMAIN_OFFS 1
+#define MVPP2_PORT_SERIAL_PARAM_CFG_RETRANSMIT_COLLISION_DOMAIN_MASK \
+ (0x00000001 << MVPP2_PORT_SERIAL_PARAM_CFG_RETRANSMIT_COLLISION_DOMAIN_OFFS)
+
+#define MVPP2_PORT_SERIAL_PARAM_CFG_PUMA2_BTS1444_EN_OFFS 2
+#define MVPP2_PORT_SERIAL_PARAM_CFG_PUMA2_BTS1444_EN_MASK \
+ (0x00000001 << MVPP2_PORT_SERIAL_PARAM_CFG_PUMA2_BTS1444_EN_OFFS)
+
+#define MVPP2_PORT_SERIAL_PARAM_CFG_FORWARD_802_3X_FC_EN_OFFS 3
+#define MVPP2_PORT_SERIAL_PARAM_CFG_FORWARD_802_3X_FC_EN_MASK \
+ (0x00000001 << MVPP2_PORT_SERIAL_PARAM_CFG_FORWARD_802_3X_FC_EN_OFFS)
+
+#define MVPP2_PORT_SERIAL_PARAM_CFG_BP_EN_OFFS 4
+#define MVPP2_PORT_SERIAL_PARAM_CFG_BP_EN_MASK \
+ (0x00000001 << MVPP2_PORT_SERIAL_PARAM_CFG_BP_EN_OFFS)
+
+#define MVPP2_PORT_SERIAL_PARAM_CFG_RX_NEGEDGE_SAMPLE_EN_OFFS 5
+#define MVPP2_PORT_SERIAL_PARAM_CFG_RX_NEGEDGE_SAMPLE_EN_MASK \
+ (0x00000001 << MVPP2_PORT_SERIAL_PARAM_CFG_RX_NEGEDGE_SAMPLE_EN_OFFS)
+
+#define MVPP2_PORT_SERIAL_PARAM_CFG_COL_DOMAIN_LIMIT_OFFS 6
+#define MVPP2_PORT_SERIAL_PARAM_CFG_COL_DOMAIN_LIMIT_MASK \
+ (0x0000003f << MVPP2_PORT_SERIAL_PARAM_CFG_COL_DOMAIN_LIMIT_OFFS)
+
+#define MVPP2_PORT_SERIAL_PARAM_CFG_PERIODIC_TYPE_SELECT_OFFS 12
+#define MVPP2_PORT_SERIAL_PARAM_CFG_PERIODIC_TYPE_SELECT_MASK \
+ (0x00000001 << MVPP2_PORT_SERIAL_PARAM_CFG_PERIODIC_TYPE_SELECT_OFFS)
+
+#define MVPP2_PORT_SERIAL_PARAM_CFG_PER_PRIORITY_FC_EN_OFFS 13
+#define MVPP2_PORT_SERIAL_PARAM_CFG_PER_PRIORITY_FC_EN_MASK \
+ (0x00000001 << MVPP2_PORT_SERIAL_PARAM_CFG_PER_PRIORITY_FC_EN_OFFS)
+
+#define MVPP2_PORT_SERIAL_PARAM_CFG_TX_STANDARD_PRBS7_OFFS 14
+#define MVPP2_PORT_SERIAL_PARAM_CFG_TX_STANDARD_PRBS7_MASK \
+ (0x00000001 << MVPP2_PORT_SERIAL_PARAM_CFG_TX_STANDARD_PRBS7_OFFS)
+
+#define MVPP2_PORT_SERIAL_PARAM_CFG_REVERSE_PRBS_RX_OFFS 15
+#define MVPP2_PORT_SERIAL_PARAM_CFG_REVERSE_PRBS_RX_MASK \
+ (0x00000001 << MVPP2_PORT_SERIAL_PARAM_CFG_REVERSE_PRBS_RX_OFFS)
+
+/* Port Fifo Configuration 0 */
+#define MVPP2_PORT_FIFO_CFG_0_REG (0x0018)
+#define MVPP2_PORT_FIFO_CFG_0_TX_FIFO_HIGH_WM_OFFS 0
+#define MVPP2_PORT_FIFO_CFG_0_TX_FIFO_HIGH_WM_MASK \
+ (0x000000ff << MVPP2_PORT_FIFO_CFG_0_TX_FIFO_HIGH_WM_OFFS)
+
+#define MVPP2_PORT_FIFO_CFG_0_TX_FIFO_LOW_WM_OFFS 8
+#define MVPP2_PORT_FIFO_CFG_0_TX_FIFO_LOW_WM_MASK \
+ (0x000000ff << MVPP2_PORT_FIFO_CFG_0_TX_FIFO_LOW_WM_OFFS)
+
+/* Port Fifo Configuration 1 */
+#define MVPP2_PORT_FIFO_CFG_1_REG (0x001c)
+#define MVPP2_PORT_FIFO_CFG_1_RX_FIFO_MAX_TH_OFFS 0
+#define MVPP2_PORT_FIFO_CFG_1_RX_FIFO_MAX_TH_MASK \
+ (0x0000003f << MVPP2_PORT_FIFO_CFG_1_RX_FIFO_MAX_TH_OFFS)
+
+#define MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_OFFS 6
+#define MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_MASK \
+ (0x000000ff << MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_OFFS)
+
+#define MVPP2_PORT_FIFO_CFG_1_PORT_EN_FIX_EN_OFFS 15
+#define MVPP2_PORT_FIFO_CFG_1_PORT_EN_FIX_EN_MASK \
+ (0x00000001 << MVPP2_PORT_FIFO_CFG_1_PORT_EN_FIX_EN_OFFS)
+
+/* Port Serdes Configuration0 */
+#define MVPP2_PORT_SERDES_CFG0_REG (0x0028)
+#define MVPP2_PORT_SERDES_CFG0_SERDESRESET_OFFS 0
+#define MVPP2_PORT_SERDES_CFG0_SERDESRESET_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG0_SERDESRESET_OFFS)
+
+#define MVPP2_PORT_SERDES_CFG0_PU_TX_OFFS 1
+#define MVPP2_PORT_SERDES_CFG0_PU_TX_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG0_PU_TX_OFFS)
+
+#define MVPP2_PORT_SERDES_CFG0_PU_RX_OFFS 2
+#define MVPP2_PORT_SERDES_CFG0_PU_RX_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG0_PU_RX_OFFS)
+
+#define MVPP2_PORT_SERDES_CFG0_PU_PLL_OFFS 3
+#define MVPP2_PORT_SERDES_CFG0_PU_PLL_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG0_PU_PLL_OFFS)
+
+#define MVPP2_PORT_SERDES_CFG0_PU_IVREF_OFFS 4
+#define MVPP2_PORT_SERDES_CFG0_PU_IVREF_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG0_PU_IVREF_OFFS)
+
+#define MVPP2_PORT_SERDES_CFG0_TESTEN_OFFS 5
+#define MVPP2_PORT_SERDES_CFG0_TESTEN_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG0_TESTEN_OFFS)
+
+#define MVPP2_PORT_SERDES_CFG0_DPHER_EN_OFFS 6
+#define MVPP2_PORT_SERDES_CFG0_DPHER_EN_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG0_DPHER_EN_OFFS)
+
+#define MVPP2_PORT_SERDES_CFG0_RUDI_INVALID_ENABLE_OFFS 7
+#define MVPP2_PORT_SERDES_CFG0_RUDI_INVALID_ENABLE_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG0_RUDI_INVALID_ENABLE_OFFS)
+
+#define MVPP2_PORT_SERDES_CFG0_ACK_OVERRIDE_ENABLE_OFFS 8
+#define MVPP2_PORT_SERDES_CFG0_ACK_OVERRIDE_ENABLE_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG0_ACK_OVERRIDE_ENABLE_OFFS)
+
+#define MVPP2_PORT_SERDES_CFG0_CONFIG_WORD_ENABLE_OFFS 9
+#define MVPP2_PORT_SERDES_CFG0_CONFIG_WORD_ENABLE_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG0_CONFIG_WORD_ENABLE_OFFS)
+
+#define MVPP2_PORT_SERDES_CFG0_SYNC_FAIL_INT_ENABLE_OFFS 10
+#define MVPP2_PORT_SERDES_CFG0_SYNC_FAIL_INT_ENABLE_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG0_SYNC_FAIL_INT_ENABLE_OFFS)
+
+#define MVPP2_PORT_SERDES_CFG0_MASTER_MODE_ENABLE_OFFS 11
+#define MVPP2_PORT_SERDES_CFG0_MASTER_MODE_ENABLE_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG0_MASTER_MODE_ENABLE_OFFS)
+
+#define MVPP2_PORT_SERDES_CFG0_TERM75_TX_OFFS 12
+#define MVPP2_PORT_SERDES_CFG0_TERM75_TX_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG0_TERM75_TX_OFFS)
+
+#define MVPP2_PORT_SERDES_CFG0_OUTAMP_OFFS 13
+#define MVPP2_PORT_SERDES_CFG0_OUTAMP_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG0_OUTAMP_OFFS)
+
+#define MVPP2_PORT_SERDES_CFG0_BTS712_FIX_EN_OFFS 14
+#define MVPP2_PORT_SERDES_CFG0_BTS712_FIX_EN_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG0_BTS712_FIX_EN_OFFS)
+
+#define MVPP2_PORT_SERDES_CFG0_BTS156_FIX_EN_OFFS 15
+#define MVPP2_PORT_SERDES_CFG0_BTS156_FIX_EN_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG0_BTS156_FIX_EN_OFFS)
+
+/* Port Serdes Configuration1 */
+#define MVPP2_PORT_SERDES_CFG1_REG (0x002c)
+#define MVPP2_PORT_SERDES_CFG1_SMII_RX_10MB_CLK_EDGE_SEL_OFFS 0
+#define MVPP2_PORT_SERDES_CFG1_SMII_RX_10MB_CLK_EDGE_SEL_MASK \
+ (0x00000001 << MVPP2_PORT_SERDES_CFG1_SMII_RX_10MB_CLK_EDGE_SEL_OFFS)
+
+#define MVPP2_GMAC_PORT_SERDES_CFG1_SMII_TX_10MB_CLK_EDGE_SEL_OFFS 1
+#define MVPP2_GMAC_PORT_SERDES_CFG1_SMII_TX_10MB_CLK_EDGE_SEL_MASK \
+ (0x00000001 << MVPP2_GMAC_PORT_SERDES_CFG1_SMII_TX_10MB_CLK_EDGE_SEL_OFFS)
+
+#define MVPP2_GMAC_PORT_SERDES_CFG1_MEN_OFFS 2
+#define MVPP2_GMAC_PORT_SERDES_CFG1_MEN_MASK \
+ (0x00000003 << MVPP2_GMAC_PORT_SERDES_CFG1_MEN_OFFS)
+
+#define MVPP2_GMAC_PORT_SERDES_CFG1_VCMS_OFFS 4
+#define MVPP2_GMAC_PORT_SERDES_CFG1_VCMS_MASK \
+ (0x00000001 << MVPP2_GMAC_PORT_SERDES_CFG1_VCMS_OFFS)
+
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_USE_SIGDET_OFFS 5
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_USE_SIGDET_MASK \
+ (0x00000001 << MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_USE_SIGDET_OFFS)
+
+#define MVPP2_GMAC_PORT_SERDES_CFG1_EN_CRS_MASK_TX_OFFS 6
+#define MVPP2_GMAC_PORT_SERDES_CFG1_EN_CRS_MASK_TX_MASK \
+ (0x00000001 << MVPP2_GMAC_PORT_SERDES_CFG1_EN_CRS_MASK_TX_OFFS)
+
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_ENABLE_OFFS 7
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_ENABLE_MASK \
+ (0x00000001 << MVPP2_GMAC_PORT_SERDES_CFG1_100FX_ENABLE_OFFS)
+
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_PHY_ADDRESS_OFFS 8
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_PHY_ADDRESS_MASK \
+ (0x0000001f << MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_PHY_ADDRESS_OFFS)
+
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_SIGDET_POLARITY_OFFS 13
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_SIGDET_POLARITY_MASK \
+ (0x00000001 << MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_SIGDET_POLARITY_OFFS)
+
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_INTERRUPT_POLARITY_OFFS 14
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_INTERRUPT_POLARITY_MASK \
+ (0x00000001 << MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_INTERRUPT_POLARITY_OFFS)
+
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_SERDES_POLARITY_OFFS 15
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_SERDES_POLARITY_MASK \
+ (0x00000001 << MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_SERDES_POLARITY_OFFS)
+
+/* Port Serdes Configuration2 */
+#define MVPP2_PORT_SERDES_CFG2_REG (0x0030)
+#define MVPP2_PORT_SERDES_CFG2_AN_ADV_CONFIGURATION_OFFS 0
+#define MVPP2_PORT_SERDES_CFG2_AN_ADV_CONFIGURATION_MASK \
+ (0x0000ffff << MVPP2_PORT_SERDES_CFG2_AN_ADV_CONFIGURATION_OFFS)
+
+/* Port Serdes Configuration3 */
+#define MVPP2_PORT_SERDES_CFG3_REG (0x0034)
+#define MVPP2_PORT_SERDES_CFG3_ABILITY_MATCH_STATUS_OFFS 0
+#define MVPP2_PORT_SERDES_CFG3_ABILITY_MATCH_STATUS_MASK \
+ (0x0000ffff << MVPP2_PORT_SERDES_CFG3_ABILITY_MATCH_STATUS_OFFS)
+
+/* Port Prbs Status */
+#define MVPP2_PORT_PRBS_STATUS_REG (0x0038)
+#define MVPP2_PORT_PRBS_STATUS_PRBSCHECK_LOCKED_OFFS 0
+#define MVPP2_PORT_PRBS_STATUS_PRBSCHECK_LOCKED_MASK \
+ (0x00000001 << MVPP2_PORT_PRBS_STATUS_PRBSCHECK_LOCKED_OFFS)
+
+#define MVPP2_PORT_PRBS_STATUS_PRBSCHECKRDY_OFFS 1
+#define MVPP2_PORT_PRBS_STATUS_PRBSCHECKRDY_MASK \
+ (0x00000001 << MVPP2_PORT_PRBS_STATUS_PRBSCHECKRDY_OFFS)
+
+/* Port Prbs Error Counter */
+#define MVPP2_PORT_PRBS_ERR_CNTR_REG (0x003c)
+#define MVPP2_PORT_PRBS_ERR_CNTR_PRBSBITERRCNT_OFFS 0
+#define MVPP2_PORT_PRBS_ERR_CNTR_PRBSBITERRCNT_MASK \
+ (0x0000ffff << MVPP2_PORT_PRBS_ERR_CNTR_PRBSBITERRCNT_OFFS)
+
+/* Port Status1 */
+#define MVPP2_PORT_STATUS1_REG (0x0040)
+#define MVPP2_PORT_STATUS1_MEDIAACTIVE_OFFS 0
+#define MVPP2_PORT_STATUS1_MEDIAACTIVE_MASK \
+ (0x00000001 << MVPP2_PORT_STATUS1_MEDIAACTIVE_OFFS)
+
+/* Port Mib Counters Control */
+#define MVPP2_PORT_MIB_CNTRS_CTRL_REG (0x0044)
+#define MVPP2_PORT_MIB_CNTRS_CTRL_MIB_COPY_TRIGGER_OFFS 0
+#define MVPP2_PORT_MIB_CNTRS_CTRL_MIB_COPY_TRIGGER_MASK \
+ (0x00000001 << MVPP2_PORT_MIB_CNTRS_CTRL_MIB_COPY_TRIGGER_OFFS)
+
+#define MVPP2_PORT_MIB_CNTRS_CTRL_MIB_CLEAR_ON_READ__OFFS 1
+#define MVPP2_PORT_MIB_CNTRS_CTRL_MIB_CLEAR_ON_READ__MASK \
+ (0x00000001 << MVPP2_PORT_MIB_CNTRS_CTRL_MIB_CLEAR_ON_READ__OFFS)
+
+#define MVPP2_PORT_MIB_CNTRS_CTRL_RX_HISTOGRAM_EN_OFFS 2
+#define MVPP2_PORT_MIB_CNTRS_CTRL_RX_HISTOGRAM_EN_MASK \
+ (0x00000001 << MVPP2_PORT_MIB_CNTRS_CTRL_RX_HISTOGRAM_EN_OFFS)
+
+#define MVPP2_PORT_MIB_CNTRS_CTRL_TX_HISTOGRAM_EN_OFFS 3
+#define MVPP2_PORT_MIB_CNTRS_CTRL_TX_HISTOGRAM_EN_MASK \
+ (0x00000001 << MVPP2_PORT_MIB_CNTRS_CTRL_TX_HISTOGRAM_EN_OFFS)
+
+#define MVPP2_PORT_MIB_CNTRS_CTRL_MFA1_BTT940_FIX_ENABLE__OFFS 4
+#define MVPP2_PORT_MIB_CNTRS_CTRL_MFA1_BTT940_FIX_ENABLE__MASK \
+ (0x00000001 << MVPP2_PORT_MIB_CNTRS_CTRL_MFA1_BTT940_FIX_ENABLE__OFFS)
+
+#define MVPP2_PORT_MIB_CNTRS_CTRL_XCAT_BTS_340_EN__OFFS 5
+#define MVPP2_PORT_MIB_CNTRS_CTRL_XCAT_BTS_340_EN__MASK \
+ (0x00000001 << MVPP2_PORT_MIB_CNTRS_CTRL_XCAT_BTS_340_EN__OFFS)
+
+#define MVPP2_PORT_MIB_CNTRS_CTRL_MIB_4_COUNT_HIST_OFFS 6
+#define MVPP2_PORT_MIB_CNTRS_CTRL_MIB_4_COUNT_HIST_MASK \
+ (0x00000001 << MVPP2_PORT_MIB_CNTRS_CTRL_MIB_4_COUNT_HIST_OFFS)
+
+#define MVPP2_PORT_MIB_CNTRS_CTRL_MIB_4_LIMIT_1518_1522_OFFS 7
+#define MVPP2_PORT_MIB_CNTRS_CTRL_MIB_4_LIMIT_1518_1522_MASK \
+ (0x00000001 << MVPP2_PORT_MIB_CNTRS_CTRL_MIB_4_LIMIT_1518_1522_OFFS)
+
+/* Port Mac Control3 */
+#define MVPP2_PORT_CTRL3_REG (0x0048)
+#define MVPP2_PORT_CTRL3_BUF_SIZE_OFFS 0
+#define MVPP2_PORT_CTRL3_BUF_SIZE_MASK \
+ (0x0000003f << MVPP2_PORT_CTRL3_BUF_SIZE_OFFS)
+
+#define MVPP2_PORT_CTRL3_IPG_DATA_OFFS 6
+#define MVPP2_PORT_CTRL3_IPG_DATA_MASK \
+ (0x000001ff << MVPP2_PORT_CTRL3_IPG_DATA_OFFS)
+
+#define MVPP2_PORT_CTRL3_LLFC_GLOBAL_FC_ENABLE_OFFS 15
+#define MVPP2_PORT_CTRL3_LLFC_GLOBAL_FC_ENABLE_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL3_LLFC_GLOBAL_FC_ENABLE_OFFS)
+#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
+
+/* Port Mac Control4 */
+#define MVPP2_PORT_CTRL4_REG (0x0090)
+#define MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL_OFFS 0
+#define MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL_OFFS)
+
+#define MVPP2_PORT_CTRL4_PREAMBLE_FIX_OFFS 1
+#define MVPP2_PORT_CTRL4_PREAMBLE_FIX_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL4_PREAMBLE_FIX_OFFS)
+
+#define MVPP2_PORT_CTRL4_SQ_DETECT_FIX_EN_OFFS 2
+#define MVPP2_PORT_CTRL4_SQ_DETECT_FIX_EN_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL4_SQ_DETECT_FIX_EN_OFFS)
+
+#define MVPP2_PORT_CTRL4_FC_EN_RX_OFFS 3
+#define MVPP2_PORT_CTRL4_FC_EN_RX_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL4_FC_EN_RX_OFFS)
+
+#define MVPP2_PORT_CTRL4_FC_EN_TX_OFFS 4
+#define MVPP2_PORT_CTRL4_FC_EN_TX_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL4_FC_EN_TX_OFFS)
+
+#define MVPP2_PORT_CTRL4_DP_CLK_SEL_OFFS 5
+#define MVPP2_PORT_CTRL4_DP_CLK_SEL_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL4_DP_CLK_SEL_OFFS)
+
+#define MVPP2_PORT_CTRL4_SYNC_BYPASS_OFFS 6
+#define MVPP2_PORT_CTRL4_SYNC_BYPASS_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL4_SYNC_BYPASS_OFFS)
+
+#define MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_OFFS 7
+#define MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_OFFS)
+
+#define MVPP2_PORT_CTRL4_COUNT_EXTERNAL_FC_EN_OFFS 8
+#define MVPP2_PORT_CTRL4_COUNT_EXTERNAL_FC_EN_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL4_COUNT_EXTERNAL_FC_EN_OFFS)
+
+#define MVPP2_PORT_CTRL4_MARVELL_HEADER_EN_OFFS 9
+#define MVPP2_PORT_CTRL4_MARVELL_HEADER_EN_MASK \
+ (0x00000001 << MVPP2_PORT_CTRL4_MARVELL_HEADER_EN_OFFS)
+
+#define MVPP2_PORT_CTRL4_LEDS_NUMBER_OFFS 10
+#define MVPP2_PORT_CTRL4_LEDS_NUMBER_MASK \
+ (0x0000003f << MVPP2_PORT_CTRL4_LEDS_NUMBER_OFFS)
+
+/* XPCS registers */
+
+/* Global Configuration 0 */
+#define MVPP22_XPCS_GLOBAL_CFG_0_REG 0x0
+#define MVPP22_XPCS_PCSRESET BIT(0)
+#define MVPP22_XPCS_PCSMODE_OFFS 3
+#define MVPP22_XPCS_PCSMODE_MASK (0x3 << MVPP22_XPCS_PCSMODE_OFFS)
+#define MVPP22_XPCS_LANEACTIVE_OFFS 5
+#define MVPP22_XPCS_LANEACTIVE_MASK (0x3 << MVPP22_XPCS_LANEACTIVE_OFFS)
+
+/* MPCS registers */
+
+#define MVPP22_MPCS40G_COMMON_CONTROL 0x14
+#define MVPP22_MPCS_FORWARD_ERROR_CORRECTION_MASK BIT(10)
+
+#define MVPP22_MPCS_CLOCK_RESET 0x14c
+#define MVPP22_MPCS_TX_SD_CLK_RESET_MASK BIT(0)
+#define MVPP22_MPCS_RX_SD_CLK_RESET_MASK BIT(1)
+#define MVPP22_MPCS_MAC_CLK_RESET_MASK BIT(2)
+#define MVPP22_MPCS_CLK_DIVISION_RATIO_OFFS 4
+#define MVPP22_MPCS_CLK_DIVISION_RATIO_MASK (0x7 << MVPP22_MPCS_CLK_DIVISION_RATIO_OFFS)
+#define MVPP22_MPCS_CLK_DIVISION_RATIO_DEFAULT (0x1 << MVPP22_MPCS_CLK_DIVISION_RATIO_OFFS)
+#define MVPP22_MPCS_CLK_DIV_PHASE_SET_MASK BIT(11)
+
+/* Descriptor ring Macros */
+#define MVPP2_QUEUE_NEXT_DESC(q, index) (((index) < (q)->LastDesc) ? ((index) + 1) : 0)
+
+/* Various constants */
+
+/* Coalescing */
+#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
+#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
+#define MVPP2_RX_COAL_PKTS 32
+#define MVPP2_RX_COAL_USEC 100
+
+/*
+ * The two-byte Marvell header. It either contains a special value used
+ * by Marvell switches when a specific hardware mode is enabled (not
+ * supported by this driver), or is filled with zeroes automatically on
+ * the RX side. Since those two bytes sit at the front of the Ethernet
+ * header, they cause the IP header to be aligned on a 4-byte boundary
+ * automatically: the hardware skips those two bytes on its own.
+ */
+#define MVPP2_MH_SIZE 2
+#define MVPP2_ETH_TYPE_LEN 2
+#define MVPP2_PPPOE_HDR_SIZE 8
+#define MVPP2_VLAN_TAG_LEN 4
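The alignment property described in the comment above is simple arithmetic: the 2-byte Marvell header plus the 14-byte Ethernet header (two 6-byte MAC addresses and the 2-byte EtherType, MVPP2_ETH_TYPE_LEN) places the IP header at offset 16, a multiple of 4. A one-line check:

#include <assert.h>

#define MH_SIZE       2    /* MVPP2_MH_SIZE */
#define ETH_HDR_SIZE  14   /* 6-byte dst MAC + 6-byte src MAC + 2-byte EtherType */

int main (void)
{
  int IpHeaderOffset = MH_SIZE + ETH_HDR_SIZE;   /* 2 + 14 = 16 */
  assert (IpHeaderOffset % 4 == 0);              /* IP header is 4-byte aligned */
  return 0;
}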
+
+/* Lbtd 802.3 type */
+#define MVPP2_IP_LBDT_TYPE 0xfffa
+
+#define MVPP2_CPU_D_CACHE_LINE_SIZE 32
+#define MVPP2_TX_CSUM_MAX_SIZE 9800
+
+/* Timeout constants */
+#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
+#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000
+
+#define MVPP2_TX_MTU_MAX 0x7ffff
+
+/* Maximum number of T-CONTs of PON port */
+#define MVPP2_MAX_TCONT 16
+
+/* Maximum number of supported ports */
+#define MVPP2_MAX_PORTS 4
+
+/* Maximum number of TXQs used by single port */
+#define MVPP2_MAX_TXQ 8
+
+/* Maximum number of RXQs used by single port */
+#define MVPP2_MAX_RXQ 8
+
+/* Default number of RXQs in use */
+#define MVPP2_DEFAULT_RXQ 4
+
+/* Total number of RXQs available to all ports */
+#define MVPP2_RXQ_TOTAL_NUM (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)
+
+/* Max number of Rx descriptors */
+#define MVPP2_MAX_RXD 64
+
+/* Max number of Tx descriptors */
+#define MVPP2_MAX_TXD 32
+
+/* Amount of Tx descriptors that can be reserved at once by CPU */
+#define MVPP2_CPU_DESC_CHUNK 64
+
+/* Max number of Tx descriptors in each aggregated queue */
+#define MVPP2_AGGR_TXQ_SIZE 256
+
+/* Descriptor aligned size */
+#define MVPP2_DESC_ALIGNED_SIZE 32
+
+/* Descriptor alignment mask */
+#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)
+
+/* RX FIFO constants */
+#define MVPP2_RX_FIFO_PORT_DATA_SIZE 0x2000
+#define MVPP2_RX_FIFO_PORT_ATTR_SIZE 0x80
+#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80
+
+#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)
+
+/* IPv6 max L3 address size */
+#define MVPP2_MAX_L3_ADDR_SIZE 16
+
+/* Port flags */
+#define MVPP2_F_LOOPBACK BIT(0)
+
+/* SD1 Control1 */
+#define SD1_CONTROL_1_REG (0x148)
+
+#define SD1_CONTROL_XAUI_EN_OFFSET 28
+#define SD1_CONTROL_XAUI_EN_MASK (0x1 << SD1_CONTROL_XAUI_EN_OFFSET)
+
+#define SD1_CONTROL_RXAUI0_L23_EN_OFFSET 27
+#define SD1_CONTROL_RXAUI0_L23_EN_MASK (0x1 << SD1_CONTROL_RXAUI0_L23_EN_OFFSET)
+
+#define SD1_CONTROL_RXAUI1_L45_EN_OFFSET 26
+#define SD1_CONTROL_RXAUI1_L45_EN_MASK (0x1 << SD1_CONTROL_RXAUI1_L45_EN_OFFSET)
+
+/* System Soft Reset 1 */
+#define MV_GOP_SOFT_RESET_1_REG (0x108)
+
+#define NETC_GOP_SOFT_RESET_OFFSET 6
+#define NETC_GOP_SOFT_RESET_MASK (0x1 << NETC_GOP_SOFT_RESET_OFFSET)
+
+/* Ports Control 0 */
+#define MV_NETCOMP_PORTS_CONTROL_0 (0x110)
+
+#define NETC_CLK_DIV_PHASE_OFFSET 31
+#define NETC_CLK_DIV_PHASE_MASK (0x1 << NETC_CLK_DIV_PHASE_OFFSET)
+
+#define NETC_GIG_RX_DATA_SAMPLE_OFFSET 29
+#define NETC_GIG_RX_DATA_SAMPLE_MASK (0x1 << NETC_GIG_RX_DATA_SAMPLE_OFFSET)
+
+#define NETC_BUS_WIDTH_SELECT_OFFSET 1
+#define NETC_BUS_WIDTH_SELECT_MASK (0x1 << NETC_BUS_WIDTH_SELECT_OFFSET)
+
+#define NETC_GOP_ENABLE_OFFSET 0
+#define NETC_GOP_ENABLE_MASK (0x1 << NETC_GOP_ENABLE_OFFSET)
+
+/* Ports Control 1 */
+#define MV_NETCOMP_PORTS_CONTROL_1 (0x114)
+
+#define NETC_PORT_GIG_RF_RESET_OFFSET(port) (28 + port)
+#define NETC_PORT_GIG_RF_RESET_MASK(port) (0x1 << NETC_PORT_GIG_RF_RESET_OFFSET(port))
+
+#define NETC_PORTS_ACTIVE_OFFSET(port) (0 + port)
+#define NETC_PORTS_ACTIVE_MASK(port) (0x1 << NETC_PORTS_ACTIVE_OFFSET(port))
+
+/* Ports Status */
+#define MV_NETCOMP_PORTS_STATUS (0x11C)
+#define NETC_PORTS_STATUS_OFFSET(port) (0 + port)
+#define NETC_PORTS_STATUS_MASK(port) (0x1 << NETC_PORTS_STATUS_OFFSET(port))
+
+/* Networking Complex Control 0 */
+#define MV_NETCOMP_CONTROL_0 (0x120)
+
+#define NETC_GBE_PORT1_MII_MODE_OFFSET 2
+#define NETC_GBE_PORT1_MII_MODE_MASK (0x1 << NETC_GBE_PORT1_MII_MODE_OFFSET)
+
+#define NETC_GBE_PORT1_SGMII_MODE_OFFSET 1
+#define NETC_GBE_PORT1_SGMII_MODE_MASK (0x1 << NETC_GBE_PORT1_SGMII_MODE_OFFSET)
+
+#define NETC_GBE_PORT0_SGMII_MODE_OFFSET 0
+#define NETC_GBE_PORT0_SGMII_MODE_MASK (0x1 << NETC_GBE_PORT0_SGMII_MODE_OFFSET)
+
+/* Port Mac Control0 */
+#define MV_XLG_PORT_MAC_CTRL0_REG (0x0000)
+#define MV_XLG_MAC_CTRL0_PORTEN_OFFS 0
+#define MV_XLG_MAC_CTRL0_PORTEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_PORTEN_OFFS)
+
+#define MV_XLG_MAC_CTRL0_MACRESETN_OFFS 1
+#define MV_XLG_MAC_CTRL0_MACRESETN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_MACRESETN_OFFS)
+
+#define MV_XLG_MAC_CTRL0_FORCELINKDOWN_OFFS 2
+#define MV_XLG_MAC_CTRL0_FORCELINKDOWN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_FORCELINKDOWN_OFFS)
+
+#define MV_XLG_MAC_CTRL0_FORCELINKPASS_OFFS 3
+#define MV_XLG_MAC_CTRL0_FORCELINKPASS_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_FORCELINKPASS_OFFS)
+
+#define MV_XLG_MAC_CTRL0_TXIPGMODE_OFFS 5
+#define MV_XLG_MAC_CTRL0_TXIPGMODE_MASK \
+ (0x00000003 << MV_XLG_MAC_CTRL0_TXIPGMODE_OFFS)
+
+#define MV_XLG_MAC_CTRL0_RXFCEN_OFFS 7
+#define MV_XLG_MAC_CTRL0_RXFCEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_RXFCEN_OFFS)
+
+#define MV_XLG_MAC_CTRL0_TXFCEN_OFFS 8
+#define MV_XLG_MAC_CTRL0_TXFCEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_TXFCEN_OFFS)
+
+#define MV_XLG_MAC_CTRL0_RXCRCCHECKEN_OFFS 9
+#define MV_XLG_MAC_CTRL0_RXCRCCHECKEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_RXCRCCHECKEN_OFFS)
+
+#define MV_XLG_MAC_CTRL0_PERIODICXONEN_OFFS 10
+#define MV_XLG_MAC_CTRL0_PERIODICXONEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_PERIODICXONEN_OFFS)
+
+#define MV_XLG_MAC_CTRL0_RXCRCSTRIPEN_OFFS 11
+#define MV_XLG_MAC_CTRL0_RXCRCSTRIPEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_RXCRCSTRIPEN_OFFS)
+
+#define MV_XLG_MAC_CTRL0_PADDINGDIS_OFFS 13
+#define MV_XLG_MAC_CTRL0_PADDINGDIS_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_PADDINGDIS_OFFS)
+
+#define MV_XLG_MAC_CTRL0_MIBCNTDIS_OFFS 14
+#define MV_XLG_MAC_CTRL0_MIBCNTDIS_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_MIBCNTDIS_OFFS)
+
+#define MV_XLG_MAC_CTRL0_PFC_CASCADE_PORT_ENABLE_OFFS 15
+#define MV_XLG_MAC_CTRL0_PFC_CASCADE_PORT_ENABLE_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL0_PFC_CASCADE_PORT_ENABLE_OFFS)
+
+/* Port Mac Control1 */
+#define MV_XLG_PORT_MAC_CTRL1_REG (0x0004)
+#define MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_OFFS 0
+#define MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_MASK \
+ (0x00001fff << MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_OFFS)
+#define MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_DEFAULT 0x1400
+
+#define MV_XLG_MAC_CTRL1_MACLOOPBACKEN_OFFS 13
+#define MV_XLG_MAC_CTRL1_MACLOOPBACKEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL1_MACLOOPBACKEN_OFFS)
+
+#define MV_XLG_MAC_CTRL1_XGMIILOOPBACKEN_OFFS 14
+#define MV_XLG_MAC_CTRL1_XGMIILOOPBACKEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL1_XGMIILOOPBACKEN_OFFS)
+
+#define MV_XLG_MAC_CTRL1_LOOPBACKCLOCKSELECT_OFFS 15
+#define MV_XLG_MAC_CTRL1_LOOPBACKCLOCKSELECT_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL1_LOOPBACKCLOCKSELECT_OFFS)
+
+/* Port Mac Control2 */
+#define MV_XLG_PORT_MAC_CTRL2_REG (0x0008)
+#define MV_XLG_MAC_CTRL2_SALOW_7_0_OFFS 0
+#define MV_XLG_MAC_CTRL2_SALOW_7_0_MASK \
+ (0x000000ff << MV_XLG_MAC_CTRL2_SALOW_7_0_OFFS)
+
+#define MV_XLG_MAC_CTRL2_UNIDIRECTIONALEN_OFFS 8
+#define MV_XLG_MAC_CTRL2_UNIDIRECTIONALEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL2_UNIDIRECTIONALEN_OFFS)
+
+#define MV_XLG_MAC_CTRL2_FIXEDIPGBASE_OFFS 9
+#define MV_XLG_MAC_CTRL2_FIXEDIPGBASE_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL2_FIXEDIPGBASE_OFFS)
+
+#define MV_XLG_MAC_CTRL2_PERIODICXOFFEN_OFFS 10
+#define MV_XLG_MAC_CTRL2_PERIODICXOFFEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL2_PERIODICXOFFEN_OFFS)
+
+#define MV_XLG_MAC_CTRL2_SIMPLEXMODEEN_OFFS 13
+#define MV_XLG_MAC_CTRL2_SIMPLEXMODEEN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL2_SIMPLEXMODEEN_OFFS)
+
+#define MV_XLG_MAC_CTRL2_FC_MODE_OFFS 14
+#define MV_XLG_MAC_CTRL2_FC_MODE_MASK \
+ (0x00000003 << MV_XLG_MAC_CTRL2_FC_MODE_OFFS)
+
+/* Port Status */
+#define MV_XLG_MAC_PORT_STATUS_REG (0x000c)
+#define MV_XLG_MAC_PORT_STATUS_LINKSTATUS_OFFS 0
+#define MV_XLG_MAC_PORT_STATUS_LINKSTATUS_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_STATUS_LINKSTATUS_OFFS)
+
+#define MV_XLG_MAC_PORT_STATUS_REMOTEFAULT_OFFS 1
+#define MV_XLG_MAC_PORT_STATUS_REMOTEFAULT_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_STATUS_REMOTEFAULT_OFFS)
+
+#define MV_XLG_MAC_PORT_STATUS_LOCALFAULT_OFFS 2
+#define MV_XLG_MAC_PORT_STATUS_LOCALFAULT_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_STATUS_LOCALFAULT_OFFS)
+
+#define MV_XLG_MAC_PORT_STATUS_LINKSTATUSCLEAN_OFFS 3
+#define MV_XLG_MAC_PORT_STATUS_LINKSTATUSCLEAN_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_STATUS_LINKSTATUSCLEAN_OFFS)
+
+#define MV_XLG_MAC_PORT_STATUS_LOCALFAULTCLEAN_OFFS 4
+#define MV_XLG_MAC_PORT_STATUS_LOCALFAULTCLEAN_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_STATUS_LOCALFAULTCLEAN_OFFS)
+
+#define MV_XLG_MAC_PORT_STATUS_REMOTEFAULTCLEAN_OFFS 5
+#define MV_XLG_MAC_PORT_STATUS_REMOTEFAULTCLEAN_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_STATUS_REMOTEFAULTCLEAN_OFFS)
+
+#define MV_XLG_MAC_PORT_STATUS_PORTRXPAUSE_OFFS 6
+#define MV_XLG_MAC_PORT_STATUS_PORTRXPAUSE_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_STATUS_PORTRXPAUSE_OFFS)
+
+#define MV_XLG_MAC_PORT_STATUS_PORTTXPAUSE_OFFS 7
+#define MV_XLG_MAC_PORT_STATUS_PORTTXPAUSE_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_STATUS_PORTTXPAUSE_OFFS)
+
+#define MV_XLG_MAC_PORT_STATUS_PFC_SYNC_FIFO_FULL_OFFS 8
+#define MV_XLG_MAC_PORT_STATUS_PFC_SYNC_FIFO_FULL_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_STATUS_PFC_SYNC_FIFO_FULL_OFFS)
+
+/* Port Fifos Thresholds Configuration */
+#define MV_XLG_PORT_FIFOS_THRS_CFG_REG (0x0010)
+#define MV_XLG_MAC_PORT_FIFOS_THRS_CFG_RXFULLTHR_OFFS 0
+#define MV_XLG_MAC_PORT_FIFOS_THRS_CFG_RXFULLTHR_MASK \
+ (0x0000001f << MV_XLG_MAC_PORT_FIFOS_THRS_CFG_RXFULLTHR_OFFS)
+
+#define MV_XLG_MAC_PORT_FIFOS_THRS_CFG_TXFIFOSIZE_OFFS 5
+#define MV_XLG_MAC_PORT_FIFOS_THRS_CFG_TXFIFOSIZE_MASK \
+ (0x0000003f << MV_XLG_MAC_PORT_FIFOS_THRS_CFG_TXFIFOSIZE_OFFS)
+
+#define MV_XLG_MAC_PORT_FIFOS_THRS_CFG_TXRDTHR_OFFS 11
+#define MV_XLG_MAC_PORT_FIFOS_THRS_CFG_TXRDTHR_MASK \
+ (0x0000001f << MV_XLG_MAC_PORT_FIFOS_THRS_CFG_TXRDTHR_OFFS)
+
+/* Port Mac Control3 */
+#define MV_XLG_PORT_MAC_CTRL3_REG (0x001c)
+#define MV_XLG_MAC_CTRL3_BUFSIZE_OFFS 0
+#define MV_XLG_MAC_CTRL3_BUFSIZE_MASK \
+ (0x0000003f << MV_XLG_MAC_CTRL3_BUFSIZE_OFFS)
+
+#define MV_XLG_MAC_CTRL3_XTRAIPG_OFFS 6
+#define MV_XLG_MAC_CTRL3_XTRAIPG_MASK \
+ (0x0000007f << MV_XLG_MAC_CTRL3_XTRAIPG_OFFS)
+
+#define MV_XLG_MAC_CTRL3_MACMODESELECT_OFFS 13
+#define MV_XLG_MAC_CTRL3_MACMODESELECT_MASK \
+ (0x00000007 << MV_XLG_MAC_CTRL3_MACMODESELECT_OFFS)
+#define MV_XLG_MAC_CTRL3_MACMODESELECT_10G \
+ (0x00000001 << MV_XLG_MAC_CTRL3_MACMODESELECT_OFFS)
+
+/* Port Per Prio Flow Control Status */
+#define MV_XLG_PORT_PER_PRIO_FLOW_CTRL_STATUS_REG (0x0020)
+#define MV_XLG_MAC_PORT_PER_PRIO_FLOW_CTRL_STATUS_PRIONSTATUS_OFFS 0
+#define MV_XLG_MAC_PORT_PER_PRIO_FLOW_CTRL_STATUS_PRIONSTATUS_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_PER_PRIO_FLOW_CTRL_STATUS_PRIONSTATUS_OFFS)
+
+/* Debug Bus Status */
+#define MV_XLG_DEBUG_BUS_STATUS_REG (0x0024)
+#define MV_XLG_MAC_DEBUG_BUS_STATUS_DEBUG_BUS_OFFS 0
+#define MV_XLG_MAC_DEBUG_BUS_STATUS_DEBUG_BUS_MASK \
+ (0x0000ffff << MV_XLG_MAC_DEBUG_BUS_STATUS_DEBUG_BUS_OFFS)
+
+/* Port Metal Fix */
+#define MV_XLG_PORT_METAL_FIX_REG (0x002c)
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_EOP_IN_FIFO__OFFS 0
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_EOP_IN_FIFO__MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_EOP_IN_FIFO__OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_LTF_FIX__OFFS 1
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_LTF_FIX__MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_LTF_FIX__OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_HOLD_FIX__OFFS 2
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_HOLD_FIX__MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_HOLD_FIX__OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_LED_FIX__OFFS 3
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_LED_FIX__MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_LED_FIX__OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_PAD_PROTECT__OFFS 4
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_PAD_PROTECT__MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_PAD_PROTECT__OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_NX_BTS44__OFFS 5
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_NX_BTS44__MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_NX_BTS44__OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_NX_BTS42__OFFS 6
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_NX_BTS42__MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_NX_BTS42__OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_FLUSH_FIX_OFFS 7
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_FLUSH_FIX_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_FLUSH_FIX_OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_PORT_EN_FIX_OFFS 8
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_PORT_EN_FIX_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_PORT_EN_FIX_OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_SPARE_DEF0_BITS_OFFS 9
+#define MV_XLG_MAC_PORT_METAL_FIX_SPARE_DEF0_BITS_MASK \
+ (0x0000000f << MV_XLG_MAC_PORT_METAL_FIX_SPARE_DEF0_BITS_OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_SPARE_DEF1_BITS_OFFS 13
+#define MV_XLG_MAC_PORT_METAL_FIX_SPARE_DEF1_BITS_MASK \
+ (0x00000007 << MV_XLG_MAC_PORT_METAL_FIX_SPARE_DEF1_BITS_OFFS)
+
+/* Xg Mib Counters Control */
+#define MV_XLG_MIB_CNTRS_CTRL_REG (0x0030)
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGCAPTURETRIGGER_OFFS 0
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGCAPTURETRIGGER_MASK \
+ (0x00000001 << MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGCAPTURETRIGGER_OFFS)
+
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGDONTCLEARAFTERREAD_OFFS 1
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGDONTCLEARAFTERREAD_MASK \
+ (0x00000001 << MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGDONTCLEARAFTERREAD_OFFS)
+
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGRXHISTOGRAMEN_OFFS 2
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGRXHISTOGRAMEN_MASK \
+ (0x00000001 << MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGRXHISTOGRAMEN_OFFS)
+
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGTXHISTOGRAMEN_OFFS 3
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGTXHISTOGRAMEN_MASK \
+ (0x00000001 << MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGTXHISTOGRAMEN_OFFS)
+
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MFA1_BTT940_FIX_ENABLE__OFFS 4
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MFA1_BTT940_FIX_ENABLE__MASK \
+ (0x00000001 << MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MFA1_BTT940_FIX_ENABLE__OFFS)
+
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_LEDS_NUMBER_OFFS 5
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_LEDS_NUMBER_MASK \
+ (0x0000003f << MV_XLG_MAC_XG_MIB_CNTRS_CTRL_LEDS_NUMBER_OFFS)
+
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MIB_4_COUNT_HIST_OFFS 11
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MIB_4_COUNT_HIST_MASK \
+ (0x00000001 << MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MIB_4_COUNT_HIST_OFFS)
+
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MIB_4_LIMIT_1518_1522_OFFS 12
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MIB_4_LIMIT_1518_1522_MASK \
+ (0x00000001 << MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MIB_4_LIMIT_1518_1522_OFFS)
+
+/* Cn/ccfc Timer%i */
+#define MV_XLG_CNCCFC_TIMERI_REG(t) ((0x0038 + (t) * 4))
+#define MV_XLG_MAC_CNCCFC_TIMERI_PORTSPEEDTIMER_OFFS 0
+#define MV_XLG_MAC_CNCCFC_TIMERI_PORTSPEEDTIMER_MASK \
+ (0x0000ffff << MV_XLG_MAC_CNCCFC_TIMERI_PORTSPEEDTIMER_OFFS)
+
+/* Ppfc Control */
+#define MV_XLG_MAC_PPFC_CTRL_REG (0x0060)
+#define MV_XLG_MAC_PPFC_CTRL_GLOBAL_PAUSE_ENI_OFFS 0
+#define MV_XLG_MAC_PPFC_CTRL_GLOBAL_PAUSE_ENI_MASK \
+ (0x00000001 << MV_XLG_MAC_PPFC_CTRL_GLOBAL_PAUSE_ENI_OFFS)
+
+#define MV_XLG_MAC_PPFC_CTRL_DIP_BTS_677_EN_OFFS 9
+#define MV_XLG_MAC_PPFC_CTRL_DIP_BTS_677_EN_MASK \
+ (0x00000001 << MV_XLG_MAC_PPFC_CTRL_DIP_BTS_677_EN_OFFS)
+
+/* Fc Dsa Tag 0 */
+#define MV_XLG_MAC_FC_DSA_TAG_0_REG (0x0068)
+#define MV_XLG_MAC_FC_DSA_TAG_0_DSATAGREG0_OFFS 0
+#define MV_XLG_MAC_FC_DSA_TAG_0_DSATAGREG0_MASK \
+ (0x0000ffff << MV_XLG_MAC_FC_DSA_TAG_0_DSATAGREG0_OFFS)
+
+/* Fc Dsa Tag 1 */
+#define MV_XLG_MAC_FC_DSA_TAG_1_REG (0x006c)
+#define MV_XLG_MAC_FC_DSA_TAG_1_DSATAGREG1_OFFS 0
+#define MV_XLG_MAC_FC_DSA_TAG_1_DSATAGREG1_MASK \
+ (0x0000ffff << MV_XLG_MAC_FC_DSA_TAG_1_DSATAGREG1_OFFS)
+
+/* Fc Dsa Tag 2 */
+#define MV_XLG_MAC_FC_DSA_TAG_2_REG (0x0070)
+#define MV_XLG_MAC_FC_DSA_TAG_2_DSATAGREG2_OFFS 0
+#define MV_XLG_MAC_FC_DSA_TAG_2_DSATAGREG2_MASK \
+ (0x0000ffff << MV_XLG_MAC_FC_DSA_TAG_2_DSATAGREG2_OFFS)
+
+/* Fc Dsa Tag 3 */
+#define MV_XLG_MAC_FC_DSA_TAG_3_REG (0x0074)
+#define MV_XLG_MAC_FC_DSA_TAG_3_DSATAGREG3_OFFS 0
+#define MV_XLG_MAC_FC_DSA_TAG_3_DSATAGREG3_MASK \
+ (0x0000ffff << MV_XLG_MAC_FC_DSA_TAG_3_DSATAGREG3_OFFS)
+
+/* Dic Budget Compensation */
+#define MV_XLG_MAC_DIC_BUDGET_COMPENSATION_REG (0x0080)
+#define MV_XLG_MAC_DIC_BUDGET_COMPENSATION_DIC_COUNTER_TO_ADD_8BYTES_OFFS 0
+#define MV_XLG_MAC_DIC_BUDGET_COMPENSATION_DIC_COUNTER_TO_ADD_8BYTES_MASK \
+ (0x0000ffff << MV_XLG_MAC_DIC_BUDGET_COMPENSATION_DIC_COUNTER_TO_ADD_8BYTES_OFFS)
+
+/* Port Mac Control4 */
+#define MV_XLG_PORT_MAC_CTRL4_REG (0x0084)
+#define MV_XLG_MAC_CTRL4_LLFC_GLOBAL_FC_ENABLE_OFFS 0
+#define MV_XLG_MAC_CTRL4_LLFC_GLOBAL_FC_ENABLE_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_LLFC_GLOBAL_FC_ENABLE_OFFS)
+
+#define MV_XLG_MAC_CTRL4_LED_STREAM_SELECT_OFFS 1
+#define MV_XLG_MAC_CTRL4_LED_STREAM_SELECT_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_LED_STREAM_SELECT_OFFS)
+
+#define MV_XLG_MAC_CTRL4_DEBUG_BUS_SELECT_OFFS 2
+#define MV_XLG_MAC_CTRL4_DEBUG_BUS_SELECT_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_DEBUG_BUS_SELECT_OFFS)
+
+#define MV_XLG_MAC_CTRL4_MASK_PCS_RESET_OFFS 3
+#define MV_XLG_MAC_CTRL4_MASK_PCS_RESET_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_MASK_PCS_RESET_OFFS)
+
+#define MV_XLG_MAC_CTRL4_ENABLE_SHORT_PREAMBLE_FOR_XLG_OFFS 4
+#define MV_XLG_MAC_CTRL4_ENABLE_SHORT_PREAMBLE_FOR_XLG_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_ENABLE_SHORT_PREAMBLE_FOR_XLG_OFFS)
+
+#define MV_XLG_MAC_CTRL4_FORWARD_802_3X_FC_EN_OFFS 5
+#define MV_XLG_MAC_CTRL4_FORWARD_802_3X_FC_EN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_FORWARD_802_3X_FC_EN_OFFS)
+
+#define MV_XLG_MAC_CTRL4_FORWARD_PFC_EN_OFFS 6
+#define MV_XLG_MAC_CTRL4_FORWARD_PFC_EN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_FORWARD_PFC_EN_OFFS)
+
+#define MV_XLG_MAC_CTRL4_FORWARD_UNKNOWN_FC_EN_OFFS 7
+#define MV_XLG_MAC_CTRL4_FORWARD_UNKNOWN_FC_EN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_FORWARD_UNKNOWN_FC_EN_OFFS)
+
+#define MV_XLG_MAC_CTRL4_USE_XPCS_OFFS 8
+#define MV_XLG_MAC_CTRL4_USE_XPCS_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_USE_XPCS_OFFS)
+
+#define MV_XLG_MAC_CTRL4_DMA_INTERFACE_IS_64_BIT_OFFS 9
+#define MV_XLG_MAC_CTRL4_DMA_INTERFACE_IS_64_BIT_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_DMA_INTERFACE_IS_64_BIT_OFFS)
+
+#define MV_XLG_MAC_CTRL4_TX_DMA_INTERFACE_BITS_OFFS 10
+#define MV_XLG_MAC_CTRL4_TX_DMA_INTERFACE_BITS_MASK \
+ (0x00000003 << MV_XLG_MAC_CTRL4_TX_DMA_INTERFACE_BITS_OFFS)
+
+#define MV_XLG_MAC_CTRL4_MAC_MODE_DMA_1G_OFFS 12
+#define MV_XLG_MAC_CTRL4_MAC_MODE_DMA_1G_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_MAC_MODE_DMA_1G_OFFS)
+
+#define MV_XLG_MAC_CTRL4_EN_IDLE_CHECK_FOR_LINK 14
+#define MV_XLG_MAC_CTRL4_EN_IDLE_CHECK_FOR_LINK_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_EN_IDLE_CHECK_FOR_LINK)
+
+/* Port Mac Control5 */
+#define MV_XLG_PORT_MAC_CTRL5_REG (0x0088)
+#define MV_XLG_MAC_CTRL5_TXIPGLENGTH_OFFS 0
+#define MV_XLG_MAC_CTRL5_TXIPGLENGTH_MASK \
+ (0x0000000f << MV_XLG_MAC_CTRL5_TXIPGLENGTH_OFFS)
+
+#define MV_XLG_MAC_CTRL5_PREAMBLELENGTHTX_OFFS 4
+#define MV_XLG_MAC_CTRL5_PREAMBLELENGTHTX_MASK \
+ (0x00000007 << MV_XLG_MAC_CTRL5_PREAMBLELENGTHTX_OFFS)
+
+#define MV_XLG_MAC_CTRL5_PREAMBLELENGTHRX_OFFS 7
+#define MV_XLG_MAC_CTRL5_PREAMBLELENGTHRX_MASK \
+ (0x00000007 << MV_XLG_MAC_CTRL5_PREAMBLELENGTHRX_OFFS)
+
+#define MV_XLG_MAC_CTRL5_TXNUMCRCBYTES_OFFS 10
+#define MV_XLG_MAC_CTRL5_TXNUMCRCBYTES_MASK \
+ (0x00000007 << MV_XLG_MAC_CTRL5_TXNUMCRCBYTES_OFFS)
+
+#define MV_XLG_MAC_CTRL5_RXNUMCRCBYTES_OFFS 13
+#define MV_XLG_MAC_CTRL5_RXNUMCRCBYTES_MASK \
+ (0x00000007 << MV_XLG_MAC_CTRL5_RXNUMCRCBYTES_OFFS)
+
+/* External Control */
+#define MV_XLG_MAC_EXT_CTRL_REG (0x0090)
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL0_OFFS 0
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL0_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL0_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL1_OFFS 1
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL1_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL1_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL2_OFFS 2
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL2_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL2_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL3_OFFS 3
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL3_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL3_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL4_OFFS 4
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL4_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL4_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL5_OFFS 5
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL5_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL5_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL6_OFFS 6
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL6_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL6_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL7_OFFS 7
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL7_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL7_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL8_OFFS 8
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL8_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL8_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL9_OFFS 9
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL9_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL9_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_10_OFFS 10
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_10_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXT_CTRL_10_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_11_OFFS 11
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_11_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXT_CTRL_11_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_12_OFFS 12
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_12_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXT_CTRL_12_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_13_OFFS 13
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_13_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXT_CTRL_13_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_14_OFFS 14
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_14_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXT_CTRL_14_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_15_OFFS 15
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_15_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXT_CTRL_15_OFFS)
+
+/* Macro Control */
+#define MV_XLG_MAC_MACRO_CTRL_REG (0x0094)
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_0_OFFS 0
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_0_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_0_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_1_OFFS 1
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_1_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_1_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_2_OFFS 2
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_2_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_2_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_3_OFFS 3
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_3_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_3_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_4_OFFS 4
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_4_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_4_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_5_OFFS 5
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_5_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_5_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_6_OFFS 6
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_6_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_6_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_7_OFFS 7
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_7_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_7_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_8_OFFS 8
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_8_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_8_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_9_OFFS 9
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_9_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_9_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_10_OFFS 10
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_10_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_10_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_11_OFFS 11
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_11_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_11_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_12_OFFS 12
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_12_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_12_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_13_OFFS 13
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_13_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_13_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_14_OFFS 14
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_14_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_14_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_15_OFFS 15
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_15_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_15_OFFS)
+
+#define MV_XLG_MAC_DIC_PPM_IPG_REDUCE_REG (0x0094)
+
+/* Port Interrupt Cause */
+#define MV_XLG_INTERRUPT_CAUSE_REG (0x0014)
+/* Port Interrupt Mask */
+#define MV_XLG_INTERRUPT_MASK_REG (0x0018)
+#define MV_XLG_SUMMARY_INTERRUPT_OFFSET 0
+#define MV_XLG_SUMMARY_INTERRUPT_MASK \
+ (0x1 << MV_XLG_SUMMARY_INTERRUPT_OFFSET)
+#define MV_XLG_INTERRUPT_LINK_CHANGE_OFFS 1
+#define MV_XLG_INTERRUPT_LINK_CHANGE_MASK \
+ (0x1 << MV_XLG_INTERRUPT_LINK_CHANGE_OFFS)
+
+/* Port Interrupt Summary Cause */
+#define MV_XLG_EXTERNAL_INTERRUPT_CAUSE_REG (0x0058)
+/* Port Interrupt Summary Mask */
+#define MV_XLG_EXTERNAL_INTERRUPT_MASK_REG (0x005C)
+#define MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_OFFS 1
+#define MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_MASK \
+ (0x1 << MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_OFFS)
+
+/* All PPV22 addresses are 40-bit */
+#define MVPP22_ADDR_HIGH_SIZE 8
+#define MVPP22_ADDR_HIGH_MASK ((1<<MVPP22_ADDR_HIGH_SIZE) - 1)
+#define MVPP22_ADDR_MASK (0xFFFFFFFFFF)
+
+/* Desc addr shift */
+#define MVPP21_DESC_ADDR_SHIFT 0 /* Applies to RXQ, AGGR_TXQ */
+#define MVPP22_DESC_ADDR_SHIFT 8 /* Applies to RXQ, AGGR_TXQ */
+
+/* AXI Bridge Registers */
+#define MVPP22_AXI_BM_WR_ATTR_REG 0x4100
+#define MVPP22_AXI_BM_RD_ATTR_REG 0x4104
+#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG 0x4110
+#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG 0x4114
+#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG 0x4118
+#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c
+#define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120
+#define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130
+#define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150
+#define MVPP22_AXI_RD_SNP_CODE_REG 0x4154
+#define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160
+#define MVPP22_AXI_WR_SNP_CODE_REG 0x4164
+
+#define MVPP22_AXI_RD_CODE_MASK 0x33
+#define MVPP22_AXI_WR_CODE_MASK 0x33
+
+#define MVPP22_AXI_ATTR_CACHE_OFFS 0
+#define MVPP22_AXI_ATTR_CACHE_SIZE 4
+#define MVPP22_AXI_ATTR_CACHE_MASK 0x0000000F
+
+#define MVPP22_AXI_ATTR_QOS_OFFS 4
+#define MVPP22_AXI_ATTR_QOS_SIZE 4
+#define MVPP22_AXI_ATTR_QOS_MASK 0x000000F0
+
+#define MVPP22_AXI_ATTR_TC_OFFS 8
+#define MVPP22_AXI_ATTR_TC_SIZE 4
+#define MVPP22_AXI_ATTR_TC_MASK 0x00000F00
+
+#define MVPP22_AXI_ATTR_DOMAIN_OFFS 12
+#define MVPP22_AXI_ATTR_DOMAIN_SIZE 2
+#define MVPP22_AXI_ATTR_DOMAIN_MASK 0x00003000
+
+#define MVPP22_AXI_ATTR_SNOOP_CNTRL_BIT BIT(16)
+
+/* PHY address register */
+#define MV_SMI_PHY_ADDRESS_REG(n) (0xC + 0x4 * (n))
+#define MV_SMI_PHY_ADDRESS_PHYAD_OFFS 0
+#define MV_SMI_PHY_ADDRESS_PHYAD_MASK \
+ (0x1F << MV_SMI_PHY_ADDRESS_PHYAD_OFFS)
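+
+/*
+ * For illustration: MV_SMI_PHY_ADDRESS_REG(0) resolves to offset 0xC and
+ * MV_SMI_PHY_ADDRESS_REG(1) to 0x10, i.e. consecutive 32-bit registers with
+ * the 5-bit PHY address held in their lowest bits.
+ */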
+
+/* Marvell tag types */
+enum Mvpp2TagType {
+ MVPP2_TAG_TYPE_NONE = 0,
+ MVPP2_TAG_TYPE_MH = 1,
+ MVPP2_TAG_TYPE_DSA = 2,
+ MVPP2_TAG_TYPE_EDSA = 3,
+ MVPP2_TAG_TYPE_VLAN = 4,
+ MVPP2_TAG_TYPE_LAST = 5
+};
+
+/* Parser constants */
+#define MVPP2_PRS_TCAM_SRAM_SIZE 256
+#define MVPP2_PRS_TCAM_WORDS 6
+#define MVPP2_PRS_SRAM_WORDS 4
+#define MVPP2_PRS_FLOW_ID_SIZE 64
+#define MVPP2_PRS_FLOW_ID_MASK 0x3f
+#define MVPP2_PRS_TCAM_ENTRY_INVALID 1
+#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5)
+#define MVPP2_PRS_IPV4_HEAD 0x40
+#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0
+#define MVPP2_PRS_IPV4_MC 0xe0
+#define MVPP2_PRS_IPV4_MC_MASK 0xf0
+#define MVPP2_PRS_IPV4_BC_MASK 0xff
+#define MVPP2_PRS_IPV4_IHL 0x5
+#define MVPP2_PRS_IPV4_IHL_MASK 0xf
+#define MVPP2_PRS_IPV6_MC 0xff
+#define MVPP2_PRS_IPV6_MC_MASK 0xff
+#define MVPP2_PRS_IPV6_HOP_MASK 0xff
+#define MVPP2_PRS_TCAM_PROTO_MASK 0xff
+#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f
+#define MVPP2_PRS_DBL_VLANS_MAX 100
+
+/*
+ * Tcam structure:
+ * - lookup ID - 4 bits
+ * - port ID - 1 byte
+ * - additional information - 1 byte
+ * - header data - 8 bytes
+ * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
+ */
+#define MVPP2_PRS_AI_BITS 8
+#define MVPP2_PRS_PORT_MASK 0xff
+#define MVPP2_PRS_LU_MASK 0xf
+#define MVPP2_PRS_TCAM_DATA_BYTE(offs) (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
+#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) (((offs) * 2) - ((offs) % 2) + 2)
+#define MVPP2_PRS_TCAM_AI_BYTE 16
+#define MVPP2_PRS_TCAM_PORT_BYTE 17
+#define MVPP2_PRS_TCAM_LU_BYTE 20
+#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
+#define MVPP2_PRS_TCAM_INV_WORD 5
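+
+/*
+ * Worked example of the mapping above (derived from the macros, for
+ * illustration): header-data byte 5 is stored at entry byte
+ * MVPP2_PRS_TCAM_DATA_BYTE(5) = 9, i.e. TCAM word 2, byte lane 1, and its
+ * enable mask at MVPP2_PRS_TCAM_DATA_BYTE_EN(5) = 11. Each 32-bit TCAM word
+ * thus carries two header-data bytes in its lower half and their enable
+ * bytes in its upper half.
+ */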
+/* Tcam entries ID */
+#define MVPP2_PE_DROP_ALL 0
+#define MVPP2_PE_FIRST_FREE_TID 1
+#define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
+#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
+#define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
+#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
+#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
+#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26)
+#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
+#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
+#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
+#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
+#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
+#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
+#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
+#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
+#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
+#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
+#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
+#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
+#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
+#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
+#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
+#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
+#define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
+#define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
+#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+
+/*
+ * Sram structure
+ * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
+ */
+#define MVPP2_PRS_SRAM_RI_OFFS 0
+#define MVPP2_PRS_SRAM_RI_WORD 0
+#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32
+#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1
+#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
+#define MVPP2_PRS_SRAM_SHIFT_OFFS 64
+#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
+#define MVPP2_PRS_SRAM_UDF_OFFS 73
+#define MVPP2_PRS_SRAM_UDF_BITS 8
+#define MVPP2_PRS_SRAM_UDF_MASK 0xff
+#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81
+#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82
+#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7
+#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1
+#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3
+#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89
+#define MVPP2_PRS_SRAM_AI_OFFS 90
+#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98
+#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8
+#define MVPP2_PRS_SRAM_AI_MASK 0xff
+#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106
+#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf
+#define MVPP2_PRS_SRAM_LU_DONE_BIT 110
+#define MVPP2_PRS_SRAM_LU_GEN_BIT 111
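+
+/*
+ * The SRAM offsets above are absolute bit positions within the
+ * MVPP2_PRS_SRAM_WORDS * 32-bit wide entry; a field at bit offset B lands in
+ * word B / 32, bit B % 32. For example, MVPP2_PRS_SRAM_NEXT_LU_OFFS (106)
+ * selects word 3, bit 10.
+ */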
+
+/* Sram result info bits assignment */
+#define MVPP2_PRS_RI_MAC_ME_MASK 0x1
+#define MVPP2_PRS_RI_DSA_MASK 0x2
+#define MVPP2_PRS_RI_VLAN_MASK 0xc
+#define MVPP2_PRS_RI_VLAN_NONE ~(BIT(2) | BIT(3))
+#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
+#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
+#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
+#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
+#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
+#define MVPP2_PRS_RI_L2_CAST_MASK 0x600
+#define MVPP2_PRS_RI_L2_UCAST ~(BIT(9) | BIT(10))
+#define MVPP2_PRS_RI_L2_MCAST BIT(9)
+#define MVPP2_PRS_RI_L2_BCAST BIT(10)
+#define MVPP2_PRS_RI_PPPOE_MASK 0x800
+#define MVPP2_PRS_RI_L3_PROTO_MASK 0x7000
+#define MVPP2_PRS_RI_L3_UN ~(BIT(12) | BIT(13) | BIT(14))
+#define MVPP2_PRS_RI_L3_IP4 BIT(12)
+#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
+#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
+#define MVPP2_PRS_RI_L3_IP6 BIT(14)
+#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
+#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
+#define MVPP2_PRS_RI_L3_ADDR_MASK 0x18000
+#define MVPP2_PRS_RI_L3_UCAST ~(BIT(15) | BIT(16))
+#define MVPP2_PRS_RI_L3_MCAST BIT(15)
+#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
+#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
+#define MVPP2_PRS_RI_UDF3_MASK 0x300000
+#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
+#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
+#define MVPP2_PRS_RI_L4_TCP BIT(22)
+#define MVPP2_PRS_RI_L4_UDP BIT(23)
+#define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23))
+#define MVPP2_PRS_RI_UDF7_MASK 0x60000000
+#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
+#define MVPP2_PRS_RI_DROP_MASK 0x80000000
+
+/* Sram additional info bits assignment */
+#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
+#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
+#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1)
+#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2)
+#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3)
+#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4)
+#define MVPP2_PRS_SINGLE_VLAN_AI 0
+#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7)
+
+/* DSA/EDSA type */
+#define MVPP2_PRS_TAGGED TRUE
+#define MVPP2_PRS_UNTAGGED FALSE
+#define MVPP2_PRS_EDSA TRUE
+#define MVPP2_PRS_DSA FALSE
+
+/* MAC entries, shadow udf */
+enum Mvpp2PrsUdf {
+ MVPP2_PRS_UDF_MAC_DEF,
+ MVPP2_PRS_UDF_MAC_RANGE,
+ MVPP2_PRS_UDF_L2_DEF,
+ MVPP2_PRS_UDF_L2_DEF_COPY,
+ MVPP2_PRS_UDF_L2_USER,
+};
+
+/* Lookup ID */
+enum Mvpp2PrsLookup {
+ MVPP2_PRS_LU_MH,
+ MVPP2_PRS_LU_MAC,
+ MVPP2_PRS_LU_DSA,
+ MVPP2_PRS_LU_VLAN,
+ MVPP2_PRS_LU_L2,
+ MVPP2_PRS_LU_PPPOE,
+ MVPP2_PRS_LU_IP4,
+ MVPP2_PRS_LU_IP6,
+ MVPP2_PRS_LU_FLOWS,
+ MVPP2_PRS_LU_LAST,
+};
+
+/* L3 cast enum */
+enum Mvpp2PrsL3Cast {
+ MVPP2_PRS_L3_UNI_CAST,
+ MVPP2_PRS_L3_MULTI_CAST,
+ MVPP2_PRS_L3_BROAD_CAST
+};
+
+/* Classifier constants */
+#define MVPP2_CLS_FLOWS_TBL_SIZE 512
+#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
+#define MVPP2_CLS_LKP_TBL_SIZE 64
+
+/* BM cookie (32 bits) definition */
+#define MVPP2_BM_COOKIE_POOL_OFFS 8
+#define MVPP2_BM_COOKIE_CPU_OFFS 24
+
+/*
+ * The MVPP2_TX_DESC and MVPP2_RX_DESC structures describe the layout of
+ * the transmit and receive DMA descriptors; this layout is therefore
+ * fixed by the hardware design.
+ */
+#define MVPP2_TXD_L3_OFF_SHIFT 0
+#define MVPP2_TXD_IP_HLEN_SHIFT 8
+#define MVPP2_TXD_L4_CSUM_FRAG BIT(13)
+#define MVPP2_TXD_L4_CSUM_NOT BIT(14)
+#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15)
+#define MVPP2_TXD_PADDING_DISABLE BIT(23)
+#define MVPP2_TXD_L4_UDP BIT(24)
+#define MVPP2_TXD_L3_IP6 BIT(26)
+#define MVPP2_TXD_L_DESC BIT(28)
+#define MVPP2_TXD_F_DESC BIT(29)
+
+#define MVPP2_RXD_ERR_SUMMARY BIT(15)
+#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
+#define MVPP2_RXD_ERR_CRC 0x0
+#define MVPP2_RXD_ERR_OVERRUN BIT(13)
+#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
+#define MVPP2_RXD_BM_POOL_ID_OFFS 16
+#define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18))
+#define MVPP2_RXD_HWF_SYNC BIT(21)
+#define MVPP2_RXD_L4_CSUM_OK BIT(22)
+#define MVPP2_RXD_IP4_HEADER_ERR BIT(24)
+#define MVPP2_RXD_L4_TCP BIT(25)
+#define MVPP2_RXD_L4_UDP BIT(26)
+#define MVPP2_RXD_L3_IP4 BIT(28)
+#define MVPP2_RXD_L3_IP6 BIT(30)
+#define MVPP2_RXD_BUF_HDR BIT(31)
+
+typedef struct {
+ UINT32 command; /* Options used by HW for packet transmission */
+ UINT8 PacketOffset; /* the offset from the buffer beginning */
+ UINT8 PhysTxq; /* destination queue ID */
+ UINT16 DataSize; /* data size of transmitted packet in bytes */
+ UINT64 RsrvdHwCmd1; /* HwCmd (BM, PON, PNC) */
+ UINT64 BufPhysAddrHwCmd2;
+ UINT64 BufCookieBmQsetHwCmd3;
+} MVPP2_TX_DESC;
+
+typedef struct {
+ UINT32 status; /* info about received packet */
+ UINT16 reserved1; /* ParserInfo (for future use, PnC) */
+ UINT16 DataSize; /* size of received packet in bytes */
+ UINT16 RsrvdGem; /* GemPortId (for future use, PON) */
+ UINT16 RsrvdL4csum; /* CsumL4 (for future use, PnC) */
+ UINT32 RsrvdTimestamp;
+ UINT64 BufPhysAddrKeyHash;
+ UINT64 BufCookieBmQsetClsInfo;
+} MVPP2_RX_DESC;
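+
+/*
+ * Note on the combined fields above: judging by their names,
+ * BufPhysAddrKeyHash and BufCookieBmQsetClsInfo pack several values into one
+ * 64-bit word each; the receive path in this driver only extracts the 40-bit
+ * buffer addresses from them using MVPP22_ADDR_MASK (see Pp2SnpReceive).
+ */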
+
+union Mvpp2PrsTcamEntry {
+ UINT32 Word[MVPP2_PRS_TCAM_WORDS];
+ UINT8 Byte[MVPP2_PRS_TCAM_WORDS * 4];
+};
+
+union Mvpp2PrsSramEntry {
+ UINT32 Word[MVPP2_PRS_SRAM_WORDS];
+ UINT8 Byte[MVPP2_PRS_SRAM_WORDS * 4];
+};
+
+typedef struct {
+ UINT32 Index;
+ union Mvpp2PrsTcamEntry Tcam;
+ union Mvpp2PrsSramEntry Sram;
+} MVPP2_PRS_ENTRY;
+
+typedef struct {
+ BOOLEAN Valid;
+ BOOLEAN Finish;
+
+ /* Lookup ID */
+ INT32 Lu;
+
+ /* User defined offset */
+ INT32 Udf;
+
+ /* Result info */
+ UINT32 Ri;
+ UINT32 RiMask;
+} MVPP2_PRS_SHADOW;
+
+typedef struct {
+ UINT32 Index;
+ UINT32 Data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
+} MVPP2_CLS_FLOW_ENTRY;
+
+typedef struct {
+ UINT32 Lkpid;
+ UINT32 Way;
+ UINT32 Data;
+} MVPP2_CLS_LOOKUP_ENTRY;
+
+typedef struct {
+ UINT32 NextBuffPhysAddr;
+ UINT32 NextBuffVirtAddr;
+ UINT16 ByteCount;
+ UINT16 info;
+ UINT8 reserved1; /* BmQset (for future use, BM) */
+} MVPP2_BUFF_HDR;
+
+/* Buffer header info bits */
+#define MVPP2_B_HDR_INFO_MC_ID_MASK 0xfff
+#define MVPP2_B_HDR_INFO_MC_ID(info) ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
+#define MVPP2_B_HDR_INFO_LAST_OFFS 12
+#define MVPP2_B_HDR_INFO_LAST_MASK BIT(12)
+#define MVPP2_B_HDR_INFO_IS_LAST(info) ((info & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
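+
+/*
+ * For illustration: MVPP2_B_HDR_INFO_IS_LAST(info) evaluates to 1 only when
+ * bit 12 of the info field is set, i.e. for the last buffer header in a
+ * chain, and to 0 otherwise.
+ */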
+
+/* SerDes */
+#define MVPP2_SFI_LANE_COUNT 1
+
+/* Net Complex */
+enum MvNetcTopology {
+ MV_NETC_GE_MAC0_RXAUI_L23 = BIT(0),
+ MV_NETC_GE_MAC0_RXAUI_L45 = BIT(1),
+ MV_NETC_GE_MAC0_XAUI = BIT(2),
+ MV_NETC_GE_MAC2_SGMII = BIT(3),
+ MV_NETC_GE_MAC3_SGMII = BIT(4),
+ MV_NETC_GE_MAC3_RGMII = BIT(5),
+};
+
+enum MvNetcPhase {
+ MV_NETC_FIRST_PHASE,
+ MV_NETC_SECOND_PHASE,
+};
+
+enum MvNetcSgmiiXmiMode {
+ MV_NETC_GBE_SGMII,
+ MV_NETC_GBE_XMII,
+};
+
+enum MvNetcMiiMode {
+ MV_NETC_GBE_RGMII,
+ MV_NETC_GBE_MII,
+};
+
+enum MvNetcLanes {
+ MV_NETC_LANE_23,
+ MV_NETC_LANE_45,
+};
+
+/* Port related */
+enum MvReset {
+ RESET,
+ UNRESET
+};
+
+enum Mvpp2Command {
+ MVPP2_START, /* Start */
+ MVPP2_STOP, /* Stop */
+ MVPP2_PAUSE, /* Pause */
+ MVPP2_RESTART /* Restart */
+};
+
+enum MvPortDuplex {
+ MV_PORT_DUPLEX_AN,
+ MV_PORT_DUPLEX_HALF,
+ MV_PORT_DUPLEX_FULL
+};
+
+#endif /* __MVPP2_LIB_HW__ */
diff --git a/Silicon/Marvell/Drivers/Net/Pp2Dxe/Pp2Dxe.c b/Silicon/Marvell/Drivers/Net/Pp2Dxe/Pp2Dxe.c
new file mode 100644
index 0000000000..b0a38b3c90
--- /dev/null
+++ b/Silicon/Marvell/Drivers/Net/Pp2Dxe/Pp2Dxe.c
@@ -0,0 +1,1396 @@
+/********************************************************************************
+Copyright (C) 2016 Marvell International Ltd.
+
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include <Protocol/DevicePath.h>
+#include <Protocol/DriverBinding.h>
+#include <Protocol/SimpleNetwork.h>
+
+#include <Library/BaseLib.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/CacheMaintenanceLib.h>
+#include <Library/DebugLib.h>
+#include <Library/IoLib.h>
+#include <Library/MemoryAllocationLib.h>
+#include <Library/MvHwDescLib.h>
+#include <Library/NetLib.h>
+#include <Library/PcdLib.h>
+#include <Library/UefiBootServicesTableLib.h>
+#include <Library/UefiLib.h>
+
+#include "Mvpp2LibHw.h"
+#include "Mvpp2Lib.h"
+#include "Pp2Dxe.h"
+
+#define ReturnUnlock(tpl, status) do { gBS->RestoreTPL (tpl); return (status); } while(0)
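+
+/*
+ * ReturnUnlock pairs with the gBS->RaiseTPL (TPL_CALLBACK) call made at the
+ * top of the SNP entry points below: it restores the saved TPL before
+ * returning, so every early-exit path releases the raised task priority.
+ */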
+
+DECLARE_A7K8K_PP2_TEMPLATE;
+
+STATIC PP2_DEVICE_PATH Pp2DevicePathTemplate = {
+ {
+ {
+ MESSAGING_DEVICE_PATH,
+ MSG_MAC_ADDR_DP,
+ {
+ (UINT8) (sizeof(MAC_ADDR_DEVICE_PATH)),
+ (UINT8) ((sizeof(MAC_ADDR_DEVICE_PATH)) >> 8)
+ }
+ },
+ { { 0 } },
+ 0
+ },
+ {
+ END_DEVICE_PATH_TYPE,
+ END_ENTIRE_DEVICE_PATH_SUBTYPE,
+ { sizeof(EFI_DEVICE_PATH_PROTOCOL), 0 }
+ }
+};
+
+EFI_SIMPLE_NETWORK_PROTOCOL Pp2SnpTemplate = {
+ EFI_SIMPLE_NETWORK_PROTOCOL_REVISION, // Revision
+ Pp2SnpStart, // Start
+ Pp2SnpStop, // Stop
+ Pp2DxeSnpInitialize, // Initialize
+ Pp2SnpReset, // Reset
+ Pp2SnpShutdown, // Shutdown
+ Pp2SnpReceiveFilters, // ReceiveFilters
+ Pp2SnpStationAddress, // StationAddress
+ Pp2SnpNetStat, // Statistics
+ Pp2SnpIpToMac, // MCastIpToMac
+ NULL, // NvData
+ Pp2SnpGetStatus, // GetStatus
+ Pp2SnpTransmit, // Transmit
+ Pp2SnpReceive, // Receive
+ NULL, // WaitForPacket
+ NULL // Mode
+};
+
+EFI_SIMPLE_NETWORK_MODE Pp2SnpModeTemplate = {
+ EfiSimpleNetworkStopped, // State
+ NET_ETHER_ADDR_LEN, // HwAddressSize
+ sizeof (ETHER_HEAD), // MediaHeaderSize
+ EFI_PAGE_SIZE, // MaxPacketSize
+ 0, // NvRamSize
+ 0, // NvRamAccessSize
+ EFI_SIMPLE_NETWORK_RECEIVE_UNICAST |
+ EFI_SIMPLE_NETWORK_RECEIVE_MULTICAST |
+ EFI_SIMPLE_NETWORK_RECEIVE_BROADCAST |
+ EFI_SIMPLE_NETWORK_RECEIVE_PROMISCUOUS |
+ EFI_SIMPLE_NETWORK_RECEIVE_PROMISCUOUS_MULTICAST, // ReceiveFilterMask
+ EFI_SIMPLE_NETWORK_RECEIVE_UNICAST |
+ EFI_SIMPLE_NETWORK_RECEIVE_MULTICAST |
+ EFI_SIMPLE_NETWORK_RECEIVE_BROADCAST, // ReceiveFilterSetting
+ MAX_MCAST_FILTER_CNT, // MacMCastFilterCount
+ 0, // MCastFilterCount
+ {
+ { { 0 } }
+ }, // MCastFilter
+ {
+ { 0 }
+ }, // CurrentAddress
+ {
+ { 0 }
+ }, // BroadcastAddress
+ {
+ { 0 }
+ }, // Permanent Address
+ NET_IFTYPE_ETHERNET, // IfType
+ TRUE, // MacAddressChangeable
+ FALSE, // MultipleTxSupported
+ TRUE, // MediaPresentSupported
+ FALSE // MediaPresent
+};
+
+#define QueueNext(off) ((((off) + 1) >= QUEUE_DEPTH) ? 0 : ((off) + 1))
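+
+/*
+ * The completion queue is a ring of QUEUE_DEPTH slots indexed by
+ * CompletionQueueHead/Tail. One slot is intentionally left unused, so
+ * QueueNext (Tail) == Head means "full" and Tail == Head means "empty";
+ * at most QUEUE_DEPTH - 1 transmit buffers can be outstanding at a time.
+ */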
+
+STATIC
+EFI_STATUS
+QueueInsert (
+ IN PP2DXE_CONTEXT *Pp2Context,
+ IN VOID *Buffer
+ )
+{
+
+ if (QueueNext (Pp2Context->CompletionQueueTail) == Pp2Context->CompletionQueueHead) {
+ return EFI_OUT_OF_RESOURCES;
+ }
+
+ Pp2Context->CompletionQueue[Pp2Context->CompletionQueueTail] = Buffer;
+ Pp2Context->CompletionQueueTail = QueueNext (Pp2Context->CompletionQueueTail);
+
+ return EFI_SUCCESS;
+}
+
+STATIC
+VOID *
+QueueRemove (
+ IN PP2DXE_CONTEXT *Pp2Context
+ )
+{
+ VOID *Buffer;
+
+ if (Pp2Context->CompletionQueueTail == Pp2Context->CompletionQueueHead) {
+ return NULL;
+ }
+
+ Buffer = Pp2Context->CompletionQueue[Pp2Context->CompletionQueueHead];
+ Pp2Context->CompletionQueue[Pp2Context->CompletionQueueHead] = NULL;
+ Pp2Context->CompletionQueueHead = QueueNext (Pp2Context->CompletionQueueHead);
+
+ return Buffer;
+}
+
+STATIC
+EFI_STATUS
+Pp2DxeBmPoolInit (
+ MVPP2_SHARED *Mvpp2Shared
+ )
+{
+ INTN Index;
+ UINT8 *PoolAddr;
+ UINT32 PoolSize;
+ EFI_STATUS Status;
+
+ ASSERT(MVPP2_BM_POOL_PTR_ALIGN >= sizeof(UINTN));
+
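+ /*
+ * The pool backing store holds two pointer-sized entries per buffer
+ * (presumably the physical address and the virtual cookie later passed to
+ * Mvpp2BmPoolPut), plus slack for MVPP2_BM_POOL_PTR_ALIGN alignment.
+ */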
+ PoolSize = (sizeof(VOID *) * MVPP2_BM_SIZE) * 2 + MVPP2_BM_POOL_PTR_ALIGN;
+
+ for (Index = 0; Index < MVPP2_BM_POOLS_NUM; Index++) {
+ /* BmIrqClear */
+ Mvpp2BmIrqClear(Mvpp2Shared, Index);
+ }
+
+ for (Index = 0; Index < MVPP2_MAX_PORT; Index++) {
+ Mvpp2Shared->BmPools[Index] = AllocateZeroPool (sizeof(MVPP2_BMS_POOL));
+
+ if (Mvpp2Shared->BmPools[Index] == NULL) {
+ Status = EFI_OUT_OF_RESOURCES;
+ goto FreePools;
+ }
+
+ Status = DmaAllocateAlignedBuffer (EfiBootServicesData,
+ EFI_SIZE_TO_PAGES (PoolSize),
+ MVPP2_BM_POOL_PTR_ALIGN,
+ (VOID **)&PoolAddr);
+ if (EFI_ERROR (Status)) {
+ goto FreeBmPools;
+ }
+
+ ZeroMem (PoolAddr, PoolSize);
+
+ Mvpp2Shared->BmPools[Index]->Id = Index;
+ Mvpp2Shared->BmPools[Index]->VirtAddr = (UINT32 *)PoolAddr;
+ Mvpp2Shared->BmPools[Index]->PhysAddr = (UINTN)PoolAddr;
+
+ Mvpp2BmPoolHwCreate(Mvpp2Shared, Mvpp2Shared->BmPools[Index], MVPP2_BM_SIZE);
+ }
+
+ return EFI_SUCCESS;
+
+FreeBmPools:
+ FreePool (Mvpp2Shared->BmPools[Index]);
+FreePools:
+ while (Index-- > 0) {
+ DmaFreeBuffer (
+ EFI_SIZE_TO_PAGES (PoolSize),
+ Mvpp2Shared->BmPools[Index]->VirtAddr
+ );
+ FreePool (Mvpp2Shared->BmPools[Index]);
+ }
+ return Status;
+}
+
+/* Enable and fill BM pool */
+STATIC
+EFI_STATUS
+Pp2DxeBmStart (
+ MVPP2_SHARED *Mvpp2Shared
+ )
+{
+ UINT8 *Buff, *BuffPhys;
+ INTN Index, Pool;
+
+ ASSERT(BM_ALIGN >= sizeof(UINTN));
+
+ for (Pool = 0; Pool < MVPP2_MAX_PORT; Pool++) {
+ Mvpp2BmPoolCtrl(Mvpp2Shared, Pool, MVPP2_START);
+ Mvpp2BmPoolBufsizeSet(Mvpp2Shared, Mvpp2Shared->BmPools[Pool], RX_BUFFER_SIZE);
+
+ /* Fill BM pool with Buffers */
+ for (Index = 0; Index < MVPP2_BM_SIZE; Index++) {
+ Buff = (UINT8 *)(Mvpp2Shared->BufferLocation.RxBuffers[Pool] + (Index * RX_BUFFER_SIZE));
+ if (Buff == NULL) {
+ return EFI_OUT_OF_RESOURCES;
+ }
+
+ BuffPhys = ALIGN_POINTER(Buff, BM_ALIGN);
+ Mvpp2BmPoolPut(Mvpp2Shared, Pool, (UINTN)BuffPhys, (UINTN)BuffPhys);
+ }
+ }
+
+ return EFI_SUCCESS;
+}
+
+STATIC
+VOID
+Pp2DxeStartDev (
+ IN PP2DXE_CONTEXT *Pp2Context
+ )
+{
+ PP2DXE_PORT *Port = &Pp2Context->Port;
+
+ /* Config classifier decoding table */
+ Mvpp2ClsPortConfig(Port);
+ Mvpp2ClsOversizeRxqSet(Port);
+ MvGop110PortEventsMask(Port);
+ MvGop110PortEnable(Port);
+
+ /* Enable transmit and receive */
+ Mvpp2EgressEnable(Port);
+ Mvpp2IngressEnable(Port);
+}
+
+STATIC
+EFI_STATUS
+Pp2DxeSetupRxqs (
+ IN PP2DXE_CONTEXT *Pp2Context
+ )
+{
+ INTN Queue;
+ EFI_STATUS Status;
+ MVPP2_RX_QUEUE *Rxq;
+
+ for (Queue = 0; Queue < RxqNumber; Queue++) {
+ Rxq = &Pp2Context->Port.Rxqs[Queue];
+ Rxq->DescsPhys = (DmaAddrT)Rxq->Descs;
+ if (Rxq->Descs == NULL) {
+ Status = EFI_OUT_OF_RESOURCES;
+ goto ErrCleanup;
+ }
+
+ Mvpp2RxqHwInit(&Pp2Context->Port, Rxq);
+ }
+
+ return EFI_SUCCESS;
+
+ErrCleanup:
+ Mvpp2CleanupRxqs(&Pp2Context->Port);
+ return Status;
+}
+
+STATIC
+EFI_STATUS
+Pp2DxeSetupTxqs (
+ IN PP2DXE_CONTEXT *Pp2Context
+ )
+{
+ INTN Queue;
+ MVPP2_TX_QUEUE *Txq;
+ EFI_STATUS Status;
+
+ for (Queue = 0; Queue < TxqNumber; Queue++) {
+ Txq = &Pp2Context->Port.Txqs[Queue];
+ Txq->DescsPhys = (DmaAddrT)Txq->Descs;
+ if (Txq->Descs == NULL) {
+ Status = EFI_OUT_OF_RESOURCES;
+ goto ErrCleanup;
+ }
+
+ Mvpp2TxqHwInit(&Pp2Context->Port, Txq);
+ }
+
+ return EFI_SUCCESS;
+
+ErrCleanup:
+ Mvpp2CleanupTxqs(&Pp2Context->Port);
+ return Status;
+}
+
+STATIC
+EFI_STATUS
+Pp2DxeSetupAggrTxqs (
+ IN PP2DXE_CONTEXT *Pp2Context
+ )
+{
+ MVPP2_TX_QUEUE *AggrTxq;
+ MVPP2_SHARED *Mvpp2Shared = Pp2Context->Port.Priv;
+
+ AggrTxq = Mvpp2Shared->AggrTxqs;
+ AggrTxq->DescsPhys = (DmaAddrT)AggrTxq->Descs;
+ if (AggrTxq->Descs == NULL) {
+ return EFI_OUT_OF_RESOURCES;
+ }
+
+ Mvpp2AggrTxqHwInit(AggrTxq, AggrTxq->Size, 0, Mvpp2Shared);
+
+ return EFI_SUCCESS;
+}
+
+STATIC
+EFI_STATUS
+Pp2DxeOpen (
+ IN PP2DXE_CONTEXT *Pp2Context
+ )
+{
+ PP2DXE_PORT *Port = &Pp2Context->Port;
+ MVPP2_SHARED *Mvpp2Shared = Pp2Context->Port.Priv;
+ UINT8 MacBcast[NET_ETHER_ADDR_LEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ UINT8 DevAddr[NET_ETHER_ADDR_LEN];
+ INTN Ret;
+ EFI_STATUS Status;
+
+ CopyMem (DevAddr, Pp2Context->Snp.Mode->CurrentAddress.Addr, NET_ETHER_ADDR_LEN);
+
+ Ret = Mvpp2PrsMacDaAccept(Mvpp2Shared, Port->Id, MacBcast, TRUE);
+ if (Ret != 0) {
+ return EFI_DEVICE_ERROR;
+ }
+ Ret = Mvpp2PrsMacDaAccept(Mvpp2Shared, Port->Id, DevAddr, TRUE);
+ if (Ret != 0) {
+ return EFI_DEVICE_ERROR;
+ }
+ Ret = Mvpp2PrsTagModeSet(Mvpp2Shared, Port->Id, MVPP2_TAG_TYPE_MH);
+ if (Ret != 0) {
+ return EFI_DEVICE_ERROR;
+ }
+ Ret = Mvpp2PrsDefFlow(Port);
+ if (Ret != 0) {
+ return EFI_DEVICE_ERROR;
+ }
+
+ Status = Pp2DxeSetupRxqs(Pp2Context);
+ if (EFI_ERROR(Status)) {
+ return Status;
+ }
+
+ Status = Pp2DxeSetupTxqs(Pp2Context);
+ if (EFI_ERROR(Status)) {
+ return Status;
+ }
+
+ Status = Pp2DxeSetupAggrTxqs(Pp2Context);
+ if (EFI_ERROR(Status)) {
+ return Status;
+ }
+
+ Pp2DxeStartDev(Pp2Context);
+
+ return EFI_SUCCESS;
+}
+
+STATIC
+EFI_STATUS
+Pp2DxeLatePortInitialize (
+ IN PP2DXE_CONTEXT *Pp2Context
+ )
+{
+ PP2DXE_PORT *Port = &Pp2Context->Port;
+ MVPP2_SHARED *Mvpp2Shared = Pp2Context->Port.Priv;
+ INTN Queue;
+
+ Port->TxRingSize = MVPP2_MAX_TXD;
+ Port->RxRingSize = MVPP2_MAX_RXD;
+
+ Mvpp2EgressDisable(Port);
+ MvGop110PortEventsMask(Port);
+ MvGop110PortDisable(Port);
+
+ Port->Txqs = AllocateZeroPool (sizeof(MVPP2_TX_QUEUE) * TxqNumber);
+ if (Port->Txqs == NULL) {
+ DEBUG((DEBUG_ERROR, "Failed to allocate Txqs\n"));
+ return EFI_OUT_OF_RESOURCES;
+ }
+
+ /* Use preallocated area */
+ Port->Txqs[0].Descs = Mvpp2Shared->BufferLocation.TxDescs[Port->Id];
+
+ for (Queue = 0; Queue < TxqNumber; Queue++) {
+ MVPP2_TX_QUEUE *Txq = &Port->Txqs[Queue];
+
+ Txq->Id = Mvpp2TxqPhys(Port->Id, Queue);
+ Txq->LogId = Queue;
+ Txq->Size = Port->TxRingSize;
+ }
+
+ Port->Rxqs = AllocateZeroPool (sizeof(MVPP2_RX_QUEUE) * RxqNumber);
+ if (Port->Rxqs == NULL) {
+ DEBUG((DEBUG_ERROR, "Failed to allocate Rxqs\n"));
+ return EFI_OUT_OF_RESOURCES;
+ }
+
+ Port->Rxqs[0].Descs = Mvpp2Shared->BufferLocation.RxDescs[Port->Id];
+
+ for (Queue = 0; Queue < TxqNumber; Queue++) {
+ MVPP2_RX_QUEUE *Rxq = &Port->Rxqs[Queue];
+
+ Rxq->Id = Queue + Port->FirstRxq;
+ Rxq->Size = Port->RxRingSize;
+ }
+
+ Mvpp2IngressDisable(Port);
+
+ Mvpp2DefaultsSet(Port);
+
+ return Pp2DxeOpen(Pp2Context);
+}
+
+STATIC
+EFI_STATUS
+Pp2DxeLateInitialize (
+ IN PP2DXE_CONTEXT *Pp2Context
+ )
+{
+ PP2DXE_PORT *Port = &Pp2Context->Port;
+ EFI_STATUS Status;
+
+ if (!Pp2Context->LateInitialized) {
+ /* Full init on first call */
+ Status = Pp2DxeLatePortInitialize(Pp2Context);
+ if (EFI_ERROR(Status)) {
+ DEBUG((DEBUG_ERROR, "Pp2Dxe: late initialization failed\n"));
+ return Status;
+ }
+
+ /* Attach pool to Rxq */
+ Mvpp2RxqLongPoolSet(Port, 0, Port->Id);
+ Mvpp2RxqShortPoolSet(Port, 0, Port->Id);
+
+ /*
+ * Mark this port as fully initialized,
+ * otherwise it will be initialized again
+ * during the next networking transaction,
+ * including memory allocation for
+ * TX/RX queues, PHY connect/configuration
+ * and address decode configuration.
+ */
+ Pp2Context->LateInitialized = TRUE;
+ } else {
+ /* Upon all following calls, this is enough */
+ MvGop110PortEventsMask(Port);
+ MvGop110PortEnable(Port);
+ }
+ return EFI_SUCCESS;
+}
+
+EFI_STATUS
+Pp2DxePhyInitialize (
+ PP2DXE_CONTEXT *Pp2Context
+ )
+{
+ EFI_STATUS Status;
+
+ Status = gBS->LocateProtocol (
+ &gMarvellPhyProtocolGuid,
+ NULL,
+ (VOID **) &Pp2Context->Phy
+ );
+
+ if (EFI_ERROR(Status)) {
+ return Status;
+ }
+
+ if (Pp2Context->Port.PhyIndex == 0xff) {
+ /* PHY initialization not required */
+ return EFI_SUCCESS;
+ }
+
+ Status = Pp2Context->Phy->Init(
+ Pp2Context->Phy,
+ Pp2Context->Port.PhyIndex,
+ Pp2Context->Port.PhyInterface,
+ &Pp2Context->PhyDev
+ );
+
+ if (EFI_ERROR(Status) && Status != EFI_TIMEOUT) {
+ return Status;
+ }
+
+ Pp2Context->Phy->Status(Pp2Context->Phy, Pp2Context->PhyDev);
+ Mvpp2SmiPhyAddrCfg(&Pp2Context->Port, Pp2Context->Port.GopIndex, Pp2Context->PhyDev->Addr);
+
+ return EFI_SUCCESS;
+}
+
+EFI_STATUS
+EFIAPI
+Pp2DxeSnpInitialize (
+ IN EFI_SIMPLE_NETWORK_PROTOCOL *This,
+ IN UINTN ExtraRxBufferSize OPTIONAL,
+ IN UINTN ExtraTxBufferSize OPTIONAL
+ )
+{
+ EFI_STATUS Status;
+ PP2DXE_CONTEXT *Pp2Context;
+ Pp2Context = INSTANCE_FROM_SNP(This);
+ UINT32 State = This->Mode->State;
+ EFI_TPL SavedTpl;
+
+ if (ExtraRxBufferSize != 0 || ExtraTxBufferSize != 0) {
+ DEBUG((DEBUG_ERROR, "Pp2Dxe%d: non-zero buffer requests\n", Pp2Context->Instance));
+ return EFI_UNSUPPORTED;
+ }
+
+ SavedTpl = gBS->RaiseTPL (TPL_CALLBACK);
+
+ if (State != EfiSimpleNetworkStarted) {
+ switch (State) {
+ case EfiSimpleNetworkInitialized:
+ DEBUG((DEBUG_WARN, "Pp2Dxe%d: already initialized\n", Pp2Context->Instance));
+ ReturnUnlock (SavedTpl, EFI_SUCCESS);
+ case EfiSimpleNetworkStopped:
+ DEBUG((DEBUG_WARN, "Pp2Dxe%d: network stopped\n", Pp2Context->Instance));
+ ReturnUnlock (SavedTpl, EFI_NOT_STARTED);
+ default:
+ DEBUG((DEBUG_ERROR, "Pp2Dxe%d: wrong state\n", Pp2Context->Instance));
+ ReturnUnlock (SavedTpl, EFI_DEVICE_ERROR);
+ }
+ }
+
+ /* Successfully started, change state to Initialized */
+ This->Mode->State = EfiSimpleNetworkInitialized;
+
+ if (Pp2Context->Initialized) {
+ ReturnUnlock(SavedTpl, EFI_SUCCESS);
+ }
+
+ Pp2Context->Initialized = TRUE;
+
+ Status = Pp2DxePhyInitialize(Pp2Context);
+ if (EFI_ERROR(Status)) {
+ ReturnUnlock (SavedTpl, Status);
+ }
+
+ Status = Pp2DxeLateInitialize(Pp2Context);
+ ReturnUnlock (SavedTpl, Status);
+}
+
+EFI_STATUS
+EFIAPI
+Pp2SnpStart (
+ IN EFI_SIMPLE_NETWORK_PROTOCOL *This
+ )
+{
+ PP2DXE_CONTEXT *Pp2Context;
+ UINT32 State = This->Mode->State;
+ EFI_TPL SavedTpl;
+
+ SavedTpl = gBS->RaiseTPL (TPL_CALLBACK);
+ Pp2Context = INSTANCE_FROM_SNP(This);
+
+ if (State != EfiSimpleNetworkStopped) {
+ switch (State) {
+ case EfiSimpleNetworkStarted:
+ case EfiSimpleNetworkInitialized:
+ DEBUG((DEBUG_WARN, "Pp2Dxe%d: already initialized\n", Pp2Context->Instance));
+ ReturnUnlock (SavedTpl, EFI_ALREADY_STARTED);
+ default:
+ DEBUG((DEBUG_ERROR, "Pp2Dxe%d: wrong state\n", Pp2Context->Instance));
+ ReturnUnlock (SavedTpl, EFI_DEVICE_ERROR);
+ }
+ }
+
+ This->Mode->State = EfiSimpleNetworkStarted;
+ ReturnUnlock (SavedTpl, EFI_SUCCESS);
+}
+
+EFI_STATUS
+EFIAPI
+Pp2SnpStop (
+ IN EFI_SIMPLE_NETWORK_PROTOCOL *This
+ )
+{
+ EFI_TPL SavedTpl;
+ SavedTpl = gBS->RaiseTPL (TPL_CALLBACK);
+ PP2DXE_CONTEXT *Pp2Context = INSTANCE_FROM_SNP(This);
+ UINT32 State = This->Mode->State;
+
+ if (State != EfiSimpleNetworkStarted && State != EfiSimpleNetworkInitialized) {
+ switch (State) {
+ case EfiSimpleNetworkStopped:
+ DEBUG((DEBUG_WARN, "Pp2Dxe%d: not started\n", Pp2Context->Instance));
+ ReturnUnlock (SavedTpl, EFI_NOT_STARTED);
+ default:
+ DEBUG((DEBUG_ERROR, "Pp2Dxe%d: wrong state\n", Pp2Context->Instance));
+ ReturnUnlock (SavedTpl, EFI_DEVICE_ERROR);
+ }
+ }
+
+ This->Mode->State = EfiSimpleNetworkStopped;
+ ReturnUnlock (SavedTpl, EFI_SUCCESS);
+}
+
+EFI_STATUS
+EFIAPI
+Pp2SnpReset (
+ IN EFI_SIMPLE_NETWORK_PROTOCOL *This,
+ IN BOOLEAN ExtendedVerification
+ )
+{
+ return EFI_SUCCESS;
+}
+
+VOID
+EFIAPI
+Pp2DxeHalt (
+ IN EFI_EVENT Event,
+ IN VOID *Context
+ )
+{
+ PP2DXE_CONTEXT *Pp2Context = Context;
+ PP2DXE_PORT *Port = &Pp2Context->Port;
+ MVPP2_SHARED *Mvpp2Shared = Pp2Context->Port.Priv;
+ STATIC BOOLEAN CommonPartHalted = FALSE;
+ INTN Index;
+
+ if (!CommonPartHalted) {
+ for (Index = 0; Index < MVPP2_MAX_PORT; Index++) {
+ Mvpp2BmStop(Mvpp2Shared, Index);
+ }
+
+ CommonPartHalted = TRUE;
+ }
+
+ Mvpp2TxqDrainSet(Port, 0, TRUE);
+ Mvpp2IngressDisable(Port);
+ Mvpp2EgressDisable(Port);
+
+ MvGop110PortEventsMask(Port);
+ MvGop110PortDisable(Port);
+}
+
+EFI_STATUS
+EFIAPI
+Pp2SnpShutdown (
+ IN EFI_SIMPLE_NETWORK_PROTOCOL *This
+ )
+{
+ EFI_TPL SavedTpl;
+ SavedTpl = gBS->RaiseTPL (TPL_CALLBACK);
+ PP2DXE_CONTEXT *Pp2Context = INSTANCE_FROM_SNP(This);
+ UINT32 State = This->Mode->State;
+
+ if (State != EfiSimpleNetworkInitialized) {
+ switch (State) {
+ case EfiSimpleNetworkStopped:
+ DEBUG((DEBUG_WARN, "Pp2Dxe%d: not started\n", Pp2Context->Instance));
+ ReturnUnlock (SavedTpl, EFI_NOT_STARTED);
+ case EfiSimpleNetworkStarted:
+ /* Fall through */
+ default:
+ DEBUG((DEBUG_ERROR, "Pp2Dxe%d: wrong state\n", Pp2Context->Instance));
+ ReturnUnlock (SavedTpl, EFI_DEVICE_ERROR);
+ }
+ }
+
+ ReturnUnlock (SavedTpl, EFI_SUCCESS);
+}
+
+EFI_STATUS
+EFIAPI
+Pp2SnpReceiveFilters (
+ IN EFI_SIMPLE_NETWORK_PROTOCOL *This,
+ IN UINT32 Enable,
+ IN UINT32 Disable,
+ IN BOOLEAN ResetMCastFilter,
+ IN UINTN MCastFilterCnt OPTIONAL,
+ IN EFI_MAC_ADDRESS *MCastFilter OPTIONAL
+ )
+{
+ return EFI_SUCCESS;
+}
+
+EFI_STATUS
+EFIAPI
+Pp2SnpStationAddress (
+ IN EFI_SIMPLE_NETWORK_PROTOCOL *Snp,
+ IN BOOLEAN Reset,
+ IN EFI_MAC_ADDRESS *NewMac
+)
+{
+ PP2DXE_CONTEXT *Pp2Context = INSTANCE_FROM_SNP(Snp);
+ PP2_DEVICE_PATH *Pp2DevicePath = Pp2Context->DevicePath;
+ PP2DXE_PORT *Port = &Pp2Context->Port;
+ MVPP2_SHARED *Mvpp2Shared = Pp2Context->Port.Priv;
+ UINT32 State = Snp->Mode->State;
+ EFI_TPL SavedTpl;
+ INTN Ret;
+
+ /* Check Snp instance */
+ ASSERT(Snp != NULL);
+
+ /* Serialize access to data and registers */
+ SavedTpl = gBS->RaiseTPL (TPL_CALLBACK);
+
+ /* Check that driver was started and initialised */
+ if (State != EfiSimpleNetworkInitialized) {
+ switch (State) {
+ case EfiSimpleNetworkStopped:
+ DEBUG((DEBUG_WARN, "Pp2Dxe%d: not started\n", Pp2Context->Instance));
+ ReturnUnlock (SavedTpl, EFI_NOT_STARTED);
+ case EfiSimpleNetworkStarted:
+ /* Fall through */
+ default:
+ DEBUG((DEBUG_ERROR, "Pp2Dxe%d: wrong state\n", Pp2Context->Instance));
+ ReturnUnlock (SavedTpl, EFI_DEVICE_ERROR);
+ }
+ }
+
+ /* Invalidate old unicast address in parser */
+ Ret = Mvpp2PrsMacDaAccept(Mvpp2Shared, Port->Id, Snp->Mode->CurrentAddress.Addr, FALSE);
+ if (Ret != 0) {
+ DEBUG((DEBUG_ERROR, "Pp2SnpStationAddress - Fail\n"));
+ return EFI_DEVICE_ERROR;
+ }
+
+ if (Reset) {
+ CopyMem (Snp->Mode->CurrentAddress.Addr, Snp->Mode->PermanentAddress.Addr, NET_ETHER_ADDR_LEN);
+ CopyMem (NewMac->Addr, Snp->Mode->PermanentAddress.Addr, NET_ETHER_ADDR_LEN);
+ CopyMem (Pp2DevicePath->Pp2Mac.MacAddress.Addr, Snp->Mode->PermanentAddress.Addr, NET_ETHER_ADDR_LEN);
+ } else {
+ if (NewMac == NULL) {
+ ReturnUnlock (SavedTpl, EFI_INVALID_PARAMETER);
+ }
+ CopyMem (Snp->Mode->CurrentAddress.Addr, NewMac->Addr, NET_ETHER_ADDR_LEN);
+ CopyMem (Pp2DevicePath->Pp2Mac.MacAddress.Addr, NewMac->Addr, NET_ETHER_ADDR_LEN);
+ }
+
+ /* Update parser with new unicast address */
+ Ret = Mvpp2PrsMacDaAccept(Mvpp2Shared, Port->Id, Snp->Mode->CurrentAddress.Addr, TRUE);
+ if (Ret != 0) {
+ DEBUG((DEBUG_ERROR, "Pp2SnpStationAddress - Fail\n"));
+ return EFI_DEVICE_ERROR;
+ }
+
+ /* Restore TPL and return */
+ gBS->RestoreTPL (SavedTpl);
+
+ return EFI_SUCCESS;
+}
+
+EFI_STATUS
+EFIAPI
+Pp2SnpNetStat (
+ IN EFI_SIMPLE_NETWORK_PROTOCOL *This,
+ IN BOOLEAN Reset,
+ IN OUT UINTN *StatisticsSize OPTIONAL,
+ OUT EFI_NETWORK_STATISTICS *StatisticsTable OPTIONAL
+ )
+{
+ return EFI_UNSUPPORTED;
+}
+
+EFI_STATUS
+EFIAPI
+Pp2SnpIpToMac (
+ IN EFI_SIMPLE_NETWORK_PROTOCOL *This,
+ IN BOOLEAN IPv6,
+ IN EFI_IP_ADDRESS *IP,
+ OUT EFI_MAC_ADDRESS *MAC
+ )
+{
+ return EFI_UNSUPPORTED;
+}
+
+EFI_STATUS
+EFIAPI
+Pp2SnpNvData (
+ IN EFI_SIMPLE_NETWORK_PROTOCOL *This,
+ IN BOOLEAN ReadWrite,
+ IN UINTN Offset,
+ IN UINTN BufferSize,
+ IN OUT VOID *Buffer
+ )
+{
+ return EFI_UNSUPPORTED;
+}
+
+EFI_STATUS
+EFIAPI
+Pp2SnpGetStatus (
+ IN EFI_SIMPLE_NETWORK_PROTOCOL *Snp,
+ OUT UINT32 *InterruptStatus OPTIONAL,
+ OUT VOID **TxBuf OPTIONAL
+ )
+{
+ PP2DXE_CONTEXT *Pp2Context = INSTANCE_FROM_SNP(Snp);
+ PP2DXE_PORT *Port = &Pp2Context->Port;
+ BOOLEAN LinkUp;
+ EFI_TPL SavedTpl;
+
+ SavedTpl = gBS->RaiseTPL (TPL_CALLBACK);
+
+ if (!Pp2Context->Initialized)
+ ReturnUnlock(SavedTpl, EFI_NOT_READY);
+
+ LinkUp = Port->AlwaysUp ? TRUE : MvGop110PortIsLinkUp(Port);
+
+ if (LinkUp != Snp->Mode->MediaPresent) {
+ DEBUG((DEBUG_INFO, "Pp2Dxe%d: Link ", Pp2Context->Instance));
+ DEBUG((DEBUG_INFO, LinkUp ? "up\n" : "down\n"));
+ }
+ Snp->Mode->MediaPresent = LinkUp;
+
+ if (TxBuf != NULL) {
+ *TxBuf = QueueRemove (Pp2Context);
+ }
+
+ ReturnUnlock(SavedTpl, EFI_SUCCESS);
+}
+
+EFI_STATUS
+EFIAPI
+Pp2SnpTransmit (
+ IN EFI_SIMPLE_NETWORK_PROTOCOL *This,
+ IN UINTN HeaderSize,
+ IN UINTN BufferSize,
+ IN VOID *Buffer,
+ IN EFI_MAC_ADDRESS *SrcAddr OPTIONAL,
+ IN EFI_MAC_ADDRESS *DestAddr OPTIONAL,
+ IN UINT16 *EtherTypePtr OPTIONAL
+ )
+{
+ PP2DXE_CONTEXT *Pp2Context = INSTANCE_FROM_SNP(This);
+ PP2DXE_PORT *Port = &Pp2Context->Port;
+ MVPP2_SHARED *Mvpp2Shared = Pp2Context->Port.Priv;
+ MVPP2_TX_QUEUE *AggrTxq = Mvpp2Shared->AggrTxqs;
+ MVPP2_TX_DESC *TxDesc;
+ EFI_STATUS Status;
+ INTN PollingCount;
+ INTN TxSent;
+ UINT8 *DataPtr = Buffer;
+ UINT16 EtherType;
+ UINT32 State = This->Mode->State;
+ EFI_TPL SavedTpl;
+
+ if (This == NULL || Buffer == NULL) {
+ DEBUG((DEBUG_ERROR, "Pp2Dxe: NULL Snp or Buffer\n"));
+ return EFI_INVALID_PARAMETER;
+ }
+
+ if (HeaderSize != 0) {
+ ASSERT (HeaderSize == This->Mode->MediaHeaderSize);
+ ASSERT (EtherTypePtr != NULL);
+ ASSERT (DestAddr != NULL);
+ }
+
+ SavedTpl = gBS->RaiseTPL (TPL_CALLBACK);
+
+ /* Check that driver was started and initialised */
+ if (State != EfiSimpleNetworkInitialized) {
+ switch (State) {
+ case EfiSimpleNetworkStopped:
+ DEBUG((DEBUG_WARN, "Pp2Dxe%d: not started\n", Pp2Context->Instance));
+ ReturnUnlock (SavedTpl, EFI_NOT_STARTED);
+ case EfiSimpleNetworkStarted:
+ /* Fall through */
+ default:
+ DEBUG((DEBUG_ERROR, "Pp2Dxe%d: wrong state\n", Pp2Context->Instance));
+ ReturnUnlock (SavedTpl, EFI_DEVICE_ERROR);
+ }
+ }
+
+ if (!This->Mode->MediaPresent) {
+ DEBUG((DEBUG_ERROR, "Pp2Dxe: link not ready\n"));
+ ReturnUnlock(SavedTpl, EFI_NOT_READY);
+ }
+
+ /* Fetch next descriptor */
+ TxDesc = Mvpp2TxqNextDescGet(AggrTxq);
+
+ if (!TxDesc) {
+ DEBUG((DEBUG_ERROR, "No tx descriptor to use\n"));
+ ReturnUnlock(SavedTpl, EFI_OUT_OF_RESOURCES);
+ }
+
+ if (HeaderSize != 0) {
+ CopyMem(DataPtr, DestAddr, NET_ETHER_ADDR_LEN);
+
+ if (SrcAddr != NULL)
+ CopyMem(DataPtr + NET_ETHER_ADDR_LEN, SrcAddr, NET_ETHER_ADDR_LEN);
+ else
+ CopyMem(DataPtr + NET_ETHER_ADDR_LEN, &This->Mode->CurrentAddress, NET_ETHER_ADDR_LEN);
+
+ EtherType = HTONS (*EtherTypePtr);
+ CopyMem(DataPtr + NET_ETHER_ADDR_LEN * 2, &EtherType, 2);
+ }
+
+ /* Set descriptor fields */
+ TxDesc->command = MVPP2_TXD_IP_CSUM_DISABLE | MVPP2_TXD_L4_CSUM_NOT |
+ MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
+ TxDesc->DataSize = BufferSize;
+ TxDesc->PacketOffset = (PhysAddrT)DataPtr & MVPP2_TX_DESC_ALIGN;
+ Mvpp2x2TxdescPhysAddrSet((PhysAddrT)DataPtr & ~MVPP2_TX_DESC_ALIGN, TxDesc);
+ TxDesc->PhysTxq = Mvpp2TxqPhys(Port->Id, 0);
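+
+ /*
+ * The buffer address is split for the hardware: the low bits covered by the
+ * MVPP2_TX_DESC_ALIGN mask go into PacketOffset, while the aligned remainder
+ * is programmed as the descriptor DMA address, so together they still
+ * reference DataPtr.
+ */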
+
+ InvalidateDataCacheRange (DataPtr, BufferSize);
+
+ /* Issue send */
+ Mvpp2AggrTxqPendDescAdd(Port, 1);
+
+ /*
+ * Egress processing:
+ * Wait until packet is passed from per-cpu aggregated queue
+ * to physical per-port TXQ.
+ */
+ PollingCount = 0;
+ TxSent = Mvpp2AggrTxqPendDescNumGet(Mvpp2Shared, 0);
+ do {
+ if (PollingCount++ > MVPP2_TX_SEND_MAX_POLLING_COUNT) {
+ DEBUG((DEBUG_ERROR, "Pp2Dxe: transmit polling failed\n"));
+ ReturnUnlock(SavedTpl, EFI_TIMEOUT);
+ }
+ TxSent = Mvpp2AggrTxqPendDescNumGet(Mvpp2Shared, 0);
+ } while (TxSent);
+
+ /* Wait for packet to be transmitted by hardware. */
+ PollingCount = 0;
+ TxSent = Mvpp2TxqSentDescProc(Port, &Port->Txqs[0]);
+ while (!TxSent) {
+ if (PollingCount++ > MVPP2_TX_SEND_MAX_POLLING_COUNT) {
+ DEBUG((DEBUG_ERROR, "Pp2Dxe: transmit polling failed\n"));
+ ReturnUnlock(SavedTpl, EFI_TIMEOUT);
+ }
+ TxSent = Mvpp2TxqSentDescProc(Port, &Port->Txqs[0]);
+ }
+
+ /*
+ * At this point TxSent has increased - HW sent the packet
+ * Add buffer to completion queue and return.
+ */
+ Status = QueueInsert (Pp2Context, Buffer);
+ ReturnUnlock (SavedTpl, Status);
+}
+
+EFI_STATUS
+EFIAPI
+Pp2SnpReceive (
+ IN EFI_SIMPLE_NETWORK_PROTOCOL *This,
+ OUT UINTN *HeaderSize OPTIONAL,
+ IN OUT UINTN *BufferSize,
+ OUT VOID *Buffer,
+ OUT EFI_MAC_ADDRESS *SrcAddr OPTIONAL,
+ OUT EFI_MAC_ADDRESS *DstAddr OPTIONAL,
+ OUT UINT16 *EtherType OPTIONAL
+ )
+{
+ INTN ReceivedPackets;
+ PP2DXE_CONTEXT *Pp2Context = INSTANCE_FROM_SNP(This);
+ PP2DXE_PORT *Port = &Pp2Context->Port;
+ MVPP2_SHARED *Mvpp2Shared = Pp2Context->Port.Priv;
+ UINTN PhysAddr, VirtAddr;
+ EFI_STATUS Status = EFI_SUCCESS;
+ EFI_TPL SavedTpl;
+ UINT32 StatusReg;
+ INTN PoolId;
+ UINTN PktLength;
+ UINT8 *DataPtr;
+ MVPP2_RX_DESC *RxDesc;
+ MVPP2_RX_QUEUE *Rxq = &Port->Rxqs[0];
+
+ ASSERT (Port != NULL);
+ ASSERT (Rxq != NULL);
+
+ SavedTpl = gBS->RaiseTPL (TPL_CALLBACK);
+ ReceivedPackets = Mvpp2RxqReceived(Port, Rxq->Id);
+
+ if (ReceivedPackets == 0) {
+ ReturnUnlock(SavedTpl, EFI_NOT_READY);
+ }
+
+ /* Process one packet per call */
+ RxDesc = Mvpp2RxqNextDescGet(Rxq);
+ StatusReg = RxDesc->status;
+
+ /* Extract buffer addresses from the descriptor */
+ PhysAddr = RxDesc->BufPhysAddrKeyHash & MVPP22_ADDR_MASK;
+ VirtAddr = RxDesc->BufCookieBmQsetClsInfo & MVPP22_ADDR_MASK;
+
+ /* Drop packets with error or with buffer header (MC, SG) */
+ if ((StatusReg & MVPP2_RXD_BUF_HDR) || (StatusReg & MVPP2_RXD_ERR_SUMMARY)) {
+ DEBUG((DEBUG_WARN, "Pp2Dxe: dropping packet\n"));
+ Status = EFI_DEVICE_ERROR;
+ goto drop;
+ }
+
+ PktLength = (UINTN) RxDesc->DataSize - 2;
+ if (PktLength > *BufferSize) {
+ *BufferSize = PktLength;
+ DEBUG((DEBUG_ERROR, "Pp2Dxe: buffer too small\n"));
+ ReturnUnlock(SavedTpl, EFI_BUFFER_TOO_SMALL);
+ }
+
+ CopyMem (Buffer, (VOID*) (PhysAddr + 2), PktLength);
+ *BufferSize = PktLength;
+
+ if (HeaderSize != NULL) {
+ *HeaderSize = Pp2Context->Snp.Mode->MediaHeaderSize;
+ }
+
+ DataPtr = Buffer;
+
+ /* Extract the destination address */
+ if (DstAddr != NULL) {
+ ZeroMem (DstAddr, sizeof(EFI_MAC_ADDRESS));
+ CopyMem (DstAddr, &DataPtr[0], NET_ETHER_ADDR_LEN);
+ }
+
+ /* Get the source address */
+ if (SrcAddr != NULL) {
+ ZeroMem (SrcAddr, sizeof(EFI_MAC_ADDRESS));
+ CopyMem (SrcAddr, &DataPtr[6], NET_ETHER_ADDR_LEN);
+ }
+
+ /* Obtain Ether Type */
+ if (EtherType != NULL) {
+ *EtherType = NTOHS (*(UINT16 *)(&DataPtr[12]));
+ }
+
+drop:
+ /* Refill: pass packet back to BM */
+ PoolId = (StatusReg & MVPP2_RXD_BM_POOL_ID_MASK) >> MVPP2_RXD_BM_POOL_ID_OFFS;
+ Mvpp2BmPoolPut(Mvpp2Shared, PoolId, PhysAddr, VirtAddr);
+
+ /* Update counters with 1 packet received and 1 packet refilled */
+ Mvpp2RxqStatusUpdate(Port, Rxq->Id, 1, 1);
+
+ ReturnUnlock(SavedTpl, Status);
+}
+
+EFI_STATUS
+Pp2DxeSnpInstall (
+ IN PP2DXE_CONTEXT *Pp2Context
+ )
+{
+ EFI_HANDLE Handle = NULL;
+ EFI_STATUS Status;
+ PP2_DEVICE_PATH *Pp2DevicePath;
+ EFI_SIMPLE_NETWORK_MODE *SnpMode;
+
+ Pp2DevicePath = AllocateCopyPool (sizeof (PP2_DEVICE_PATH), &Pp2DevicePathTemplate);
+ if (Pp2DevicePath == NULL) {
+ return EFI_OUT_OF_RESOURCES;
+ }
+
+ SnpMode = AllocateZeroPool (sizeof (EFI_SIMPLE_NETWORK_MODE));
+ if (SnpMode == NULL) {
+ return EFI_OUT_OF_RESOURCES;
+ }
+
+ /* Copy SNP data from templates */
+ CopyMem (&Pp2Context->Snp, &Pp2SnpTemplate, sizeof (EFI_SIMPLE_NETWORK_PROTOCOL));
+ CopyMem (SnpMode, &Pp2SnpModeTemplate, sizeof (EFI_SIMPLE_NETWORK_MODE));
+
+ /* Handle device path of the controller */
+ Pp2DevicePath->Pp2Mac.MacAddress.Addr[5] = Pp2Context->Instance + 1;
+ Pp2Context->Signature = PP2DXE_SIGNATURE;
+ Pp2Context->DevicePath = Pp2DevicePath;
+ Pp2DevicePath->Pp2Mac.IfType = SnpMode->IfType;
+
+ /* Update SNP Mode */
+ CopyMem (SnpMode->CurrentAddress.Addr, Pp2DevicePath->Pp2Mac.MacAddress.Addr, NET_ETHER_ADDR_LEN);
+ CopyMem (SnpMode->PermanentAddress.Addr, Pp2DevicePath->Pp2Mac.MacAddress.Addr, NET_ETHER_ADDR_LEN);
+ ZeroMem (&SnpMode->MCastFilter, MAX_MCAST_FILTER_CNT * sizeof(EFI_MAC_ADDRESS));
+ SetMem (&SnpMode->BroadcastAddress, sizeof (EFI_MAC_ADDRESS), 0xFF);
+
+ Pp2Context->Snp.Mode = SnpMode;
+
+ /* Install protocol */
+ Status = gBS->InstallMultipleProtocolInterfaces (
+ &Handle,
+ &gEfiSimpleNetworkProtocolGuid, &Pp2Context->Snp,
+ &gEfiDevicePathProtocolGuid, Pp2DevicePath,
+ NULL
+ );
+
+ if (EFI_ERROR(Status)) {
+ DEBUG((DEBUG_ERROR, "Failed to install protocols.\n"));
+ }
+
+ return Status;
+}
+
+STATIC
+VOID
+Pp2DxeParsePortPcd (
+ IN PP2DXE_CONTEXT *Pp2Context,
+ IN INTN Index
+ )
+{
+ UINT8 *PortIds, *GopIndexes, *PhyConnectionTypes, *AlwaysUp, *Speed, *PhyIndexes;
+
+ PortIds = PcdGetPtr (PcdPp2PortIds);
+ GopIndexes = PcdGetPtr (PcdPp2GopIndexes);
+ PhyConnectionTypes = PcdGetPtr (PcdPp2PhyConnectionTypes);
+ PhyIndexes = PcdGetPtr (PcdPp2PhyIndexes);
+ AlwaysUp = PcdGetPtr (PcdPp2InterfaceAlwaysUp);
+ Speed = PcdGetPtr (PcdPp2InterfaceSpeed);
+
+ ASSERT (PcdGetSize (PcdPp2GopIndexes) == PcdGetSize (PcdPp2PortIds));
+ ASSERT (PcdGetSize (PcdPp2PhyConnectionTypes) == PcdGetSize (PcdPp2PortIds));
+ ASSERT (PcdGetSize (PcdPp2InterfaceAlwaysUp) == PcdGetSize (PcdPp2PortIds));
+ ASSERT (PcdGetSize (PcdPp2InterfaceSpeed) == PcdGetSize (PcdPp2PortIds));
+ ASSERT (PcdGetSize (PcdPp2PhyIndexes) == PcdGetSize (PcdPp2PortIds));
+
+ Pp2Context->Port.Id = PortIds[Index];
+ Pp2Context->Port.GopIndex = GopIndexes[Index];
+ Pp2Context->Port.PhyInterface = PhyConnectionTypes[Index];
+ Pp2Context->Port.PhyIndex = PhyIndexes[Index];
+ Pp2Context->Port.AlwaysUp = AlwaysUp[Index];
+ Pp2Context->Port.Speed = Speed[Index];
+}
+
+STATIC
+EFI_STATUS
+Pp2DxeInitialiseController (
+ IN UINT8 ControllerIndex,
+ IN MVPP2_SHARED *Mvpp2Shared,
+ IN UINTN BaseAddress,
+ IN UINTN ClockFrequency
+ )
+{
+ PP2DXE_CONTEXT *Pp2Context = NULL;
+ EFI_STATUS Status;
+ INTN Index;
+ INTN PortIndex = 0;
+ VOID *BufferSpace;
+ UINT32 NetCompConfig = 0;
+ STATIC UINT8 DeviceInstance;
+ UINT8 *Pp2PortMappingTable;
+
+ Mvpp2Shared->Base = BaseAddress;
+ Mvpp2Shared->Rfu1Base = Mvpp2Shared->Base + MVPP22_RFU1_OFFSET;
+ Mvpp2Shared->XpcsBase = Mvpp2Shared->Base + MVPP22_XPCS_OFFSET;
+ Mvpp2Shared->MpcsBase = Mvpp2Shared->Base + MVPP22_MPCS_OFFSET;
+ Mvpp2Shared->SmiBase = Mvpp2Shared->Base + MVPP22_SMI_OFFSET;
+ Mvpp2Shared->Tclk = ClockFrequency;
+
+ /* Prepare buffers */
+ Status = DmaAllocateAlignedBuffer (EfiBootServicesData,
+ EFI_SIZE_TO_PAGES (BD_SPACE),
+ MVPP2_BUFFER_ALIGN_SIZE,
+ &BufferSpace);
+ if (EFI_ERROR (Status)) {
+ DEBUG ((DEBUG_ERROR, "Failed to allocate buffer space\n"));
+ return Status;
+ }
+
+ ZeroMem (BufferSpace, BD_SPACE);
+
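+ /*
+ * The single BD_SPACE allocation is carved up sequentially below: per-port
+ * TX descriptor rings first, then the aggregated TX descriptor ring, then
+ * per-port RX descriptor rings and finally the per-port RX data buffers
+ * handed over to the buffer manager.
+ */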
+ for (Index = 0; Index < MVPP2_MAX_PORT; Index++) {
+ Mvpp2Shared->BufferLocation.TxDescs[Index] = (MVPP2_TX_DESC *)
+ (BufferSpace + Index * MVPP2_MAX_TXD * sizeof(MVPP2_TX_DESC));
+ }
+
+ Mvpp2Shared->BufferLocation.AggrTxDescs = (MVPP2_TX_DESC *)
+ ((UINTN)BufferSpace + MVPP2_MAX_TXD * MVPP2_MAX_PORT * sizeof(MVPP2_TX_DESC));
+
+ for (Index = 0; Index < MVPP2_MAX_PORT; Index++) {
+ Mvpp2Shared->BufferLocation.RxDescs[Index] = (MVPP2_RX_DESC *)
+ ((UINTN)BufferSpace + (MVPP2_MAX_TXD * MVPP2_MAX_PORT + MVPP2_AGGR_TXQ_SIZE) *
+ sizeof(MVPP2_TX_DESC) + Index * MVPP2_MAX_RXD * sizeof(MVPP2_RX_DESC));
+ }
+
+ for (Index = 0; Index < MVPP2_MAX_PORT; Index++) {
+ Mvpp2Shared->BufferLocation.RxBuffers[Index] = (DmaAddrT)
+ (BufferSpace + (MVPP2_MAX_TXD * MVPP2_MAX_PORT + MVPP2_AGGR_TXQ_SIZE) *
+ sizeof(MVPP2_TX_DESC) + MVPP2_MAX_RXD * MVPP2_MAX_PORT * sizeof(MVPP2_RX_DESC) +
+ Index * MVPP2_BM_SIZE * RX_BUFFER_SIZE);
+ }
+
+ /* Initialize HW */
+ Mvpp2AxiConfig(Mvpp2Shared);
+ Pp2DxeBmPoolInit (Mvpp2Shared);
+ Mvpp2RxFifoInit(Mvpp2Shared);
+
+ Mvpp2Shared->PrsShadow = AllocateZeroPool (sizeof(MVPP2_PRS_SHADOW) * MVPP2_PRS_TCAM_SRAM_SIZE);
+ if (Mvpp2Shared->PrsShadow == NULL) {
+ DEBUG((DEBUG_ERROR, "Failed to allocate PrsShadow\n"));
+ return EFI_OUT_OF_RESOURCES;
+ }
+
+ Status = Mvpp2PrsDefaultInit(Mvpp2Shared);
+ if (EFI_ERROR(Status)) {
+ DEBUG((DEBUG_ERROR, "Failed to intialize prs\n"));
+ return EFI_DEVICE_ERROR;
+ }
+
+ Mvpp2ClsInit(Mvpp2Shared);
+
+ Status = Pp2DxeBmStart (Mvpp2Shared);
+ if (EFI_ERROR(Status)) {
+ DEBUG((DEBUG_ERROR, "Pp2Dxe: BM start error\n"));
+ return Status;
+ }
+
+ /* Initialize aggregated transmit queues */
+ Mvpp2Shared->AggrTxqs = AllocateZeroPool (sizeof(MVPP2_TX_QUEUE));
+ if (Mvpp2Shared->AggrTxqs == NULL) {
+ DEBUG((DEBUG_ERROR, "Failed to allocate aggregated Txqs\n"));
+ return EFI_OUT_OF_RESOURCES;
+ }
+
+ Mvpp2Shared->AggrTxqs->Descs = Mvpp2Shared->BufferLocation.AggrTxDescs;
+ Mvpp2Shared->AggrTxqs->Id = 0;
+ Mvpp2Shared->AggrTxqs->LogId = 0;
+ Mvpp2Shared->AggrTxqs->Size = MVPP2_AGGR_TXQ_SIZE;
+
+ Pp2PortMappingTable = (UINT8 *)PcdGetPtr (PcdPp2Port2Controller);
+
+ for (Index = 0; Index < PcdGetSize (PcdPp2Port2Controller); Index++) {
+ if (Pp2PortMappingTable[Index] != ControllerIndex) {
+ continue;
+ }
+
+ if (PortIndex++ > MVPP2_MAX_PORT) {
+ DEBUG ((DEBUG_ERROR, "Pp2Dxe: Wrong too many ports for single controller\n"));
+ return EFI_INVALID_PARAMETER;
+ }
+
+ Pp2Context = AllocateZeroPool (sizeof (PP2DXE_CONTEXT));
+ if (Pp2Context == NULL) {
+ /*
+ * If allocation fails, all resources allocated before will get freed
+ * at ExitBootServices, as only EfiBootServicesData is used.
+ */
+ DEBUG((DEBUG_ERROR, "Allocation fail.\n"));
+ return EFI_OUT_OF_RESOURCES;
+ }
+
+ /* Instances are enumerated from 0 */
+ Pp2Context->Instance = DeviceInstance;
+ DeviceInstance++;
+
+ /* Install SNP protocol */
+ Status = Pp2DxeSnpInstall(Pp2Context);
+ if (EFI_ERROR(Status)) {
+ return Status;
+ }
+
+ Pp2DxeParsePortPcd(Pp2Context, Index);
+ Pp2Context->Port.TxpNum = 1;
+ Pp2Context->Port.Priv = Mvpp2Shared;
+ Pp2Context->Port.FirstRxq = 4 * (PortIndex - 1);
+ Pp2Context->Port.GmacBase = Mvpp2Shared->Base + MVPP22_GMAC_OFFSET +
+ MVPP22_GMAC_REG_SIZE * Pp2Context->Port.GopIndex;
+ Pp2Context->Port.XlgBase = Mvpp2Shared->Base + MVPP22_XLG_OFFSET +
+ MVPP22_XLG_REG_SIZE * Pp2Context->Port.GopIndex;
+
+    /* Gather the accumulated configuration data of all ports' MACs */
+ NetCompConfig |= MvpPp2xGop110NetcCfgCreate(&Pp2Context->Port);
+
+ MvGop110PortInit(&Pp2Context->Port);
+
+ if (Pp2Context->Port.AlwaysUp == TRUE) {
+ MvGop110GmacForceLinkUp (&Pp2Context->Port);
+ MvGop110FlCfg (&Pp2Context->Port);
+ }
+
+ Status = gBS->CreateEvent (
+ EVT_SIGNAL_EXIT_BOOT_SERVICES,
+ TPL_NOTIFY,
+ Pp2DxeHalt,
+ Pp2Context,
+ &Pp2Context->EfiExitBootServicesEvent
+ );
+
+ if (EFI_ERROR(Status)) {
+ return Status;
+ }
+ }
+
+ MvGop110NetcInit(&Pp2Context->Port, NetCompConfig, MV_NETC_FIRST_PHASE);
+ MvGop110NetcInit(&Pp2Context->Port, NetCompConfig, MV_NETC_SECOND_PHASE);
+
+ return EFI_SUCCESS;
+}
+
+EFI_STATUS
+EFIAPI
+Pp2DxeInitialise (
+ IN EFI_HANDLE ImageHandle,
+ IN EFI_SYSTEM_TABLE *SystemTable
+ )
+{
+ MVHW_PP2_DESC *Desc = &mA7k8kPp2DescTemplate;
+ UINT8 *Pp2DeviceTable, Index;
+ MVPP2_SHARED *Mvpp2Shared;
+ EFI_STATUS Status;
+
+ /* Obtain table with enabled Pp2 devices */
+ Pp2DeviceTable = (UINT8 *)PcdGetPtr (PcdPp2Controllers);
+ if (Pp2DeviceTable == NULL) {
+ DEBUG ((DEBUG_ERROR, "Missing PcdPp2Controllers\n"));
+ return EFI_INVALID_PARAMETER;
+ }
+
+ if (PcdGetSize (PcdPp2Controllers) > MVHW_MAX_PP2_DEVS) {
+ DEBUG ((DEBUG_ERROR, "Wrong PcdPp2Controllers format\n"));
+ return EFI_INVALID_PARAMETER;
+ }
+
+  /* Check the number of declared ports */
+ if (PcdGetSize (PcdPp2Port2Controller) > Desc->Pp2DevCount * MVPP2_MAX_PORT) {
+ DEBUG ((DEBUG_ERROR, "Pp2Dxe: Wrong too many ports declared\n"));
+ return EFI_INVALID_PARAMETER;
+ }
+
+ /* Initialize enabled chips */
+ for (Index = 0; Index < PcdGetSize (PcdPp2Controllers); Index++) {
+ if (!MVHW_DEV_ENABLED (Pp2, Index)) {
+ DEBUG ((DEBUG_ERROR, "Skip Pp2 controller %d\n", Index));
+ continue;
+ }
+
+ /* Initialize private data */
+ Mvpp2Shared = AllocateZeroPool (sizeof (MVPP2_SHARED));
+ if (Mvpp2Shared == NULL) {
+ DEBUG ((DEBUG_ERROR, "Pp2Dxe #%d: Mvpp2Shared allocation fail\n", Index));
+ return EFI_OUT_OF_RESOURCES;
+ }
+
+ Status = Pp2DxeInitialiseController (
+ Index,
+ Mvpp2Shared,
+ Desc->Pp2BaseAddresses[Index],
+ Desc->Pp2ClockFrequency[Index]
+ );
+ if (EFI_ERROR(Status)) {
+ FreePool (Mvpp2Shared);
+ DEBUG ((DEBUG_ERROR, "Pp2Dxe #%d: Controller initialisation fail\n", Index));
+ return Status;
+ }
+ }
+
+ return EFI_SUCCESS;
+}
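+
+/*
+ * Illustrative platform configuration (hypothetical values): with one packet
+ * processor and two ports attached to it, a board description might set
+ *   PcdPp2Controllers     = { 0x1 }   - one byte per controller
+ *   PcdPp2Port2Controller = { 0, 0 }  - both ports owned by controller 0
+ * The meaning of the per-controller byte is defined by MVHW_DEV_ENABLED ()
+ * in the hardware description library; it is only assumed here that a
+ * non-zero value marks an enabled controller.
+ */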
diff --git a/Silicon/Marvell/Drivers/Net/Pp2Dxe/Pp2Dxe.h b/Silicon/Marvell/Drivers/Net/Pp2Dxe/Pp2Dxe.h
new file mode 100644
index 0000000000..60f40be1f5
--- /dev/null
+++ b/Silicon/Marvell/Drivers/Net/Pp2Dxe/Pp2Dxe.h
@@ -0,0 +1,622 @@
+/********************************************************************************
+Copyright (C) 2016 Marvell International Ltd.
+
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Marvell nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __PP2_DXE_H__
+#define __PP2_DXE_H__
+
+#include <Protocol/Cpu.h>
+#include <Protocol/DevicePath.h>
+#include <Protocol/DriverBinding.h>
+#include <Protocol/Ip4.h>
+#include <Protocol/Ip6.h>
+#include <Protocol/MvPhy.h>
+#include <Protocol/SimpleNetwork.h>
+
+#include <Library/BaseLib.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/DebugLib.h>
+#include <Library/DmaLib.h>
+#include <Library/IoLib.h>
+#include <Library/MemoryAllocationLib.h>
+#include <Library/NetLib.h>
+#include <Library/PcdLib.h>
+#include <Library/UefiBootServicesTableLib.h>
+#include <Library/UefiLib.h>
+
+#include "Mvpp2LibHw.h"
+
+#define MVPP2_MAX_PORT 3
+
+#define PP2DXE_SIGNATURE SIGNATURE_32('P', 'P', '2', 'D')
+#define INSTANCE_FROM_SNP(a) CR((a), PP2DXE_CONTEXT, Snp, PP2DXE_SIGNATURE)
+
+/* OS API */
+#define Mvpp2Alloc(v) AllocateZeroPool(v)
+#define Mvpp2Free(p) FreePool(p)
+#define Mvpp2Memset(a, v, s) SetMem((a), (s), (v))
+#define Mvpp2Mdelay(t) gBS->Stall((t) * 1000)
+#define Mvpp2Fls(v) 1
+#define Mvpp2IsBroadcastEtherAddr(da) 1
+#define Mvpp2IsMulticastEtherAddr(da) 1
+#define Mvpp2Prefetch(v) do {} while(0);
+#define Mvpp2Printf(...) do {} while(0);
+#define Mvpp2SwapVariables(a,b) do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
+#define Mvpp2SwapBytes16(x) SwapBytes16((x))
+#define Mvpp2Iphdr EFI_IP4_HEADER
+#define Mvpp2Ipv6hdr EFI_IP6_HEADER
+#define MVPP2_ALIGN(x, m) ALIGN_VALUE((x), (m))
+#define MVPP2_ENOMEM -1
+#define MVPP2_EINVAL -2
+#define MVPP2_ERANGE -3
+#define MVPP2_USEC_PER_SEC 1000000L
+
+#define DmaAddrT UINTN
+#define PhysAddrT UINTN
+
+#define Upper32Bits(n) ((UINT32)(((n) >> 16) >> 16))
+#define Lower32Bits(n) ((UINT32)(n))
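+
+/*
+ * Note that Mvpp2Fls, Mvpp2IsBroadcastEtherAddr and Mvpp2IsMulticastEtherAddr
+ * are reduced to constant stubs in this port of the library. For the 64-bit
+ * helpers, e.g. n == 0x0000000123456789:
+ *   Upper32Bits (n) == 0x00000001, Lower32Bits (n) == 0x23456789
+ */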
+
+#define ARCH_DMA_MINALIGN 64
+
+/* Port speeds */
+#define MV_PORT_SPEED_10 SPEED_10
+#define MV_PORT_SPEED_100 SPEED_100
+#define MV_PORT_SPEED_1000 SPEED_1000
+#define MV_PORT_SPEED_2500 SPEED_2500
+#define MV_PORT_SPEED_10000 SPEED_10000
+
+/* L2 and L3 protocol macros */
+#define MV_IPPR_TCP 0
+#define MV_IPPR_UDP 1
+#define MV_IPPR_IPIP 2
+#define MV_IPPR_ICMPV6 3
+#define MV_IPPR_IGMP 4
+#define MV_ETH_P_IP 5
+#define MV_ETH_P_IPV6 6
+#define MV_ETH_P_PPP_SES 7
+#define MV_ETH_P_ARP 8
+#define MV_ETH_P_8021Q 9
+#define MV_ETH_P_8021AD 10
+#define MV_ETH_P_EDSA 11
+#define MV_PPP_IP 12
+#define MV_PPP_IPV6 13
+#define MV_ETH_ALEN NET_ETHER_ADDR_LEN
+
+/* PHY modes */
+#define MV_MODE_SGMII PHY_CONNECTION_SGMII
+#define MV_MODE_RGMII PHY_CONNECTION_RGMII
+#define MV_MODE_XAUI PHY_CONNECTION_XAUI
+#define MV_MODE_RXAUI PHY_CONNECTION_RXAUI
+#define MV_MODE_SFI PHY_CONNECTION_SFI
+#define MV_MODE_QSGMII 100
+#define PP2DXE_MAX_PHY 2
+
+/* Gop */
+/* Set the field selected by the mask in data to the given value */
+#define U32_SET_FIELD(data, mask, val) ((data) = (((data) & ~(mask)) | (val)))
+#define MV_RGMII_TX_FIFO_MIN_TH 0x41
+#define MV_SGMII_TX_FIFO_MIN_TH 0x5
+#define MV_SGMII2_5_TX_FIFO_MIN_TH 0xB
+
+/* BM constants */
+#define MVPP2_BM_POOLS_NUM 8
+#define MVPP2_BM_LONG_BUF_NUM 1024
+#define MVPP2_BM_SHORT_BUF_NUM 2048
+#define MVPP2_BM_POOL_SIZE_MAX (SIZE_16KB - MVPP2_BM_POOL_PTR_ALIGN/4)
+#define MVPP2_BM_POOL_PTR_ALIGN 128
+#define MVPP2_BM_SWF_LONG_POOL(Port) ((Port > 2) ? 2 : Port)
+#define MVPP2_BM_SWF_SHORT_POOL 3
+#define MVPP2_BM_POOL 0
+#define MVPP2_BM_SIZE 64
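+
+/*
+ * Pool mapping implied by the macros above: ports 0-2 use long pools 0-2
+ * respectively (indexes above 2 clamp to pool 2), while all ports share
+ * pool 3 for short buffers.
+ */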
+
+/*
+ * BM short pool packet size
+ * These values ensure that for SWF the total number
+ * of bytes allocated for each buffer is 512
+ */
+#define MVPP2_BM_SHORT_PKT_SIZE 512
+
+/*
+ * Page table entries are set to 1MB or multiples of 1MB
+ * (never less than 1MB). The driver uses fewer descriptors,
+ * so a single 1MB region is enough for the descriptor (BD) space.
+ */
+#define BD_SPACE (1 << 20)
+
+/* Buffer has to be aligned to 1M */
+#define MVPP2_BUFFER_ALIGN_SIZE (1 << 20)
+
+/* RX constants */
+#define RX_BUFFER_SIZE (ALIGN_VALUE(MTU + WRAP, ARCH_DMA_MINALIGN))
+#define MVPP2_RXQ_OFFSET 0
+#define BUFF_HDR_OFFS 32
+#define BM_ALIGN 32
+#define ETH_HLEN 14
+
+/* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */
+#define WRAP (2 + ETH_HLEN + 4 + 32)
+#define MTU 1500
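+
+/*
+ * Worked example: WRAP == 2 + 14 + 4 + 32 == 52 bytes, so with MTU == 1500
+ * RX_BUFFER_SIZE == ALIGN_VALUE (1552, 64) == 1600 bytes per RX buffer.
+ */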
+
+/*
+ * Maximum number of retries when checking whether the HW has really
+ * sent the packet after it was handed over by software.
+ */
+#define MVPP2_TX_SEND_MAX_POLLING_COUNT 10000
+
+/* Structures */
+typedef struct {
+ /* Physical number of this Tx queue */
+ UINT8 Id;
+
+ /* Logical number of this Tx queue */
+ UINT8 LogId;
+
+ /* Number of Tx DMA descriptors in the descriptor ring */
+ INT32 Size;
+
+  /* Number of currently used Tx DMA descriptors in the descriptor ring */
+ INT32 count;
+
+ UINT32 DonePktsCoal;
+
+  /* Virtual address of the Tx DMA descriptors array */
+ MVPP2_TX_DESC *Descs;
+
+ /* DMA address of the Tx DMA descriptors array */
+ DmaAddrT DescsPhys;
+
+ /* Index of the last Tx DMA descriptor */
+ INT32 LastDesc;
+
+ /* Index of the next Tx DMA descriptor to process */
+ INT32 NextDescToProc;
+} MVPP2_TX_QUEUE;
+
+typedef struct {
+ /* RX queue number, in the range 0-31 for physical RXQs */
+ UINT8 Id;
+
+  /* Number of RX descriptors in the RX descriptor ring */
+ INT32 Size;
+
+ UINT32 PktsCoal;
+ UINT32 TimeCoal;
+
+ /* Virtual address of the RX DMA descriptors array */
+ MVPP2_RX_DESC *Descs;
+
+ /* DMA address of the RX DMA descriptors array */
+ DmaAddrT DescsPhys;
+
+ /* Index of the last RX DMA descriptor */
+ INT32 LastDesc;
+
+ /* Index of the next RX DMA descriptor to process */
+ INT32 NextDescToProc;
+
+ /* ID of Port to which physical RXQ is mapped */
+ INT32 Port;
+
+  /* Port's logical RXQ number to which the physical RXQ is mapped */
+ INT32 LogicRxq;
+} MVPP2_RX_QUEUE;
+
+enum Mvpp2BmType {
+ MVPP2_BM_FREE,
+ MVPP2_BM_SWF_LONG,
+ MVPP2_BM_SWF_SHORT
+};
+
+typedef struct {
+ /* Pool number in the range 0-7 */
+ INT32 Id;
+ enum Mvpp2BmType type;
+
+ /* Buffer Pointers Pool External (BPPE) Size */
+ INT32 Size;
+ /* Number of buffers for this pool */
+ INT32 BufNum;
+ /* Pool buffer Size */
+ INT32 BufSize;
+ /* Packet Size */
+ INT32 PktSize;
+
+ /* BPPE virtual base address */
+ UINT32 *VirtAddr;
+ /* BPPE physical base address */
+ DmaAddrT PhysAddr;
+
+ /* Ports using BM pool */
+ UINT32 PortMap;
+} MVPP2_BMS_POOL;
+
+typedef struct Pp2DxePort PP2DXE_PORT;
+
+/* Structure describing the preallocated buffer space */
+typedef struct {
+ MVPP2_TX_DESC *TxDescs[MVPP2_MAX_PORT];
+ MVPP2_TX_DESC *AggrTxDescs;
+ MVPP2_RX_DESC *RxDescs[MVPP2_MAX_PORT];
+ DmaAddrT RxBuffers[MVPP2_MAX_PORT];
+} BUFFER_LOCATION;
+
+/* Shared Packet Processor resources */
+typedef struct {
+ /* Shared registers' base addresses */
+ UINT64 Base;
+ UINT64 MpcsBase;
+ UINT64 Rfu1Base;
+ UINT64 SmiBase;
+ UINT64 XpcsBase;
+
+ /* Preallocated buffers */
+ BUFFER_LOCATION BufferLocation;
+
+ /* List of pointers to Port structures */
+ PP2DXE_PORT **PortList;
+
+ /* Aggregated TXQs */
+ MVPP2_TX_QUEUE *AggrTxqs;
+
+ /* BM pools */
+ MVPP2_BMS_POOL *BmPools[MVPP2_MAX_PORT];
+
+ /* PRS shadow table */
+ MVPP2_PRS_SHADOW *PrsShadow;
+ /* PRS auxiliary table for double vlan entries control */
+ BOOLEAN *PrsDoubleVlans;
+
+ /* Tclk value */
+ UINT32 Tclk;
+} MVPP2_SHARED;
+
+/* Individual Port structure */
+struct Pp2DxePort {
+ UINT8 Id;
+ UINT8 GopIndex;
+
+ INT32 Irq;
+
+ MVPP2_SHARED *Priv;
+
+ /* Per-Port registers' base address */
+ UINT64 GmacBase;
+ UINT64 XlgBase;
+
+ MVPP2_RX_QUEUE *Rxqs;
+ MVPP2_TX_QUEUE *Txqs;
+
+ INT32 PktSize;
+
+ UINT32 PendingCauseRx;
+
+ /* Flags */
+ UINTN Flags;
+
+ UINT16 TxRingSize;
+ UINT16 RxRingSize;
+
+ INT32 PhyInterface;
+ UINT32 PhyIndex;
+ BOOLEAN Link;
+ BOOLEAN Duplex;
+ BOOLEAN AlwaysUp;
+ PHY_SPEED Speed;
+
+ MVPP2_BMS_POOL *PoolLong;
+ MVPP2_BMS_POOL *PoolShort;
+
+ UINT8 TxpNum;
+
+ /* Index of first Port's physical RXQ */
+ UINT8 FirstRxq;
+};
+
+typedef struct {
+ MAC_ADDR_DEVICE_PATH Pp2Mac;
+ EFI_DEVICE_PATH_PROTOCOL End;
+} PP2_DEVICE_PATH;
+
+#define QUEUE_DEPTH 64
+typedef struct {
+ UINT32 Signature;
+ INTN Instance;
+ EFI_HANDLE Controller;
+ EFI_LOCK Lock;
+ EFI_SIMPLE_NETWORK_PROTOCOL Snp;
+ MARVELL_PHY_PROTOCOL *Phy;
+ PHY_DEVICE *PhyDev;
+ PP2DXE_PORT Port;
+ BOOLEAN Initialized;
+ BOOLEAN LateInitialized;
+ VOID *CompletionQueue[QUEUE_DEPTH];
+ UINTN CompletionQueueHead;
+ UINTN CompletionQueueTail;
+ EFI_EVENT EfiExitBootServicesEvent;
+ PP2_DEVICE_PATH *DevicePath;
+} PP2DXE_CONTEXT;
+
+/* Inline helpers */
+STATIC
+inline
+VOID
+Mvpp2Write (
+ IN MVPP2_SHARED *Priv,
+ IN UINT32 Offset,
+  IN UINT32 Data
+  )
+{
+  ASSERT (Priv->Base != 0);
+  MmioWrite32 (Priv->Base + Offset, Data);
+}
+
+STATIC
+inline
+UINT32
+Mvpp2Read (
+ IN MVPP2_SHARED *Priv,
+ IN UINT32 Offset
+ )
+{
+ ASSERT (Priv->Base != 0);
+ return MmioRead32 (Priv->Base + Offset);
+}
+
+STATIC
+inline
+UINT32
+Mvpp2Rfu1Read (
+ IN MVPP2_SHARED *Priv,
+  IN UINT32 Offset
+ )
+{
+ ASSERT (Priv->Rfu1Base != 0);
+ return MmioRead32 (Priv->Rfu1Base + Offset);
+}
+
+STATIC
+inline
+UINT32
+Mvpp2Rfu1Write (
+ IN MVPP2_SHARED *Priv,
+ IN UINT32 Offset,
+ IN UINT32 Data
+ )
+{
+ ASSERT (Priv->Rfu1Base != 0);
+ return MmioWrite32 (Priv->Rfu1Base + Offset, Data);
+}
+
+STATIC
+inline
+UINT32
+Mvpp2SmiRead (
+ IN MVPP2_SHARED *Priv,
+ IN UINT32 Offset
+ )
+{
+ ASSERT (Priv->SmiBase != 0);
+ return MmioRead32 (Priv->SmiBase + Offset);
+}
+
+STATIC
+inline
+UINT32
+Mvpp2SmiWrite (
+ IN MVPP2_SHARED *Priv,
+ IN UINT32 Offset,
+ IN UINT32 Data
+ )
+{
+ ASSERT (Priv->SmiBase != 0);
+ return MmioWrite32 (Priv->SmiBase + Offset, Data);
+}
+
+STATIC
+inline
+VOID
+Mvpp2GmacWrite (
+ IN PP2DXE_PORT *Port,
+ IN UINT32 Offset,
+ IN UINT32 Data
+ )
+{
+ ASSERT (Port->Priv->Base != 0);
+ MmioWrite32 (Port->Priv->Base + Offset, Data);
+}
+
+STATIC
+inline
+UINT32
+Mvpp2GmacRead (
+ IN PP2DXE_PORT *Port,
+ IN UINT32 Offset
+ )
+{
+ ASSERT (Port->Priv->Base != 0);
+ return MmioRead32 (Port->Priv->Base + Offset);
+}
+
+STATIC
+inline
+VOID
+MvGop110GmacWrite (
+ IN PP2DXE_PORT *Port,
+ IN UINT32 Offset,
+ IN UINT32 Data
+ )
+{
+ ASSERT (Port->GmacBase != 0);
+ MmioWrite32 (Port->GmacBase + Offset, Data);
+}
+
+STATIC
+inline
+UINT32
+MvGop110GmacRead (
+ IN PP2DXE_PORT *Port,
+ IN UINT32 Offset
+ )
+{
+ ASSERT (Port->GmacBase != 0);
+ return MmioRead32 (Port->GmacBase + Offset);
+}
+
+STATIC
+inline
+VOID
+Mvpp2XlgWrite (
+ IN PP2DXE_PORT *Port,
+ IN UINT32 Offset,
+ IN UINT32 Data
+ )
+{
+ ASSERT (Port->XlgBase != 0);
+ MmioWrite32 (Port->XlgBase + Offset, Data);
+}
+
+STATIC
+inline
+UINT32
+Mvpp2XlgRead (
+ IN PP2DXE_PORT *Port,
+ IN UINT32 Offset
+ )
+{
+ ASSERT (Port->XlgBase != 0);
+ return MmioRead32 (Port->XlgBase + Offset);
+}
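+
+/*
+ * Typical read-modify-write usage of the accessors above together with
+ * U32_SET_FIELD (register offset and mask names are hypothetical, for
+ * illustration only):
+ *
+ *   UINT32 Reg = MvGop110GmacRead (Port, EXAMPLE_CTRL_REG_OFFSET);
+ *   U32_SET_FIELD (Reg, EXAMPLE_FIELD_MASK, ExampleFieldValue);
+ *   MvGop110GmacWrite (Port, EXAMPLE_CTRL_REG_OFFSET, Reg);
+ */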
+
+/* SNP callbacks */
+EFI_STATUS
+EFIAPI
+Pp2SnpStart (
+ IN EFI_SIMPLE_NETWORK_PROTOCOL *This
+ );
+
+EFI_STATUS
+EFIAPI
+Pp2SnpStop (
+ IN EFI_SIMPLE_NETWORK_PROTOCOL *This
+ );
+
+EFI_STATUS
+EFIAPI
+Pp2DxeSnpInitialize (
+ IN EFI_SIMPLE_NETWORK_PROTOCOL *This,
+ IN UINTN ExtraRxBufferSize OPTIONAL,
+ IN UINTN ExtraTxBufferSize OPTIONAL
+ );
+
+EFI_STATUS
+EFIAPI
+Pp2SnpReset (
+ IN EFI_SIMPLE_NETWORK_PROTOCOL *This,
+ IN BOOLEAN ExtendedVerification
+ );
+
+EFI_STATUS
+EFIAPI
+Pp2SnpShutdown (
+ IN EFI_SIMPLE_NETWORK_PROTOCOL *This
+ );
+
+EFI_STATUS
+EFIAPI
+Pp2SnpReceiveFilters (
+ IN EFI_SIMPLE_NETWORK_PROTOCOL *This,
+ IN UINT32 Enable,
+ IN UINT32 Disable,
+ IN BOOLEAN ResetMCastFilter,
+ IN UINTN MCastFilterCnt OPTIONAL,
+ IN EFI_MAC_ADDRESS *MCastFilter OPTIONAL
+ );
+
+EFI_STATUS
+EFIAPI
+Pp2SnpStationAddress (
+ IN EFI_SIMPLE_NETWORK_PROTOCOL *Snp,
+ IN BOOLEAN Reset,
+ IN EFI_MAC_ADDRESS *NewMac
+  );
+
+EFI_STATUS
+EFIAPI
+Pp2SnpNetStat (
+ IN EFI_SIMPLE_NETWORK_PROTOCOL *This,
+ IN BOOLEAN Reset,
+ IN OUT UINTN *StatisticsSize OPTIONAL,
+ OUT EFI_NETWORK_STATISTICS *StatisticsTable OPTIONAL
+ );
+
+EFI_STATUS
+EFIAPI
+Pp2SnpIpToMac (
+ IN EFI_SIMPLE_NETWORK_PROTOCOL *This,
+ IN BOOLEAN IPv6,
+ IN EFI_IP_ADDRESS *IP,
+ OUT EFI_MAC_ADDRESS *MAC
+ );
+
+EFI_STATUS
+EFIAPI
+Pp2SnpGetStatus (
+ IN EFI_SIMPLE_NETWORK_PROTOCOL *Snp,
+ OUT UINT32 *InterruptStatus OPTIONAL,
+ OUT VOID **TxBuf OPTIONAL
+ );
+
+EFI_STATUS
+EFIAPI
+Pp2SnpTransmit (
+ IN EFI_SIMPLE_NETWORK_PROTOCOL *This,
+ IN UINTN HeaderSize,
+ IN UINTN BufferSize,
+ IN VOID *Buffer,
+ IN EFI_MAC_ADDRESS *SrcAddr OPTIONAL,
+ IN EFI_MAC_ADDRESS *DestAddr OPTIONAL,
+ IN UINT16 *EtherTypePtr OPTIONAL
+ );
+
+EFI_STATUS
+EFIAPI
+Pp2SnpReceive (
+ IN EFI_SIMPLE_NETWORK_PROTOCOL *This,
+ OUT UINTN *HeaderSize OPTIONAL,
+ IN OUT UINTN *BufferSize,
+ OUT VOID *Buffer,
+ OUT EFI_MAC_ADDRESS *SrcAddr OPTIONAL,
+ OUT EFI_MAC_ADDRESS *DstAddr OPTIONAL,
+ OUT UINT16 *EtherType OPTIONAL
+ );
+#endif
diff --git a/Silicon/Marvell/Drivers/Net/Pp2Dxe/Pp2Dxe.inf b/Silicon/Marvell/Drivers/Net/Pp2Dxe/Pp2Dxe.inf
new file mode 100644
index 0000000000..fcd0611b4d
--- /dev/null
+++ b/Silicon/Marvell/Drivers/Net/Pp2Dxe/Pp2Dxe.inf
@@ -0,0 +1,84 @@
+# Copyright (C) 2016 Marvell International Ltd.
+#
+# Marvell BSD License Option
+#
+# If you received this File from Marvell, you may opt to use, redistribute and/or
+# modify this File under the following licensing terms.
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# * Neither the name of Marvell nor the names of its contributors may be
+# used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+[Defines]
+ INF_VERSION = 0x00010019
+ BASE_NAME = Pp2Dxe
+ FILE_GUID = 5ffc3843-d8d4-40ba-ae07-38967138509c
+ MODULE_TYPE = DXE_DRIVER
+ VERSION_STRING = 1.0
+ ENTRY_POINT = Pp2DxeInitialise
+
+[Sources.common]
+ Pp2Dxe.c
+ Mvpp2Lib.c
+
+[Packages]
+ EmbeddedPkg/EmbeddedPkg.dec
+ MdePkg/MdePkg.dec
+ MdeModulePkg/MdeModulePkg.dec
+ ArmPkg/ArmPkg.dec
+ Silicon/Marvell/Marvell.dec
+
+[LibraryClasses]
+ DmaLib
+ IoLib
+ PcdLib
+ BaseLib
+ BaseMemoryLib
+ DebugLib
+ UefiLib
+ NetLib
+ UefiDriverEntryPoint
+ UefiBootServicesTableLib
+ MemoryAllocationLib
+ CacheMaintenanceLib
+
+[Protocols]
+ gEfiSimpleNetworkProtocolGuid
+ gEfiDevicePathProtocolGuid
+ gEfiCpuArchProtocolGuid
+ gMarvellMdioProtocolGuid
+ gMarvellPhyProtocolGuid
+
+[Pcd]
+ gMarvellTokenSpaceGuid.PcdPp2Controllers
+ gMarvellTokenSpaceGuid.PcdPp2GopIndexes
+ gMarvellTokenSpaceGuid.PcdPp2InterfaceAlwaysUp
+ gMarvellTokenSpaceGuid.PcdPp2InterfaceSpeed
+ gMarvellTokenSpaceGuid.PcdPp2PhyConnectionTypes
+ gMarvellTokenSpaceGuid.PcdPp2PhyIndexes
+ gMarvellTokenSpaceGuid.PcdPp2Port2Controller
+ gMarvellTokenSpaceGuid.PcdPp2PortIds
+
+[Depex]
+ TRUE