From feeece2fbcc40d67ffdf183bc925106c203196d2 Mon Sep 17 00:00:00 2001 From: Rohit Vaswani Date: Wed, 8 Jun 2011 15:48:35 -0700 Subject: [PATCH] net: QFEC Ethernet driver QFEC is 1 Giga-bit Ethernet MAC module residing in FSM9XXX. Change-Id: I718fb578cfb56d598ec5fd8b9ffebad4414a7830 Acked-by: Kaushik Sikdar Signed-off-by: Rohit Vaswani --- Documentation/networking/qfec.txt | 309 ++++ drivers/net/ethernet/msm/Kconfig | 8 + drivers/net/ethernet/msm/Makefile | 1 + drivers/net/ethernet/msm/qfec.c | 2792 +++++++++++++++++++++++++++++ drivers/net/ethernet/msm/qfec.h | 800 +++++++++ 5 files changed, 3910 insertions(+) create mode 100644 Documentation/networking/qfec.txt create mode 100644 drivers/net/ethernet/msm/qfec.c create mode 100644 drivers/net/ethernet/msm/qfec.h diff --git a/Documentation/networking/qfec.txt b/Documentation/networking/qfec.txt new file mode 100644 index 000000000000..182043f6b517 --- /dev/null +++ b/Documentation/networking/qfec.txt @@ -0,0 +1,309 @@ +Driver name: Qualcomm FSM9xxx Ethernet Driver + +Supported hardware: FSM9xxx Ethernet Controller + +Maintainer(s): +Author(s): + + +Introduction: +============= + +The FSM9xxx Ethernet controller is register based with separate TX and RX DMA +engines supporting scatter/gather and support 1EEE-1588 timestamping. +MII, RevMII and RgMII interfaces are support. RgMII support 1G. + +The driver supports gather but not scatter, uses the controller DMA engines, +and timestamping. + + +Hardware description: +===================== + +The Ethernet Controller is a memory mapped register device with two +internal DMA engines for TX and RX path processing using separate +buffer-descriptors (BD) allocated from non-cached main memory for the TX +and RX paths. These BDs support scatter-gather but are only used to +transfer single max sized Ethernet frames. The BDs are sequentially +accessed as a ring, with an end-of-ring bit set in the last BD. 
Ownership +bits control access by hardware and software to individual BDs. + +An additional 4 words of space can be configured and is allocated between +each BD to store additional information about the sk_buff associated with it. +The driver software uses 2 ring structures and local functions to manage +them to keep the BDs in sync with the hardware. The number of BDs is +determined from the space allocated for them (PAGE_SIZE). The ratio of RX +to TX BD is set by a #define. + +Interrupts are used to service and replenish pre-allocated sk_buff for each +RX BD. TX frames are allocated to a TX BD and transmitted frames are +freed within the xmit() invoked to send the frame. No TX interrupts are +processed since sk_buffs are freed in the xmit(). + +Three PHY interfaces are supported: MII, RevMII and RgMII. The selected +interface is determined from the resource structure (to be completed) and +programmed into a register prior to resetting the Ethernet controller. + +Separate PLLs are managed to provide MAC/PHY clocks in RevMii and RgMii +modes, and a 25MHz clock for timestamping. 
+ + + +Software description +==================== + +Structures + +struct qfec_buf_desc { + uint32_t status; + uint32_t ctl; + void *p_buf; + void *next; +}; + +struct buf_desc { + struct qfec_buf_desc desc; /* must be first */ + + struct sk_buff *skb; + void *buf_virt_addr; + void *buf_phys_addr; + uint32_t last_bd_flag; +}; + +struct ring { + int head; + int tail; + int n_free; + int len; +}; + +struct qfec_priv { + struct net_device *net_dev; + struct net_device_stats stats; /* req statistics */ + + struct device dev; + + spinlock_t hw_lock; + + unsigned int state; /* driver state */ + + void *bd_base; /* addr buf-desc */ + dma_addr_t tbd_dma; /* dma/phy-addr buf-desc */ + dma_addr_t rbd_dma; /* dma/phy-addr buf-desc */ + + struct resource *mac_res; + void *mac_base; /* mac (virt) base address */ + + struct resource *clk_res; + void *clk_base; /* clk (virt) base address */ + + unsigned int n_tbd; /* # of TX buf-desc */ + struct ring ring_tbd; /* TX ring */ + struct buf_desc *p_tbd; /* # TX buf-desc */ + + unsigned int n_rbd; /* # of RX buf-desc */ + struct ring ring_rbd; /* RX ring */ + struct buf_desc *p_rbd; /* # RX buf-desc */ + + unsigned long cntr[cntr_last]; /* activity counters */ + + struct mii_if_info mii; + + int mdio_clk; /* phy mdio clock rate */ + int phy_id; /* default PHY addr (0) */ + struct timer_list phy_tmr; /* monitor PHY state */ +}; + + + +Initialization is divided between probe() and open() such that the +net_device is allocated, the address space is mapped for register access, +and procfs files created in probe(). BD memory is allocated and +initialized along with interrupts and timers in open(). BD is not +de-allocated in close() allowing it to be debugged after the interface is +ifconfig down'd. 
This approach is intended to aid with debugging, since +configuring the interface down and up may clear some early usage +problems. + +Phy link state changes are monitored using a timer using some existing +functions from the mii library, but also with local functions intended to +support RGMII in the future. + +A variety of information is accessible through procFs. Counters are used +to track various driver events, these include abnormal and error +interrupts. Hardware counters of various frame statistics (e.g. types and +sizes of TX and RX frames) are available. Hardware registers and up to the +first 50 TX and RX BDs can be displayed. A table of procfs filenames and +functions is used to create and delete the procfs entries as needed. + +Probe() + +Allocate and initialize the net_device structure with resource information +specifying the Ethernet controller, clock control and MAC address memory +regions. Set netdev_ops to a statically defined sub-structure supporting +the device. + +Open() + +Use qfec_mem_alloc() to allocate space for the buffer-descriptors (BD). +TX BDs are initialized by clearing the ownership bit of each. Each RX BD +is initialized using qfec_rbd_init(). Qfec_rbd_init() pre-allocates an +sk_buff, saving the addresses of both the sk_buff and its data buffer in the +additional BD space, setting the BD buf pointer to the physical address of +the sk_buff data, and finally setting the ownership bit. + +Once the BDs are initialized, the interface selection register is set to the +appropriate PHY interface configuration, and the Ethernet controller is +reset and its registers initialized, including the starting addresses of +the TX and RX BDs. + +The PHY monitor state is initialized and the timer initialized and started. + +Finally, the interrupt for the Ethernet controller is initialized. + + Note - Interrupts from both the external PHY and internal RevMii + PHY are available, but neither is used in preference to the + timer. 
+ + +Interrupt Processing + +Besides recognizing abnormal error interrupts, RX, TX and GMAC interrupts +are recognized, although TX and GMAC interrupts are ignored but cleared and +counted. (The gmac interrupt can be ignored but must be disabled). + +RX interrupts invoke a handler to process the received frame, send it +to the stack and re-allocate a replacement sk_buff for the buffer- +descriptor. + + +Receive Processing + +The RX buffer descriptors are initialized by _open() using qfec_rbd_init() +which pre-allocates an sk_buff, saving its address and the physical address +of its data in the additional BD space, as well as writing the physical +address to the BD pbuf entry read by HW. The size of the buffer and +other control information are written to the BD, as well as setting the +ownership bit. + +A received frame generates an interrupt invoking qfec_rx_int(). It +repeatedly checks the ownership of the next available BD, passing the +sk_buff containing the received frame to the stack via netif_rx(). + +Once all received frames are processed, it repeatedly calls qfec_rbd_init() +to allocate a new sk_buff with each available BD. + + +Transmit Processing + +Frames are transmitted through the start_xmit callback function. +qfec_tx_replenish() is immediately called to free sk_buffs from BD +that have been transmitted, before checking if a BD is available. +The sk_buff address is stored in the additional BD space and the +physical address of its data is stored in the pbuf BD entry used +by the HW. The TX poll-demand register is accessed, causing the +HW to recheck the current BD and process it. + +While the TX interrupt could be processed to free sk_buffs as BD +are processed, they are ignored since the sk_buffs will be freed +with each call to _xmit(). + +procfs + +debug files are available to display the controller registers, +frame counters from the controller, driver activity counters, and +the first 50 entries of the RX and TX buffer descriptors. 
+ + +Callbacks + +In addition to the functions described above, the following functions +are used to support their correspondingly named device operations: + + qfec_stop + qfec_do_ioctl + qfec_tx_timeout + qfec_set_mac_address + qfec_get_stats + qfec_set_config + + eth_change_mtu + eth_validate_addr + + +Power Management +================ +None + + +Interface: +========== + +- Module-init/exit +- standard network interface functions + + +Module parameters: +================== + +static struct resource qfec_resources [] = { + [0] = { + .start = QFEC_MAC_BASE, + .end = QFEC_MAC_BASE + QFEC_MAC_SIZE, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = QFEC_MAC_IRQ, + .end = QFEC_MAC_IRQ, + .flags = IORESOURCE_IRQ, + }, + [2] = { + .start = QFEC_CLK_BASE, + .end = QFEC_CLK_BASE + QFEC_CLK_SIZE, + .flags = IORESOURCE_IO, + }, + [3] = { + .start = QFEC_MAC_FUSE_BASE, + .end = QFEC_MAC_FUSE_BASE + QFEC_MAC_FUSE_SIZE, + .flags = IORESOURCE_DMA, + }, +}; + +static struct platform_device qfec_device = { + .name = "qfec", + .id = 0, + .num_resources = ARRAY_SIZE(qfec_resources), + .resource = qfec_resources, +}; + + +Resource entries exist for three address regions and one interrupt. The +interrupt is identified as IORESOURCE_IRQ, the controller registers as +OPRESOURCE_MEM, the clock control registers as IORESOURCE_IO, and the +MAC address fuses as IORESOURCE_DMA. + + +Dependencies: +============= +None + + +User space utilities: +===================== + +See procfs descriptions + + +Known issues: +============= + +- replace procfs w/ debugfs + + +To do: +====== + +- specify interface (MII/RevMII/RgMii) in resource structure +- RevMii support untested +- RgMii (10/100/1000) +- generic timestamp support diff --git a/drivers/net/ethernet/msm/Kconfig b/drivers/net/ethernet/msm/Kconfig index 5c7770cd0fad..095cb4d67450 100644 --- a/drivers/net/ethernet/msm/Kconfig +++ b/drivers/net/ethernet/msm/Kconfig @@ -42,3 +42,11 @@ config MSM_RMNET_DEBUG help Debug stats on wakeup counts. 
+config QFEC + tristate "QFEC ethernet driver" + select MII + depends on ARM + help + This driver supports Ethernet in the FSM9xxx. + To compile this driver as a module, choose M here: the + module will be called qfec. diff --git a/drivers/net/ethernet/msm/Makefile b/drivers/net/ethernet/msm/Makefile index db3f08ee8acb..7d9d4c63b158 100644 --- a/drivers/net/ethernet/msm/Makefile +++ b/drivers/net/ethernet/msm/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_MSM_RMNET) += msm_rmnet.o obj-$(CONFIG_MSM_RMNET_SDIO) += msm_rmnet_sdio.o obj-$(CONFIG_MSM_RMNET_BAM) += msm_rmnet_bam.o obj-$(CONFIG_MSM_RMNET_SMUX) += msm_rmnet_smux.o +obj-$(CONFIG_QFEC) += qfec.o diff --git a/drivers/net/ethernet/msm/qfec.c b/drivers/net/ethernet/msm/qfec.c new file mode 100644 index 000000000000..02618ec18d8e --- /dev/null +++ b/drivers/net/ethernet/msm/qfec.c @@ -0,0 +1,2792 @@ +/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include + +#include + +#include /* size_t */ +#include /* mark_bh */ + +#include /* struct device, and other headers */ +#include /* eth_type_trans */ +#include + +#include +#include +#include + +#include +#include +#include +#include + +#include "qfec.h" + +#define QFEC_NAME "qfec" +#define QFEC_DRV_VER "Nov 29 2011" + +#define ETH_BUF_SIZE 0x600 +#define MAX_N_BD 50 +#define MAC_ADDR_SIZE 6 + +#define RX_TX_BD_RATIO 8 +#define TX_BD_NUM 256 +#define RX_BD_NUM 256 +#define TX_BD_TI_RATIO 4 +#define MAX_MDIO_REG 32 + +#define H_DPLX 0 +#define F_DPLX 1 +/* + * logging macros + */ +#define QFEC_LOG_PR 1 +#define QFEC_LOG_DBG 2 +#define QFEC_LOG_DBG2 4 +#define QFEC_LOG_MDIO_W 8 +#define QFEC_LOG_MDIO_R 16 +#define QFEC_MII_EXP_MASK (EXPANSION_LCWP | EXPANSION_ENABLENPAGE \ + | EXPANSION_NPCAPABLE) + +static int qfec_debug = QFEC_LOG_PR; + +#ifdef QFEC_DEBUG +# define QFEC_LOG(flag, ...) \ + do { \ + if (flag & qfec_debug) \ + pr_info(__VA_ARGS__); \ + } while (0) +#else +# define QFEC_LOG(flag, ...) +#endif + +#define QFEC_LOG_ERR(...) pr_err(__VA_ARGS__) + +/* + * driver buffer-descriptor + * contains the 4 word HW descriptor plus an additional 4-words. + * (See the DSL bits in the BUS-Mode register). 
+ */ +#define BD_FLAG_LAST_BD 1 + +struct buf_desc { + struct qfec_buf_desc *p_desc; + struct sk_buff *skb; + void *buf_virt_addr; + void *buf_phys_addr; + uint32_t last_bd_flag; +}; + +/* + *inline functions accessing non-struct qfec_buf_desc elements + */ + +/* skb */ +static inline struct sk_buff *qfec_bd_skbuf_get(struct buf_desc *p_bd) +{ + return p_bd->skb; +}; + +static inline void qfec_bd_skbuf_set(struct buf_desc *p_bd, struct sk_buff *p) +{ + p_bd->skb = p; +}; + +/* virtual addr */ +static inline void qfec_bd_virt_set(struct buf_desc *p_bd, void *addr) +{ + p_bd->buf_virt_addr = addr; +}; + +static inline void *qfec_bd_virt_get(struct buf_desc *p_bd) +{ + return p_bd->buf_virt_addr; +}; + +/* physical addr */ +static inline void qfec_bd_phys_set(struct buf_desc *p_bd, void *addr) +{ + p_bd->buf_phys_addr = addr; +}; + +static inline void *qfec_bd_phys_get(struct buf_desc *p_bd) +{ + return p_bd->buf_phys_addr; +}; + +/* last_bd_flag */ +static inline uint32_t qfec_bd_last_bd(struct buf_desc *p_bd) +{ + return (p_bd->last_bd_flag != 0); +}; + +static inline void qfec_bd_last_bd_set(struct buf_desc *p_bd) +{ + p_bd->last_bd_flag = BD_FLAG_LAST_BD; +}; + +/* + *inline functions accessing struct qfec_buf_desc elements + */ + +/* ownership bit */ +static inline uint32_t qfec_bd_own(struct buf_desc *p_bd) +{ + return p_bd->p_desc->status & BUF_OWN; +}; + +static inline void qfec_bd_own_set(struct buf_desc *p_bd) +{ + p_bd->p_desc->status |= BUF_OWN ; +}; + +static inline void qfec_bd_own_clr(struct buf_desc *p_bd) +{ + p_bd->p_desc->status &= ~(BUF_OWN); +}; + +static inline uint32_t qfec_bd_status_get(struct buf_desc *p_bd) +{ + return p_bd->p_desc->status; +}; + +static inline void qfec_bd_status_set(struct buf_desc *p_bd, uint32_t status) +{ + p_bd->p_desc->status = status; +}; + +static inline uint32_t qfec_bd_status_len(struct buf_desc *p_bd) +{ + return BUF_RX_FL_GET((*p_bd->p_desc)); +}; + +/* control register */ +static inline void 
qfec_bd_ctl_reset(struct buf_desc *p_bd) +{ + p_bd->p_desc->ctl = 0; +}; + +static inline uint32_t qfec_bd_ctl_get(struct buf_desc *p_bd) +{ + return p_bd->p_desc->ctl; +}; + +static inline void qfec_bd_ctl_set(struct buf_desc *p_bd, uint32_t val) +{ + p_bd->p_desc->ctl |= val; +}; + +static inline void qfec_bd_ctl_wr(struct buf_desc *p_bd, uint32_t val) +{ + p_bd->p_desc->ctl = val; +}; + +/* pbuf register */ +static inline void *qfec_bd_pbuf_get(struct buf_desc *p_bd) +{ + return p_bd->p_desc->p_buf; +} + +static inline void qfec_bd_pbuf_set(struct buf_desc *p_bd, void *p) +{ + p_bd->p_desc->p_buf = p; +} + +/* next register */ +static inline void *qfec_bd_next_get(struct buf_desc *p_bd) +{ + return p_bd->p_desc->next; +}; + +/* + * initialize an RX BD w/ a new buf + */ +static int qfec_rbd_init(struct net_device *dev, struct buf_desc *p_bd) +{ + struct sk_buff *skb; + void *p; + void *v; + + /* allocate and record ptrs for sk buff */ + skb = dev_alloc_skb(ETH_BUF_SIZE); + if (!skb) + goto err; + + qfec_bd_skbuf_set(p_bd, skb); + + v = skb_put(skb, ETH_BUF_SIZE); + qfec_bd_virt_set(p_bd, v); + + p = (void *) dma_map_single(&dev->dev, + (void *)skb->data, ETH_BUF_SIZE, DMA_FROM_DEVICE); + qfec_bd_pbuf_set(p_bd, p); + qfec_bd_phys_set(p_bd, p); + + /* populate control register */ + /* mark the last BD and set end-of-ring bit */ + qfec_bd_ctl_wr(p_bd, ETH_BUF_SIZE | + (qfec_bd_last_bd(p_bd) ? 
BUF_RX_RER : 0)); + + qfec_bd_status_set(p_bd, BUF_OWN); + + if (!(qfec_debug & QFEC_LOG_DBG2)) + return 0; + + /* debug messages */ + QFEC_LOG(QFEC_LOG_DBG2, "%s: %p bd\n", __func__, p_bd); + + QFEC_LOG(QFEC_LOG_DBG2, "%s: %p skb\n", __func__, skb); + + QFEC_LOG(QFEC_LOG_DBG2, + "%s: %p p_bd, %p data, %p skb_put, %p virt, %p p_buf, %p p\n", + __func__, (void *)p_bd, + (void *)skb->data, v, /*(void *)skb_put(skb, ETH_BUF_SIZE), */ + (void *)qfec_bd_virt_get(p_bd), (void *)qfec_bd_pbuf_get(p_bd), + (void *)p); + + return 0; + +err: + return -ENOMEM; +}; + +/* + * ring structure used to maintain indices of buffer-descriptor (BD) usage + * + * The RX BDs are normally all pre-allocated with buffers available to be + * DMA'd into with received frames. The head indicates the first BD/buffer + * containing a received frame, and the tail indicates the oldest BD/buffer + * that needs to be restored for use. Head and tail are both initialized + * to zero, and n_free is initialized to zero, since all BD are initialized. + * + * The TX BDs are normally available for use, only being initialized as + * TX frames are requested for transmission. The head indicates the + * first available BD, and the tail indicate the oldest BD that has + * not been acknowledged as transmitted. Head and tail are both initialized + * to zero, and n_free is initialized to len, since all are available for use. 
+ */ +struct ring { + int head; + int tail; + int n_free; + int len; +}; + +/* accessory in line functions for struct ring */ +static inline void qfec_ring_init(struct ring *p_ring, int size, int free) +{ + p_ring->head = p_ring->tail = 0; + p_ring->len = size; + p_ring->n_free = free; +} + +static inline int qfec_ring_full(struct ring *p_ring) +{ + return (p_ring->n_free == 0); +}; + +static inline int qfec_ring_empty(struct ring *p_ring) +{ + return (p_ring->n_free == p_ring->len); +} + +static inline void qfec_ring_head_adv(struct ring *p_ring) +{ + if (++p_ring->head == p_ring->len) + p_ring->head = 0; + p_ring->n_free--; +}; + +static inline void qfec_ring_tail_adv(struct ring *p_ring) +{ + if (++p_ring->tail == p_ring->len) + p_ring->tail = 0; + p_ring->n_free++; +}; + +static inline int qfec_ring_head(struct ring *p_ring) +{ + + return p_ring->head; +}; + +static inline int qfec_ring_tail(struct ring *p_ring) +{ + return p_ring->tail; +}; + +static inline int qfec_ring_room(struct ring *p_ring) +{ + return p_ring->n_free; +}; + +/* + * counters track normal and abnormal driver events and activity + */ +enum cntr { + isr = 0, + fatal_bus, + + early_tx, + tx_no_resource, + tx_proc_stopped, + tx_jabber_tmout, + + xmit, + tx_int, + tx_isr, + tx_owned, + tx_underflow, + + tx_replenish, + tx_skb_null, + tx_timeout, + tx_too_large, + + gmac_isr, + + /* half */ + norm_int, + abnorm_int, + + early_rx, + rx_buf_unavail, + rx_proc_stopped, + rx_watchdog, + + netif_rx_cntr, + rx_int, + rx_isr, + rx_owned, + rx_overflow, + + rx_dropped, + rx_skb_null, + queue_start, + queue_stop, + + rx_paddr_nok, + ts_ioctl, + ts_tx_en, + ts_tx_rtn, + + ts_rec, + cntr_last, +}; + +static char *cntr_name[] = { + "isr", + "fatal_bus", + + "early_tx", + "tx_no_resource", + "tx_proc_stopped", + "tx_jabber_tmout", + + "xmit", + "tx_int", + "tx_isr", + "tx_owned", + "tx_underflow", + + "tx_replenish", + "tx_skb_null", + "tx_timeout", + "tx_too_large", + + "gmac_isr", + + /* half */ + 
"norm_int", + "abnorm_int", + + "early_rx", + "rx_buf_unavail", + "rx_proc_stopped", + "rx_watchdog", + + "netif_rx", + "rx_int", + "rx_isr", + "rx_owned", + "rx_overflow", + + "rx_dropped", + "rx_skb_null", + "queue_start", + "queue_stop", + + "rx_paddr_nok", + "ts_ioctl", + "ts_tx_en", + "ts_tx_rtn", + + "ts_rec", + "" +}; + +/* + * private data + */ + +static struct net_device *qfec_dev; + +enum qfec_state { + timestamping = 0x04, +}; + +struct qfec_priv { + struct net_device *net_dev; + struct net_device_stats stats; /* req statistics */ + + struct device dev; + + spinlock_t xmit_lock; + spinlock_t mdio_lock; + + unsigned int state; /* driver state */ + + unsigned int bd_size; /* buf-desc alloc size */ + struct qfec_buf_desc *bd_base; /* * qfec-buf-desc */ + dma_addr_t tbd_dma; /* dma/phy-addr buf-desc */ + dma_addr_t rbd_dma; /* dma/phy-addr buf-desc */ + + struct resource *mac_res; + void *mac_base; /* mac (virt) base address */ + + struct resource *clk_res; + void *clk_base; /* clk (virt) base address */ + + struct resource *fuse_res; + void *fuse_base; /* mac addr fuses */ + + unsigned int n_tbd; /* # of TX buf-desc */ + struct ring ring_tbd; /* TX ring */ + struct buf_desc *p_tbd; + unsigned int tx_ic_mod; /* (%) val for setting IC */ + + unsigned int n_rbd; /* # of RX buf-desc */ + struct ring ring_rbd; /* RX ring */ + struct buf_desc *p_rbd; + + struct buf_desc *p_latest_rbd; + struct buf_desc *p_ending_rbd; + + unsigned long cntr[cntr_last]; /* activity counters */ + + struct mii_if_info mii; /* used by mii lib */ + + int mdio_clk; /* phy mdio clock rate */ + int phy_id; /* default PHY addr (0) */ + struct timer_list phy_tmr; /* monitor PHY state */ +}; + +/* + * cntrs display + */ + +static int qfec_cntrs_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct qfec_priv *priv = netdev_priv(to_net_dev(dev)); + int h = (cntr_last + 1) / 2; + int l; + int n; + int count = PAGE_SIZE; + + QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__); 
+ + l = snprintf(&buf[0], count, "%s:\n", __func__); + for (n = 0; n < h; n++) { + l += snprintf(&buf[l], count - l, + " %12lu %-16s %12lu %s\n", + priv->cntr[n], cntr_name[n], + priv->cntr[n+h], cntr_name[n+h]); + } + + return l; +} + +# define CNTR_INC(priv, name) (priv->cntr[name]++) + +/* + * functions that manage state + */ +static inline void qfec_queue_start(struct net_device *dev) +{ + struct qfec_priv *priv = netdev_priv(dev); + + if (netif_queue_stopped(dev)) { + netif_wake_queue(dev); + CNTR_INC(priv, queue_start); + } +}; + +static inline void qfec_queue_stop(struct net_device *dev) +{ + struct qfec_priv *priv = netdev_priv(dev); + + netif_stop_queue(dev); + CNTR_INC(priv, queue_stop); +}; + +/* + * functions to access and initialize the MAC registers + */ +static inline uint32_t qfec_reg_read(struct qfec_priv *priv, uint32_t reg) +{ + return ioread32((void *) (priv->mac_base + reg)); +} + +static void qfec_reg_write(struct qfec_priv *priv, uint32_t reg, uint32_t val) +{ + uint32_t addr = (uint32_t)priv->mac_base + reg; + + QFEC_LOG(QFEC_LOG_DBG2, "%s: %08x <- %08x\n", __func__, addr, val); + iowrite32(val, (void *)addr); +} + +/* + * speed/duplex/pause settings + */ +static int qfec_config_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct qfec_priv *priv = netdev_priv(to_net_dev(dev)); + int cfg = qfec_reg_read(priv, MAC_CONFIG_REG); + int flow = qfec_reg_read(priv, FLOW_CONTROL_REG); + int l = 0; + int count = PAGE_SIZE; + + QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__); + + l += snprintf(&buf[l], count, "%s:", __func__); + + l += snprintf(&buf[l], count - l, " [0x%08x] %4dM %s %s", cfg, + (cfg & MAC_CONFIG_REG_PS) + ? ((cfg & MAC_CONFIG_REG_FES) ? 100 : 10) : 1000, + cfg & MAC_CONFIG_REG_DM ? "FD" : "HD", + cfg & MAC_CONFIG_REG_IPC ? "IPC" : "NoIPC"); + + flow &= FLOW_CONTROL_RFE | FLOW_CONTROL_TFE; + l += snprintf(&buf[l], count - l, " [0x%08x] %s", flow, + (flow == (FLOW_CONTROL_RFE | FLOW_CONTROL_TFE)) ? 
"PAUSE" + : ((flow == FLOW_CONTROL_RFE) ? "RX-PAUSE" + : ((flow == FLOW_CONTROL_TFE) ? "TX-PAUSE" : ""))); + + l += snprintf(&buf[l], count - l, " %s", QFEC_DRV_VER); + l += snprintf(&buf[l], count - l, "\n"); + return l; +} + + +/* + * table and functions to initialize controller registers + */ + +struct reg_entry { + unsigned int rdonly; + unsigned int addr; + char *label; + unsigned int val; +}; + +static struct reg_entry qfec_reg_tbl[] = { + { 0, BUS_MODE_REG, "BUS_MODE_REG", BUS_MODE_REG_DEFAULT }, + { 0, AXI_BUS_MODE_REG, "AXI_BUS_MODE_REG", AXI_BUS_MODE_DEFAULT }, + { 0, AXI_STATUS_REG, "AXI_STATUS_REG", 0 }, + + { 0, MAC_ADR_0_HIGH_REG, "MAC_ADR_0_HIGH_REG", 0x00000302 }, + { 0, MAC_ADR_0_LOW_REG, "MAC_ADR_0_LOW_REG", 0x01350702 }, + + { 1, RX_DES_LST_ADR_REG, "RX_DES_LST_ADR_REG", 0 }, + { 1, TX_DES_LST_ADR_REG, "TX_DES_LST_ADR_REG", 0 }, + { 1, STATUS_REG, "STATUS_REG", 0 }, + { 1, DEBUG_REG, "DEBUG_REG", 0 }, + + { 0, INTRP_EN_REG, "INTRP_EN_REG", QFEC_INTRP_SETUP}, + + { 1, CUR_HOST_TX_DES_REG, "CUR_HOST_TX_DES_REG", 0 }, + { 1, CUR_HOST_RX_DES_REG, "CUR_HOST_RX_DES_REG", 0 }, + { 1, CUR_HOST_TX_BU_ADR_REG, "CUR_HOST_TX_BU_ADR_REG", 0 }, + { 1, CUR_HOST_RX_BU_ADR_REG, "CUR_HOST_RX_BU_ADR_REG", 0 }, + + { 1, MAC_FR_FILTER_REG, "MAC_FR_FILTER_REG", 0 }, + + { 0, MAC_CONFIG_REG, "MAC_CONFIG_REG", MAC_CONFIG_REG_SPD_1G + | MAC_CONFIG_REG_DM + | MAC_CONFIG_REG_TE + | MAC_CONFIG_REG_RE + | MAC_CONFIG_REG_IPC }, + + { 1, INTRP_STATUS_REG, "INTRP_STATUS_REG", 0 }, + { 1, INTRP_MASK_REG, "INTRP_MASK_REG", 0 }, + + { 0, OPER_MODE_REG, "OPER_MODE_REG", OPER_MODE_REG_DEFAULT }, + + { 1, GMII_ADR_REG, "GMII_ADR_REG", 0 }, + { 1, GMII_DATA_REG, "GMII_DATA_REG", 0 }, + + { 0, MMC_INTR_MASK_RX_REG, "MMC_INTR_MASK_RX_REG", 0xFFFFFFFF }, + { 0, MMC_INTR_MASK_TX_REG, "MMC_INTR_MASK_TX_REG", 0xFFFFFFFF }, + + { 1, TS_HIGH_REG, "TS_HIGH_REG", 0 }, + { 1, TS_LOW_REG, "TS_LOW_REG", 0 }, + + { 1, TS_HI_UPDT_REG, "TS_HI_UPDATE_REG", 0 }, + { 1, TS_LO_UPDT_REG, 
"TS_LO_UPDATE_REG", 0 }, + { 0, TS_SUB_SEC_INCR_REG, "TS_SUB_SEC_INCR_REG", 1 }, + { 0, TS_CTL_REG, "TS_CTL_REG", TS_CTL_TSENALL + | TS_CTL_TSCTRLSSR + | TS_CTL_TSINIT + | TS_CTL_TSENA }, +}; + +static void qfec_reg_init(struct qfec_priv *priv) +{ + struct reg_entry *p = qfec_reg_tbl; + int n = ARRAY_SIZE(qfec_reg_tbl); + + QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__); + + for (; n--; p++) { + if (!p->rdonly) + qfec_reg_write(priv, p->addr, p->val); + } +} + +/* + * display registers thru sysfs + */ +static int qfec_reg_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct qfec_priv *priv = netdev_priv(to_net_dev(dev)); + struct reg_entry *p = qfec_reg_tbl; + int n = ARRAY_SIZE(qfec_reg_tbl); + int l = 0; + int count = PAGE_SIZE; + + QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__); + + for (; n--; p++) { + l += snprintf(&buf[l], count - l, " %8p %04x %08x %s\n", + (void *)priv->mac_base + p->addr, p->addr, + qfec_reg_read(priv, p->addr), p->label); + } + + return l; +} + +/* + * set the MAC-0 address + */ +static void qfec_set_adr_regs(struct qfec_priv *priv, uint8_t *addr) +{ + uint32_t h = 0; + uint32_t l = 0; + + h = h << 8 | addr[5]; + h = h << 8 | addr[4]; + + l = l << 8 | addr[3]; + l = l << 8 | addr[2]; + l = l << 8 | addr[1]; + l = l << 8 | addr[0]; + + qfec_reg_write(priv, MAC_ADR_0_HIGH_REG, h); + qfec_reg_write(priv, MAC_ADR_0_LOW_REG, l); + + QFEC_LOG(QFEC_LOG_DBG, "%s: %08x %08x\n", __func__, h, l); +} + +/* + * set up the RX filter + */ +static void qfec_set_rx_mode(struct net_device *dev) +{ + struct qfec_priv *priv = netdev_priv(dev); + uint32_t filter_conf; + int index; + + /* Clear address filter entries */ + for (index = 1; index < MAC_ADR_MAX; ++index) { + qfec_reg_write(priv, MAC_ADR_HIGH_REG_N(index), 0); + qfec_reg_write(priv, MAC_ADR_LOW_REG_N(index), 0); + } + + if (dev->flags & IFF_PROMISC) { + /* Receive all frames */ + filter_conf = MAC_FR_FILTER_RA; + } else if ((dev->flags & IFF_MULTICAST) == 0) { + /* Unicast 
filtering only */ + filter_conf = MAC_FR_FILTER_HPF; + } else if ((netdev_mc_count(dev) > MAC_ADR_MAX - 1) || + (dev->flags & IFF_ALLMULTI)) { + /* Unicast filtering is enabled, Pass all multicast frames */ + filter_conf = MAC_FR_FILTER_HPF | MAC_FR_FILTER_PM; + } else { + struct netdev_hw_addr *ha; + + /* Both unicast and multicast filtering are enabled */ + filter_conf = MAC_FR_FILTER_HPF; + + index = 1; + + netdev_for_each_mc_addr(ha, dev) { + uint32_t high, low; + + high = (1 << 31) | (ha->addr[5] << 8) | (ha->addr[4]); + low = (ha->addr[3] << 24) | (ha->addr[2] << 16) | + (ha->addr[1] << 8) | (ha->addr[0]); + + qfec_reg_write(priv, MAC_ADR_HIGH_REG_N(index), high); + qfec_reg_write(priv, MAC_ADR_LOW_REG_N(index), low); + + index++; + } + } + + qfec_reg_write(priv, MAC_FR_FILTER_REG, filter_conf); +} + +/* + * reset the controller + */ + +#define QFEC_RESET_TIMEOUT 10000 + /* reset should always clear but did not w/o test/delay + * in RgMii mode. there is no spec'd max timeout + */ + +static int qfec_hw_reset(struct qfec_priv *priv) +{ + int timeout = QFEC_RESET_TIMEOUT; + + QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__); + + qfec_reg_write(priv, BUS_MODE_REG, BUS_MODE_SWR); + + while (qfec_reg_read(priv, BUS_MODE_REG) & BUS_MODE_SWR) { + if (timeout-- == 0) { + QFEC_LOG_ERR("%s: timeout\n", __func__); + return -ETIME; + } + + /* there were problems resetting the controller + * in RGMII mode when there wasn't sufficient + * delay between register reads + */ + usleep_range(100, 200); + } + + return 0; +} + +/* + * initialize controller + */ +static int qfec_hw_init(struct qfec_priv *priv) +{ + int res = 0; + + QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__); + + res = qfec_hw_reset(priv); + if (res) + return res; + + qfec_reg_init(priv); + + /* config buf-desc locations */ + qfec_reg_write(priv, TX_DES_LST_ADR_REG, priv->tbd_dma); + qfec_reg_write(priv, RX_DES_LST_ADR_REG, priv->rbd_dma); + + /* clear interrupts */ + qfec_reg_write(priv, STATUS_REG, INTRP_EN_REG_NIE | 
INTRP_EN_REG_RIE + | INTRP_EN_REG_TIE | INTRP_EN_REG_TUE | INTRP_EN_REG_ETE); + + if (priv->mii.supports_gmii) { + /* Clear RGMII */ + qfec_reg_read(priv, SG_RG_SMII_STATUS_REG); + /* Disable RGMII int */ + qfec_reg_write(priv, INTRP_MASK_REG, 1); + } + + return res; +} + +/* + * en/disable controller + */ +static void qfec_hw_enable(struct qfec_priv *priv) +{ + QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__); + + qfec_reg_write(priv, OPER_MODE_REG, + qfec_reg_read(priv, OPER_MODE_REG) + | OPER_MODE_REG_ST | OPER_MODE_REG_SR); +} + +static void qfec_hw_disable(struct qfec_priv *priv) +{ + QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__); + + qfec_reg_write(priv, OPER_MODE_REG, + qfec_reg_read(priv, OPER_MODE_REG) + & ~(OPER_MODE_REG_ST | OPER_MODE_REG_SR)); +} + +/* + * interface selection + */ +struct intf_config { + uint32_t intf_sel; + uint32_t emac_ns; + uint32_t eth_x_en_ns; + uint32_t clkmux_sel; +}; + +#define ETH_X_EN_NS_REVMII (ETH_X_EN_NS_DEFAULT | ETH_TX_CLK_INV) +#define CLKMUX_REVMII (EMAC_CLKMUX_SEL_0 | EMAC_CLKMUX_SEL_1) + +static struct intf_config intf_config_tbl[] = { + { EMAC_PHY_INTF_SEL_MII, EMAC_NS_DEFAULT, ETH_X_EN_NS_DEFAULT, 0 }, + { EMAC_PHY_INTF_SEL_RGMII, EMAC_NS_DEFAULT, ETH_X_EN_NS_DEFAULT, 0 }, + { EMAC_PHY_INTF_SEL_REVMII, EMAC_NS_DEFAULT, ETH_X_EN_NS_REVMII, + CLKMUX_REVMII } +}; + +/* + * emac clk register read and write functions + */ +static inline uint32_t qfec_clkreg_read(struct qfec_priv *priv, uint32_t reg) +{ + return ioread32((void *) (priv->clk_base + reg)); +} + +static inline void qfec_clkreg_write(struct qfec_priv *priv, + uint32_t reg, uint32_t val) +{ + uint32_t addr = (uint32_t)priv->clk_base + reg; + + QFEC_LOG(QFEC_LOG_DBG2, "%s: %08x <- %08x\n", __func__, addr, val); + iowrite32(val, (void *)addr); +} + +/* + * configure the PHY interface and clock routing and signal bits + */ +enum phy_intfc { + INTFC_MII = 0, + INTFC_RGMII = 1, + INTFC_REVMII = 2, +}; + +static int qfec_intf_sel(struct qfec_priv *priv, unsigned int intfc) +{ 
+ struct intf_config *p; + + QFEC_LOG(QFEC_LOG_DBG2, "%s: %d\n", __func__, intfc); + + if (intfc > INTFC_REVMII) { + QFEC_LOG_ERR("%s: range\n", __func__); + return -ENXIO; + } + + p = &intf_config_tbl[intfc]; + + qfec_clkreg_write(priv, EMAC_PHY_INTF_SEL_REG, p->intf_sel); + qfec_clkreg_write(priv, EMAC_NS_REG, p->emac_ns); + qfec_clkreg_write(priv, ETH_X_EN_NS_REG, p->eth_x_en_ns); + qfec_clkreg_write(priv, EMAC_CLKMUX_SEL_REG, p->clkmux_sel); + + return 0; +} + +/* + * display registers thru proc-fs + */ +static struct qfec_clk_reg { + uint32_t offset; + char *label; +} qfec_clk_regs[] = { + { ETH_MD_REG, "ETH_MD_REG" }, + { ETH_NS_REG, "ETH_NS_REG" }, + { ETH_X_EN_NS_REG, "ETH_X_EN_NS_REG" }, + { EMAC_PTP_MD_REG, "EMAC_PTP_MD_REG" }, + { EMAC_PTP_NS_REG, "EMAC_PTP_NS_REG" }, + { EMAC_NS_REG, "EMAC_NS_REG" }, + { EMAC_TX_FS_REG, "EMAC_TX_FS_REG" }, + { EMAC_RX_FS_REG, "EMAC_RX_FS_REG" }, + { EMAC_PHY_INTF_SEL_REG, "EMAC_PHY_INTF_SEL_REG" }, + { EMAC_PHY_ADDR_REG, "EMAC_PHY_ADDR_REG" }, + { EMAC_REVMII_PHY_ADDR_REG, "EMAC_REVMII_PHY_ADDR_REG" }, + { EMAC_CLKMUX_SEL_REG, "EMAC_CLKMUX_SEL_REG" }, +}; + +static int qfec_clk_reg_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct qfec_priv *priv = netdev_priv(to_net_dev(dev)); + struct qfec_clk_reg *p = qfec_clk_regs; + int n = ARRAY_SIZE(qfec_clk_regs); + int l = 0; + int count = PAGE_SIZE; + + QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__); + + for (; n--; p++) { + l += snprintf(&buf[l], count - l, " %8p %8x %08x %s\n", + (void *)priv->clk_base + p->offset, p->offset, + qfec_clkreg_read(priv, p->offset), p->label); + } + + return l; +} + +/* + * speed selection + */ + +struct qfec_pll_cfg { + uint32_t spd; + uint32_t eth_md; /* M [31:16], NOT 2*D [15:0] */ + uint32_t eth_ns; /* NOT(M-N) [31:16], ctl bits [11:0] */ +}; + +static struct qfec_pll_cfg qfec_pll_cfg_tbl[] = { + /* 2.5 MHz */ + { MAC_CONFIG_REG_SPD_10, ETH_MD_M(1) | ETH_MD_2D_N(100), + ETH_NS_NM(100-1) + | ETH_NS_MCNTR_EN + | 
ETH_NS_MCNTR_MODE_DUAL + | ETH_NS_PRE_DIV(0) + | CLK_SRC_PLL_EMAC }, + /* 25 MHz */ + { MAC_CONFIG_REG_SPD_100, ETH_MD_M(1) | ETH_MD_2D_N(10), + ETH_NS_NM(10-1) + | ETH_NS_MCNTR_EN + | ETH_NS_MCNTR_MODE_DUAL + | ETH_NS_PRE_DIV(0) + | CLK_SRC_PLL_EMAC }, + /* 125 MHz */ + {MAC_CONFIG_REG_SPD_1G, 0, ETH_NS_PRE_DIV(1) + | CLK_SRC_PLL_EMAC }, +}; + +enum speed { + SPD_10 = 0, + SPD_100 = 1, + SPD_1000 = 2, +}; + +/* + * configure the PHY interface and clock routing and signal bits + */ +static int qfec_speed_cfg(struct net_device *dev, unsigned int spd, + unsigned int dplx) +{ + struct qfec_priv *priv = netdev_priv(dev); + struct qfec_pll_cfg *p; + + QFEC_LOG(QFEC_LOG_DBG2, "%s: %d spd, %d dplx\n", __func__, spd, dplx); + + if (spd > SPD_1000) { + QFEC_LOG_ERR("%s: range\n", __func__); + return -ENODEV; + } + + p = &qfec_pll_cfg_tbl[spd]; + + /* set the MAC speed bits */ + qfec_reg_write(priv, MAC_CONFIG_REG, + (qfec_reg_read(priv, MAC_CONFIG_REG) + & ~(MAC_CONFIG_REG_SPD | MAC_CONFIG_REG_DM)) + | p->spd | (dplx ? 
MAC_CONFIG_REG_DM : H_DPLX)); + + qfec_clkreg_write(priv, ETH_MD_REG, p->eth_md); + qfec_clkreg_write(priv, ETH_NS_REG, p->eth_ns); + + return 0; +} + +/* + * configure PTP divider for 25 MHz assuming EMAC PLL 250 MHz + */ + +static struct qfec_pll_cfg qfec_pll_ptp = { + /* 19.2 MHz tcxo */ + 0, 0, ETH_NS_PRE_DIV(0) + | EMAC_PTP_NS_ROOT_EN + | EMAC_PTP_NS_CLK_EN + | CLK_SRC_TCXO +}; + +#define PLLTEST_PAD_CFG 0x01E0 +#define PLLTEST_PLL_7 0x3700 + +#define CLKTEST_REG 0x01EC +#define CLKTEST_EMAC_RX 0x3fc07f7a + +static int qfec_ptp_cfg(struct qfec_priv *priv) +{ + struct qfec_pll_cfg *p = &qfec_pll_ptp; + + QFEC_LOG(QFEC_LOG_DBG2, "%s: %08x md, %08x ns\n", + __func__, p->eth_md, p->eth_ns); + + qfec_clkreg_write(priv, EMAC_PTP_MD_REG, p->eth_md); + qfec_clkreg_write(priv, EMAC_PTP_NS_REG, p->eth_ns); + + /* configure HS/LS clk test ports to verify clks */ + qfec_clkreg_write(priv, CLKTEST_REG, CLKTEST_EMAC_RX); + qfec_clkreg_write(priv, PLLTEST_PAD_CFG, PLLTEST_PLL_7); + + return 0; +} + +/* + * MDIO operations + */ + +/* + * wait reasonable amount of time for MDIO operation to complete, not busy + */ +static int qfec_mdio_busy(struct net_device *dev) +{ + int i; + + for (i = 100; i > 0; i--) { + if (!(qfec_reg_read( + netdev_priv(dev), GMII_ADR_REG) & GMII_ADR_REG_GB)) { + return 0; + } + udelay(1); + } + + return -ETIME; +} + +/* + * initiate either a read or write MDIO operation + */ + +static int qfec_mdio_oper(struct net_device *dev, int phy_id, int reg, int wr) +{ + struct qfec_priv *priv = netdev_priv(dev); + int res = 0; + + /* insure phy not busy */ + res = qfec_mdio_busy(dev); + if (res) { + QFEC_LOG_ERR("%s: busy\n", __func__); + goto done; + } + + /* initiate operation */ + qfec_reg_write(priv, GMII_ADR_REG, + GMII_ADR_REG_ADR_SET(phy_id) + | GMII_ADR_REG_REG_SET(reg) + | GMII_ADR_REG_CSR_SET(priv->mdio_clk) + | (wr ? 
GMII_ADR_REG_GW : 0) + | GMII_ADR_REG_GB); + + /* wait for operation to complete */ + res = qfec_mdio_busy(dev); + if (res) + QFEC_LOG_ERR("%s: timeout\n", __func__); + +done: + return res; +} + +/* + * read MDIO register + */ +static int qfec_mdio_read(struct net_device *dev, int phy_id, int reg) +{ + struct qfec_priv *priv = netdev_priv(dev); + int res = 0; + unsigned long flags; + + spin_lock_irqsave(&priv->mdio_lock, flags); + + res = qfec_mdio_oper(dev, phy_id, reg, 0); + if (res) { + QFEC_LOG_ERR("%s: oper\n", __func__); + goto done; + } + + res = qfec_reg_read(priv, GMII_DATA_REG); + QFEC_LOG(QFEC_LOG_MDIO_R, "%s: %2d reg, 0x%04x val\n", + __func__, reg, res); + +done: + spin_unlock_irqrestore(&priv->mdio_lock, flags); + return res; +} + +/* + * write MDIO register + */ +static void qfec_mdio_write(struct net_device *dev, int phy_id, int reg, + int val) +{ + struct qfec_priv *priv = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&priv->mdio_lock, flags); + + QFEC_LOG(QFEC_LOG_MDIO_W, "%s: %2d reg, %04x\n", + __func__, reg, val); + + qfec_reg_write(priv, GMII_DATA_REG, val); + + if (qfec_mdio_oper(dev, phy_id, reg, 1)) + QFEC_LOG_ERR("%s: oper\n", __func__); + + spin_unlock_irqrestore(&priv->mdio_lock, flags); +} + +/* + * MDIO show + */ +static int qfec_mdio_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct qfec_priv *priv = netdev_priv(to_net_dev(dev)); + int n; + int l = 0; + int count = PAGE_SIZE; + + QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__); + + for (n = 0; n < MAX_MDIO_REG; n++) { + if (!(n % 8)) + l += snprintf(&buf[l], count - l, "\n %02x: ", n); + + l += snprintf(&buf[l], count - l, " %04x", + qfec_mdio_read(to_net_dev(dev), priv->phy_id, n)); + } + l += snprintf(&buf[l], count - l, "\n"); + + return l; +} + +/* + * get auto-negotiation results + */ +#define QFEC_100 (LPA_100HALF | LPA_100FULL | LPA_100HALF) +#define QFEC_100_FD (LPA_100FULL | LPA_100BASE4) +#define QFEC_10 (LPA_10HALF | LPA_10FULL) 
+#define QFEC_10_FD LPA_10FULL + +static void qfec_get_an(struct net_device *dev, uint32_t *spd, uint32_t *dplx) +{ + struct qfec_priv *priv = netdev_priv(dev); + uint32_t advert = qfec_mdio_read(dev, priv->phy_id, MII_ADVERTISE); + uint32_t lpa = qfec_mdio_read(dev, priv->phy_id, MII_LPA); + uint32_t mastCtrl = qfec_mdio_read(dev, priv->phy_id, MII_CTRL1000); + uint32_t mastStat = qfec_mdio_read(dev, priv->phy_id, MII_STAT1000); + uint32_t anExp = qfec_mdio_read(dev, priv->phy_id, MII_EXPANSION); + uint32_t status = advert & lpa; + uint32_t flow; + + if (priv->mii.supports_gmii) { + if (((anExp & QFEC_MII_EXP_MASK) == QFEC_MII_EXP_MASK) + && (mastCtrl & ADVERTISE_1000FULL) + && (mastStat & LPA_1000FULL)) { + *spd = SPD_1000; + *dplx = F_DPLX; + goto pause; + } + + else if (((anExp & QFEC_MII_EXP_MASK) == QFEC_MII_EXP_MASK) + && (mastCtrl & ADVERTISE_1000HALF) + && (mastStat & LPA_1000HALF)) { + *spd = SPD_1000; + *dplx = H_DPLX; + goto pause; + } + } + + /* mii speeds */ + if (status & QFEC_100) { + *spd = SPD_100; + *dplx = status & QFEC_100_FD ? F_DPLX : H_DPLX; + } + + else if (status & QFEC_10) { + *spd = SPD_10; + *dplx = status & QFEC_10_FD ? 
F_DPLX : H_DPLX; + } + + /* check pause */ +pause: + flow = qfec_reg_read(priv, FLOW_CONTROL_REG); + flow &= ~(FLOW_CONTROL_TFE | FLOW_CONTROL_RFE); + + if (status & ADVERTISE_PAUSE_CAP) { + flow |= FLOW_CONTROL_RFE | FLOW_CONTROL_TFE; + } else if (status & ADVERTISE_PAUSE_ASYM) { + if (lpa & ADVERTISE_PAUSE_CAP) + flow |= FLOW_CONTROL_TFE; + else if (advert & ADVERTISE_PAUSE_CAP) + flow |= FLOW_CONTROL_RFE; + } + + qfec_reg_write(priv, FLOW_CONTROL_REG, flow); +} + +/* + * monitor phy status, and process auto-neg results when changed + */ + +static void qfec_phy_monitor(unsigned long data) +{ + struct net_device *dev = (struct net_device *) data; + struct qfec_priv *priv = netdev_priv(dev); + unsigned int spd = H_DPLX; + unsigned int dplx = F_DPLX; + + mod_timer(&priv->phy_tmr, jiffies + HZ); + + if (mii_link_ok(&priv->mii) && !netif_carrier_ok(priv->net_dev)) { + qfec_get_an(dev, &spd, &dplx); + qfec_speed_cfg(dev, spd, dplx); + QFEC_LOG(QFEC_LOG_DBG, "%s: link up, %d spd, %d dplx\n", + __func__, spd, dplx); + + netif_carrier_on(dev); + } + + else if (!mii_link_ok(&priv->mii) && netif_carrier_ok(priv->net_dev)) { + QFEC_LOG(QFEC_LOG_DBG, "%s: link down\n", __func__); + netif_carrier_off(dev); + } +} + +/* + * dealloc buffer descriptor memory + */ + +static void qfec_mem_dealloc(struct net_device *dev) +{ + struct qfec_priv *priv = netdev_priv(dev); + + dma_free_coherent(&dev->dev, + priv->bd_size, priv->bd_base, priv->tbd_dma); + priv->bd_base = 0; +} + +/* + * allocate shared device memory for TX/RX buf-desc (and buffers) + */ + +static int qfec_mem_alloc(struct net_device *dev) +{ + struct qfec_priv *priv = netdev_priv(dev); + + QFEC_LOG(QFEC_LOG_DBG, "%s: %p dev\n", __func__, dev); + + priv->bd_size = + (priv->n_tbd + priv->n_rbd) * sizeof(struct qfec_buf_desc); + + priv->p_tbd = kcalloc(priv->n_tbd, sizeof(struct buf_desc), GFP_KERNEL); + if (!priv->p_tbd) { + QFEC_LOG_ERR("%s: kcalloc failed p_tbd\n", __func__); + return -ENOMEM; + } + + priv->p_rbd = 
kcalloc(priv->n_rbd, sizeof(struct buf_desc), GFP_KERNEL);
+	if (!priv->p_rbd) {
+		QFEC_LOG_ERR("%s: kcalloc failed p_rbd\n", __func__);
+		/* release the TX shadow ring allocated just above so a
+		 * failed open does not leak it */
+		kfree(priv->p_tbd);
+		priv->p_tbd = NULL;
+		return -ENOMEM;
+	}
+
+	/* alloc mem for buf-desc, if not already alloc'd */
+	if (!priv->bd_base) {
+		priv->bd_base = dma_alloc_coherent(&dev->dev,
+					priv->bd_size, &priv->tbd_dma,
+					GFP_KERNEL | __GFP_DMA);
+	}
+
+	if (!priv->bd_base) {
+		QFEC_LOG_ERR("%s: dma_alloc_coherent failed\n", __func__);
+		/* undo both shadow-ring allocations on DMA failure */
+		kfree(priv->p_rbd);
+		priv->p_rbd = NULL;
+		kfree(priv->p_tbd);
+		priv->p_tbd = NULL;
+		return -ENOMEM;
+	}
+
+	/* RX descriptors follow the TX descriptors in the same
+	 * coherent allocation */
+	priv->rbd_dma = priv->tbd_dma
+			+ (priv->n_tbd * sizeof(struct qfec_buf_desc));
+
+	QFEC_LOG(QFEC_LOG_DBG,
+		" %s: 0x%08x size, %d n_tbd, %d n_rbd\n",
+		__func__, priv->bd_size, priv->n_tbd, priv->n_rbd);
+
+	return 0;
+}
+
+/*
+ * display buffer descriptors
+ */
+
+/* format one BD (status/ctl/pbuf/next plus shadow fields) into buf */
+static int qfec_bd_fmt(char *buf, int size, struct buf_desc *p_bd)
+{
+	return snprintf(buf, size,
+		"%8p: %08x %08x %8p %8p %8p %8p %8p %x",
+		p_bd, qfec_bd_status_get(p_bd),
+		qfec_bd_ctl_get(p_bd), qfec_bd_pbuf_get(p_bd),
+		qfec_bd_next_get(p_bd), qfec_bd_skbuf_get(p_bd),
+		qfec_bd_virt_get(p_bd), qfec_bd_phys_get(p_bd),
+		qfec_bd_last_bd(p_bd));
+}
+
+/* dump up to MAX_N_BD descriptors of a ring, marking head/tail */
+static int qfec_bd_show(char *buf, int count, struct buf_desc *p_bd, int n_bd,
+	struct ring *p_ring, char *label)
+{
+	int l = 0;
+	int n;
+
+	QFEC_LOG(QFEC_LOG_DBG2, "%s: %s\n", __func__, label);
+
+	l += snprintf(&buf[l], count, "%s: %s\n", __func__, label);
+	if (!p_bd)
+		return l;
+
+	n_bd = n_bd > MAX_N_BD ? MAX_N_BD : n_bd;
+
+	for (n = 0; n < n_bd; n++, p_bd++) {
+		l += qfec_bd_fmt(&buf[l], count - l, p_bd);
+		l += snprintf(&buf[l], count - l, "%s%s\n",
+			(qfec_ring_head(p_ring) == n ? " < h" : ""),
+			(qfec_ring_tail(p_ring) == n ?
" < t" : "")); + } + + return l; +} + +/* + * display TX BDs + */ +static int qfec_bd_tx_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct qfec_priv *priv = netdev_priv(to_net_dev(dev)); + int count = PAGE_SIZE; + + return qfec_bd_show(buf, count, priv->p_tbd, priv->n_tbd, + &priv->ring_tbd, "TX"); +} + +/* + * display RX BDs + */ +static int qfec_bd_rx_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct qfec_priv *priv = netdev_priv(to_net_dev(dev)); + int count = PAGE_SIZE; + + return qfec_bd_show(buf, count, priv->p_rbd, priv->n_rbd, + &priv->ring_rbd, "RX"); +} + +/* + * process timestamp values + * The pbuf and next fields of the buffer descriptors are overwritten + * with the timestamp high and low register values. + * + * The low register is incremented by the value in the subsec_increment + * register and overflows at 0x8000 0000 causing the high register to + * increment. + * + * The subsec_increment register is recommended to be set to the number + * of nanosec corresponding to each clock tic, scaled by 2^31 / 10^9 + * (e.g. 40 * 2^32 / 10^9 = 85.9, or 86 for 25 MHz). However, the + * rounding error in this case will result in a 1 sec error / ~14 mins. + * + * An alternate approach is used. The subsec_increment is set to 1, + * and the concatenation of the 2 timestamp registers used to count + * clock tics. The 63-bit result is manipulated to determine the number + * of sec and ns. 
+ */ + +/* + * convert 19.2 MHz clock tics into sec/ns + */ +#define TS_LOW_REG_BITS 31 + +#define MILLION 1000000UL +#define BILLION 1000000000UL + +#define F_CLK 19200000UL +#define F_CLK_PRE_SC 24 +#define F_CLK_INV_Q 56 +#define F_CLK_INV (((unsigned long long)1 << F_CLK_INV_Q) / F_CLK) +#define F_CLK_TO_NS_Q 25 +#define F_CLK_TO_NS \ + (((((unsigned long long)1<> F_CLK_PRE_SC; + t *= F_CLK_INV; + t >>= F_CLK_INV_Q - F_CLK_PRE_SC; + *sec = t; + + t = *cnt - (t * F_CLK); + subsec = t; + + if (subsec >= F_CLK) { + subsec -= F_CLK; + *sec += 1; + } + + subsec *= F_CLK_TO_NS; + subsec >>= F_CLK_TO_NS_Q; + *ns = subsec; +} + +/* + * read ethernet timestamp registers, pass up raw register values + * and values converted to sec/ns + */ +static void qfec_read_timestamp(struct buf_desc *p_bd, + struct skb_shared_hwtstamps *ts) +{ + unsigned long long cnt; + unsigned int sec; + unsigned int subsec; + + cnt = (unsigned long)qfec_bd_next_get(p_bd); + cnt <<= TS_LOW_REG_BITS; + cnt |= (unsigned long)qfec_bd_pbuf_get(p_bd); + + /* report raw counts as concatenated 63 bits */ + sec = cnt >> 32; + subsec = cnt & 0xffffffff; + + ts->hwtstamp = ktime_set(sec, subsec); + + /* translate counts to sec and ns */ + qfec_get_sec(&cnt, &sec, &subsec); + + ts->syststamp = ktime_set(sec, subsec); +} + +/* + * capture the current system time in the timestamp registers + */ +static int qfec_cmd(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct qfec_priv *priv = netdev_priv(to_net_dev(dev)); + struct timeval tv; + + if (!strncmp(buf, "setTs", 5)) { + unsigned long long cnt; + uint32_t ts_hi; + uint32_t ts_lo; + unsigned long long subsec; + + do_gettimeofday(&tv); + + /* convert raw sec/usec to ns */ + subsec = tv.tv_usec; + subsec *= US_TO_F_CLK; + subsec >>= US_TO_F_CLK_Q; + + cnt = tv.tv_sec; + cnt *= F_CLK; + cnt += subsec; + + ts_hi = cnt >> 31; + ts_lo = cnt & 0x7FFFFFFF; + + qfec_reg_write(priv, TS_HI_UPDT_REG, ts_hi); + 
qfec_reg_write(priv, TS_LO_UPDT_REG, ts_lo); + + qfec_reg_write(priv, TS_CTL_REG, + qfec_reg_read(priv, TS_CTL_REG) | TS_CTL_TSINIT); + } else + pr_err("%s: unknown cmd, %s.\n", __func__, buf); + + return strnlen(buf, count); +} + +/* + * display ethernet tstamp and system time + */ +static int qfec_tstamp_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct qfec_priv *priv = netdev_priv(to_net_dev(dev)); + int count = PAGE_SIZE; + int l; + struct timeval tv; + unsigned long long cnt; + uint32_t sec; + uint32_t ns; + uint32_t ts_hi; + uint32_t ts_lo; + + /* insure that ts_hi didn't increment during read */ + do { + ts_hi = qfec_reg_read(priv, TS_HIGH_REG); + ts_lo = qfec_reg_read(priv, TS_LOW_REG); + } while (ts_hi != qfec_reg_read(priv, TS_HIGH_REG)); + + cnt = ts_hi; + cnt <<= TS_LOW_REG_BITS; + cnt |= ts_lo; + + do_gettimeofday(&tv); + + ts_hi = cnt >> 32; + ts_lo = cnt & 0xffffffff; + + qfec_get_sec(&cnt, &sec, &ns); + + l = snprintf(buf, count, + "%12u.%09u sec 0x%08x 0x%08x tstamp %12u.%06u time-of-day\n", + sec, ns, ts_hi, ts_lo, (int)tv.tv_sec, (int)tv.tv_usec); + + return l; +} + +/* + * free transmitted skbufs from buffer-descriptor no owned by HW + */ +static int qfec_tx_replenish(struct net_device *dev) +{ + struct qfec_priv *priv = netdev_priv(dev); + struct ring *p_ring = &priv->ring_tbd; + struct buf_desc *p_bd = &priv->p_tbd[qfec_ring_tail(p_ring)]; + struct sk_buff *skb; + unsigned long flags; + + CNTR_INC(priv, tx_replenish); + + spin_lock_irqsave(&priv->xmit_lock, flags); + + while (!qfec_ring_empty(p_ring)) { + if (qfec_bd_own(p_bd)) + break; /* done for now */ + + skb = qfec_bd_skbuf_get(p_bd); + if (unlikely(skb == NULL)) { + QFEC_LOG_ERR("%s: null sk_buff\n", __func__); + CNTR_INC(priv, tx_skb_null); + break; + } + + qfec_reg_write(priv, STATUS_REG, + STATUS_REG_TU | STATUS_REG_TI); + + /* retrieve timestamp if requested */ + if (qfec_bd_status_get(p_bd) & BUF_TX_TTSS) { + CNTR_INC(priv, ts_tx_rtn); + 
qfec_read_timestamp(p_bd, skb_hwtstamps(skb)); + skb_tstamp_tx(skb, skb_hwtstamps(skb)); + } + + /* update statistics before freeing skb */ + priv->stats.tx_packets++; + priv->stats.tx_bytes += skb->len; + + dma_unmap_single(&dev->dev, (dma_addr_t) qfec_bd_pbuf_get(p_bd), + skb->len, DMA_TO_DEVICE); + + dev_kfree_skb_any(skb); + qfec_bd_skbuf_set(p_bd, NULL); + + qfec_ring_tail_adv(p_ring); + p_bd = &priv->p_tbd[qfec_ring_tail(p_ring)]; + } + + spin_unlock_irqrestore(&priv->xmit_lock, flags); + + qfec_queue_start(dev); + + return 0; +} + +/* + * clear ownership bits of all TX buf-desc and release the sk-bufs + */ +static void qfec_tx_timeout(struct net_device *dev) +{ + struct qfec_priv *priv = netdev_priv(dev); + struct buf_desc *bd = priv->p_tbd; + int n; + + QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__); + CNTR_INC(priv, tx_timeout); + + for (n = 0; n < priv->n_tbd; n++, bd++) + qfec_bd_own_clr(bd); + + qfec_tx_replenish(dev); +} + +/* + * rx() - process a received frame + */ +static void qfec_rx_int(struct net_device *dev) +{ + struct qfec_priv *priv = netdev_priv(dev); + struct ring *p_ring = &priv->ring_rbd; + struct buf_desc *p_bd = priv->p_latest_rbd; + uint32_t desc_status; + uint32_t mis_fr_reg; + + desc_status = qfec_bd_status_get(p_bd); + mis_fr_reg = qfec_reg_read(priv, MIS_FR_REG); + + CNTR_INC(priv, rx_int); + + /* check that valid interrupt occurred */ + if (unlikely(desc_status & BUF_OWN)) + return; + + /* accumulate missed-frame count (reg reset when read) */ + priv->stats.rx_missed_errors += mis_fr_reg + & MIS_FR_REG_MISS_CNT; + + /* process all unowned frames */ + while (!(desc_status & BUF_OWN) && (!qfec_ring_full(p_ring))) { + struct sk_buff *skb; + struct buf_desc *p_bd_next; + + skb = qfec_bd_skbuf_get(p_bd); + + if (unlikely(skb == NULL)) { + QFEC_LOG_ERR("%s: null sk_buff\n", __func__); + CNTR_INC(priv, rx_skb_null); + break; + } + + /* cache coherency before skb->data is accessed */ + dma_unmap_single(&dev->dev, + (dma_addr_t) 
qfec_bd_phys_get(p_bd), + ETH_BUF_SIZE, DMA_FROM_DEVICE); + prefetch(skb->data); + + if (unlikely(desc_status & BUF_RX_ES)) { + priv->stats.rx_dropped++; + CNTR_INC(priv, rx_dropped); + dev_kfree_skb(skb); + } else { + qfec_reg_write(priv, STATUS_REG, STATUS_REG_RI); + + skb->len = BUF_RX_FL_GET_FROM_STATUS(desc_status); + + if (priv->state & timestamping) { + CNTR_INC(priv, ts_rec); + qfec_read_timestamp(p_bd, skb_hwtstamps(skb)); + } + + /* update statistics before freeing skb */ + priv->stats.rx_packets++; + priv->stats.rx_bytes += skb->len; + + skb->dev = dev; + skb->protocol = eth_type_trans(skb, dev); + skb->ip_summed = CHECKSUM_UNNECESSARY; + + if (NET_RX_DROP == netif_rx(skb)) { + priv->stats.rx_dropped++; + CNTR_INC(priv, rx_dropped); + } + CNTR_INC(priv, netif_rx_cntr); + } + + if (p_bd != priv->p_ending_rbd) + p_bd_next = p_bd + 1; + else + p_bd_next = priv->p_rbd; + desc_status = qfec_bd_status_get(p_bd_next); + + qfec_bd_skbuf_set(p_bd, NULL); + + qfec_ring_head_adv(p_ring); + p_bd = p_bd_next; + } + + priv->p_latest_rbd = p_bd; + + /* replenish bufs */ + while (!qfec_ring_empty(p_ring)) { + if (qfec_rbd_init(dev, &priv->p_rbd[qfec_ring_tail(p_ring)])) + break; + qfec_ring_tail_adv(p_ring); + } + + qfec_reg_write(priv, STATUS_REG, STATUS_REG_RI); +} + +/* + * isr() - interrupt service routine + * determine cause of interrupt and invoke/schedule appropriate + * processing or error handling + */ +#define ISR_ERR_CHK(priv, status, interrupt, cntr) \ + if (status & interrupt) \ + CNTR_INC(priv, cntr) + +static irqreturn_t qfec_int(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct qfec_priv *priv = netdev_priv(dev); + uint32_t status = qfec_reg_read(priv, STATUS_REG); + uint32_t int_bits = STATUS_REG_NIS | STATUS_REG_AIS; + + QFEC_LOG(QFEC_LOG_DBG2, "%s: %s\n", __func__, dev->name); + + /* abnormal interrupt */ + if (status & STATUS_REG_AIS) { + QFEC_LOG(QFEC_LOG_DBG, "%s: abnormal status 0x%08x\n", + __func__, status); + + 
ISR_ERR_CHK(priv, status, STATUS_REG_RU, rx_buf_unavail); + ISR_ERR_CHK(priv, status, STATUS_REG_FBI, fatal_bus); + + ISR_ERR_CHK(priv, status, STATUS_REG_RWT, rx_watchdog); + ISR_ERR_CHK(priv, status, STATUS_REG_RPS, rx_proc_stopped); + ISR_ERR_CHK(priv, status, STATUS_REG_UNF, tx_underflow); + + ISR_ERR_CHK(priv, status, STATUS_REG_OVF, rx_overflow); + ISR_ERR_CHK(priv, status, STATUS_REG_TJT, tx_jabber_tmout); + ISR_ERR_CHK(priv, status, STATUS_REG_TPS, tx_proc_stopped); + + int_bits |= STATUS_REG_AIS_BITS; + CNTR_INC(priv, abnorm_int); + } + + if (status & STATUS_REG_NIS) + CNTR_INC(priv, norm_int); + + /* receive interrupt */ + if (status & STATUS_REG_RI) { + CNTR_INC(priv, rx_isr); + qfec_rx_int(dev); + } + + /* transmit interrupt */ + if (status & STATUS_REG_TI) { + CNTR_INC(priv, tx_isr); + qfec_tx_replenish(dev); + } + + /* gmac interrupt */ + if (status & (STATUS_REG_GPI | STATUS_REG_GMI | STATUS_REG_GLI)) { + status &= ~(STATUS_REG_GPI | STATUS_REG_GMI | STATUS_REG_GLI); + CNTR_INC(priv, gmac_isr); + int_bits |= STATUS_REG_GPI | STATUS_REG_GMI | STATUS_REG_GLI; + qfec_reg_read(priv, SG_RG_SMII_STATUS_REG); + } + + /* clear interrupts */ + qfec_reg_write(priv, STATUS_REG, int_bits); + CNTR_INC(priv, isr); + + return IRQ_HANDLED; +} + +/* + * open () - register system resources (IRQ, DMA, ...) + * turn on HW, perform device setup. 
+ */ +static int qfec_open(struct net_device *dev) +{ + struct qfec_priv *priv = netdev_priv(dev); + struct buf_desc *p_bd; + struct ring *p_ring; + struct qfec_buf_desc *p_desc; + int n; + int res = 0; + + QFEC_LOG(QFEC_LOG_DBG, "%s: %p dev\n", __func__, dev); + + if (!dev) { + res = -EINVAL; + goto err; + } + + /* allocate TX/RX buffer-descriptors and buffers */ + + res = qfec_mem_alloc(dev); + if (res) + goto err; + + /* initialize TX */ + p_desc = priv->bd_base; + + for (n = 0, p_bd = priv->p_tbd; n < priv->n_tbd; n++, p_bd++) { + p_bd->p_desc = p_desc++; + + if (n == (priv->n_tbd - 1)) + qfec_bd_last_bd_set(p_bd); + + qfec_bd_own_clr(p_bd); /* clear ownership */ + } + + qfec_ring_init(&priv->ring_tbd, priv->n_tbd, priv->n_tbd); + + priv->tx_ic_mod = priv->n_tbd / TX_BD_TI_RATIO; + if (priv->tx_ic_mod == 0) + priv->tx_ic_mod = 1; + + /* initialize RX buffer descriptors and allocate sk_bufs */ + p_ring = &priv->ring_rbd; + qfec_ring_init(p_ring, priv->n_rbd, 0); + qfec_bd_last_bd_set(&priv->p_rbd[priv->n_rbd - 1]); + + for (n = 0, p_bd = priv->p_rbd; n < priv->n_rbd; n++, p_bd++) { + p_bd->p_desc = p_desc++; + + if (qfec_rbd_init(dev, p_bd)) + break; + qfec_ring_tail_adv(p_ring); + } + + priv->p_latest_rbd = priv->p_rbd; + priv->p_ending_rbd = priv->p_rbd + priv->n_rbd - 1; + + /* config ptp clock */ + qfec_ptp_cfg(priv); + + /* configure PHY - must be set before reset/hw_init */ + priv->mii.supports_gmii = mii_check_gmii_support(&priv->mii); + if (priv->mii.supports_gmii) { + QFEC_LOG_ERR("%s: RGMII\n", __func__); + qfec_intf_sel(priv, INTFC_RGMII); + } else { + QFEC_LOG_ERR("%s: MII\n", __func__); + qfec_intf_sel(priv, INTFC_MII); + } + + /* initialize controller after BDs allocated */ + res = qfec_hw_init(priv); + if (res) + goto err1; + + /* get/set (primary) MAC address */ + qfec_set_adr_regs(priv, dev->dev_addr); + qfec_set_rx_mode(dev); + + /* start phy monitor */ + QFEC_LOG(QFEC_LOG_DBG, " %s: start timer\n", __func__); + 
netif_carrier_off(priv->net_dev); + setup_timer(&priv->phy_tmr, qfec_phy_monitor, (unsigned long)dev); + mod_timer(&priv->phy_tmr, jiffies + HZ); + + /* driver supports AN capable PHY only */ + qfec_mdio_write(dev, priv->phy_id, MII_BMCR, BMCR_RESET); + res = (BMCR_ANENABLE|BMCR_ANRESTART); + qfec_mdio_write(dev, priv->phy_id, MII_BMCR, res); + + /* initialize interrupts */ + QFEC_LOG(QFEC_LOG_DBG, " %s: request irq %d\n", __func__, dev->irq); + res = request_irq(dev->irq, qfec_int, 0, dev->name, dev); + if (res) + goto err1; + + /* enable controller */ + qfec_hw_enable(priv); + netif_start_queue(dev); + + QFEC_LOG(QFEC_LOG_DBG, "%s: %08x link, %08x carrier\n", __func__, + mii_link_ok(&priv->mii), netif_carrier_ok(priv->net_dev)); + + QFEC_LOG(QFEC_LOG_DBG, " %s: done\n", __func__); + return 0; + +err1: + qfec_mem_dealloc(dev); +err: + QFEC_LOG_ERR("%s: error - %d\n", __func__, res); + return res; +} + +/* + * stop() - "reverse operations performed at open time" + */ +static int qfec_stop(struct net_device *dev) +{ + struct qfec_priv *priv = netdev_priv(dev); + struct buf_desc *p_bd; + struct sk_buff *skb; + int n; + + QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__); + + del_timer_sync(&priv->phy_tmr); + + qfec_hw_disable(priv); + qfec_queue_stop(dev); + free_irq(dev->irq, dev); + + /* free all pending sk_bufs */ + for (n = priv->n_rbd, p_bd = priv->p_rbd; n > 0; n--, p_bd++) { + skb = qfec_bd_skbuf_get(p_bd); + if (skb) + dev_kfree_skb(skb); + } + + for (n = priv->n_tbd, p_bd = priv->p_tbd; n > 0; n--, p_bd++) { + skb = qfec_bd_skbuf_get(p_bd); + if (skb) + dev_kfree_skb(skb); + } + + qfec_mem_dealloc(dev); + + QFEC_LOG(QFEC_LOG_DBG, " %s: done\n", __func__); + + return 0; +} + +static int qfec_set_config(struct net_device *dev, struct ifmap *map) +{ + QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__); + return 0; +} + +/* + * pass data from skbuf to buf-desc + */ +static int qfec_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct qfec_priv *priv = netdev_priv(dev); 
+ struct ring *p_ring = &priv->ring_tbd; + struct buf_desc *p_bd; + uint32_t ctrl = 0; + int ret = NETDEV_TX_OK; + unsigned long flags; + + CNTR_INC(priv, xmit); + + spin_lock_irqsave(&priv->xmit_lock, flags); + + /* If there is no room, on the ring try to free some up */ + if (qfec_ring_room(p_ring) == 0) + qfec_tx_replenish(dev); + + /* stop queuing if no resources available */ + if (qfec_ring_room(p_ring) == 0) { + qfec_queue_stop(dev); + CNTR_INC(priv, tx_no_resource); + + ret = NETDEV_TX_BUSY; + goto done; + } + + /* locate and save *sk_buff */ + p_bd = &priv->p_tbd[qfec_ring_head(p_ring)]; + qfec_bd_skbuf_set(p_bd, skb); + + /* set DMA ptr to sk_buff data and write cache to memory */ + qfec_bd_pbuf_set(p_bd, (void *) + dma_map_single(&dev->dev, + (void *)skb->data, skb->len, DMA_TO_DEVICE)); + + ctrl = skb->len; + if (!(qfec_ring_head(p_ring) % priv->tx_ic_mod)) + ctrl |= BUF_TX_IC; /* interrupt on complete */ + + /* check if timestamping enabled and requested */ + if (priv->state & timestamping) { + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { + CNTR_INC(priv, ts_tx_en); + ctrl |= BUF_TX_IC; /* interrupt on complete */ + ctrl |= BUF_TX_TTSE; /* enable timestamp */ + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + } + } + + if (qfec_bd_last_bd(p_bd)) + ctrl |= BUF_RX_RER; + + /* no gather, no multi buf frames */ + ctrl |= BUF_TX_FS | BUF_TX_LS; /* 1st and last segment */ + + qfec_bd_ctl_wr(p_bd, ctrl); + qfec_bd_status_set(p_bd, BUF_OWN); + + qfec_ring_head_adv(p_ring); + qfec_reg_write(priv, TX_POLL_DEM_REG, 1); /* poll */ + +done: + spin_unlock_irqrestore(&priv->xmit_lock, flags); + + return ret; +} + +static int qfec_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct qfec_priv *priv = netdev_priv(dev); + struct hwtstamp_config *cfg = (struct hwtstamp_config *) ifr; + + QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__); + + if (cmd == SIOCSHWTSTAMP) { + CNTR_INC(priv, ts_ioctl); + QFEC_LOG(QFEC_LOG_DBG, + "%s: SIOCSHWTSTAMP - %x flags 
%x tx %x rx\n", + __func__, cfg->flags, cfg->tx_type, cfg->rx_filter); + + cfg->flags = 0; + cfg->tx_type = HWTSTAMP_TX_ON; + cfg->rx_filter = HWTSTAMP_FILTER_ALL; + + priv->state |= timestamping; + qfec_reg_write(priv, TS_CTL_REG, + qfec_reg_read(priv, TS_CTL_REG) | TS_CTL_TSENALL); + + return 0; + } + + return generic_mii_ioctl(&priv->mii, if_mii(ifr), cmd, NULL); +} + +static struct net_device_stats *qfec_get_stats(struct net_device *dev) +{ + struct qfec_priv *priv = netdev_priv(dev); + + QFEC_LOG(QFEC_LOG_DBG2, "qfec_stats:\n"); + + priv->stats.multicast = qfec_reg_read(priv, NUM_MULTCST_FRM_RCVD_G); + + return &priv->stats; +} + +/* + * accept new mac address + */ +static int qfec_set_mac_address(struct net_device *dev, void *p) +{ + struct qfec_priv *priv = netdev_priv(dev); + struct sockaddr *addr = p; + + QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__); + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + qfec_set_adr_regs(priv, dev->dev_addr); + + return 0; +} + +/* + * read discontinuous MAC address from corrected fuse memory region + */ + +static int qfec_get_mac_address(char *buf, char *mac_base, int nBytes) +{ + static int offset[] = { 0, 1, 2, 3, 4, 8 }; + int n; + + QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__); + + for (n = 0; n < nBytes; n++) + buf[n] = ioread8(mac_base + offset[n]); + + /* check that MAC programmed */ + if ((buf[0] + buf[1] + buf[2] + buf[3] + buf[4] + buf[5]) == 0) { + QFEC_LOG_ERR("%s: null MAC address\n", __func__); + return -ENODATA; + } + + return 0; +} + +/* + * static definition of driver functions + */ +static const struct net_device_ops qfec_netdev_ops = { + .ndo_open = qfec_open, + .ndo_stop = qfec_stop, + .ndo_start_xmit = qfec_xmit, + + .ndo_do_ioctl = qfec_do_ioctl, + .ndo_tx_timeout = qfec_tx_timeout, + .ndo_set_mac_address = qfec_set_mac_address, + .ndo_set_rx_mode = qfec_set_rx_mode, + + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + + .ndo_get_stats = qfec_get_stats, + .ndo_set_config 
= qfec_set_config, +}; + +/* + * ethtool functions + */ + +static int qfec_nway_reset(struct net_device *dev) +{ + struct qfec_priv *priv = netdev_priv(dev); + return mii_nway_restart(&priv->mii); +} + +/* + * speed, duplex, auto-neg settings + */ +static void qfec_ethtool_getpauseparam(struct net_device *dev, + struct ethtool_pauseparam *pp) +{ + struct qfec_priv *priv = netdev_priv(dev); + u32 flow = qfec_reg_read(priv, FLOW_CONTROL_REG); + u32 advert; + + QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__); + + /* report current settings */ + pp->tx_pause = (flow & FLOW_CONTROL_TFE) != 0; + pp->rx_pause = (flow & FLOW_CONTROL_RFE) != 0; + + /* report if pause is being advertised */ + advert = qfec_mdio_read(dev, priv->phy_id, MII_ADVERTISE); + pp->autoneg = + (advert & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0; +} + +static int qfec_ethtool_setpauseparam(struct net_device *dev, + struct ethtool_pauseparam *pp) +{ + struct qfec_priv *priv = netdev_priv(dev); + u32 advert; + + QFEC_LOG(QFEC_LOG_DBG, "%s: %d aneg, %d rx, %d tx\n", __func__, + pp->autoneg, pp->rx_pause, pp->tx_pause); + + advert = qfec_mdio_read(dev, priv->phy_id, MII_ADVERTISE); + advert &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + + /* If pause autonegotiation is enabled, but both rx and tx are not + * because neither was specified in the ethtool cmd, + * enable both symetrical and asymetrical pause. + * otherwise, only enable the pause mode indicated by rx/tx. 
+ */ + if (pp->autoneg) { + if (pp->rx_pause) + advert |= ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP; + else if (pp->tx_pause) + advert |= ADVERTISE_PAUSE_ASYM; + else + advert |= ADVERTISE_PAUSE_CAP; + } + + qfec_mdio_write(dev, priv->phy_id, MII_ADVERTISE, advert); + + return 0; +} + +/* + * ethtool ring parameter (-g/G) support + */ + +/* + * setringparamam - change the tx/rx ring lengths + */ +#define MIN_RING_SIZE 3 +#define MAX_RING_SIZE 1000 +static int qfec_ethtool_setringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct qfec_priv *priv = netdev_priv(dev); + u32 timeout = 20; + + /* notify stack the link is down */ + netif_carrier_off(dev); + + /* allow tx to complete & free skbufs on the tx ring */ + do { + usleep_range(10000, 100000); + qfec_tx_replenish(dev); + + if (timeout-- == 0) { + QFEC_LOG_ERR("%s: timeout\n", __func__); + return -ETIME; + } + } while (!qfec_ring_empty(&priv->ring_tbd)); + + + qfec_stop(dev); + + /* set tx ring size */ + if (ring->tx_pending < MIN_RING_SIZE) + ring->tx_pending = MIN_RING_SIZE; + else if (ring->tx_pending > MAX_RING_SIZE) + ring->tx_pending = MAX_RING_SIZE; + priv->n_tbd = ring->tx_pending; + + /* set rx ring size */ + if (ring->rx_pending < MIN_RING_SIZE) + ring->rx_pending = MIN_RING_SIZE; + else if (ring->rx_pending > MAX_RING_SIZE) + ring->rx_pending = MAX_RING_SIZE; + priv->n_rbd = ring->rx_pending; + + + qfec_open(dev); + + return 0; +} + +/* + * getringparamam - returns local values + */ +static void qfec_ethtool_getringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct qfec_priv *priv = netdev_priv(dev); + + QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__); + + ring->rx_max_pending = MAX_RING_SIZE; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->tx_max_pending = MAX_RING_SIZE; + + ring->rx_pending = priv->n_rbd; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; + ring->tx_pending = priv->n_tbd; +} + +/* + * speed, duplex, 
auto-neg settings + */ +static int +qfec_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct qfec_priv *priv = netdev_priv(dev); + + QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__); + + cmd->maxrxpkt = priv->n_rbd; + cmd->maxtxpkt = priv->n_tbd; + + return mii_ethtool_gset(&priv->mii, cmd); +} + +static int +qfec_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct qfec_priv *priv = netdev_priv(dev); + + QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__); + + return mii_ethtool_sset(&priv->mii, cmd); +} + +/* + * msg/debug level + */ +static u32 qfec_ethtool_getmsglevel(struct net_device *dev) +{ + return qfec_debug; +} + +static void qfec_ethtool_setmsglevel(struct net_device *dev, u32 level) +{ + qfec_debug ^= level; /* toggle on/off */ +} + +/* + * register dump + */ +#define DMA_DMP_OFFSET 0x0000 +#define DMA_REG_OFFSET 0x1000 +#define DMA_REG_LEN 23 + +#define MAC_DMP_OFFSET 0x0080 +#define MAC_REG_OFFSET 0x0000 +#define MAC_REG_LEN 55 + +#define TS_DMP_OFFSET 0x0180 +#define TS_REG_OFFSET 0x0700 +#define TS_REG_LEN 15 + +#define MDIO_DMP_OFFSET 0x0200 +#define MDIO_REG_LEN 16 + +#define REG_SIZE (MDIO_DMP_OFFSET + (MDIO_REG_LEN * sizeof(short))) + +static int qfec_ethtool_getregs_len(struct net_device *dev) +{ + return REG_SIZE; +} + +static void +qfec_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs, + void *buf) +{ + struct qfec_priv *priv = netdev_priv(dev); + u32 *data = buf; + u16 *data16; + unsigned int i; + unsigned int j; + unsigned int n; + + memset(buf, 0, REG_SIZE); + + j = DMA_DMP_OFFSET / sizeof(u32); + for (i = DMA_REG_OFFSET, n = DMA_REG_LEN; n--; i += sizeof(u32)) + data[j++] = htonl(qfec_reg_read(priv, i)); + + j = MAC_DMP_OFFSET / sizeof(u32); + for (i = MAC_REG_OFFSET, n = MAC_REG_LEN; n--; i += sizeof(u32)) + data[j++] = htonl(qfec_reg_read(priv, i)); + + j = TS_DMP_OFFSET / sizeof(u32); + for (i = TS_REG_OFFSET, n = TS_REG_LEN; n--; i += sizeof(u32)) + data[j++] = 
htonl(qfec_reg_read(priv, i)); + + data16 = (u16 *)&data[MDIO_DMP_OFFSET / sizeof(u32)]; + for (i = 0, n = 0; i < MDIO_REG_LEN; i++) + data16[n++] = htons(qfec_mdio_read(dev, 0, i)); + + regs->len = REG_SIZE; + + QFEC_LOG(QFEC_LOG_DBG, "%s: %d bytes\n", __func__, regs->len); +} + +/* + * statistics + * return counts of various ethernet activity. + * many of these are same as in struct net_device_stats + * + * missed-frames indicates the number of attempts made by the ethernet + * controller to write to a buffer-descriptor when the BD ownership + * bit was not set. The rxfifooverflow counter (0x1D4) is not + * available. The Missed Frame and Buffer Overflow Counter register + * (0x1020) is used, but has only 16-bits and is reset when read. + * It is read and updates the value in priv->stats.rx_missed_errors + * in qfec_rx_int(). + */ +static char qfec_stats_strings[][ETH_GSTRING_LEN] = { + "TX good/bad Bytes ", + "TX Bytes ", + "TX good/bad Frames ", + "TX Bcast Frames ", + "TX Mcast Frames ", + "TX Unicast Frames ", + "TX Pause Frames ", + "TX Vlan Frames ", + "TX Frames 64 ", + "TX Frames 65-127 ", + "TX Frames 128-255 ", + "TX Frames 256-511 ", + "TX Frames 512-1023 ", + "TX Frames 1024+ ", + "TX Pause Frames ", + "TX Collisions ", + "TX Late Collisions ", + "TX Excessive Collisions ", + + "RX good/bad Bytes ", + "RX Bytes ", + "RX good/bad Frames ", + "RX Bcast Frames ", + "RX Mcast Frames ", + "RX Unicast Frames ", + "RX Pause Frames ", + "RX Vlan Frames ", + "RX Frames 64 ", + "RX Frames 65-127 ", + "RX Frames 128-255 ", + "RX Frames 256-511 ", + "RX Frames 512-1023 ", + "RX Frames 1024+ ", + "RX Pause Frames ", + "RX Crc error Frames ", + "RX Length error Frames ", + "RX Alignment error Frames ", + "RX Runt Frames ", + "RX Oversize Frames ", + "RX Missed Frames ", + +}; + +static u32 qfec_stats_regs[] = { + + 69, 89, 70, 71, 72, 90, 92, 93, + 73, 74, 75, 76, 77, 78, 92, 84, + 86, 87, + + 97, 98, 96, 99, 100, 113, 116, 118, + 107, 108, 109, 110, 111, 112, 116, 
101, + 114, 102, 103, 106 +}; + +static int qfec_stats_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct qfec_priv *priv = netdev_priv(to_net_dev(dev)); + int count = PAGE_SIZE; + int l = 0; + int n; + + QFEC_LOG(QFEC_LOG_DBG2, "%s:\n", __func__); + + for (n = 0; n < ARRAY_SIZE(qfec_stats_regs); n++) { + l += snprintf(&buf[l], count - l, " %12u %s\n", + qfec_reg_read(priv, + qfec_stats_regs[n] * sizeof(uint32_t)), + qfec_stats_strings[n]); + } + + return l; +} + +static int qfec_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(qfec_stats_regs) + 1; /* missed frames */ + + default: + return -EOPNOTSUPP; + } +} + +static void qfec_ethtool_getstrings(struct net_device *dev, u32 stringset, + u8 *buf) +{ + QFEC_LOG(QFEC_LOG_DBG, "%s: %d bytes\n", __func__, + sizeof(qfec_stats_strings)); + + memcpy(buf, qfec_stats_strings, sizeof(qfec_stats_strings)); +} + +static void qfec_ethtool_getstats(struct net_device *dev, + struct ethtool_stats *stats, uint64_t *data) +{ + struct qfec_priv *priv = netdev_priv(dev); + int j = 0; + int n; + + for (n = 0; n < ARRAY_SIZE(qfec_stats_regs); n++) + data[j++] = qfec_reg_read(priv, + qfec_stats_regs[n] * sizeof(uint32_t)); + + data[j++] = priv->stats.rx_missed_errors; + + stats->n_stats = j; +} + +static void qfec_ethtool_getdrvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, QFEC_NAME, sizeof(info->driver)); + strlcpy(info->version, QFEC_DRV_VER, sizeof(info->version)); + strlcpy(info->bus_info, dev_name(dev->dev.parent), + sizeof(info->bus_info)); + + info->eedump_len = 0; + info->regdump_len = qfec_ethtool_getregs_len(dev); +} + +/* + * ethtool ops table + */ +static const struct ethtool_ops qfec_ethtool_ops = { + .nway_reset = qfec_nway_reset, + + .get_settings = qfec_ethtool_getsettings, + .set_settings = qfec_ethtool_setsettings, + .get_link = ethtool_op_get_link, + .get_drvinfo = 
qfec_ethtool_getdrvinfo, + .get_msglevel = qfec_ethtool_getmsglevel, + .set_msglevel = qfec_ethtool_setmsglevel, + .get_regs_len = qfec_ethtool_getregs_len, + .get_regs = qfec_ethtool_getregs, + + .get_ringparam = qfec_ethtool_getringparam, + .set_ringparam = qfec_ethtool_setringparam, + + .get_pauseparam = qfec_ethtool_getpauseparam, + .set_pauseparam = qfec_ethtool_setpauseparam, + + .get_sset_count = qfec_get_sset_count, + .get_strings = qfec_ethtool_getstrings, + .get_ethtool_stats = qfec_ethtool_getstats, +}; + +/* + * create sysfs entries + */ +static DEVICE_ATTR(bd_tx, 0444, qfec_bd_tx_show, NULL); +static DEVICE_ATTR(bd_rx, 0444, qfec_bd_rx_show, NULL); +static DEVICE_ATTR(cfg, 0444, qfec_config_show, NULL); +static DEVICE_ATTR(clk_reg, 0444, qfec_clk_reg_show, NULL); +static DEVICE_ATTR(cmd, 0222, NULL, qfec_cmd); +static DEVICE_ATTR(cntrs, 0444, qfec_cntrs_show, NULL); +static DEVICE_ATTR(reg, 0444, qfec_reg_show, NULL); +static DEVICE_ATTR(mdio, 0444, qfec_mdio_show, NULL); +static DEVICE_ATTR(stats, 0444, qfec_stats_show, NULL); +static DEVICE_ATTR(tstamp, 0444, qfec_tstamp_show, NULL); + +static void qfec_sysfs_create(struct net_device *dev) +{ + if (device_create_file(&(dev->dev), &dev_attr_bd_tx) || + device_create_file(&(dev->dev), &dev_attr_bd_rx) || + device_create_file(&(dev->dev), &dev_attr_cfg) || + device_create_file(&(dev->dev), &dev_attr_clk_reg) || + device_create_file(&(dev->dev), &dev_attr_cmd) || + device_create_file(&(dev->dev), &dev_attr_cntrs) || + device_create_file(&(dev->dev), &dev_attr_mdio) || + device_create_file(&(dev->dev), &dev_attr_reg) || + device_create_file(&(dev->dev), &dev_attr_stats) || + device_create_file(&(dev->dev), &dev_attr_tstamp)) + pr_err("qfec_sysfs_create failed to create sysfs files\n"); +} + +/* + * map a specified resource + */ +static int qfec_map_resource(struct platform_device *plat, int resource, + struct resource **priv_res, + void **addr) +{ + struct resource *res; + + QFEC_LOG(QFEC_LOG_DBG, "%s: 
0x%x resource\n", __func__, resource); + + /* allocate region to access controller registers */ + *priv_res = res = platform_get_resource(plat, resource, 0); + if (!res) { + QFEC_LOG_ERR("%s: platform_get_resource failed\n", __func__); + return -ENODEV; + } + + res = request_mem_region(res->start, res->end - res->start, QFEC_NAME); + if (!res) { + QFEC_LOG_ERR("%s: request_mem_region failed, %08x %08x\n", + __func__, res->start, res->end - res->start); + return -EBUSY; + } + + *addr = ioremap(res->start, res->end - res->start); + if (!*addr) + return -ENOMEM; + + QFEC_LOG(QFEC_LOG_DBG, " %s: io mapped from %p to %p\n", + __func__, (void *)res->start, *addr); + + return 0; +}; + +/* + * free allocated io regions + */ +static void qfec_free_res(struct resource *res, void *base) +{ + + if (res) { + if (base) + iounmap((void __iomem *)base); + + release_mem_region(res->start, res->end - res->start); + } +}; + +/* + * probe function that obtain configuration info and allocate net_device + */ +static int qfec_probe(struct platform_device *plat) +{ + struct net_device *dev; + struct qfec_priv *priv; + int ret = 0; + + /* allocate device */ + dev = alloc_etherdev(sizeof(struct qfec_priv)); + if (!dev) { + QFEC_LOG_ERR("%s: alloc_etherdev failed\n", __func__); + ret = -ENOMEM; + goto err; + } + + QFEC_LOG(QFEC_LOG_DBG, "%s: %08x dev\n", __func__, (int)dev); + + qfec_dev = dev; + SET_NETDEV_DEV(dev, &plat->dev); + + dev->netdev_ops = &qfec_netdev_ops; + dev->ethtool_ops = &qfec_ethtool_ops; + dev->watchdog_timeo = 2 * HZ; + dev->irq = platform_get_irq(plat, 0); + + dev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + + /* initialize private data */ + priv = (struct qfec_priv *)netdev_priv(dev); + memset((void *)priv, 0, sizeof(priv)); + + priv->net_dev = dev; + platform_set_drvdata(plat, dev); + + priv->n_tbd = TX_BD_NUM; + priv->n_rbd = RX_BD_NUM; + + /* initialize phy structure */ + priv->mii.phy_id_mask = 0x1F; + priv->mii.reg_num_mask = 0x1F; + priv->mii.dev = dev; + 
priv->mii.mdio_read = qfec_mdio_read; + priv->mii.mdio_write = qfec_mdio_write; + + /* map register regions */ + ret = qfec_map_resource( + plat, IORESOURCE_MEM, &priv->mac_res, &priv->mac_base); + if (ret) { + QFEC_LOG_ERR("%s: IORESOURCE_MEM mac failed\n", __func__); + goto err1; + } + + ret = qfec_map_resource( + plat, IORESOURCE_IO, &priv->clk_res, &priv->clk_base); + if (ret) { + QFEC_LOG_ERR("%s: IORESOURCE_IO clk failed\n", __func__); + goto err2; + } + + ret = qfec_map_resource( + plat, IORESOURCE_DMA, &priv->fuse_res, &priv->fuse_base); + if (ret) { + QFEC_LOG_ERR("%s: IORESOURCE_DMA fuse failed\n", __func__); + goto err3; + } + + /* initialize MAC addr */ + ret = qfec_get_mac_address(dev->dev_addr, priv->fuse_base, + MAC_ADDR_SIZE); + if (ret) + goto err4; + + QFEC_LOG(QFEC_LOG_DBG, "%s: mac %02x:%02x:%02x:%02x:%02x:%02x\n", + __func__, + dev->dev_addr[0], dev->dev_addr[1], + dev->dev_addr[2], dev->dev_addr[3], + dev->dev_addr[4], dev->dev_addr[5]); + + ret = register_netdev(dev); + if (ret) { + QFEC_LOG_ERR("%s: register_netdev failed\n", __func__); + goto err4; + } + + spin_lock_init(&priv->mdio_lock); + spin_lock_init(&priv->xmit_lock); + qfec_sysfs_create(dev); + + return 0; + + /* error handling */ +err4: + qfec_free_res(priv->fuse_res, priv->fuse_base); +err3: + qfec_free_res(priv->clk_res, priv->clk_base); +err2: + qfec_free_res(priv->mac_res, priv->mac_base); +err1: + free_netdev(dev); +err: + QFEC_LOG_ERR("%s: err\n", __func__); + return ret; +} + +/* + * module remove + */ +static int qfec_remove(struct platform_device *plat) +{ + struct net_device *dev = platform_get_drvdata(plat); + struct qfec_priv *priv = netdev_priv(dev); + + QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__); + + platform_set_drvdata(plat, NULL); + + qfec_free_res(priv->fuse_res, priv->fuse_base); + qfec_free_res(priv->clk_res, priv->clk_base); + qfec_free_res(priv->mac_res, priv->mac_base); + + unregister_netdev(dev); + free_netdev(dev); + + return 0; +} + +/* + * module support + 
* the FSM9xxx is not a mobile device does not support power management + */ + +static struct platform_driver qfec_driver = { + .probe = qfec_probe, + .remove = qfec_remove, + .driver = { + .name = QFEC_NAME, + .owner = THIS_MODULE, + }, +}; + +/* + * module init + */ +static int __init qfec_init_module(void) +{ + int res; + + QFEC_LOG(QFEC_LOG_DBG, "%s: %s\n", __func__, qfec_driver.driver.name); + + res = platform_driver_register(&qfec_driver); + + QFEC_LOG(QFEC_LOG_DBG, "%s: %d - platform_driver_register\n", + __func__, res); + + return res; +} + +/* + * module exit + */ +static void __exit qfec_exit_module(void) +{ + QFEC_LOG(QFEC_LOG_DBG, "%s:\n", __func__); + + platform_driver_unregister(&qfec_driver); +} + +MODULE_DESCRIPTION("FSM Network Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Rohit Vaswani "); +MODULE_VERSION("1.0"); + +module_init(qfec_init_module); +module_exit(qfec_exit_module); diff --git a/drivers/net/ethernet/msm/qfec.h b/drivers/net/ethernet/msm/qfec.h new file mode 100644 index 000000000000..525fd9c65cab --- /dev/null +++ b/drivers/net/ethernet/msm/qfec.h @@ -0,0 +1,800 @@ +/* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +/* qualcomm fast Ethernet controller HW description */ + +#ifndef _QFEC_EMAC_H_ +# define _QFEC_EMAC_H_ + +# ifndef __KERNEL__ +# include "stdint.h" +# endif + +# define MskBits(nBits, pos) (((1 << nBits)-1)<> 16) +# define BUF_RX_FL_SET(p, x) \ + (p.status = (p.status & ~BUF_RX_FL) | ((x << 16) & BUF_RX_FL)) +# define BUF_RX_FL_GET_FROM_STATUS(status) \ + (((status) & BUF_RX_FL) >> 16) + +# define BUF_RX_ES 0x00008000 /* error summary */ +# define BUF_RX_DE 0x00004000 /* error descriptor (es) */ +# define BUF_RX_SAF 0x00002000 /* source addr filt fail */ +# define BUF_RX_LE 0x00001000 /* length error */ + +# define BUF_RX_OE 0x00000800 /* overflow error (es) */ +# define BUF_RX_VLAN 0x00000400 /* vlan tag */ +# define BUF_RX_FS 0x00000200 /* first descriptor */ +# define BUF_RX_LS 0x00000100 /* last descriptor */ + +# define BUF_RX_IPC 0x00000080 /* cksum-err/giant-frame (es) */ +# define BUF_RX_LC 0x00000040 /* late collision (es) */ +# define BUF_RX_FT 0x00000020 /* frame type */ +# define BUF_RX_RWT 0x00000010 /* rec watchdog timeout (es) */ + +# define BUF_RX_RE 0x00000008 /* rec error (es) */ +# define BUF_RX_DBE 0x00000004 /* dribble bit err */ +# define BUF_RX_CE 0x00000002 /* crc err (es) */ +# define BUF_RX_CSE 0x00000001 /* checksum err */ + +# define BUF_RX_ERRORS \ + (BUF_RX_DE | BUF_RX_SAF | BUF_RX_LE | BUF_RX_OE \ + | BUF_RX_IPC | BUF_RX_LC | BUF_RX_RWT | BUF_RX_RE \ + | BUF_RX_DBE | BUF_RX_CE | BUF_RX_CSE) + +/* RX buffer control bits */ +# define BUF_RX_DI 0x80000000 /* disable intrp on compl */ +# define BUF_RX_RER 0x02000000 /* rec end of ring */ +# define BUF_RX_RCH 0x01000000 /* 2nd addr chained */ + +# define BUF_RX_SIZ2 0x003ff800 /* buffer 2 size */ +# define BUF_RX_SIZ2_GET(p) ((p.control&BUF_RX_SIZ2) >> 11) + +# define BUF_RX_SIZ 0x000007ff /* rx buf 1 size */ +# define BUF_RX_SIZ_GET(p) (p.ctl&BUF_RX_SIZ) + +/* TX buffer status bits */ +# define BUF_TX_TTSS 0x00020000 /* time stamp status */ +# define BUF_TX_IHE 0x00010000 /* IP 
hdr err */ + +# define BUF_TX_ES 0x00008000 /* error summary */ +# define BUF_TX_JT 0x00004000 /* jabber timeout (es) */ +# define BUF_TX_FF 0x00002000 /* frame flushed (es) */ +# define BUF_TX_PCE 0x00001000 /* payld cksum err */ + +# define BUF_TX_LOC 0x00000800 /* loss carrier (es) */ +# define BUF_TX_NC 0x00000400 /* no carrier (es) */ +# define BUF_TX_LC 0x00000200 /* late collision (es) */ +# define BUF_TX_EC 0x00000100 /* excessive collision (es) */ + +# define BUF_TX_VLAN 0x00000080 /* VLAN frame */ +# define BUF_TX_CC MskBits(4, 3) /* collision count */ +# define BUF_TX_CC_GET(p) ((p.status&BUF_TX_CC)>>3) + +# define BUF_TX_ED 0x00000004 /* excessive deferral (es) */ +# define BUF_TX_UF 0x00000002 /* underflow err (es) */ +# define BUF_TX_DB 0x00000001 /* deferred bit */ + +/* TX buffer control bits */ +# define BUF_TX_IC 0x80000000 /* intrpt on compl */ +# define BUF_TX_LS 0x40000000 /* last segment */ +# define BUF_TX_FS 0x20000000 /* first segment */ +# define BUF_TX_CIC 0x18000000 /* cksum insert control */ +# define BUF_TX_CIC_SET(n) (BUF_TX_CIC&(n<<27)) + +# define BUF_TX_DC 0x04000000 /* disable CRC */ +# define BUF_TX_TER 0x02000000 /* end of ring */ +# define BUF_TX_TCH 0x01000000 /* 2nd addr chained */ + +# define BUF_TX_DP 0x00800000 /* disable padding */ +# define BUF_TX_TTSE 0x00400000 /* timestamp enable */ + +# define BUF_TX_SIZ2 0x003ff800 /* buffer 2 size */ +# define BUF_TX_SIZ2_SET(n) (BUF_TX_SIZ2(n<<11)) + +# define BUF_TX_SIZ 0x000007ff /* buffer 1 size */ +# define BUF_TX_SIZ_SET(n) (BUF_TX_SI1 & n) + + +/* Ethernet Controller Registers */ +# define BUS_MODE_REG 0x1000 + +# define BUS_MODE_MB 0x04000000 /* mixed burst */ +# define BUS_MODE_AAL 0x02000000 /* address alignment beats */ +# define BUS_MODE_8XPBL 0x01000000 /* */ + +# define BUS_MODE_USP 0x00800000 /* use separate PBL */ +# define BUS_MODE_RPBL 0x007e0000 /* rxDMA PBL */ +# define BUS_MODE_FB 0x00010000 /* fixed burst */ + +# define BUS_MODE_PR 0x0000c000 /* tx/rx priority 
*/ +# define BUS_MODE_PR4 0x0000c000 /* tx/rx priority 4:1 */ +# define BUS_MODE_PR3 0x00008000 /* tx/rx priority 3:1 */ +# define BUS_MODE_PR2 0x00004000 /* tx/rx priority 2:1 */ +# define BUS_MODE_PR1 0x00000000 /* tx/rx priority 1:1 */ + +# define BUS_MODE_PBL 0x00003f00 /* programmable burst length */ +# define BUS_MODE_PBLSET(n) (BUS_MODE_PBL&(n<<8)) + +# define BUS_MODE_DSL 0x0000007c /* descriptor skip length */ +# define BUS_MODE_DSL_SET(n) (BUS_MODE_DSL & (n << 2)) + +# define BUS_MODE_DA 0x00000002 /* DMA arbitration scheme */ +# define BUS_MODE_SWR 0x00000001 /* software reset */ + +#define BUS_MODE_REG_DEFAULT (BUS_MODE_FB \ + | BUS_MODE_AAL \ + | BUS_MODE_PBLSET(16) \ + | BUS_MODE_DA \ + | BUS_MODE_DSL_SET(0)) + +# define TX_POLL_DEM_REG 0x1004 /* transmit poll demand */ +# define RX_POLL_DEM_REG 0x1008 /* receive poll demand */ + +# define RX_DES_LST_ADR_REG 0x100c /* receive buffer descriptor */ +# define TX_DES_LST_ADR_REG 0x1010 /* transmit buffer descriptor */ + +# define STATUS_REG 0x1014 + +# define STATUS_REG_RSVRD_1 0xc0000000 /* reserved */ +# define STATUS_REG_TTI 0x20000000 /* time-stamp trigger intrpt */ +# define STATUS_REG_GPI 0x10000000 /* gmac PMT interrupt */ + +# define STATUS_REG_GMI 0x08000000 /* gmac MMC interrupt */ +# define STATUS_REG_GLI 0x04000000 /* gmac line interface intrpt */ + +# define STATUS_REG_EB 0x03800000 /* error bits */ +# define STATUS_REG_EB_DATA 0x00800000 /* error during data transfer */ +# define STATUS_REG_EB_RDWR 0x01000000 /* error during rd/wr transfer */ +# define STATUS_REG_EB_DESC 0x02000000 /* error during desc access */ + +# define STATUS_REG_TS 0x00700000 /* transmit process state */ + +# define STATUS_REG_TS_STOP 0x00000000 /* stopped */ +# define STATUS_REG_TS_FETCH_DESC 0x00100000 /* fetching descriptor */ +# define STATUS_REG_TS_WAIT 0x00200000 /* waiting for status */ +# define STATUS_REG_TS_READ 0x00300000 /* reading host memory */ +# define STATUS_REG_TS_TIMESTAMP 0x00400000 /* timestamp 
write status */ +# define STATUS_REG_TS_RSVRD 0x00500000 /* reserved */ +# define STATUS_REG_TS_SUSPEND 0x00600000 /* desc-unavail/buffer-unflw */ +# define STATUS_REG_TS_CLOSE 0x00700000 /* closing desc */ + +# define STATUS_REG_RS 0x000e0000 /* receive process state */ + +# define STATUS_REG_RS_STOP 0x00000000 /* stopped */ +# define STATUS_REG_RS_FETCH_DESC 0x00020000 /* fetching descriptor */ +# define STATUS_REG_RS_RSVRD_1 0x00040000 /* reserved */ +# define STATUS_REG_RS_WAIT 0x00060000 /* waiting for packet */ +# define STATUS_REG_RS_SUSPEND 0x00080000 /* desc unavail */ +# define STATUS_REG_RS_CLOSE 0x000a0000 /* closing desc */ +# define STATUS_REG_RS_TIMESTAMP 0x000c0000 /* timestamp write status */ +# define STATUS_REG_RS_RSVRD_2 0x000e0000 /* writing host memory */ + +# define STATUS_REG_NIS 0x00010000 /* normal intrpt 14|6|2|0 */ +# define STATUS_REG_AIS 0x00008000 /* intrpts 13|10|9|8|7|5|4|3|1 */ + +# define STATUS_REG_ERI 0x00004000 /* early receive interrupt */ +# define STATUS_REG_FBI 0x00002000 /* fatal bus error interrupt */ +# define STATUS_REG_RSVRD_2 0x00001800 /* reserved */ + +# define STATUS_REG_ETI 0x00000400 /* early transmit interrupt */ +# define STATUS_REG_RWT 0x00000200 /* receive watchdog timeout */ +# define STATUS_REG_RPS 0x00000100 /* receive process stopped */ + +# define STATUS_REG_RU 0x00000080 /* receive buffer unavailable */ +# define STATUS_REG_RI 0x00000040 /* receive interrupt */ +# define STATUS_REG_UNF 0x00000020 /* transmit underflow */ +# define STATUS_REG_OVF 0x00000010 /* receive overflow */ + +# define STATUS_REG_TJT 0x00000008 /* transmit jabber timeout */ +# define STATUS_REG_TU 0x00000004 /* transmit buffer unavailable */ +# define STATUS_REG_TPS 0x00000002 /* transmit process stopped */ +# define STATUS_REG_TI 0x00000001 /* transmit interrupt */ + +# define STATUS_REG_AIS_BITS (STATUS_REG_FBI | STATUS_REG_ETI \ + | STATUS_REG_RWT | STATUS_REG_RPS \ + | STATUS_REG_RU | STATUS_REG_UNF \ + | STATUS_REG_OVF | 
STATUS_REG_TJT \ + | STATUS_REG_TPS | STATUS_REG_AIS) + +# define OPER_MODE_REG 0x1018 + +# define OPER_MODE_REG_DT 0x04000000 /* disab drop ip cksum err fr */ +# define OPER_MODE_REG_RSF 0x02000000 /* rec store and forward */ +# define OPER_MODE_REG_DFF 0x01000000 /* disable flush of rec frames */ + +# define OPER_MODE_REG_RFA2 0x00800000 /* thresh MSB for act flow-ctl */ +# define OPER_MODE_REG_RFD2 0x00400000 /* thresh MSB deAct flow-ctl */ +# define OPER_MODE_REG_TSF 0x00200000 /* tx store and forward */ +# define OPER_MODE_REG_FTF 0x00100000 /* flush tx FIFO */ + +# define OPER_MODE_REG_RSVD1 0x000e0000 /* reserved */ +# define OPER_MODE_REG_TTC 0x0001c000 /* transmit threshold control */ +# define OPER_MODE_REG_TTC_SET(x) (OPER_MODE_REG_TTC & (x << 14)) +# define OPER_MODE_REG_ST 0x00002000 /* start/stop transmission cmd */ + +# define OPER_MODE_REG_RFD 0x00001800 /* thresh for deAct flow-ctl */ +# define OPER_MODE_REG_RFA 0x00000600 /* threshold for act flow-ctl */ +# define OPER_MODE_REG_EFC 0x00000100 /* enable HW flow-ctl */ + +# define OPER_MODE_REG_FEF 0x00000080 /* forward error frames */ +# define OPER_MODE_REG_FUF 0x00000040 /* forward undersize good fr */ +# define OPER_MODE_REG_RSVD2 0x00000020 /* reserved */ +# define OPER_MODE_REG_RTC 0x00000018 /* receive threshold control */ +# define OPER_MODE_REG_RTC_SET(x) (OPER_MODE_REG_RTC & (x << 3)) + +# define OPER_MODE_REG_OSF 0x00000004 /* operate on second frame */ +# define OPER_MODE_REG_SR 0x00000002 /* start/stop receive */ +# define OPER_MODE_REG_RSVD3 0x00000001 /* reserved */ + + +#define OPER_MODE_REG_DEFAULT (OPER_MODE_REG_RSF \ + | OPER_MODE_REG_TSF \ + | OPER_MODE_REG_TTC_SET(5) \ + | OPER_MODE_REG_RTC_SET(1) \ + | OPER_MODE_REG_OSF) + +# define INTRP_EN_REG 0x101c + +# define INTRP_EN_REG_RSVD1 0xfffc0000 /* */ +# define INTRP_EN_REG_NIE 0x00010000 /* normal intrpt summ enable */ + +# define INTRP_EN_REG_AIE 0x00008000 /* abnormal intrpt summary en */ +# define INTRP_EN_REG_ERE 0x00004000 
/* early receive intrpt enable */ +# define INTRP_EN_REG_FBE 0x00002000 /* fatal bus error enable */ + +# define INTRP_EN_REG_RSVD2 0x00001800 /* */ + +# define INTRP_EN_REG_ETE 0x00000400 /* early tx intrpt enable */ +# define INTRP_EN_REG_RWE 0x00000200 /* rx watchdog timeout enable */ +# define INTRP_EN_REG_RSE 0x00000100 /* rx stopped enable */ + +# define INTRP_EN_REG_RUE 0x00000080 /* rx buf unavailable enable */ +# define INTRP_EN_REG_RIE 0x00000040 /* rx interrupt enable */ +# define INTRP_EN_REG_UNE 0x00000020 /* underflow interrupt enable */ +# define INTRP_EN_REG_OVE 0x00000010 /* overflow interrupt enable */ + +# define INTRP_EN_REG_TJE 0x00000008 /* tx jabber timeout enable */ +# define INTRP_EN_REG_TUE 0x00000004 /* tx buf unavailable enable */ +# define INTRP_EN_REG_TSE 0x00000002 /* tx stopped enable */ +# define INTRP_EN_REG_TIE 0x00000001 /* tx interrupt enable */ + +# define INTRP_EN_REG_All (~(INTRP_EN_REG_RSVD1)) + +# define MIS_FR_REG 0x1020 + +# define MIS_FR_REG_FIFO_OVFL 0x10000000 /* fifo overflow */ +# define MIS_FR_REG_FIFO_CNT 0x0FFE0000 /* fifo cnt */ + +# define MIS_FR_REG_MISS_OVFL 0x00010000 /* missed-frame overflow */ +# define MIS_FR_REG_MISS_CNT 0x0000FFFF /* missed-frame cnt */ + +# define RX_INTRP_WTCHDOG_REG 0x1024 +# define AXI_BUS_MODE_REG 0x1028 + +# define AXI_BUS_MODE_EN_LPI 0x80000000 /* enable low power interface */ +# define AXI_BUS_MODE_UNLK_MGC_PKT 0x40000000 /* unlock-magic-pkt/rem-wk-up */ +# define AXI_BUS_MODE_WR_OSR_LMT 0x00F00000 /* max wr out stndg req limit */ +# define AXI_BUS_MODE_RD_OSR_LMT 0x000F0000 /* max rd out stndg req limit */ +# define AXI_BUS_MODE_AXI_AAL 0x00001000 /* address aligned beats */ +# define AXI_BUS_MODE_BLEN256 0x00000080 /* axi burst length 256 */ +# define AXI_BUS_MODE_BLEN128 0x00000040 /* axi burst length 128 */ +# define AXI_BUS_MODE_BLEN64 0x00000020 /* axi burst length 64 */ +# define AXI_BUS_MODE_BLEN32 0x00000010 /* axi burst length 32 */ +# define AXI_BUS_MODE_BLEN16 
0x00000008 /* axi burst length 16 */ +# define AXI_BUS_MODE_BLEN8 0x00000004 /* axi burst length 8 */ +# define AXI_BUS_MODE_BLEN4 0x00000002 /* axi burst length 4 */ +# define AXI_BUS_MODE_UNDEF 0x00000001 /* axi undef burst length */ + +#define AXI_BUS_MODE_DEFAULT (AXI_BUS_MODE_WR_OSR_LMT \ + | AXI_BUS_MODE_RD_OSR_LMT \ + | AXI_BUS_MODE_BLEN16 \ + | AXI_BUS_MODE_BLEN8 \ + | AXI_BUS_MODE_BLEN4) + +# define AXI_STATUS_REG 0x102c + +/* 0x1030-0x1044 reserved */ +# define CUR_HOST_TX_DES_REG 0x1048 +# define CUR_HOST_RX_DES_REG 0x104c +# define CUR_HOST_TX_BU_ADR_REG 0x1050 +# define CUR_HOST_RX_BU_ADR_REG 0x1054 + +# define HW_FEATURE_REG 0x1058 + +# define MAC_CONFIG_REG 0x0000 + +# define MAC_CONFIG_REG_RSVD1 0xf8000000 /* */ + +# define MAC_CONFIG_REG_SFTERR 0x04000000 /* smii force tx error */ +# define MAC_CONFIG_REG_CST 0x02000000 /* crc strip for type frame */ +# define MAC_CONFIG_REG_TC 0x01000000 /* tx cfg in rgmii/sgmii/smii */ + +# define MAC_CONFIG_REG_WD 0x00800000 /* watchdog disable */ +# define MAC_CONFIG_REG_JD 0x00400000 /* jabber disable */ +# define MAC_CONFIG_REG_BE 0x00200000 /* frame burst enable */ +# define MAC_CONFIG_REG_JE 0x00100000 /* jumbo frame enable */ + +# define MAC_CONFIG_REG_IFG 0x000e0000 /* inter frame gap, 96-(8*n) */ +# define MAC_CONFIG_REG_DCRS 0x00010000 /* dis carrier sense during tx */ + +# define MAC_CONFIG_REG_PS 0x00008000 /* port select: 0/1 g/(10/100) */ +# define MAC_CONFIG_REG_FES 0x00004000 /* speed 100 mbps */ +# define MAC_CONFIG_REG_SPD (MAC_CONFIG_REG_PS | MAC_CONFIG_REG_FES) +# define MAC_CONFIG_REG_SPD_1G (0) +# define MAC_CONFIG_REG_SPD_100 (MAC_CONFIG_REG_PS | MAC_CONFIG_REG_FES) +# define MAC_CONFIG_REG_SPD_10 (MAC_CONFIG_REG_PS) +# define MAC_CONFIG_REG_SPD_SET(x) (MAC_CONFIG_REG_PS_FES & (x << 14)) + +# define MAC_CONFIG_REG_DO 0x00002000 /* disable receive own */ +# define MAC_CONFIG_REG_LM 0x00001000 /* loopback mode */ + +# define MAC_CONFIG_REG_DM 0x00000800 /* (full) duplex mode */ +# define 
MAC_CONFIG_REG_IPC 0x00000400 /* checksum offload */ +# define MAC_CONFIG_REG_DR 0x00000200 /* disable retry */ +# define MAC_CONFIG_REG_LUD 0x00000100 /* link up/down */ + +# define MAC_CONFIG_REG_ACS 0x00000080 /* auto pad/crc stripping */ +# define MAC_CONFIG_REG_BL 0x00000060 /* back-off limit */ +# define MAC_CONFIG_REG_BL_10 0x00000000 /* 10 */ +# define MAC_CONFIG_REG_BL_8 0x00000020 /* 8 */ +# define MAC_CONFIG_REG_BL_4 0x00000040 /* 4 */ +# define MAC_CONFIG_REG_BL_1 0x00000060 /* 1 */ +# define MAC_CONFIG_REG_DC 0x00000010 /* deferral check */ + +# define MAC_CONFIG_REG_TE 0x00000008 /* transmitter enable */ +# define MAC_CONFIG_REG_RE 0x00000004 /* receiver enable */ +# define MAC_CONFIG_REG_RSVD2 0x00000003 /* */ + +# define MAC_FR_FILTER_REG 0x0004 + +# define MAC_FR_FILTER_RA 0x80000000 /* receive all */ + +# define MAC_FR_FILTER_HPF 0x00000400 /* hash or perfect filter */ +# define MAC_FR_FILTER_SAF 0x00000200 /* source addr filt en */ +# define MAC_FR_FILTER_SAIF 0x00000100 /* SA inverse filter */ +# define MAC_FR_FILTER_PCF_MASK 0x000000c0 /* pass control frames */ +# define MAC_FR_FILTER_PCF_0 0x00000000 /* */ +# define MAC_FR_FILTER_PCF_1 0x00000040 /* */ +# define MAC_FR_FILTER_PCF_2 0x00000080 /* */ +# define MAC_FR_FILTER_PCF_3 0x000000c0 /* */ +# define MAC_FR_FILTER_DBF 0x00000020 /* disable broadcast frames */ +# define MAC_FR_FILTER_PM 0x00000010 /* pass all multicast */ +# define MAC_FR_FILTER_DAIF 0x00000008 /* DA inverse filtering */ +# define MAC_FR_FILTER_HMC 0x00000004 /* hash multicast */ +# define MAC_FR_FILTER_HUC 0x00000002 /* hash unicast */ +# define MAC_FR_FILTER_PR 0x00000001 /* promiscuous mode */ + +# define HASH_TABLE_HIGH_REG 0x0008 +# define HASH_TABLE_LOW_REG 0x000c + +# define GMII_ADR_REG 0x0010 + +# define GMII_ADR_REG_PA 0x0000f800 /* addr bits */ +# define GMII_ADR_REG_GR 0x000007c0 /* addr bits */ +# define GMII_ADR_REG_RSVRD1 0x00000020 /* */ +# define GMII_ADR_REG_CR 0x0000001c /* csr clock range */ +# define 
GMII_ADR_REG_GW 0x00000002 /* gmii write */ +# define GMII_ADR_REG_GB 0x00000001 /* gmii busy */ + +# define GMII_ADR_REG_ADR_SET(x) (GMII_ADR_REG_PA & (x << 11)) +# define GMII_ADR_REG_ADR_GET(x) ((x & GMII_ADR_REG_PA) >> 11) + +# define GMII_ADR_REG_REG_SET(x) (GMII_ADR_REG_GR & (x << 6)) +# define GMII_ADR_REG_REG_GET(x) (((x & GMII_ADR_REG_GR) >> 6) + +# define GMII_ADR_REG_CSR_SET(x) (GMII_ADR_REG_CR & (x << 2)) +# define GMII_ADR_REG_CSR_GET(x) (((x & GMII_ADR_REG_CR) >> 2) + +# define GMII_DATA_REG 0x0014 + +# define GMII_DATA_REG_DATA 0x0000ffff /* gmii data */ + +# define FLOW_CONTROL_REG 0x0018 + +# define FLOW_CONTROL_PT 0xFFFF0000 /* pause time */ +# define FLOW_CONTROL_DZPQ 0x00000080 /* disable zero-quanta pause */ +# define FLOW_CONTROL_PLT 0x00000030 /* pause level threshold */ + +# define FLOW_CONTROL_UP 0x00000008 /* unicast pause frame detect */ +# define FLOW_CONTROL_RFE 0x00000004 /* receive flow control enable */ +# define FLOW_CONTROL_TFE 0x00000002 /* transmit flow control enable */ +# define FLOW_CONTROL_FCB 0x00000001 /* flow control busy (BPA) */ + +# define VLAN_TAG_REG 0x001c + +# define VERSION_REG 0x0020 + +/* don't define these until HW if finished */ +/* # define VERSION_USER 0x10 */ +/* # define VERSION_QFEC 0x36 */ + +# define VERSION_REG_USER(x) (0xFF & (x >> 8)) +# define VERSION_REG_QFEC(x) (0xFF & x) + +# define DEBUG_REG 0x0024 + +# define DEBUG_REG_RSVD1 0xfc000000 /* */ +# define DEBUG_REG_TX_FIFO_FULL 0x02000000 /* Tx fifo full */ +# define DEBUG_REG_TX_FIFO_NEMP 0x01000000 /* Tx fifo not empty */ + +# define DEBUG_REG_RSVD2 0x00800000 /* */ +# define DEBUG_REG_TX_WR_ACTIVE 0x00400000 /* Tx fifo write ctrl active */ + +# define DEBUG_REG_TX_RD_STATE 0x00300000 /* Tx fifo rd ctrl state */ +# define DEBUG_REG_TX_RD_IDLE 0x00000000 /* idle */ +# define DEBUG_REG_TX_RD_WAIT 0x00100000 /* waiting for status */ +# define DEBUG_REG_TX_RD_PASUE 0x00200000 /* generating pause */ +# define DEBUG_REG_TX_RD_WRTG 0x00300000 /* wr stat 
flush fifo */ + +# define DEBUG_REG_TX_PAUSE 0x00080000 /* Tx in pause condition */ + +# define DEBUG_REG_TX_CTRL_STATE 0x00060000 /* Tx frame controller state */ +# define DEBUG_REG_TX_CTRL_IDLE 0x00090000 /* idle */ +# define DEBUG_REG_TX_CTRL_WAIT 0x00020000 /* waiting for status*/ +# define DEBUG_REG_TX_CTRL_PAUSE 0x00040000 /* generating pause */ +# define DEBUG_REG_TX_CTRL_XFER 0x00060000 /* transferring input */ + +# define DEBUG_REG_TX_ACTIVE 0x00010000 /* Tx actively transmitting */ +# define DEBUG_REG_RSVD3 0x0000fc00 /* */ + +# define DEBUG_REG_RX_STATE 0x00000300 /* Rx fifo state */ +# define DEBUG_REG_RX_EMPTY 0x00000000 /* empty */ +# define DEBUG_REG_RX_LOW 0x00000100 /* below threshold */ +# define DEBUG_REG_RX_HIGH 0x00000200 /* above threshold */ +# define DEBUG_REG_RX_FULL 0x00000300 /* full */ + +# define DEBUG_REG_RSVD4 0x00000080 /* */ + +# define DEBUG_REG_RX_RD_STATE 0x00000060 /* Rx rd ctrl state */ +# define DEBUG_REG_RX_RD_IDLE 0x00000000 /* idle */ +# define DEBUG_REG_RX_RD_RDG_FR 0x00000020 /* reading frame data */ +# define DEBUG_REG_RX_RD_RDG_STA 0x00000040 /* reading status */ +# define DEBUG_REG_RX_RD_FLUSH 0x00000060 /* flush fr data/stat */ + +# define DEBUG_REG_RX_ACTIVE 0x00000010 /* Rx wr ctlr active */ + +# define DEBUG_REG_RSVD5 0x00000008 /* */ +# define DEBUG_REG_SM_FIFO_RW_STA 0x00000006 /* small fifo rd/wr state */ +# define DEBUG_REG_RX_RECVG 0x00000001 /* Rx actively receiving data */ + +# define REM_WAKEUP_FR_REG 0x0028 +# define PMT_CTRL_STAT_REG 0x002c +/* 0x0030-0x0034 reserved */ + +# define INTRP_STATUS_REG 0x0038 + +# define INTRP_STATUS_REG_RSVD1 0x0000fc00 /* */ +# define INTRP_STATUS_REG_TSI 0x00000200 /* time stamp int stat */ +# define INTRP_STATUS_REG_RSVD2 0x00000100 /* */ + +# define INTRP_STATUS_REG_RCOI 0x00000080 /* rec checksum offload int */ +# define INTRP_STATUS_REG_TI 0x00000040 /* tx int stat */ +# define INTRP_STATUS_REG_RI 0x00000020 /* rx int stat */ +# define INTRP_STATUS_REG_NI 0x00000010 /* 
normal int summary */ + +# define INTRP_STATUS_REG_PMTI 0x00000008 /* PMT int */ +# define INTRP_STATUS_REG_ANC 0x00000004 /* auto negotiation complete */ +# define INTRP_STATUS_REG_LSC 0x00000002 /* link status change */ +# define INTRP_STATUS_REG_MII 0x00000001 /* rgMii/sgMii int */ + +# define INTRP_MASK_REG 0x003c + +# define INTRP_MASK_REG_RSVD1 0xfc00 /* */ +# define INTRP_MASK_REG_TSIM 0x0200 /* time stamp int mask */ +# define INTRP_MASK_REG_RSVD2 0x01f0 /* */ + +# define INTRP_MASK_REG_PMTIM 0x0000 /* PMT int mask */ +# define INTRP_MASK_REG_ANCM 0x0000 /* auto negotiation compl mask */ +# define INTRP_MASK_REG_LSCM 0x0000 /* link status change mask */ +# define INTRP_MASK_REG_MIIM 0x0000 /* rgMii/sgMii int mask */ + +# define MAC_ADR_0_HIGH_REG 0x0040 +# define MAC_ADR_0_LOW_REG 0x0044 +/* additional pairs of registers for MAC addresses 1-15 */ +# define MAC_ADR_HIGH_REG_N(n) (((n) < 16) ? \ + (MAC_ADR_0_HIGH_REG + (n) * 8) : \ + (MAC_ADR16_HIGH_REG + ((n) - 16) * 8)) +# define MAC_ADR_LOW_REG_N(n) (((n) < 16) ? 
\ + (MAC_ADR_0_LOW_REG + (n) * 8) : \ + (MAC_ADR16_LOW_REG + ((n) - 16) * 8)) + +# define AN_CONTROL_REG 0x00c0 + +# define AN_CONTROL_REG_RSVRD1 0xfff80000 /* */ +# define AN_CONTROL_REG_SGM_RAL 0x00040000 /* sgmii ral control */ +# define AN_CONTROL_REG_LR 0x00020000 /* lock to reference */ +# define AN_CONTROL_REG_ECD 0x00010000 /* enable comma detect */ + +# define AN_CONTROL_REG_RSVRD2 0x00008000 /* */ +# define AN_CONTROL_REG_ELE 0x00004000 /* external loopback enable */ +# define AN_CONTROL_REG_RSVRD3 0x00002000 /* */ +# define AN_CONTROL_REG_ANE 0x00001000 /* auto negotiation enable */ + +# define AN_CONTROL_REG_RSRVD4 0x00000c00 /* */ +# define AN_CONTROL_REG_RAN 0x00000200 /* restart auto negotiation */ +# define AN_CONTROL_REG_RSVRD5 0x000001ff /* */ + +# define AN_STATUS_REG 0x00c4 + +# define AN_STATUS_REG_RSVRD1 0xfffffe00 /* */ +# define AN_STATUS_REG_ES 0x00000100 /* extended status */ +# define AN_STATUS_REG_RSVRD2 0x000000c0 /* */ +# define AN_STATUS_REG_ANC 0x00000020 /* auto-negotiation complete */ +# define AN_STATUS_REG_RSVRD3 0x00000010 /* */ +# define AN_STATUS_REG_ANA 0x00000008 /* auto-negotiation ability */ +# define AN_STATUS_REG_LS 0x00000004 /* link status */ +# define AN_STATUS_REG_RSVRD4 0x00000003 /* */ + +# define AN_ADVERTISE_REG 0x00c8 +# define AN_LNK_PRTNR_ABIL_REG 0x00cc +# define AN_EXPANDSION_REG 0x00d0 +# define TBI_EXT_STATUS_REG 0x00d4 + +# define SG_RG_SMII_STATUS_REG 0x00d8 + +# define LINK_STATUS_REG 0x00d8 + +# define LINK_STATUS_REG_RSVRD1 0xffffffc0 /* */ +# define LINK_STATUS_REG_FCD 0x00000020 /* false carrier detect */ +# define LINK_STATUS_REG_JT 0x00000010 /* jabber timeout */ +# define LINK_STATUS_REG_UP 0x00000008 /* link status */ + +# define LINK_STATUS_REG_SPD 0x00000006 /* link speed */ +# define LINK_STATUS_REG_SPD_2_5 0x00000000 /* 10M 2.5M * 4 */ +# define LINK_STATUS_REG_SPD_25 0x00000002 /* 100M 25M * 4 */ +# define LINK_STATUS_REG_SPD_125 0x00000004 /* 1G 125M * 8 */ + +# define 
LINK_STATUS_REG_F_DUPLEX 0x00000001 /* full duplex */ + +/* 0x00dc-0x00fc reserved */ + +/* MMC Register Map is from 0x0100-0x02fc */ +# define MMC_CNTRL_REG 0x0100 +# define MMC_INTR_RX_REG 0x0104 +# define MMC_INTR_TX_REG 0x0108 +# define MMC_INTR_MASK_RX_REG 0x010C +# define MMC_INTR_MASK_TX_REG 0x0110 +# define NUM_MULTCST_FRM_RCVD_G 0x0190 + +/* 0x0300-0x06fc reserved */ + +/* precision time protocol time stamp registers */ + +# define TS_CTL_REG 0x0700 + +# define TS_CTL_ATSFC 0x00080000 +# define TS_CTL_TSENMAC 0x00040000 + +# define TS_CTL_TSCLKTYPE 0x00030000 +# define TS_CTL_TSCLK_ORD 0x00000000 +# define TS_CTL_TSCLK_BND 0x00010000 +# define TS_CTL_TSCLK_ETE 0x00020000 +# define TS_CTL_TSCLK_PTP 0x00030000 + +# define TS_CTL_TSMSTRENA 0x00008000 +# define TS_CTL_TSEVNTENA 0x00004000 +# define TS_CTL_TSIPV4ENA 0x00002000 +# define TS_CTL_TSIPV6ENA 0x00001000 + +# define TS_CTL_TSIPENA 0x00000800 +# define TS_CTL_TSVER2ENA 0x00000400 +# define TS_CTL_TSCTRLSSR 0x00000200 +# define TS_CTL_TSENALL 0x00000100 + +# define TS_CTL_TSADDREG 0x00000020 +# define TS_CTL_TSTRIG 0x00000010 + +# define TS_CTL_TSUPDT 0x00000008 +# define TS_CTL_TSINIT 0x00000004 +# define TS_CTL_TSCFUPDT 0x00000002 +# define TS_CTL_TSENA 0x00000001 + + +# define TS_SUB_SEC_INCR_REG 0x0704 +# define TS_HIGH_REG 0x0708 +# define TS_LOW_REG 0x070c +# define TS_HI_UPDT_REG 0x0710 +# define TS_LO_UPDT_REG 0x0714 +# define TS_APPEND_REG 0x0718 +# define TS_TARG_TIME_HIGH_REG 0x071c +# define TS_TARG_TIME_LOW_REG 0x0720 +# define TS_HIGHER_WD_REG 0x0724 +# define TS_STATUS_REG 0x072c + +/* 0x0730-0x07fc reserved */ + +# define MAC_ADR16_HIGH_REG 0x0800 +# define MAC_ADR16_LOW_REG 0x0804 +/* additional pairs of registers for MAC addresses 17-31 */ + +# define MAC_ADR_MAX 32 + + +# define QFEC_INTRP_SETUP (INTRP_EN_REG_AIE \ + | INTRP_EN_REG_FBE \ + | INTRP_EN_REG_RWE \ + | INTRP_EN_REG_RSE \ + | INTRP_EN_REG_RUE \ + | INTRP_EN_REG_UNE \ + | INTRP_EN_REG_OVE \ + | INTRP_EN_REG_TJE \ + | 
INTRP_EN_REG_TSE \ + | INTRP_EN_REG_NIE \ + | INTRP_EN_REG_RIE \ + | INTRP_EN_REG_TIE) + +/* + * ASIC Ethernet clock register definitions: + * address offsets and some register definitions + */ + +# define EMAC_CLK_REG_BASE 0x94020000 + +/* + * PHY clock PLL register locations + */ +# define ETH_MD_REG 0x02A4 +# define ETH_NS_REG 0x02A8 + +/* definitions of NS_REG control bits + */ +# define ETH_NS_SRC_SEL 0x0007 + +# define ETH_NS_PRE_DIV_MSK 0x0018 +# define ETH_NS_PRE_DIV(x) (ETH_NS_PRE_DIV_MSK & (x << 3)) + +# define ETH_NS_MCNTR_MODE_MSK 0x0060 +# define ETH_NS_MCNTR_MODE_BYPASS 0x0000 +# define ETH_NS_MCNTR_MODE_SWALLOW 0x0020 +# define ETH_NS_MCNTR_MODE_DUAL 0x0040 +# define ETH_NS_MCNTR_MODE_SINGLE 0x0060 + +# define ETH_NS_MCNTR_RST 0x0080 +# define ETH_NS_MCNTR_EN 0x0100 + +# define EMAC_PTP_NS_CLK_EN 0x0200 +# define EMAC_PTP_NS_CLK_INV 0x0400 +# define EMAC_PTP_NS_ROOT_EN 0x0800 + +/* clock sources + */ +# define CLK_SRC_TCXO 0x0 +# define CLK_SRC_PLL_GLOBAL 0x1 +# define CLK_SRC_PLL_ARM 0x2 +# define CLK_SRC_PLL_QDSP6 0x3 +# define CLK_SRC_PLL_EMAC 0x4 +# define CLK_SRC_EXT_CLK2 0x5 +# define CLK_SRC_EXT_CLK1 0x6 +# define CLK_SRC_CORE_TEST 0x7 + +# define ETH_MD_M(x) (x << 16) +# define ETH_MD_2D_N(x) ((~(x) & 0xffff)) +# define ETH_NS_NM(x) ((~(x) << 16) & 0xffff0000) + +/* + * PHY interface clock divider + */ +# define ETH_X_EN_NS_REG 0x02AC + +# define ETH_RX_CLK_FB_INV 0x80 +# define ETH_RX_CLK_FB_EN 0x40 +# define ETH_TX_CLK_FB_INV 0x20 +# define ETH_TX_CLK_FB_EN 0x10 +# define ETH_RX_CLK_INV 0x08 +# define ETH_RX_CLK_EN 0x04 +# define ETH_TX_CLK_INV 0x02 +# define ETH_TX_CLK_EN 0x01 + +# define ETH_X_EN_NS_DEFAULT \ + (ETH_RX_CLK_FB_EN | ETH_TX_CLK_FB_EN | ETH_RX_CLK_EN | ETH_TX_CLK_EN) + +# define EMAC_PTP_MD_REG 0x02B0 + +/* PTP clock divider + */ +# define EMAC_PTP_NS_REG 0x02B4 + +/* + * clock interface pin controls + */ +# define EMAC_NS_REG 0x02B8 + +# define EMAC_RX_180_CLK_INV 0x2000 +# define EMAC_RX_180_CLK_EN 0x1000 +# define 
EMAC_RX_180_CLK_EN_INV (EMAC_RX_180_CLK_INV | EMAC_RX_180_CLK_EN) + +# define EMAC_TX_180_CLK_INV 0x0800 +# define EMAC_TX_180_CLK_EN 0x0400 +# define EMAC_TX_180_CLK_EN_INV (EMAC_TX_180_CLK_INV | EMAC_TX_180_CLK_EN) + +# define EMAC_REVMII_RX_CLK_INV 0x0200 +# define EMAC_REVMII_RX_CLK_EN 0x0100 + +# define EMAC_RX_CLK_INV 0x0080 +# define EMAC_RX_CLK_EN 0x0040 + +# define EMAC_REVMII_TX_CLK_INV 0x0020 +# define EMAC_REVMII_TX_CLK_EN 0x0010 + +# define EMAC_TX_CLK_INV 0x0008 +# define EMAC_TX_CLK_EN 0x0004 + +# define EMAC_RX_R_CLK_EN 0x0002 +# define EMAC_TX_R_CLK_EN 0x0001 + +# define EMAC_NS_DEFAULT \ + (EMAC_RX_180_CLK_EN_INV | EMAC_TX_180_CLK_EN_INV \ + | EMAC_REVMII_RX_CLK_EN | EMAC_REVMII_TX_CLK_EN \ + | EMAC_RX_CLK_EN | EMAC_TX_CLK_EN \ + | EMAC_RX_R_CLK_EN | EMAC_TX_R_CLK_EN) + +/* + * + */ +# define EMAC_TX_FS_REG 0x02BC +# define EMAC_RX_FS_REG 0x02C0 + +/* + * Ethernet controller PHY interface select + */ +# define EMAC_PHY_INTF_SEL_REG 0x18030 + +# define EMAC_PHY_INTF_SEL_MII 0x0 +# define EMAC_PHY_INTF_SEL_RGMII 0x1 +# define EMAC_PHY_INTF_SEL_REVMII 0x7 +# define EMAC_PHY_INTF_SEL_MASK 0x7 + +/* + * MDIO addresses + */ +# define EMAC_PHY_ADDR_REG 0x18034 +# define EMAC_REVMII_PHY_ADDR_REG 0x18038 + +/* + * clock routing + */ +# define EMAC_CLKMUX_SEL_REG 0x1803c + +# define EMAC_CLKMUX_SEL_0 0x1 +# define EMAC_CLKMUX_SEL_1 0x2 + + +#endif