patched lwip-eth with https://github.com/mbedmicro/mbed/commit/8222bde1af2e328e4c58d0f438827f3091e5e0eb

Dependents:   EthernetInterface

Fork of lwip-eth by mbed official

Committer: emilmont
Date: Mon Jul 23 11:52:41 2012 +0000
Revision: 3:dd8b8f5b449a
Parent: 2:5208926bd863
Child: 4:d827a085afd9

tidyup

Who changed what in which revision?

User | Revision | Line number | New contents of line
emilmont 3:dd8b8f5b449a 1 /**********************************************************************
emilmont 3:dd8b8f5b449a 2 * $Id$ lpc17_emac.c 2011-11-20
emilmont 3:dd8b8f5b449a 3 *//**
emilmont 3:dd8b8f5b449a 4 * @file lpc17_emac.c
emilmont 3:dd8b8f5b449a 5 * @brief LPC17 ethernet driver for LWIP
emilmont 3:dd8b8f5b449a 6 * @version 1.0
emilmont 3:dd8b8f5b449a 7 * @date 20. Nov. 2011
emilmont 3:dd8b8f5b449a 8 * @author NXP MCU SW Application Team
emilmont 3:dd8b8f5b449a 9 *
emilmont 3:dd8b8f5b449a 10 * Copyright(C) 2011, NXP Semiconductor
emilmont 3:dd8b8f5b449a 11 * All rights reserved.
emilmont 3:dd8b8f5b449a 12 *
emilmont 3:dd8b8f5b449a 13 ***********************************************************************
emilmont 3:dd8b8f5b449a 14 * Software that is described herein is for illustrative purposes only
emilmont 3:dd8b8f5b449a 15 * which provides customers with programming information regarding the
emilmont 3:dd8b8f5b449a 16 * products. This software is supplied "AS IS" without any warranties.
emilmont 3:dd8b8f5b449a 17 * NXP Semiconductors assumes no responsibility or liability for the
emilmont 3:dd8b8f5b449a 18 * use of the software, conveys no license or title under any patent,
emilmont 3:dd8b8f5b449a 19 * copyright, or mask work right to the product. NXP Semiconductors
emilmont 3:dd8b8f5b449a 20 * reserves the right to make changes in the software without
emilmont 3:dd8b8f5b449a 21 * notification. NXP Semiconductors also make no representation or
emilmont 3:dd8b8f5b449a 22 * warranty that such application will be suitable for the specified
emilmont 3:dd8b8f5b449a 23 * use without further testing or modification.
emilmont 3:dd8b8f5b449a 24 **********************************************************************/
emilmont 3:dd8b8f5b449a 25
emilmont 3:dd8b8f5b449a 26 #include "lwip/opt.h"
emilmont 3:dd8b8f5b449a 27 #include "lwip/sys.h"
emilmont 3:dd8b8f5b449a 28 #include "lwip/def.h"
emilmont 3:dd8b8f5b449a 29 #include "lwip/mem.h"
emilmont 3:dd8b8f5b449a 30 #include "lwip/pbuf.h"
emilmont 3:dd8b8f5b449a 31 #include "lwip/stats.h"
emilmont 3:dd8b8f5b449a 32 #include "lwip/snmp.h"
emilmont 3:dd8b8f5b449a 33 #include "netif/etharp.h"
emilmont 3:dd8b8f5b449a 34 #include "netif/ppp_oe.h"
emilmont 3:dd8b8f5b449a 35
emilmont 3:dd8b8f5b449a 36 #include "lpc17xx_emac.h"
emilmont 3:dd8b8f5b449a 37 #include "lpc17_emac.h"
emilmont 3:dd8b8f5b449a 38 #include "lpc_emac_config.h"
emilmont 3:dd8b8f5b449a 39 #include "lpc_phy.h"
emilmont 3:dd8b8f5b449a 40 #include "sys_arch.h"
emilmont 3:dd8b8f5b449a 41
emilmont 3:dd8b8f5b449a 42 #include "mbed_interface.h"
emilmont 3:dd8b8f5b449a 43 #include <string.h>
emilmont 3:dd8b8f5b449a 44
emilmont 3:dd8b8f5b449a 45 #ifndef LPC_EMAC_RMII
emilmont 3:dd8b8f5b449a 46 #error LPC_EMAC_RMII is not defined!
emilmont 3:dd8b8f5b449a 47 #endif
emilmont 3:dd8b8f5b449a 48
emilmont 3:dd8b8f5b449a 49 #if LPC_NUM_BUFF_TXDESCS < 2
emilmont 3:dd8b8f5b449a 50 #error LPC_NUM_BUFF_TXDESCS must be at least 2
emilmont 3:dd8b8f5b449a 51 #endif
emilmont 3:dd8b8f5b449a 52
emilmont 3:dd8b8f5b449a 53 #if LPC_NUM_BUFF_RXDESCS < 3
emilmont 3:dd8b8f5b449a 54 #error LPC_NUM_BUFF_RXDESCS must be at least 3
emilmont 3:dd8b8f5b449a 55 #endif
emilmont 3:dd8b8f5b449a 56
emilmont 3:dd8b8f5b449a 57 /** @defgroup lwip17xx_emac_DRIVER lpc17 EMAC driver for LWIP
emilmont 3:dd8b8f5b449a 58 * @ingroup lwip_emac
emilmont 3:dd8b8f5b449a 59 *
emilmont 3:dd8b8f5b449a 60 * @{
emilmont 3:dd8b8f5b449a 61 */
emilmont 3:dd8b8f5b449a 62
emilmont 3:dd8b8f5b449a 63 #if NO_SYS == 0
emilmont 3:dd8b8f5b449a 64 /** \brief Driver transmit and receive thread priorities
emilmont 3:dd8b8f5b449a 65 *
emilmont 3:dd8b8f5b449a 66 * Thread priorities for receive thread and TX cleanup thread. Alter
emilmont 3:dd8b8f5b449a 67 * to prioritize receive or transmit bandwidth. In a heavily loaded
emilmont 3:dd8b8f5b449a 68 * system or with LWIP_DEBUG enabled, the priorities might be better
emilmont 3:dd8b8f5b449a 69 * set to the same value. */
emilmont 3:dd8b8f5b449a 70 #define RX_PRIORITY (osPriorityNormal)
emilmont 3:dd8b8f5b449a 71 #define TX_PRIORITY (osPriorityNormal)
emilmont 3:dd8b8f5b449a 72
emilmont 3:dd8b8f5b449a 73 /** \brief Debug output formatter lock define
emilmont 3:dd8b8f5b449a 74 *
emilmont 3:dd8b8f5b449a 75 * When using an RTOS and with LWIP_DEBUG enabled, enabling this
emilmont 3:dd8b8f5b449a 76 * define will allow RX debug messages to not interleave with the
emilmont 3:dd8b8f5b449a 77 * TX messages (so they are actually readable). Not enabling this
emilmont 3:dd8b8f5b449a 78 * define when the system is under load will cause the output to
emilmont 3:dd8b8f5b449a 79 * be unreadable. There is a small tradeoff in performance for this
emilmont 3:dd8b8f5b449a 80 * so use it only for debug. */
emilmont 3:dd8b8f5b449a 81 //#define LOCK_RX_THREAD
emilmont 3:dd8b8f5b449a 82
emilmont 3:dd8b8f5b449a 83 /** \brief Receive group interrupts
emilmont 3:dd8b8f5b449a 84 */
emilmont 3:dd8b8f5b449a 85 #define RXINTGROUP (EMAC_INT_RX_OVERRUN | EMAC_INT_RX_ERR | EMAC_INT_RX_DONE)
emilmont 3:dd8b8f5b449a 86
emilmont 3:dd8b8f5b449a 87 /** \brief Transmit group interrupts
emilmont 3:dd8b8f5b449a 88 */
emilmont 3:dd8b8f5b449a 89 #define TXINTGROUP (EMAC_INT_TX_UNDERRUN | EMAC_INT_TX_ERR | EMAC_INT_TX_DONE)
emilmont 3:dd8b8f5b449a 90
emilmont 3:dd8b8f5b449a 91 #else
emilmont 3:dd8b8f5b449a 92 #define RXINTGROUP 0
emilmont 3:dd8b8f5b449a 93 #define TXINTGROUP 0
emilmont 3:dd8b8f5b449a 94 #endif
emilmont 3:dd8b8f5b449a 95
emilmont 3:dd8b8f5b449a 96 /** \brief Structure of a TX/RX descriptor
emilmont 3:dd8b8f5b449a 97 */
emilmont 3:dd8b8f5b449a 98 typedef struct
emilmont 3:dd8b8f5b449a 99 {
emilmont 3:dd8b8f5b449a 100 volatile u32_t packet; /**< Pointer to buffer */
emilmont 3:dd8b8f5b449a 101 volatile u32_t control; /**< Control word */
emilmont 3:dd8b8f5b449a 102 } LPC_TXRX_DESC_T;
emilmont 3:dd8b8f5b449a 103
emilmont 3:dd8b8f5b449a 104 /** \brief Structure of a RX status entry
emilmont 3:dd8b8f5b449a 105 */
emilmont 3:dd8b8f5b449a 106 typedef struct
emilmont 3:dd8b8f5b449a 107 {
emilmont 3:dd8b8f5b449a 108 volatile u32_t statusinfo; /**< RX status word */
emilmont 3:dd8b8f5b449a 109 volatile u32_t statushashcrc; /**< RX hash CRC */
emilmont 3:dd8b8f5b449a 110 } LPC_TXRX_STATUS_T;
emilmont 3:dd8b8f5b449a 111
emilmont 3:dd8b8f5b449a 112 /* LPC EMAC driver data structure */
emilmont 3:dd8b8f5b449a 113 struct lpc_enetdata {
emilmont 3:dd8b8f5b449a 114 /* prxs must be 8 byte aligned! */
emilmont 3:dd8b8f5b449a 115 LPC_TXRX_STATUS_T prxs[LPC_NUM_BUFF_RXDESCS]; /**< RX status list */
emilmont 3:dd8b8f5b449a 116 struct netif *netif; /**< Reference back to LWIP parent netif */
emilmont 3:dd8b8f5b449a 117 LPC_TXRX_DESC_T ptxd[LPC_NUM_BUFF_TXDESCS]; /**< TX descriptor list */
emilmont 3:dd8b8f5b449a 118 LPC_TXRX_STATUS_T ptxs[LPC_NUM_BUFF_TXDESCS]; /**< TX status list */
emilmont 3:dd8b8f5b449a 119 LPC_TXRX_DESC_T prxd[LPC_NUM_BUFF_RXDESCS]; /**< RX descriptor list */
emilmont 3:dd8b8f5b449a 120 struct pbuf *rxb[LPC_NUM_BUFF_RXDESCS]; /**< RX pbuf pointer list, zero-copy mode */
emilmont 3:dd8b8f5b449a 121 u32_t rx_fill_desc_index; /**< RX descriptor next available index */
emilmont 3:dd8b8f5b449a 122 volatile u32_t rx_free_descs; /**< Count of free RX descriptors */
emilmont 3:dd8b8f5b449a 123 struct pbuf *txb[LPC_NUM_BUFF_TXDESCS]; /**< TX pbuf pointer list, zero-copy mode */
emilmont 3:dd8b8f5b449a 124 u32_t lpc_last_tx_idx; /**< TX last descriptor index, zero-copy mode */
emilmont 3:dd8b8f5b449a 125 #if NO_SYS == 0
emilmont 3:dd8b8f5b449a 126 sys_sem_t RxSem; /**< RX receive thread wakeup semaphore */
emilmont 3:dd8b8f5b449a 127 sys_sem_t TxCleanSem; /**< TX cleanup thread wakeup semaphore */
emilmont 3:dd8b8f5b449a 128 sys_mutex_t TXLockMutex; /**< TX critical section mutex */
emilmont 3:dd8b8f5b449a 129 sys_sem_t xTXDCountSem; /**< TX free buffer counting semaphore */
emilmont 3:dd8b8f5b449a 130 #endif
emilmont 3:dd8b8f5b449a 131 };
emilmont 3:dd8b8f5b449a 132
emilmont 3:dd8b8f5b449a 133 /** \brief LPC EMAC driver work data
emilmont 3:dd8b8f5b449a 134 */
emilmont 3:dd8b8f5b449a 135 ALIGNED(8) struct lpc_enetdata lpc_enetdata;
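/* Note: the EMAC requires the RX status array base address (RxStatus) to be
   aligned on an 8-byte boundary, while the descriptor arrays only need word
   alignment. Keeping prxs as the first member and aligning the whole
   structure with ALIGNED(8) satisfies both requirements. */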
emilmont 3:dd8b8f5b449a 136
emilmont 3:dd8b8f5b449a 137 /* Write a value via the MII link (non-blocking) */
emilmont 3:dd8b8f5b449a 138 void lpc_mii_write_noblock(u32_t PhyReg, u32_t Value)
emilmont 3:dd8b8f5b449a 139 {
emilmont 3:dd8b8f5b449a 140 /* Write value at PHY address and register */
emilmont 3:dd8b8f5b449a 141 LPC_EMAC->MADR = (LPC_PHYDEF_PHYADDR << 8) | PhyReg;
emilmont 3:dd8b8f5b449a 142 LPC_EMAC->MWTD = Value;
emilmont 3:dd8b8f5b449a 143 }
emilmont 3:dd8b8f5b449a 144
emilmont 3:dd8b8f5b449a 145 /* Write a value via the MII link (blocking) */
emilmont 3:dd8b8f5b449a 146 err_t lpc_mii_write(u32_t PhyReg, u32_t Value)
emilmont 3:dd8b8f5b449a 147 {
emilmont 3:dd8b8f5b449a 148 u32_t mst = 250;
emilmont 3:dd8b8f5b449a 149 err_t sts = ERR_OK;
emilmont 3:dd8b8f5b449a 150
emilmont 3:dd8b8f5b449a 151 /* Write value at PHY address and register */
emilmont 3:dd8b8f5b449a 152 lpc_mii_write_noblock(PhyReg, Value);
emilmont 3:dd8b8f5b449a 153
emilmont 3:dd8b8f5b449a 154 /* Wait for unbusy status */
emilmont 3:dd8b8f5b449a 155 while (mst > 0) {
emilmont 3:dd8b8f5b449a 156 sts = LPC_EMAC->MIND;
emilmont 3:dd8b8f5b449a 157 if ((sts & EMAC_MIND_BUSY) == 0)
emilmont 3:dd8b8f5b449a 158 mst = 0;
emilmont 3:dd8b8f5b449a 159 else {
emilmont 3:dd8b8f5b449a 160 mst--;
emilmont 3:dd8b8f5b449a 161 osDelay(1);
emilmont 3:dd8b8f5b449a 162 }
emilmont 3:dd8b8f5b449a 163 }
emilmont 3:dd8b8f5b449a 164
emilmont 3:dd8b8f5b449a 165 if (sts != 0)
emilmont 3:dd8b8f5b449a 166 sts = ERR_TIMEOUT;
emilmont 3:dd8b8f5b449a 167
emilmont 3:dd8b8f5b449a 168 return sts;
emilmont 3:dd8b8f5b449a 169 }
emilmont 3:dd8b8f5b449a 170
emilmont 3:dd8b8f5b449a 171 /* Reads current MII link busy status */
emilmont 3:dd8b8f5b449a 172 u32_t lpc_mii_is_busy(void)
emilmont 3:dd8b8f5b449a 173 {
emilmont 3:dd8b8f5b449a 174 return (u32_t) (LPC_EMAC->MIND & EMAC_MIND_BUSY);
emilmont 3:dd8b8f5b449a 175 }
emilmont 3:dd8b8f5b449a 176
emilmont 3:dd8b8f5b449a 177 /* Returns the data from a completed MII read and ends the read cycle (non-blocking) */
emilmont 3:dd8b8f5b449a 178 u32_t lpc_mii_read_data(void)
emilmont 3:dd8b8f5b449a 179 {
emilmont 3:dd8b8f5b449a 180 u32_t data = LPC_EMAC->MRDD;
emilmont 3:dd8b8f5b449a 181 LPC_EMAC->MCMD = 0;
emilmont 3:dd8b8f5b449a 182
emilmont 3:dd8b8f5b449a 183 return data;
emilmont 3:dd8b8f5b449a 184 }
emilmont 3:dd8b8f5b449a 185
emilmont 3:dd8b8f5b449a 186 /* Starts a read operation via the MII link (non-blocking) */
emilmont 3:dd8b8f5b449a 187 void lpc_mii_read_noblock(u32_t PhyReg)
emilmont 3:dd8b8f5b449a 188 {
emilmont 3:dd8b8f5b449a 189 /* Read value at PHY address and register */
emilmont 3:dd8b8f5b449a 190 LPC_EMAC->MADR = (LPC_PHYDEF_PHYADDR << 8) | PhyReg;
emilmont 3:dd8b8f5b449a 191 LPC_EMAC->MCMD = EMAC_MCMD_READ;
emilmont 3:dd8b8f5b449a 192 }
emilmont 3:dd8b8f5b449a 193
emilmont 3:dd8b8f5b449a 194 /* Read a value via the MII link (blocking) */
emilmont 3:dd8b8f5b449a 195 err_t lpc_mii_read(u32_t PhyReg, u32_t *data)
emilmont 3:dd8b8f5b449a 196 {
emilmont 3:dd8b8f5b449a 197 u32_t mst = 250;
emilmont 3:dd8b8f5b449a 198 err_t sts = ERR_OK;
emilmont 3:dd8b8f5b449a 199
emilmont 3:dd8b8f5b449a 200 /* Read value at PHY address and register */
emilmont 3:dd8b8f5b449a 201 lpc_mii_read_noblock(PhyReg);
emilmont 3:dd8b8f5b449a 202
emilmont 3:dd8b8f5b449a 203 /* Wait for unbusy status */
emilmont 3:dd8b8f5b449a 204 while (mst > 0) {
emilmont 3:dd8b8f5b449a 205 sts = LPC_EMAC->MIND & ~EMAC_MIND_MII_LINK_FAIL;
emilmont 3:dd8b8f5b449a 206 if ((sts & EMAC_MIND_BUSY) == 0) {
emilmont 3:dd8b8f5b449a 207 mst = 0;
emilmont 3:dd8b8f5b449a 208 *data = LPC_EMAC->MRDD;
emilmont 3:dd8b8f5b449a 209 } else {
emilmont 3:dd8b8f5b449a 210 mst--;
emilmont 3:dd8b8f5b449a 211 osDelay(1);
emilmont 3:dd8b8f5b449a 212 }
emilmont 3:dd8b8f5b449a 213 }
emilmont 3:dd8b8f5b449a 214
emilmont 3:dd8b8f5b449a 215 LPC_EMAC->MCMD = 0;
emilmont 3:dd8b8f5b449a 216
emilmont 3:dd8b8f5b449a 217 if (sts != 0)
emilmont 3:dd8b8f5b449a 218 sts = ERR_TIMEOUT;
emilmont 3:dd8b8f5b449a 219
emilmont 3:dd8b8f5b449a 220 return sts;
emilmont 3:dd8b8f5b449a 221 }
emilmont 3:dd8b8f5b449a 222
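/* Illustrative sketch (not part of the driver): the blocking MII helpers
   above can be used to query standard IEEE 802.3 clause-22 PHY registers,
   for example the PHY identifier registers 2 and 3. The helper name below
   is hypothetical and the block is kept inside #if 0 so it is never
   compiled. */
#if 0
static void example_read_phy_id(void)
{
    u32_t id1 = 0, id2 = 0;

    /* Register 2 = PHY ID1, register 3 = PHY ID2 */
    if ((lpc_mii_read(2, &id1) == ERR_OK) &&
        (lpc_mii_read(3, &id2) == ERR_OK)) {
        LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
            ("PHY ID: 0x%08x 0x%08x\n", id1, id2));
    }
}
#endif
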
emilmont 3:dd8b8f5b449a 223 /** \brief Queues a pbuf into the RX descriptor list
emilmont 3:dd8b8f5b449a 224 *
emilmont 3:dd8b8f5b449a 225 * \param[in] lpc_enetif Pointer to the driver data structure
emilmont 3:dd8b8f5b449a 226 * \param[in] p Pointer to pbuf to queue
emilmont 3:dd8b8f5b449a 227 */
emilmont 3:dd8b8f5b449a 228 static void lpc_rxqueue_pbuf(struct lpc_enetdata *lpc_enetif, struct pbuf *p)
emilmont 3:dd8b8f5b449a 229 {
emilmont 3:dd8b8f5b449a 230 u32_t idx;
emilmont 3:dd8b8f5b449a 231
emilmont 3:dd8b8f5b449a 232 /* Get next free descriptor index */
emilmont 3:dd8b8f5b449a 233 idx = lpc_enetif->rx_fill_desc_index;
emilmont 3:dd8b8f5b449a 234
emilmont 3:dd8b8f5b449a 235 /* Setup descriptor and clear statuses */
emilmont 3:dd8b8f5b449a 236 lpc_enetif->prxd[idx].control = EMAC_RCTRL_INT | ((u32_t) (p->len - 1));
emilmont 3:dd8b8f5b449a 237 lpc_enetif->prxd[idx].packet = (u32_t) p->payload;
emilmont 3:dd8b8f5b449a 238 lpc_enetif->prxs[idx].statusinfo = 0xFFFFFFFF;
emilmont 3:dd8b8f5b449a 239 lpc_enetif->prxs[idx].statushashcrc = 0xFFFFFFFF;
emilmont 3:dd8b8f5b449a 240
emilmont 3:dd8b8f5b449a 241 /* Save pbuf pointer for push to network layer later */
emilmont 3:dd8b8f5b449a 242 lpc_enetif->rxb[idx] = p;
emilmont 3:dd8b8f5b449a 243
emilmont 3:dd8b8f5b449a 244 /* Wrap at end of descriptor list */
emilmont 3:dd8b8f5b449a 245 idx++;
emilmont 3:dd8b8f5b449a 246 if (idx >= LPC_NUM_BUFF_RXDESCS)
emilmont 3:dd8b8f5b449a 247 idx = 0;
emilmont 3:dd8b8f5b449a 248
emilmont 3:dd8b8f5b449a 249 /* Queue descriptor(s) */
emilmont 3:dd8b8f5b449a 250 lpc_enetif->rx_free_descs -= 1;
emilmont 3:dd8b8f5b449a 251 lpc_enetif->rx_fill_desc_index = idx;
emilmont 3:dd8b8f5b449a 252 LPC_EMAC->RxConsumeIndex = idx;
emilmont 3:dd8b8f5b449a 253
emilmont 3:dd8b8f5b449a 254 LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
emilmont 3:dd8b8f5b449a 255 ("lpc_rxqueue_pbuf: pbuf packet queued: %p (free desc=%d)\n", p,
emilmont 3:dd8b8f5b449a 256 lpc_enetif->rx_free_descs));
emilmont 3:dd8b8f5b449a 257 }
emilmont 3:dd8b8f5b449a 258
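/* RX ring convention on the LPC17xx EMAC: the hardware advances
   RxProduceIndex as frames arrive, and the driver advances RxConsumeIndex
   as it hands descriptors back. Descriptors between the consume and
   produce indices hold received frames; re-arming a descriptor with a
   fresh pbuf in lpc_rxqueue_pbuf() above and bumping RxConsumeIndex
   returns it to the hardware. The ring is empty when both indices are
   equal. */
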
emilmont 3:dd8b8f5b449a 259 /** \brief Attempt to allocate and requeue a new pbuf for RX
emilmont 3:dd8b8f5b449a 260 *
emilmont 3:dd8b8f5b449a 261 * \param[in] netif Pointer to the netif structure
emilmont 3:dd8b8f5b449a 262 * \returns 1 if a packet was allocated and requeued, otherwise 0
emilmont 3:dd8b8f5b449a 263 */
emilmont 3:dd8b8f5b449a 264 s32_t lpc_rx_queue(struct netif *netif)
emilmont 3:dd8b8f5b449a 265 {
emilmont 3:dd8b8f5b449a 266 struct lpc_enetdata *lpc_enetif = netif->state;
emilmont 3:dd8b8f5b449a 267 struct pbuf *p;
emilmont 3:dd8b8f5b449a 268 s32_t queued = 0;
emilmont 3:dd8b8f5b449a 269
emilmont 3:dd8b8f5b449a 270 /* Attempt to requeue as many packets as possible */
emilmont 3:dd8b8f5b449a 271 while (lpc_enetif->rx_free_descs > 0) {
emilmont 3:dd8b8f5b449a 272 /* Allocate a pbuf from the pool. We need to allocate at the
emilmont 3:dd8b8f5b449a 273 maximum size as we don't know the size of the yet to be
emilmont 3:dd8b8f5b449a 274 received packet. */
emilmont 3:dd8b8f5b449a 275 p = pbuf_alloc(PBUF_RAW, (u16_t) EMAC_ETH_MAX_FLEN, PBUF_RAM);
emilmont 3:dd8b8f5b449a 276 if (p == NULL) {
emilmont 3:dd8b8f5b449a 277 LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
emilmont 3:dd8b8f5b449a 278 ("lpc_rx_queue: could not allocate RX pbuf (free desc=%d)\n",
emilmont 3:dd8b8f5b449a 279 lpc_enetif->rx_free_descs));
emilmont 3:dd8b8f5b449a 280 return queued;
emilmont 3:dd8b8f5b449a 281 }
emilmont 3:dd8b8f5b449a 282
emilmont 3:dd8b8f5b449a 283 /* pbufs allocated from the RAM pool should be non-chained. */
emilmont 3:dd8b8f5b449a 284 LWIP_ASSERT("lpc_rx_queue: pbuf is not contiguous (chained)",
emilmont 3:dd8b8f5b449a 285 pbuf_clen(p) <= 1);
emilmont 3:dd8b8f5b449a 286
emilmont 3:dd8b8f5b449a 287 /* Queue packet */
emilmont 3:dd8b8f5b449a 288 lpc_rxqueue_pbuf(lpc_enetif, p);
emilmont 3:dd8b8f5b449a 289
emilmont 3:dd8b8f5b449a 290 /* Update queued count */
emilmont 3:dd8b8f5b449a 291 queued++;
emilmont 3:dd8b8f5b449a 292 }
emilmont 3:dd8b8f5b449a 293
emilmont 3:dd8b8f5b449a 294 return queued;
emilmont 3:dd8b8f5b449a 295 }
emilmont 3:dd8b8f5b449a 296
emilmont 3:dd8b8f5b449a 297 /** \brief Sets up the RX descriptor ring buffers.
emilmont 3:dd8b8f5b449a 298 *
emilmont 3:dd8b8f5b449a 299 * This function sets up the descriptor list used for receive packets.
emilmont 3:dd8b8f5b449a 300 *
emilmont 3:dd8b8f5b449a 301 * \param[in] lpc_enetif Pointer to driver data structure
emilmont 3:dd8b8f5b449a 302 * \returns Always returns ERR_OK
emilmont 3:dd8b8f5b449a 303 */
emilmont 3:dd8b8f5b449a 304 static err_t lpc_rx_setup(struct lpc_enetdata *lpc_enetif)
emilmont 3:dd8b8f5b449a 305 {
emilmont 3:dd8b8f5b449a 306 /* Setup pointers to RX structures */
emilmont 3:dd8b8f5b449a 307 LPC_EMAC->RxDescriptor = (u32_t) &lpc_enetif->prxd[0];
emilmont 3:dd8b8f5b449a 308 LPC_EMAC->RxStatus = (u32_t) &lpc_enetif->prxs[0];
emilmont 3:dd8b8f5b449a 309 LPC_EMAC->RxDescriptorNumber = LPC_NUM_BUFF_RXDESCS - 1;
emilmont 3:dd8b8f5b449a 310
emilmont 3:dd8b8f5b449a 311 lpc_enetif->rx_free_descs = LPC_NUM_BUFF_RXDESCS;
emilmont 3:dd8b8f5b449a 312 lpc_enetif->rx_fill_desc_index = 0;
emilmont 3:dd8b8f5b449a 313
emilmont 3:dd8b8f5b449a 314 /* Build RX buffer and descriptors */
emilmont 3:dd8b8f5b449a 315 lpc_rx_queue(lpc_enetif->netif);
emilmont 3:dd8b8f5b449a 316
emilmont 3:dd8b8f5b449a 317 return ERR_OK;
emilmont 3:dd8b8f5b449a 318 }
emilmont 3:dd8b8f5b449a 319
emilmont 3:dd8b8f5b449a 320 /** \brief Allocates a pbuf and returns the data from the incoming packet.
emilmont 3:dd8b8f5b449a 321 *
emilmont 3:dd8b8f5b449a 322 * \param[in] netif the lwip network interface structure for this lpc_enetif
emilmont 3:dd8b8f5b449a 323 * \return a pbuf filled with the received packet (including MAC header)
emilmont 3:dd8b8f5b449a 324 * NULL on memory error
emilmont 3:dd8b8f5b449a 325 */
emilmont 3:dd8b8f5b449a 326 static struct pbuf *lpc_low_level_input(struct netif *netif)
emilmont 3:dd8b8f5b449a 327 {
emilmont 3:dd8b8f5b449a 328 struct lpc_enetdata *lpc_enetif = netif->state;
emilmont 3:dd8b8f5b449a 329 struct pbuf *p = NULL;
emilmont 3:dd8b8f5b449a 330 u32_t idx, length;
emilmont 3:dd8b8f5b449a 331
emilmont 3:dd8b8f5b449a 332 #ifdef LOCK_RX_THREAD
emilmont 3:dd8b8f5b449a 333 #if NO_SYS == 0
emilmont 3:dd8b8f5b449a 334 /* Get exclusive access */
emilmont 3:dd8b8f5b449a 335 sys_mutex_lock(&lpc_enetif->TXLockMutex);
emilmont 3:dd8b8f5b449a 336 #endif
emilmont 3:dd8b8f5b449a 337 #endif
emilmont 3:dd8b8f5b449a 338
emilmont 3:dd8b8f5b449a 339 /* Monitor RX overrun status. This should never happen unless
emilmont 3:dd8b8f5b449a 340 (possibly) the internal bus is being held up by something.
emilmont 3:dd8b8f5b449a 341 Unless your system is running at a very low clock speed or the
emilmont 3:dd8b8f5b449a 342 internal buses can be held up for long periods, this check can
emilmont 3:dd8b8f5b449a 343 probably be removed safely. */
emilmont 3:dd8b8f5b449a 344 if (LPC_EMAC->IntStatus & EMAC_INT_RX_OVERRUN) {
emilmont 3:dd8b8f5b449a 345 LINK_STATS_INC(link.err);
emilmont 3:dd8b8f5b449a 346 LINK_STATS_INC(link.drop);
emilmont 3:dd8b8f5b449a 347
emilmont 3:dd8b8f5b449a 348 /* Temporarily disable RX */
emilmont 3:dd8b8f5b449a 349 LPC_EMAC->MAC1 &= ~EMAC_MAC1_REC_EN;
emilmont 3:dd8b8f5b449a 350
emilmont 3:dd8b8f5b449a 351 /* Reset the RX side */
emilmont 3:dd8b8f5b449a 352 LPC_EMAC->MAC1 |= EMAC_MAC1_RES_RX;
emilmont 3:dd8b8f5b449a 353 LPC_EMAC->IntClear = EMAC_INT_RX_OVERRUN;
emilmont 3:dd8b8f5b449a 354
emilmont 3:dd8b8f5b449a 355 /* De-allocate all queued RX pbufs */
emilmont 3:dd8b8f5b449a 356 for (idx = 0; idx < LPC_NUM_BUFF_RXDESCS; idx++) {
emilmont 3:dd8b8f5b449a 357 if (lpc_enetif->rxb[idx] != NULL) {
emilmont 3:dd8b8f5b449a 358 pbuf_free(lpc_enetif->rxb[idx]);
emilmont 3:dd8b8f5b449a 359 lpc_enetif->rxb[idx] = NULL;
emilmont 3:dd8b8f5b449a 360 }
emilmont 3:dd8b8f5b449a 361 }
emilmont 3:dd8b8f5b449a 362
emilmont 3:dd8b8f5b449a 363 /* Start RX side again */
emilmont 3:dd8b8f5b449a 364 lpc_rx_setup(lpc_enetif);
emilmont 3:dd8b8f5b449a 365
emilmont 3:dd8b8f5b449a 366 /* Re-enable RX */
emilmont 3:dd8b8f5b449a 367 LPC_EMAC->MAC1 |= EMAC_MAC1_REC_EN;
emilmont 3:dd8b8f5b449a 368
emilmont 3:dd8b8f5b449a 369 #ifdef LOCK_RX_THREAD
emilmont 3:dd8b8f5b449a 370 #if NO_SYS == 0
emilmont 3:dd8b8f5b449a 371 sys_mutex_unlock(&lpc_enetif->TXLockMutex);
emilmont 3:dd8b8f5b449a 372 #endif
emilmont 3:dd8b8f5b449a 373 #endif
emilmont 3:dd8b8f5b449a 374
emilmont 3:dd8b8f5b449a 375 return NULL;
emilmont 3:dd8b8f5b449a 376 }
emilmont 3:dd8b8f5b449a 377
emilmont 3:dd8b8f5b449a 378 /* Determine if a frame has been received */
emilmont 3:dd8b8f5b449a 379 length = 0;
emilmont 3:dd8b8f5b449a 380 idx = LPC_EMAC->RxConsumeIndex;
emilmont 3:dd8b8f5b449a 381 if (LPC_EMAC->RxProduceIndex != idx) {
emilmont 3:dd8b8f5b449a 382 /* Handle errors */
emilmont 3:dd8b8f5b449a 383 if (lpc_enetif->prxs[idx].statusinfo & (EMAC_RINFO_CRC_ERR |
emilmont 3:dd8b8f5b449a 384 EMAC_RINFO_SYM_ERR | EMAC_RINFO_ALIGN_ERR | EMAC_RINFO_LEN_ERR)) {
emilmont 3:dd8b8f5b449a 385 #if LINK_STATS
emilmont 3:dd8b8f5b449a 386 if (lpc_enetif->prxs[idx].statusinfo & (EMAC_RINFO_CRC_ERR |
emilmont 3:dd8b8f5b449a 387 EMAC_RINFO_SYM_ERR | EMAC_RINFO_ALIGN_ERR))
emilmont 3:dd8b8f5b449a 388 LINK_STATS_INC(link.chkerr);
emilmont 3:dd8b8f5b449a 389 if (lpc_enetif->prxs[idx].statusinfo & EMAC_RINFO_LEN_ERR)
emilmont 3:dd8b8f5b449a 390 LINK_STATS_INC(link.lenerr);
emilmont 3:dd8b8f5b449a 391 #endif
emilmont 3:dd8b8f5b449a 392
emilmont 3:dd8b8f5b449a 393 /* Drop the frame */
emilmont 3:dd8b8f5b449a 394 LINK_STATS_INC(link.drop);
emilmont 3:dd8b8f5b449a 395
emilmont 3:dd8b8f5b449a 396 /* Re-queue the pbuf for receive */
emilmont 3:dd8b8f5b449a 397 lpc_enetif->rx_free_descs++;
emilmont 3:dd8b8f5b449a 398 p = lpc_enetif->rxb[idx];
emilmont 3:dd8b8f5b449a 399 lpc_enetif->rxb[idx] = NULL;
emilmont 3:dd8b8f5b449a 400 lpc_rxqueue_pbuf(lpc_enetif, p);
emilmont 3:dd8b8f5b449a 401
emilmont 3:dd8b8f5b449a 402 LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
emilmont 3:dd8b8f5b449a 403 ("lpc_low_level_input: Packet dropped with errors (0x%x)\n",
emilmont 3:dd8b8f5b449a 404 lpc_enetif->prxs[idx].statusinfo));
emilmont 3:dd8b8f5b449a 405 } else {
emilmont 3:dd8b8f5b449a 406 /* A packet is waiting, get length */
emilmont 3:dd8b8f5b449a 407 length = (lpc_enetif->prxs[idx].statusinfo & 0x7FF) + 1;
emilmont 3:dd8b8f5b449a 408
emilmont 3:dd8b8f5b449a 409 /* Zero-copy */
emilmont 3:dd8b8f5b449a 410 p = lpc_enetif->rxb[idx];
emilmont 3:dd8b8f5b449a 411 p->len = (u16_t) length;
emilmont 3:dd8b8f5b449a 412
emilmont 3:dd8b8f5b449a 413 /* Free pbuf from descriptor */
emilmont 3:dd8b8f5b449a 414 lpc_enetif->rxb[idx] = NULL;
emilmont 3:dd8b8f5b449a 415 lpc_enetif->rx_free_descs++;
emilmont 3:dd8b8f5b449a 416
emilmont 3:dd8b8f5b449a 417 LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
emilmont 3:dd8b8f5b449a 418 ("lpc_low_level_input: Packet received: %p, size %d (index=%d)\n",
emilmont 3:dd8b8f5b449a 419 p, length, idx));
emilmont 3:dd8b8f5b449a 420
emilmont 3:dd8b8f5b449a 421 /* Save size */
emilmont 3:dd8b8f5b449a 422 p->tot_len = (u16_t) length;
emilmont 3:dd8b8f5b449a 423 LINK_STATS_INC(link.recv);
emilmont 3:dd8b8f5b449a 424
emilmont 3:dd8b8f5b449a 425 /* Queue new buffer(s) */
emilmont 3:dd8b8f5b449a 426 lpc_rx_queue(lpc_enetif->netif);
emilmont 3:dd8b8f5b449a 427 }
emilmont 3:dd8b8f5b449a 428 }
emilmont 3:dd8b8f5b449a 429
emilmont 3:dd8b8f5b449a 430 #ifdef LOCK_RX_THREAD
emilmont 3:dd8b8f5b449a 431 #if NO_SYS == 0
emilmont 3:dd8b8f5b449a 432 sys_mutex_unlock(&lpc_enetif->TXLockMutex);
emilmont 3:dd8b8f5b449a 433 #endif
emilmont 3:dd8b8f5b449a 434 #endif
emilmont 3:dd8b8f5b449a 435
emilmont 3:dd8b8f5b449a 436 return p;
emilmont 3:dd8b8f5b449a 437 }
emilmont 3:dd8b8f5b449a 438
emilmont 3:dd8b8f5b449a 439 /** \brief Attempt to read a packet from the EMAC interface.
emilmont 3:dd8b8f5b449a 440 *
emilmont 3:dd8b8f5b449a 441 * \param[in] netif the lwip network interface structure for this lpc_enetif
emilmont 3:dd8b8f5b449a 442 */
emilmont 3:dd8b8f5b449a 443 void lpc_enetif_input(struct netif *netif)
emilmont 3:dd8b8f5b449a 444 {
emilmont 3:dd8b8f5b449a 445 struct eth_hdr *ethhdr;
emilmont 3:dd8b8f5b449a 446 struct pbuf *p;
emilmont 3:dd8b8f5b449a 447
emilmont 3:dd8b8f5b449a 448 /* move received packet into a new pbuf */
emilmont 3:dd8b8f5b449a 449 p = lpc_low_level_input(netif);
emilmont 3:dd8b8f5b449a 450 if (p == NULL)
emilmont 3:dd8b8f5b449a 451 return;
emilmont 3:dd8b8f5b449a 452
emilmont 3:dd8b8f5b449a 453 /* points to packet payload, which starts with an Ethernet header */
emilmont 3:dd8b8f5b449a 454 ethhdr = p->payload;
emilmont 3:dd8b8f5b449a 455
emilmont 3:dd8b8f5b449a 456 switch (htons(ethhdr->type)) {
emilmont 3:dd8b8f5b449a 457 case ETHTYPE_IP:
emilmont 3:dd8b8f5b449a 458 case ETHTYPE_ARP:
emilmont 3:dd8b8f5b449a 459 #if PPPOE_SUPPORT
emilmont 3:dd8b8f5b449a 460 case ETHTYPE_PPPOEDISC:
emilmont 3:dd8b8f5b449a 461 case ETHTYPE_PPPOE:
emilmont 3:dd8b8f5b449a 462 #endif /* PPPOE_SUPPORT */
emilmont 3:dd8b8f5b449a 463 /* full packet send to tcpip_thread to process */
emilmont 3:dd8b8f5b449a 464 if (netif->input(p, netif) != ERR_OK) {
emilmont 3:dd8b8f5b449a 465 LWIP_DEBUGF(NETIF_DEBUG, ("lpc_enetif_input: IP input error\n"));
emilmont 3:dd8b8f5b449a 466 /* Free buffer */
emilmont 3:dd8b8f5b449a 467 pbuf_free(p);
emilmont 3:dd8b8f5b449a 468 }
emilmont 3:dd8b8f5b449a 469 break;
emilmont 3:dd8b8f5b449a 470
emilmont 3:dd8b8f5b449a 471 default:
emilmont 3:dd8b8f5b449a 472 /* Return buffer */
emilmont 3:dd8b8f5b449a 473 pbuf_free(p);
emilmont 3:dd8b8f5b449a 474 break;
emilmont 3:dd8b8f5b449a 475 }
emilmont 3:dd8b8f5b449a 476 }
emilmont 3:dd8b8f5b449a 477
emilmont 3:dd8b8f5b449a 478 /** \brief Determine if the passed address is usable for the ethernet
emilmont 3:dd8b8f5b449a 479 * DMA controller.
emilmont 3:dd8b8f5b449a 480 *
emilmont 3:dd8b8f5b449a 481 * \param[in] addr Address of packet to check for DMA safe operation
emilmont 3:dd8b8f5b449a 482 * \return 1 if the packet address is not safe, otherwise 0
emilmont 3:dd8b8f5b449a 483 */
emilmont 3:dd8b8f5b449a 484 static s32_t lpc_packet_addr_notsafe(void *addr) {
emilmont 3:dd8b8f5b449a 485 /* Check for legal address ranges */
emilmont 3:dd8b8f5b449a 486 if ((((u32_t) addr >= 0x2007C000) && ((u32_t) addr < 0x20083FFF))) {
emilmont 3:dd8b8f5b449a 487 return 0;
emilmont 3:dd8b8f5b449a 488 }
emilmont 3:dd8b8f5b449a 489 return 1;
emilmont 3:dd8b8f5b449a 490 }
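
/* The address window accepted above corresponds to the two 16 KB AHB SRAM
   banks of the LPC176x (based at 0x2007C000 and 0x20080000). Buffers handed
   to the EMAC DMA are expected to live there; anything else (flash, main
   SRAM) is reported as not DMA safe and triggers the bounce-buffer path in
   lpc_low_level_output(). */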
emilmont 3:dd8b8f5b449a 491
emilmont 3:dd8b8f5b449a 492 /** \brief Sets up the TX descriptor ring buffers.
emilmont 3:dd8b8f5b449a 493 *
emilmont 3:dd8b8f5b449a 494 * This function sets up the descriptor list used for transmit packets.
emilmont 3:dd8b8f5b449a 495 *
emilmont 3:dd8b8f5b449a 496 * \param[in] lpc_enetif Pointer to driver data structure
emilmont 3:dd8b8f5b449a 497 */
emilmont 3:dd8b8f5b449a 498 static err_t lpc_tx_setup(struct lpc_enetdata *lpc_enetif)
emilmont 3:dd8b8f5b449a 499 {
emilmont 3:dd8b8f5b449a 500 s32_t idx;
emilmont 3:dd8b8f5b449a 501
emilmont 3:dd8b8f5b449a 502 /* Build TX descriptors for local buffers */
emilmont 3:dd8b8f5b449a 503 for (idx = 0; idx < LPC_NUM_BUFF_TXDESCS; idx++) {
emilmont 3:dd8b8f5b449a 504 lpc_enetif->ptxd[idx].control = 0;
emilmont 3:dd8b8f5b449a 505 lpc_enetif->ptxs[idx].statusinfo = 0xFFFFFFFF;
emilmont 3:dd8b8f5b449a 506 }
emilmont 3:dd8b8f5b449a 507
emilmont 3:dd8b8f5b449a 508 /* Setup pointers to TX structures */
emilmont 3:dd8b8f5b449a 509 LPC_EMAC->TxDescriptor = (u32_t) &lpc_enetif->ptxd[0];
emilmont 3:dd8b8f5b449a 510 LPC_EMAC->TxStatus = (u32_t) &lpc_enetif->ptxs[0];
emilmont 3:dd8b8f5b449a 511 LPC_EMAC->TxDescriptorNumber = LPC_NUM_BUFF_TXDESCS - 1;
emilmont 3:dd8b8f5b449a 512
emilmont 3:dd8b8f5b449a 513 lpc_enetif->lpc_last_tx_idx = 0;
emilmont 3:dd8b8f5b449a 514
emilmont 3:dd8b8f5b449a 515 return ERR_OK;
emilmont 3:dd8b8f5b449a 516 }
emilmont 3:dd8b8f5b449a 517
emilmont 3:dd8b8f5b449a 518 /** \brief Free TX buffers that are complete
emilmont 3:dd8b8f5b449a 519 *
emilmont 3:dd8b8f5b449a 520 * \param[in] lpc_enetif Pointer to driver data structure
emilmont 3:dd8b8f5b449a 521 * \param[in] cidx EMAC current descriptor consumer index
emilmont 3:dd8b8f5b449a 522 */
emilmont 3:dd8b8f5b449a 523 static void lpc_tx_reclaim_st(struct lpc_enetdata *lpc_enetif, u32_t cidx)
emilmont 3:dd8b8f5b449a 524 {
emilmont 3:dd8b8f5b449a 525 #if NO_SYS == 0
emilmont 3:dd8b8f5b449a 526 /* Get exclusive access */
emilmont 3:dd8b8f5b449a 527 sys_mutex_lock(&lpc_enetif->TXLockMutex);
emilmont 3:dd8b8f5b449a 528 #endif
emilmont 3:dd8b8f5b449a 529
emilmont 3:dd8b8f5b449a 530 while (cidx != lpc_enetif->lpc_last_tx_idx) {
emilmont 3:dd8b8f5b449a 531 if (lpc_enetif->txb[lpc_enetif->lpc_last_tx_idx] != NULL) {
emilmont 3:dd8b8f5b449a 532 LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
emilmont 3:dd8b8f5b449a 533 ("lpc_tx_reclaim_st: Freeing packet %p (index %d)\n",
emilmont 3:dd8b8f5b449a 534 lpc_enetif->txb[lpc_enetif->lpc_last_tx_idx],
emilmont 3:dd8b8f5b449a 535 lpc_enetif->lpc_last_tx_idx));
emilmont 3:dd8b8f5b449a 536 pbuf_free(lpc_enetif->txb[lpc_enetif->lpc_last_tx_idx]);
emilmont 3:dd8b8f5b449a 537 lpc_enetif->txb[lpc_enetif->lpc_last_tx_idx] = NULL;
emilmont 3:dd8b8f5b449a 538 }
emilmont 3:dd8b8f5b449a 539
emilmont 3:dd8b8f5b449a 540 #if NO_SYS == 0
emilmont 3:dd8b8f5b449a 541 osSemaphoreRelease(lpc_enetif->xTXDCountSem.id);
emilmont 3:dd8b8f5b449a 542 #endif
emilmont 3:dd8b8f5b449a 543 lpc_enetif->lpc_last_tx_idx++;
emilmont 3:dd8b8f5b449a 544 if (lpc_enetif->lpc_last_tx_idx >= LPC_NUM_BUFF_TXDESCS)
emilmont 3:dd8b8f5b449a 545 lpc_enetif->lpc_last_tx_idx = 0;
emilmont 3:dd8b8f5b449a 546 }
emilmont 3:dd8b8f5b449a 547
emilmont 3:dd8b8f5b449a 548 #if NO_SYS == 0
emilmont 3:dd8b8f5b449a 549 /* Restore access */
emilmont 3:dd8b8f5b449a 550 sys_mutex_unlock(&lpc_enetif->TXLockMutex);
emilmont 3:dd8b8f5b449a 551 #endif
emilmont 3:dd8b8f5b449a 552 }
emilmont 3:dd8b8f5b449a 553
emilmont 3:dd8b8f5b449a 554 /** \brief User call for freeing TX buffers that are complete
emilmont 3:dd8b8f5b449a 555 *
emilmont 3:dd8b8f5b449a 556 * \param[in] netif the lwip network interface structure for this lpc_enetif
emilmont 3:dd8b8f5b449a 557 */
emilmont 3:dd8b8f5b449a 558 void lpc_tx_reclaim(struct netif *netif)
emilmont 3:dd8b8f5b449a 559 {
emilmont 3:dd8b8f5b449a 560 lpc_tx_reclaim_st((struct lpc_enetdata *) netif->state,
emilmont 3:dd8b8f5b449a 561 LPC_EMAC->TxConsumeIndex);
emilmont 3:dd8b8f5b449a 562 }
emilmont 3:dd8b8f5b449a 563
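/* Note for lpc_tx_ready() below: the EMAC treats TxProduceIndex ==
   TxConsumeIndex as an empty ring, so the driver must never let the produce
   index catch back up with the consume index; one descriptor therefore
   stays unused whenever the ring holds pending frames. */
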
emilmont 3:dd8b8f5b449a 564 /** \brief Polls if an available TX descriptor is ready. Can be used to
emilmont 3:dd8b8f5b449a 565 * determine if the low level transmit function will block.
emilmont 3:dd8b8f5b449a 566 *
emilmont 3:dd8b8f5b449a 567 * \param[in] netif the lwip network interface structure for this lpc_enetif
emilmont 3:dd8b8f5b449a 568 * \return 0 if no descriptors are ready, otherwise the number of free TX descriptors
emilmont 3:dd8b8f5b449a 569 */
emilmont 3:dd8b8f5b449a 570 s32_t lpc_tx_ready(struct netif *netif)
emilmont 3:dd8b8f5b449a 571 {
emilmont 3:dd8b8f5b449a 572 s32_t fb;
emilmont 3:dd8b8f5b449a 573 u32_t idx, cidx;
emilmont 3:dd8b8f5b449a 574
emilmont 3:dd8b8f5b449a 575 cidx = LPC_EMAC->TxConsumeIndex;
emilmont 3:dd8b8f5b449a 576 idx = LPC_EMAC->TxProduceIndex;
emilmont 3:dd8b8f5b449a 577
emilmont 3:dd8b8f5b449a 578 /* Determine number of free buffers */
emilmont 3:dd8b8f5b449a 579 if (idx == cidx)
emilmont 3:dd8b8f5b449a 580 fb = LPC_NUM_BUFF_TXDESCS;
emilmont 3:dd8b8f5b449a 581 else if (cidx > idx)
emilmont 3:dd8b8f5b449a 582 fb = (LPC_NUM_BUFF_TXDESCS - 1) -
emilmont 3:dd8b8f5b449a 583 ((idx + LPC_NUM_BUFF_TXDESCS) - cidx);
emilmont 3:dd8b8f5b449a 584 else
emilmont 3:dd8b8f5b449a 585 fb = (LPC_NUM_BUFF_TXDESCS - 1) - (idx - cidx);
emilmont 3:dd8b8f5b449a 586
emilmont 3:dd8b8f5b449a 587 return fb;
emilmont 3:dd8b8f5b449a 588 }
emilmont 3:dd8b8f5b449a 589
emilmont 3:dd8b8f5b449a 590 /** \brief Low level output of a packet. Never call this from an
emilmont 3:dd8b8f5b449a 591 * interrupt context, as it may block until TX descriptors
emilmont 3:dd8b8f5b449a 592 * become available.
emilmont 3:dd8b8f5b449a 593 *
emilmont 3:dd8b8f5b449a 594 * \param[in] netif the lwip network interface structure for this lpc_enetif
emilmont 3:dd8b8f5b449a 595 * \param[in] p the MAC packet to send (e.g. IP packet including MAC addresses and type)
emilmont 3:dd8b8f5b449a 596 * \return ERR_OK if the packet could be sent or an err_t value if the packet couldn't be sent
emilmont 3:dd8b8f5b449a 597 */
emilmont 3:dd8b8f5b449a 598 static err_t lpc_low_level_output(struct netif *netif, struct pbuf *p)
emilmont 3:dd8b8f5b449a 599 {
emilmont 3:dd8b8f5b449a 600 struct lpc_enetdata *lpc_enetif = netif->state;
emilmont 3:dd8b8f5b449a 601 struct pbuf *q;
emilmont 3:dd8b8f5b449a 602 u8_t *dst;
emilmont 3:dd8b8f5b449a 603 u32_t idx;
emilmont 3:dd8b8f5b449a 604 struct pbuf *np;
emilmont 3:dd8b8f5b449a 605 u32_t dn, notdmasafe = 0;
emilmont 3:dd8b8f5b449a 606
emilmont 3:dd8b8f5b449a 607 /* Zero-copy TX buffers may be fragmented across multiple payload
emilmont 3:dd8b8f5b449a 608 chains. Determine the number of descriptors needed for the
emilmont 3:dd8b8f5b449a 609 transfer. The pbuf chaining can be a mess! */
emilmont 3:dd8b8f5b449a 610 dn = (u32_t) pbuf_clen(p);
emilmont 3:dd8b8f5b449a 611
emilmont 3:dd8b8f5b449a 612 /* Test to make sure packet addresses are DMA safe. A DMA safe
emilmont 3:dd8b8f5b449a 613 address is one that uses external memory or peripheral RAM.
emilmont 3:dd8b8f5b449a 614 IRAM and FLASH are not safe! */
emilmont 3:dd8b8f5b449a 615 for (q = p; q != NULL; q = q->next)
emilmont 3:dd8b8f5b449a 616 notdmasafe += lpc_packet_addr_notsafe(q->payload);
emilmont 3:dd8b8f5b449a 617
emilmont 3:dd8b8f5b449a 618 #if LPC_TX_PBUF_BOUNCE_EN==1
emilmont 3:dd8b8f5b449a 619 /* If the pbuf is not DMA safe, a new bounce buffer (pbuf) will be
emilmont 3:dd8b8f5b449a 620 created that will be used instead. This requires a copy from the
emilmont 3:dd8b8f5b449a 621 non-safe DMA region to the new pbuf */
emilmont 3:dd8b8f5b449a 622 if (notdmasafe) {
emilmont 3:dd8b8f5b449a 623 /* Allocate a pbuf in DMA memory */
emilmont 3:dd8b8f5b449a 624 np = pbuf_alloc(PBUF_RAW, p->tot_len, PBUF_RAM);
emilmont 3:dd8b8f5b449a 625 if (np == NULL)
emilmont 3:dd8b8f5b449a 626 return ERR_MEM;
emilmont 3:dd8b8f5b449a 627
emilmont 3:dd8b8f5b449a 628 /* This buffer better be contiguous! */
emilmont 3:dd8b8f5b449a 629 LWIP_ASSERT("lpc_low_level_output: New transmit pbuf is chained",
emilmont 3:dd8b8f5b449a 630 (pbuf_clen(np) == 1));
emilmont 3:dd8b8f5b449a 631
emilmont 3:dd8b8f5b449a 632 /* Copy to DMA safe pbuf */
emilmont 3:dd8b8f5b449a 633 dst = (u8_t *) np->payload;
emilmont 3:dd8b8f5b449a 634 for(q = p; q != NULL; q = q->next) {
emilmont 3:dd8b8f5b449a 635 /* Copy the buffer to the descriptor's buffer */
emilmont 3:dd8b8f5b449a 636 MEMCPY(dst, (u8_t *) q->payload, q->len);
emilmont 3:dd8b8f5b449a 637 dst += q->len;
emilmont 3:dd8b8f5b449a 638 }
emilmont 3:dd8b8f5b449a 639 np->len = p->tot_len;
emilmont 3:dd8b8f5b449a 640
emilmont 3:dd8b8f5b449a 641 LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
emilmont 3:dd8b8f5b449a 642 ("lpc_low_level_output: Switched to DMA safe buffer, old=%p, new=%p\n",
emilmont 3:dd8b8f5b449a 643 q, np));
emilmont 3:dd8b8f5b449a 644
emilmont 3:dd8b8f5b449a 645 /* use the new buffer for descriptor queueing. The original pbuf will
emilmont 3:dd8b8f5b449a 646 be de-allocated outside this driver. */
emilmont 3:dd8b8f5b449a 647 p = np;
emilmont 3:dd8b8f5b449a 648 dn = 1;
emilmont 3:dd8b8f5b449a 649 }
emilmont 3:dd8b8f5b449a 650 #else
emilmont 3:dd8b8f5b449a 651 if (notdmasafe)
emilmont 3:dd8b8f5b449a 652 LWIP_ASSERT("lpc_low_level_output: Not a DMA safe pbuf",
emilmont 3:dd8b8f5b449a 653 (notdmasafe == 0));
emilmont 3:dd8b8f5b449a 654 #endif
emilmont 3:dd8b8f5b449a 655
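/* The wait below relies on xTXDCountSem, which is created in
   lpc_enetif_init() with a count of LPC_NUM_BUFF_TXDESCS and has one token
   released by lpc_tx_reclaim_st() for every descriptor it frees, so the
   loop sleeps until the cleanup task has reclaimed enough descriptors. */
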
emilmont 3:dd8b8f5b449a 656 /* Wait until enough descriptors are available for the transfer. */
emilmont 3:dd8b8f5b449a 657 /* THIS WILL BLOCK UNTIL THERE ARE ENOUGH DESCRIPTORS AVAILABLE */
emilmont 3:dd8b8f5b449a 658 while (dn > lpc_tx_ready(netif))
emilmont 3:dd8b8f5b449a 659 #if NO_SYS == 0
emilmont 3:dd8b8f5b449a 660 osSemaphoreWait(lpc_enetif->xTXDCountSem.id, osWaitForever);
emilmont 3:dd8b8f5b449a 661 #else
emilmont 3:dd8b8f5b449a 662 osDelay(1);
emilmont 3:dd8b8f5b449a 663 #endif
emilmont 3:dd8b8f5b449a 664
emilmont 3:dd8b8f5b449a 665 /* Get free TX buffer index */
emilmont 3:dd8b8f5b449a 666 idx = LPC_EMAC->TxProduceIndex;
emilmont 3:dd8b8f5b449a 667
emilmont 3:dd8b8f5b449a 668 #if NO_SYS == 0
emilmont 3:dd8b8f5b449a 669 /* Get exclusive access */
emilmont 3:dd8b8f5b449a 670 sys_mutex_lock(&lpc_enetif->TXLockMutex);
emilmont 3:dd8b8f5b449a 671 #endif
emilmont 3:dd8b8f5b449a 672
emilmont 3:dd8b8f5b449a 673 /* Prevent LWIP from de-allocating this pbuf. The driver will
emilmont 3:dd8b8f5b449a 674 free it once it's been transmitted. */
emilmont 3:dd8b8f5b449a 675 if (!notdmasafe)
emilmont 3:dd8b8f5b449a 676 pbuf_ref(p);
emilmont 3:dd8b8f5b449a 677
emilmont 3:dd8b8f5b449a 678 /* Setup transfers */
emilmont 3:dd8b8f5b449a 679 q = p;
emilmont 3:dd8b8f5b449a 680 while (dn > 0) {
emilmont 3:dd8b8f5b449a 681 dn--;
emilmont 3:dd8b8f5b449a 682
emilmont 3:dd8b8f5b449a 683 /* Only save pointer to free on last descriptor */
emilmont 3:dd8b8f5b449a 684 if (dn == 0) {
emilmont 3:dd8b8f5b449a 685 /* Save size of packet and signal it's ready */
emilmont 3:dd8b8f5b449a 686 lpc_enetif->ptxd[idx].control = (q->len - 1) | EMAC_TCTRL_INT |
emilmont 3:dd8b8f5b449a 687 EMAC_TCTRL_LAST;
emilmont 3:dd8b8f5b449a 688 lpc_enetif->txb[idx] = p;
emilmont 3:dd8b8f5b449a 689 }
emilmont 3:dd8b8f5b449a 690 else {
emilmont 3:dd8b8f5b449a 691 /* Save size of packet, descriptor is not last */
emilmont 3:dd8b8f5b449a 692 lpc_enetif->ptxd[idx].control = (q->len - 1) | EMAC_TCTRL_INT;
emilmont 3:dd8b8f5b449a 693 lpc_enetif->txb[idx] = NULL;
emilmont 3:dd8b8f5b449a 694 }
emilmont 3:dd8b8f5b449a 695
emilmont 3:dd8b8f5b449a 696 LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
emilmont 3:dd8b8f5b449a 697 ("lpc_low_level_output: pbuf packet(%p) sent, chain#=%d,"
emilmont 3:dd8b8f5b449a 698 " size = %d (index=%d)\n", q->payload, dn, q->len, idx));
emilmont 3:dd8b8f5b449a 699
emilmont 3:dd8b8f5b449a 700 lpc_enetif->ptxd[idx].packet = (u32_t) q->payload;
emilmont 3:dd8b8f5b449a 701
emilmont 3:dd8b8f5b449a 702 q = q->next;
emilmont 3:dd8b8f5b449a 703
emilmont 3:dd8b8f5b449a 704 idx++;
emilmont 3:dd8b8f5b449a 705 if (idx >= LPC_NUM_BUFF_TXDESCS)
emilmont 3:dd8b8f5b449a 706 idx = 0;
emilmont 3:dd8b8f5b449a 707 }
emilmont 3:dd8b8f5b449a 708
emilmont 3:dd8b8f5b449a 709 LPC_EMAC->TxProduceIndex = idx;
emilmont 3:dd8b8f5b449a 710
emilmont 3:dd8b8f5b449a 711 LINK_STATS_INC(link.xmit);
emilmont 3:dd8b8f5b449a 712
emilmont 3:dd8b8f5b449a 713 #if NO_SYS == 0
emilmont 3:dd8b8f5b449a 714 /* Restore access */
emilmont 3:dd8b8f5b449a 715 sys_mutex_unlock(&lpc_enetif->TXLockMutex);
emilmont 3:dd8b8f5b449a 716 #endif
emilmont 3:dd8b8f5b449a 717
emilmont 3:dd8b8f5b449a 718 return ERR_OK;
emilmont 3:dd8b8f5b449a 719 }
emilmont 3:dd8b8f5b449a 720
emilmont 3:dd8b8f5b449a 721 /** \brief LPC EMAC interrupt handler.
emilmont 3:dd8b8f5b449a 722 *
emilmont 3:dd8b8f5b449a 723 * This function handles the transmit, receive, and error interrupt of
emilmont 3:dd8b8f5b449a 724 * the LPC177x_8x. This is meant to be used when NO_SYS=0.
emilmont 3:dd8b8f5b449a 725 */
emilmont 3:dd8b8f5b449a 726 void ENET_IRQHandler(void)
emilmont 3:dd8b8f5b449a 727 {
emilmont 3:dd8b8f5b449a 728 #if NO_SYS == 1
emilmont 3:dd8b8f5b449a 729 /* Interrupts are not used without an RTOS */
emilmont 3:dd8b8f5b449a 730 NVIC_DisableIRQ(ENET_IRQn);
emilmont 3:dd8b8f5b449a 731 #else
emilmont 3:dd8b8f5b449a 732 uint32_t ints;
emilmont 3:dd8b8f5b449a 733
emilmont 3:dd8b8f5b449a 734 /* Interrupts are of 2 groups - transmit or receive. Based on the
emilmont 3:dd8b8f5b449a 735 interrupt, kick off the receive or transmit (cleanup) task */
emilmont 3:dd8b8f5b449a 736
emilmont 3:dd8b8f5b449a 737 /* Get pending interrupts */
emilmont 3:dd8b8f5b449a 738 ints = LPC_EMAC->IntStatus;
emilmont 3:dd8b8f5b449a 739
emilmont 3:dd8b8f5b449a 740 if (ints & RXINTGROUP) {
emilmont 3:dd8b8f5b449a 741 /* RX group interrupt(s): Give semaphore to wakeup RX receive task.*/
emilmont 3:dd8b8f5b449a 742 sys_sem_signal(&lpc_enetdata.RxSem);
emilmont 3:dd8b8f5b449a 743 }
emilmont 3:dd8b8f5b449a 744
emilmont 3:dd8b8f5b449a 745 if (ints & TXINTGROUP) {
emilmont 3:dd8b8f5b449a 746 /* TX group interrupt(s): Give semaphore to wakeup TX cleanup task. */
emilmont 3:dd8b8f5b449a 747 sys_sem_signal(&lpc_enetdata.TxCleanSem);
emilmont 3:dd8b8f5b449a 748 }
emilmont 3:dd8b8f5b449a 749
emilmont 3:dd8b8f5b449a 750 /* Clear pending interrupts */
emilmont 3:dd8b8f5b449a 751 LPC_EMAC->IntClear = ints;
emilmont 3:dd8b8f5b449a 752 #endif
emilmont 3:dd8b8f5b449a 753 }
emilmont 3:dd8b8f5b449a 754
emilmont 3:dd8b8f5b449a 755 #if NO_SYS == 0
emilmont 3:dd8b8f5b449a 756 /** \brief Packet reception task
emilmont 3:dd8b8f5b449a 757 *
emilmont 3:dd8b8f5b449a 758 * This task is called when a packet is received. It will
emilmont 3:dd8b8f5b449a 759 * pass the packet to the LWIP core.
emilmont 3:dd8b8f5b449a 760 *
emilmont 3:dd8b8f5b449a 761 * \param[in] pvParameters Not used yet
emilmont 3:dd8b8f5b449a 762 */
emilmont 3:dd8b8f5b449a 763 static void packet_rx(void* pvParameters) {
emilmont 3:dd8b8f5b449a 764 struct lpc_enetdata *lpc_enetif = pvParameters;
emilmont 3:dd8b8f5b449a 765
emilmont 3:dd8b8f5b449a 766 while (1) {
emilmont 3:dd8b8f5b449a 767 /* Wait for receive task to wakeup */
emilmont 3:dd8b8f5b449a 768 sys_arch_sem_wait(&lpc_enetif->RxSem, 0);
emilmont 3:dd8b8f5b449a 769
emilmont 3:dd8b8f5b449a 770 /* Process packets until all empty */
emilmont 3:dd8b8f5b449a 771 while (LPC_EMAC->RxConsumeIndex != LPC_EMAC->RxProduceIndex)
emilmont 3:dd8b8f5b449a 772 lpc_enetif_input(lpc_enetif->netif);
emilmont 3:dd8b8f5b449a 773 }
emilmont 3:dd8b8f5b449a 774 }
emilmont 3:dd8b8f5b449a 775
emilmont 3:dd8b8f5b449a 776 /** \brief Transmit cleanup task
emilmont 3:dd8b8f5b449a 777 *
emilmont 3:dd8b8f5b449a 778 * This task is called when a transmit interrupt occurs and
emilmont 3:dd8b8f5b449a 779 * reclaims the pbuf and descriptor used for the packet once
emilmont 3:dd8b8f5b449a 780 * the packet has been transferred.
emilmont 3:dd8b8f5b449a 781 *
emilmont 3:dd8b8f5b449a 782 * \param[in] pvParameters Not used yet
emilmont 3:dd8b8f5b449a 783 */
emilmont 3:dd8b8f5b449a 784 static void packet_tx(void* pvParameters) {
emilmont 3:dd8b8f5b449a 785 struct lpc_enetdata *lpc_enetif = pvParameters;
emilmont 3:dd8b8f5b449a 786 s32_t idx;
emilmont 3:dd8b8f5b449a 787
emilmont 3:dd8b8f5b449a 788 while (1) {
emilmont 3:dd8b8f5b449a 789 /* Wait for transmit cleanup task to wakeup */
emilmont 3:dd8b8f5b449a 790 sys_arch_sem_wait(&lpc_enetif->TxCleanSem, 0);
emilmont 3:dd8b8f5b449a 791
emilmont 3:dd8b8f5b449a 792 /* Error handling for TX underruns. This should never happen unless
emilmont 3:dd8b8f5b449a 793 something is holding the bus or the clocks are going too slow. It
emilmont 3:dd8b8f5b449a 794 can probably be safely removed. */
emilmont 3:dd8b8f5b449a 795 if (LPC_EMAC->IntStatus & EMAC_INT_TX_UNDERRUN) {
emilmont 3:dd8b8f5b449a 796 LINK_STATS_INC(link.err);
emilmont 3:dd8b8f5b449a 797 LINK_STATS_INC(link.drop);
emilmont 3:dd8b8f5b449a 798
emilmont 3:dd8b8f5b449a 799 #if NO_SYS == 0
emilmont 3:dd8b8f5b449a 800 /* Get exclusive access */
emilmont 3:dd8b8f5b449a 801 sys_mutex_lock(&lpc_enetif->TXLockMutex);
emilmont 3:dd8b8f5b449a 802 #endif
emilmont 3:dd8b8f5b449a 803 /* Reset the TX side */
emilmont 3:dd8b8f5b449a 804 LPC_EMAC->MAC1 |= EMAC_MAC1_RES_TX;
emilmont 3:dd8b8f5b449a 805 LPC_EMAC->IntClear = EMAC_INT_TX_UNDERRUN;
emilmont 3:dd8b8f5b449a 806
emilmont 3:dd8b8f5b449a 807 /* De-allocate all queued TX pbufs */
emilmont 3:dd8b8f5b449a 808 for (idx = 0; idx < LPC_NUM_BUFF_TXDESCS; idx++) {
emilmont 3:dd8b8f5b449a 809 if (lpc_enetif->txb[idx] != NULL) {
emilmont 3:dd8b8f5b449a 810 pbuf_free(lpc_enetif->txb[idx]);
emilmont 3:dd8b8f5b449a 811 lpc_enetif->txb[idx] = NULL;
emilmont 3:dd8b8f5b449a 812 }
emilmont 3:dd8b8f5b449a 813 }
emilmont 3:dd8b8f5b449a 814
emilmont 3:dd8b8f5b449a 815 #if NO_SYS == 0
emilmont 3:dd8b8f5b449a 816 /* Restore access */
emilmont 3:dd8b8f5b449a 817 sys_mutex_unlock(&lpc_enetif->TXLockMutex);
emilmont 3:dd8b8f5b449a 818 #endif
emilmont 3:dd8b8f5b449a 819 /* Start TX side again */
emilmont 3:dd8b8f5b449a 820 lpc_tx_setup(lpc_enetif);
emilmont 3:dd8b8f5b449a 821 } else {
emilmont 3:dd8b8f5b449a 822 /* Free TX buffers that are done sending */
emilmont 3:dd8b8f5b449a 823 lpc_tx_reclaim(lpc_enetdata.netif);
emilmont 3:dd8b8f5b449a 824 }
emilmont 3:dd8b8f5b449a 825 }
emilmont 3:dd8b8f5b449a 826 }
emilmont 3:dd8b8f5b449a 827 #endif
emilmont 3:dd8b8f5b449a 828
emilmont 3:dd8b8f5b449a 829 /** \brief Low level init of the MAC and PHY.
emilmont 3:dd8b8f5b449a 830 *
emilmont 3:dd8b8f5b449a 831 * \param[in] netif Pointer to LWIP netif structure
emilmont 3:dd8b8f5b449a 832 */
emilmont 3:dd8b8f5b449a 833 static err_t low_level_init(struct netif *netif)
emilmont 3:dd8b8f5b449a 834 {
emilmont 3:dd8b8f5b449a 835 struct lpc_enetdata *lpc_enetif = netif->state;
emilmont 3:dd8b8f5b449a 836 err_t err = ERR_OK;
emilmont 3:dd8b8f5b449a 837
emilmont 3:dd8b8f5b449a 838 /* Power up and enable clocking for the Ethernet block */
emilmont 3:dd8b8f5b449a 839 LPC_SC->PCONP |= CLKPWR_PCONP_PCENET;
emilmont 3:dd8b8f5b449a 840
emilmont 3:dd8b8f5b449a 841 LPC_PINCON->PINSEL2 = 0x50150105; /* Enable P1 Ethernet Pins. */
emilmont 3:dd8b8f5b449a 842 LPC_PINCON->PINSEL3 = (LPC_PINCON->PINSEL3 & ~0x0000000F) | 0x00000005;
emilmont 3:dd8b8f5b449a 843
emilmont 3:dd8b8f5b449a 844 /* Reset all MAC logic */
emilmont 3:dd8b8f5b449a 845 LPC_EMAC->MAC1 = EMAC_MAC1_RES_TX | EMAC_MAC1_RES_MCS_TX |
emilmont 3:dd8b8f5b449a 846 EMAC_MAC1_RES_RX | EMAC_MAC1_RES_MCS_RX | EMAC_MAC1_SIM_RES |
emilmont 3:dd8b8f5b449a 847 EMAC_MAC1_SOFT_RES;
emilmont 3:dd8b8f5b449a 848 LPC_EMAC->Command = EMAC_CR_REG_RES | EMAC_CR_TX_RES | EMAC_CR_RX_RES |
emilmont 3:dd8b8f5b449a 849 EMAC_CR_PASS_RUNT_FRM;
emilmont 3:dd8b8f5b449a 850 osDelay(10);
emilmont 3:dd8b8f5b449a 851
emilmont 3:dd8b8f5b449a 852 /* Initial MAC initialization */
emilmont 3:dd8b8f5b449a 853 LPC_EMAC->MAC1 = EMAC_MAC1_PASS_ALL;
emilmont 3:dd8b8f5b449a 854 LPC_EMAC->MAC2 = EMAC_MAC2_CRC_EN | EMAC_MAC2_PAD_EN |
emilmont 3:dd8b8f5b449a 855 EMAC_MAC2_VLAN_PAD_EN;
emilmont 3:dd8b8f5b449a 856 LPC_EMAC->MAXF = EMAC_ETH_MAX_FLEN;
emilmont 3:dd8b8f5b449a 857
emilmont 3:dd8b8f5b449a 858 /* Set RMII management clock rate to lowest speed */
emilmont 3:dd8b8f5b449a 859 LPC_EMAC->MCFG = EMAC_MCFG_CLK_SEL(11) | EMAC_MCFG_RES_MII;
emilmont 3:dd8b8f5b449a 860 LPC_EMAC->MCFG &= ~EMAC_MCFG_RES_MII;
emilmont 3:dd8b8f5b449a 861
emilmont 3:dd8b8f5b449a 862 /* Maximum number of retries, 0x37 collision window, gap */
emilmont 3:dd8b8f5b449a 863 LPC_EMAC->CLRT = EMAC_CLRT_DEF;
emilmont 3:dd8b8f5b449a 864 LPC_EMAC->IPGR = EMAC_IPGR_P1_DEF | EMAC_IPGR_P2_DEF;
emilmont 3:dd8b8f5b449a 865
emilmont 3:dd8b8f5b449a 866 #if LPC_EMAC_RMII
emilmont 3:dd8b8f5b449a 867 /* RMII setup */
emilmont 3:dd8b8f5b449a 868 LPC_EMAC->Command = EMAC_CR_PASS_RUNT_FRM | EMAC_CR_RMII;
emilmont 3:dd8b8f5b449a 869 #else
emilmont 3:dd8b8f5b449a 870 /* MII setup */
emilmont 3:dd8b8f5b449a 871 LPC_EMAC->Command = EMAC_CR_PASS_RUNT_FRM;
emilmont 3:dd8b8f5b449a 872 #endif
emilmont 3:dd8b8f5b449a 873
emilmont 3:dd8b8f5b449a 874 /* Initialize the PHY and reset */
emilmont 3:dd8b8f5b449a 875 err = lpc_phy_init(netif, LPC_EMAC_RMII);
emilmont 3:dd8b8f5b449a 876 if (err != ERR_OK)
emilmont 3:dd8b8f5b449a 877 return err;
emilmont 3:dd8b8f5b449a 878
emilmont 3:dd8b8f5b449a 879 /* Save station address */
emilmont 3:dd8b8f5b449a 880 LPC_EMAC->SA2 = (u32_t) netif->hwaddr[0] |
emilmont 3:dd8b8f5b449a 881 (((u32_t) netif->hwaddr[1]) << 8);
emilmont 3:dd8b8f5b449a 882 LPC_EMAC->SA1 = (u32_t) netif->hwaddr[2] |
emilmont 3:dd8b8f5b449a 883 (((u32_t) netif->hwaddr[3]) << 8);
emilmont 3:dd8b8f5b449a 884 LPC_EMAC->SA0 = (u32_t) netif->hwaddr[4] |
emilmont 3:dd8b8f5b449a 885 (((u32_t) netif->hwaddr[5]) << 8);
emilmont 3:dd8b8f5b449a 886
emilmont 3:dd8b8f5b449a 887 /* Setup transmit and receive descriptors */
emilmont 3:dd8b8f5b449a 888 if (lpc_tx_setup(lpc_enetif) != ERR_OK)
emilmont 3:dd8b8f5b449a 889 return ERR_BUF;
emilmont 3:dd8b8f5b449a 890 if (lpc_rx_setup(lpc_enetif) != ERR_OK)
emilmont 3:dd8b8f5b449a 891 return ERR_BUF;
emilmont 3:dd8b8f5b449a 892
emilmont 3:dd8b8f5b449a 893 /* Enable packet reception */
emilmont 3:dd8b8f5b449a 894 #if IP_SOF_BROADCAST_RECV
emilmont 3:dd8b8f5b449a 895 LPC_EMAC->RxFilterCtrl = EMAC_RFC_PERFECT_EN | EMAC_RFC_BCAST_EN;
emilmont 3:dd8b8f5b449a 896 #else
emilmont 3:dd8b8f5b449a 897 LPC_EMAC->RxFilterCtrl = EMAC_RFC_PERFECT_EN;
emilmont 3:dd8b8f5b449a 898 #endif
emilmont 3:dd8b8f5b449a 899
emilmont 3:dd8b8f5b449a 900 /* Clear and enable rx/tx interrupts */
emilmont 3:dd8b8f5b449a 901 LPC_EMAC->IntClear = 0xFFFF;
emilmont 3:dd8b8f5b449a 902 LPC_EMAC->IntEnable = RXINTGROUP | TXINTGROUP;
emilmont 3:dd8b8f5b449a 903
emilmont 3:dd8b8f5b449a 904 /* Enable RX and TX */
emilmont 3:dd8b8f5b449a 905 LPC_EMAC->Command |= EMAC_CR_RX_EN | EMAC_CR_TX_EN;
emilmont 3:dd8b8f5b449a 906 LPC_EMAC->MAC1 |= EMAC_MAC1_REC_EN;
emilmont 3:dd8b8f5b449a 907
emilmont 3:dd8b8f5b449a 908 return err;
emilmont 3:dd8b8f5b449a 909 }
emilmont 3:dd8b8f5b449a 910
emilmont 3:dd8b8f5b449a 911 /* This function provides a method for the PHY to set up the EMAC
emilmont 3:dd8b8f5b449a 912 for the PHY-negotiated duplex mode */
emilmont 3:dd8b8f5b449a 913 void lpc_emac_set_duplex(int full_duplex)
emilmont 3:dd8b8f5b449a 914 {
emilmont 3:dd8b8f5b449a 915 if (full_duplex) {
emilmont 3:dd8b8f5b449a 916 LPC_EMAC->MAC2 |= EMAC_MAC2_FULL_DUP;
emilmont 3:dd8b8f5b449a 917 LPC_EMAC->Command |= EMAC_CR_FULL_DUP;
emilmont 3:dd8b8f5b449a 918 LPC_EMAC->IPGT = EMAC_IPGT_FULL_DUP;
emilmont 3:dd8b8f5b449a 919 } else {
emilmont 3:dd8b8f5b449a 920 LPC_EMAC->MAC2 &= ~EMAC_MAC2_FULL_DUP;
emilmont 3:dd8b8f5b449a 921 LPC_EMAC->Command &= ~EMAC_CR_FULL_DUP;
emilmont 3:dd8b8f5b449a 922 LPC_EMAC->IPGT = EMAC_IPGT_HALF_DUP;
emilmont 3:dd8b8f5b449a 923 }
emilmont 3:dd8b8f5b449a 924 }
emilmont 3:dd8b8f5b449a 925
emilmont 3:dd8b8f5b449a 926 /* This function provides a method for the PHY to set up the EMAC
emilmont 3:dd8b8f5b449a 927 for the PHY-negotiated bit rate */
emilmont 3:dd8b8f5b449a 928 void lpc_emac_set_speed(int mbs_100)
emilmont 3:dd8b8f5b449a 929 {
emilmont 3:dd8b8f5b449a 930 if (mbs_100)
emilmont 3:dd8b8f5b449a 931 LPC_EMAC->SUPP = EMAC_SUPP_SPEED;
emilmont 3:dd8b8f5b449a 932 else
emilmont 3:dd8b8f5b449a 933 LPC_EMAC->SUPP = 0;
emilmont 3:dd8b8f5b449a 934 }
emilmont 3:dd8b8f5b449a 935
emilmont 3:dd8b8f5b449a 936 /**
emilmont 3:dd8b8f5b449a 937 * This function is the ethernet packet send function. It calls
emilmont 3:dd8b8f5b449a 938 * etharp_output after checking link status.
emilmont 3:dd8b8f5b449a 939 *
emilmont 3:dd8b8f5b449a 940 * \param[in] netif the lwip network interface structure for this lpc_enetif
emilmont 3:dd8b8f5b449a 941 * \param[in] q Pointer to pbuf to send
emilmont 3:dd8b8f5b449a 942 * \param[in] ipaddr IP address
emilmont 3:dd8b8f5b449a 943 * \return ERR_OK or error code
emilmont 3:dd8b8f5b449a 944 */
emilmont 3:dd8b8f5b449a 945 err_t lpc_etharp_output(struct netif *netif, struct pbuf *q,
emilmont 3:dd8b8f5b449a 946 ip_addr_t *ipaddr)
emilmont 3:dd8b8f5b449a 947 {
emilmont 3:dd8b8f5b449a 948 /* Only send the packet if the link is up */
emilmont 3:dd8b8f5b449a 949 if (netif->flags & NETIF_FLAG_LINK_UP)
emilmont 3:dd8b8f5b449a 950 return etharp_output(netif, q, ipaddr);
emilmont 3:dd8b8f5b449a 951
emilmont 3:dd8b8f5b449a 952 return ERR_CONN;
emilmont 3:dd8b8f5b449a 953 }
emilmont 3:dd8b8f5b449a 954
emilmont 3:dd8b8f5b449a 955 #if NO_SYS == 0
emilmont 3:dd8b8f5b449a 956 /* periodic PHY status update */
emilmont 3:dd8b8f5b449a 957 void phy_update(void const *nif) {
emilmont 3:dd8b8f5b449a 958 lpc_phy_sts_sm((struct netif*)nif);
emilmont 3:dd8b8f5b449a 959 }
emilmont 3:dd8b8f5b449a 960 osTimerDef(phy_update, phy_update);
emilmont 3:dd8b8f5b449a 961 #endif
emilmont 3:dd8b8f5b449a 962
emilmont 3:dd8b8f5b449a 963 /**
emilmont 3:dd8b8f5b449a 964 * Should be called at the beginning of the program to set up the
emilmont 3:dd8b8f5b449a 965 * network interface.
emilmont 3:dd8b8f5b449a 966 *
emilmont 3:dd8b8f5b449a 967 * This function should be passed as a parameter to netif_add().
emilmont 3:dd8b8f5b449a 968 *
emilmont 3:dd8b8f5b449a 969 * @param[in] netif the lwip network interface structure for this lpc_enetif
emilmont 3:dd8b8f5b449a 970 * @return ERR_OK if the loopif is initialized
emilmont 3:dd8b8f5b449a 971 * ERR_MEM if private data couldn't be allocated
emilmont 3:dd8b8f5b449a 972 * any other err_t on error
emilmont 3:dd8b8f5b449a 973 */
emilmont 3:dd8b8f5b449a 974 err_t lpc_enetif_init(struct netif *netif)
emilmont 3:dd8b8f5b449a 975 {
emilmont 3:dd8b8f5b449a 976 err_t err;
emilmont 3:dd8b8f5b449a 977
emilmont 3:dd8b8f5b449a 978 LWIP_ASSERT("netif != NULL", (netif != NULL));
emilmont 3:dd8b8f5b449a 979
emilmont 3:dd8b8f5b449a 980 lpc_enetdata.netif = netif;
emilmont 3:dd8b8f5b449a 981
emilmont 3:dd8b8f5b449a 982 /* set MAC hardware address */
emilmont 3:dd8b8f5b449a 983 mbed_mac_address((char *)netif->hwaddr);
emilmont 3:dd8b8f5b449a 984 netif->hwaddr_len = ETHARP_HWADDR_LEN;
emilmont 3:dd8b8f5b449a 985
emilmont 3:dd8b8f5b449a 986 /* maximum transfer unit */
emilmont 3:dd8b8f5b449a 987 netif->mtu = 1500;
emilmont 3:dd8b8f5b449a 988
emilmont 3:dd8b8f5b449a 989 /* device capabilities */
emilmont 3:dd8b8f5b449a 990 netif->flags = NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET;
emilmont 3:dd8b8f5b449a 991
emilmont 3:dd8b8f5b449a 992 /* Initialize the hardware */
emilmont 3:dd8b8f5b449a 993 netif->state = &lpc_enetdata;
emilmont 3:dd8b8f5b449a 994 err = low_level_init(netif);
emilmont 3:dd8b8f5b449a 995 if (err != ERR_OK)
emilmont 3:dd8b8f5b449a 996 return err;
emilmont 3:dd8b8f5b449a 997
emilmont 3:dd8b8f5b449a 998 #if LWIP_NETIF_HOSTNAME
emilmont 3:dd8b8f5b449a 999 /* Initialize interface hostname */
emilmont 3:dd8b8f5b449a 1000 netif->hostname = "lwiplpc";
emilmont 3:dd8b8f5b449a 1001 #endif /* LWIP_NETIF_HOSTNAME */
emilmont 3:dd8b8f5b449a 1002
emilmont 3:dd8b8f5b449a 1003 netif->name[0] = 'e';
emilmont 3:dd8b8f5b449a 1004 netif->name[1] = 'n';
emilmont 3:dd8b8f5b449a 1005
emilmont 3:dd8b8f5b449a 1006 netif->output = lpc_etharp_output;
emilmont 3:dd8b8f5b449a 1007 netif->linkoutput = lpc_low_level_output;
emilmont 3:dd8b8f5b449a 1008
emilmont 3:dd8b8f5b449a 1009 /* CMSIS-RTOS, start tasks */
emilmont 3:dd8b8f5b449a 1010 #if NO_SYS == 0
emilmont 3:dd8b8f5b449a 1011 #ifdef CMSIS_OS_RTX
emilmont 3:dd8b8f5b449a 1012 memset(lpc_enetdata.xTXDCountSem.data, 0, sizeof(lpc_enetdata.xTXDCountSem.data));
emilmont 3:dd8b8f5b449a 1013 lpc_enetdata.xTXDCountSem.def.semaphore = lpc_enetdata.xTXDCountSem.data;
emilmont 3:dd8b8f5b449a 1014 #endif
emilmont 3:dd8b8f5b449a 1015 lpc_enetdata.xTXDCountSem.id = osSemaphoreCreate(&lpc_enetdata.xTXDCountSem.def, LPC_NUM_BUFF_TXDESCS);
emilmont 3:dd8b8f5b449a 1016 LWIP_ASSERT("xTXDCountSem creation error", (lpc_enetdata.xTXDCountSem.id != NULL));
emilmont 3:dd8b8f5b449a 1017
emilmont 3:dd8b8f5b449a 1018 err = sys_mutex_new(&lpc_enetdata.TXLockMutex);
emilmont 3:dd8b8f5b449a 1019 LWIP_ASSERT("TXLockMutex creation error", (err == ERR_OK));
emilmont 3:dd8b8f5b449a 1020
emilmont 3:dd8b8f5b449a 1021 /* Packet receive task */
emilmont 3:dd8b8f5b449a 1022 err = sys_sem_new(&lpc_enetdata.RxSem, 0);
emilmont 3:dd8b8f5b449a 1023 LWIP_ASSERT("RxSem creation error", (err == ERR_OK));
emilmont 3:dd8b8f5b449a 1024 sys_thread_new("receive_thread", packet_rx, netif->state, DEFAULT_THREAD_STACKSIZE, RX_PRIORITY);
emilmont 3:dd8b8f5b449a 1025
emilmont 3:dd8b8f5b449a 1026 /* Transmit cleanup task */
emilmont 3:dd8b8f5b449a 1027 err = sys_sem_new(&lpc_enetdata.TxCleanSem, 0);
emilmont 3:dd8b8f5b449a 1028 LWIP_ASSERT("TxCleanSem creation error", (err == ERR_OK));
emilmont 3:dd8b8f5b449a 1029 sys_thread_new("txclean_thread", packet_tx, netif->state, DEFAULT_THREAD_STACKSIZE, TX_PRIORITY);
emilmont 3:dd8b8f5b449a 1030
emilmont 3:dd8b8f5b449a 1031 /* periodic PHY status update */
emilmont 3:dd8b8f5b449a 1032 osTimerId phy_timer = osTimerCreate(osTimer(phy_update), osTimerPeriodic, (void *)netif);
emilmont 3:dd8b8f5b449a 1033 osTimerStart(phy_timer, 250);
emilmont 3:dd8b8f5b449a 1034 #endif
emilmont 3:dd8b8f5b449a 1035
emilmont 3:dd8b8f5b449a 1036 return ERR_OK;
emilmont 3:dd8b8f5b449a 1037 }
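
/* Illustrative sketch (not part of the driver): how application code would
   typically register this interface, assuming the lwIP 1.4-style
   netif_add() signature and the tcpip thread being used (NO_SYS == 0). The
   netif state pointer can be passed as NULL since lpc_enetif_init() sets it
   to &lpc_enetdata itself. The function name and the addresses below are
   arbitrary examples; the block is kept inside #if 0 so it is never
   compiled. */
#if 0
#include "lwip/tcpip.h"
#include "lwip/netif.h"

static struct netif lpc_netif;

static void example_eth_bringup(void)
{
    ip_addr_t ipaddr, netmask, gw;

    IP4_ADDR(&ipaddr, 192, 168, 1, 100);
    IP4_ADDR(&netmask, 255, 255, 255, 0);
    IP4_ADDR(&gw, 192, 168, 1, 1);

    /* Register the driver with lwIP and bring the interface up */
    netif_add(&lpc_netif, &ipaddr, &netmask, &gw, NULL,
        lpc_enetif_init, tcpip_input);
    netif_set_default(&lpc_netif);
    netif_set_up(&lpc_netif);
}
#endif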
emilmont 3:dd8b8f5b449a 1038
emilmont 3:dd8b8f5b449a 1039 /**
emilmont 3:dd8b8f5b449a 1040 * @}
emilmont 3:dd8b8f5b449a 1041 */
emilmont 3:dd8b8f5b449a 1042
emilmont 3:dd8b8f5b449a 1043 /* --------------------------------- End Of File ------------------------------ */