
Dependents:   EthernetInterface

Fork of lwip-eth by mbed official

Committer: emilmont
Date:      Fri Mar 01 15:30:33 2013 +0000
Revision:  4:d827a085afd9
Parent:    3:dd8b8f5b449a
Child:     5:698d868a5285
Apply latest NXP patches

Who changed what in which revision?

User       Revision           Line   New contents of line
emilmont 4:d827a085afd9 1 /**********************************************************************
emilmont 4:d827a085afd9 2 * $Id$ lpc17_emac.c 2011-11-20
emilmont 4:d827a085afd9 3 *//**
emilmont 4:d827a085afd9 4 * @file lpc17_emac.c
emilmont 4:d827a085afd9 5 * @brief LPC17 ethernet driver for LWIP
emilmont 4:d827a085afd9 6 * @version 1.0
emilmont 4:d827a085afd9 7 * @date 20. Nov. 2011
emilmont 4:d827a085afd9 8 * @author NXP MCU SW Application Team
emilmont 4:d827a085afd9 9 *
emilmont 4:d827a085afd9 10 * Copyright(C) 2011, NXP Semiconductor
emilmont 4:d827a085afd9 11 * All rights reserved.
emilmont 4:d827a085afd9 12 *
emilmont 4:d827a085afd9 13 ***********************************************************************
emilmont 4:d827a085afd9 14 * Software that is described herein is for illustrative purposes only
emilmont 4:d827a085afd9 15 * which provides customers with programming information regarding the
emilmont 4:d827a085afd9 16 * products. This software is supplied "AS IS" without any warranties.
emilmont 4:d827a085afd9 17 * NXP Semiconductors assumes no responsibility or liability for the
emilmont 4:d827a085afd9 18 * use of the software, conveys no license or title under any patent,
emilmont 4:d827a085afd9 19 * copyright, or mask work right to the product. NXP Semiconductors
emilmont 4:d827a085afd9 20 * reserves the right to make changes in the software without
emilmont 4:d827a085afd9 21 * notification. NXP Semiconductors also make no representation or
emilmont 4:d827a085afd9 22 * warranty that such application will be suitable for the specified
emilmont 4:d827a085afd9 23 * use without further testing or modification.
emilmont 4:d827a085afd9 24 **********************************************************************/
emilmont 4:d827a085afd9 25
emilmont 4:d827a085afd9 26 #include "lwip/opt.h"
emilmont 4:d827a085afd9 27 #include "lwip/sys.h"
emilmont 4:d827a085afd9 28 #include "lwip/def.h"
emilmont 4:d827a085afd9 29 #include "lwip/mem.h"
emilmont 4:d827a085afd9 30 #include "lwip/pbuf.h"
emilmont 4:d827a085afd9 31 #include "lwip/stats.h"
emilmont 4:d827a085afd9 32 #include "lwip/snmp.h"
emilmont 4:d827a085afd9 33 #include "netif/etharp.h"
emilmont 4:d827a085afd9 34 #include "netif/ppp_oe.h"
emilmont 4:d827a085afd9 35
emilmont 4:d827a085afd9 36 #include "lpc17xx_emac.h"
emilmont 4:d827a085afd9 37 #include "lpc17_emac.h"
emilmont 4:d827a085afd9 38 #include "lpc_emac_config.h"
emilmont 4:d827a085afd9 39 #include "lpc_phy.h"
emilmont 4:d827a085afd9 40 #include "sys_arch.h"
emilmont 4:d827a085afd9 41
emilmont 4:d827a085afd9 42 #include "mbed_interface.h"
emilmont 4:d827a085afd9 43 #include <string.h>
emilmont 4:d827a085afd9 44
emilmont 4:d827a085afd9 45 #ifndef LPC_EMAC_RMII
emilmont 4:d827a085afd9 46 #error LPC_EMAC_RMII is not defined!
emilmont 4:d827a085afd9 47 #endif
emilmont 4:d827a085afd9 48
emilmont 4:d827a085afd9 49 #if LPC_NUM_BUFF_TXDESCS < 2
emilmont 4:d827a085afd9 50 #error LPC_NUM_BUFF_TXDESCS must be at least 2
emilmont 4:d827a085afd9 51 #endif
emilmont 4:d827a085afd9 52
emilmont 4:d827a085afd9 53 #if LPC_NUM_BUFF_RXDESCS < 3
emilmont 4:d827a085afd9 54 #error LPC_NUM_BUFF_RXDESCS must be at least 3
emilmont 4:d827a085afd9 55 #endif
emilmont 4:d827a085afd9 56
emilmont 4:d827a085afd9 57 /** @defgroup lwip17xx_emac_DRIVER lpc17 EMAC driver for LWIP
emilmont 4:d827a085afd9 58 * @ingroup lwip_emac
emilmont 4:d827a085afd9 59 *
emilmont 4:d827a085afd9 60 * @{
emilmont 4:d827a085afd9 61 */
emilmont 4:d827a085afd9 62
emilmont 4:d827a085afd9 63 #if NO_SYS == 0
emilmont 4:d827a085afd9 64 /** \brief Driver transmit and receive thread priorities
emilmont 4:d827a085afd9 65 *
emilmont 4:d827a085afd9 66 * Thread priorities for receive thread and TX cleanup thread. Alter
emilmont 4:d827a085afd9 67 * to prioritize receive or transmit bandwidth. In a heavily loaded
emilmont 4:d827a085afd9 68 * system or with LWIP_DEBUG enabled, the priorities might be better
emilmont 4:d827a085afd9 69 * kept the same. */
emilmont 4:d827a085afd9 70 #define RX_PRIORITY (osPriorityNormal)
emilmont 4:d827a085afd9 71 #define TX_PRIORITY (osPriorityNormal)
emilmont 4:d827a085afd9 72
emilmont 4:d827a085afd9 73 /** \brief Debug output formatter lock define
emilmont 4:d827a085afd9 74 *
emilmont 4:d827a085afd9 75 * When using FreeRTOS and with LWIP_DEBUG enabled, enabling this
emilmont 4:d827a085afd9 76 * define will allow RX debug messages to not interleave with the
emilmont 4:d827a085afd9 77 * TX messages (so they are actually readable). Not enabling this
emilmont 4:d827a085afd9 78 * define when the system is under load will cause the output to
emilmont 4:d827a085afd9 79 * be unreadable. There is a small tradeoff in performance for this
emilmont 4:d827a085afd9 80 * so use it only for debug. */
emilmont 4:d827a085afd9 81 //#define LOCK_RX_THREAD
emilmont 4:d827a085afd9 82
emilmont 4:d827a085afd9 83 /** \brief Receive group interrupts
emilmont 4:d827a085afd9 84 */
emilmont 4:d827a085afd9 85 #define RXINTGROUP (EMAC_INT_RX_OVERRUN | EMAC_INT_RX_ERR | EMAC_INT_RX_DONE)
emilmont 4:d827a085afd9 86
emilmont 4:d827a085afd9 87 /** \brief Transmit group interrupts
emilmont 4:d827a085afd9 88 */
emilmont 4:d827a085afd9 89 #define TXINTGROUP (EMAC_INT_TX_UNDERRUN | EMAC_INT_TX_ERR | EMAC_INT_TX_DONE)
emilmont 4:d827a085afd9 90
emilmont 4:d827a085afd9 91 #else
emilmont 4:d827a085afd9 92 #define RXINTGROUP 0
emilmont 4:d827a085afd9 93 #define TXINTGROUP 0
emilmont 4:d827a085afd9 94 #endif
emilmont 4:d827a085afd9 95
emilmont 4:d827a085afd9 96 /** \brief Structure of a TX/RX descriptor
emilmont 4:d827a085afd9 97 */
emilmont 4:d827a085afd9 98 typedef struct
emilmont 4:d827a085afd9 99 {
emilmont 4:d827a085afd9 100 volatile u32_t packet; /**< Pointer to buffer */
emilmont 4:d827a085afd9 101 volatile u32_t control; /**< Control word */
emilmont 4:d827a085afd9 102 } LPC_TXRX_DESC_T;
emilmont 4:d827a085afd9 103
emilmont 4:d827a085afd9 104 /** \brief Structure of a RX status entry
emilmont 4:d827a085afd9 105 */
emilmont 4:d827a085afd9 106 typedef struct
emilmont 4:d827a085afd9 107 {
emilmont 4:d827a085afd9 108 volatile u32_t statusinfo; /**< RX status word */
emilmont 4:d827a085afd9 109 volatile u32_t statushashcrc; /**< RX hash CRC */
emilmont 4:d827a085afd9 110 } LPC_TXRX_STATUS_T;
emilmont 4:d827a085afd9 111
emilmont 4:d827a085afd9 112 /* LPC EMAC driver data structure */
emilmont 4:d827a085afd9 113 struct lpc_enetdata {
emilmont 4:d827a085afd9 114 /* prxs must be 8 byte aligned! */
emilmont 4:d827a085afd9 115 LPC_TXRX_STATUS_T prxs[LPC_NUM_BUFF_RXDESCS]; /**< Pointer to RX statuses */
emilmont 4:d827a085afd9 116 struct netif *netif; /**< Reference back to LWIP parent netif */
emilmont 4:d827a085afd9 117 LPC_TXRX_DESC_T ptxd[LPC_NUM_BUFF_TXDESCS]; /**< Pointer to TX descriptor list */
emilmont 4:d827a085afd9 118 LPC_TXRX_STATUS_T ptxs[LPC_NUM_BUFF_TXDESCS]; /**< Pointer to TX statuses */
emilmont 4:d827a085afd9 119 LPC_TXRX_DESC_T prxd[LPC_NUM_BUFF_RXDESCS]; /**< Pointer to RX descriptor list */
emilmont 4:d827a085afd9 120 struct pbuf *rxb[LPC_NUM_BUFF_RXDESCS]; /**< RX pbuf pointer list, zero-copy mode */
emilmont 4:d827a085afd9 121 u32_t rx_fill_desc_index; /**< RX descriptor next available index */
emilmont 4:d827a085afd9 122 volatile u32_t rx_free_descs; /**< Count of free RX descriptors */
emilmont 4:d827a085afd9 123 struct pbuf *txb[LPC_NUM_BUFF_TXDESCS]; /**< TX pbuf pointer list, zero-copy mode */
emilmont 4:d827a085afd9 124 u32_t lpc_last_tx_idx; /**< TX last descriptor index, zero-copy mode */
emilmont 4:d827a085afd9 125 #if NO_SYS == 0
emilmont 4:d827a085afd9 126 sys_sem_t RxSem; /**< RX receive thread wakeup semaphore */
emilmont 4:d827a085afd9 127 sys_sem_t TxCleanSem; /**< TX cleanup thread wakeup semaphore */
emilmont 4:d827a085afd9 128 sys_mutex_t TXLockMutex; /**< TX critical section mutex */
emilmont 4:d827a085afd9 129 sys_sem_t xTXDCountSem; /**< TX free buffer counting semaphore */
emilmont 4:d827a085afd9 130 #endif
emilmont 4:d827a085afd9 131 };
emilmont 4:d827a085afd9 132
emilmont 4:d827a085afd9 133 /** \brief LPC EMAC driver work data
emilmont 4:d827a085afd9 134 */
emilmont 4:d827a085afd9 135 ALIGNED(8) struct lpc_enetdata lpc_enetdata;
emilmont 4:d827a085afd9 136
emilmont 4:d827a085afd9 137 /* Write a value via the MII link (non-blocking) */
emilmont 4:d827a085afd9 138 void lpc_mii_write_noblock(u32_t PhyReg, u32_t Value)
emilmont 4:d827a085afd9 139 {
emilmont 4:d827a085afd9 140 /* Write value at PHY address and register */
emilmont 4:d827a085afd9 141 LPC_EMAC->MADR = (LPC_PHYDEF_PHYADDR << 8) | PhyReg;
emilmont 4:d827a085afd9 142 LPC_EMAC->MWTD = Value;
emilmont 4:d827a085afd9 143 }
emilmont 4:d827a085afd9 144
emilmont 4:d827a085afd9 145 /* Write a value via the MII link (blocking) */
emilmont 4:d827a085afd9 146 err_t lpc_mii_write(u32_t PhyReg, u32_t Value)
emilmont 4:d827a085afd9 147 {
emilmont 4:d827a085afd9 148 u32_t mst = 250;
emilmont 4:d827a085afd9 149 err_t sts = ERR_OK;
emilmont 4:d827a085afd9 150
emilmont 4:d827a085afd9 151 /* Write value at PHY address and register */
emilmont 4:d827a085afd9 152 lpc_mii_write_noblock(PhyReg, Value);
emilmont 4:d827a085afd9 153
emilmont 4:d827a085afd9 154 /* Wait for unbusy status */
emilmont 4:d827a085afd9 155 while (mst > 0) {
emilmont 4:d827a085afd9 156 sts = LPC_EMAC->MIND;
emilmont 4:d827a085afd9 157 if ((sts & EMAC_MIND_BUSY) == 0)
emilmont 4:d827a085afd9 158 mst = 0;
emilmont 4:d827a085afd9 159 else {
emilmont 4:d827a085afd9 160 mst--;
emilmont 4:d827a085afd9 161 osDelay(1);
emilmont 4:d827a085afd9 162 }
emilmont 4:d827a085afd9 163 }
emilmont 4:d827a085afd9 164
emilmont 4:d827a085afd9 165 if (sts != 0)
emilmont 4:d827a085afd9 166 sts = ERR_TIMEOUT;
emilmont 4:d827a085afd9 167
emilmont 4:d827a085afd9 168 return sts;
emilmont 4:d827a085afd9 169 }
emilmont 4:d827a085afd9 170
emilmont 4:d827a085afd9 171 /* Reads current MII link busy status */
emilmont 4:d827a085afd9 172 u32_t lpc_mii_is_busy(void)
emilmont 4:d827a085afd9 173 {
emilmont 4:d827a085afd9 174 return (u32_t) (LPC_EMAC->MIND & EMAC_MIND_BUSY);
emilmont 4:d827a085afd9 175 }
emilmont 4:d827a085afd9 176
emilmont 4:d827a085afd9 177 /* Returns the data from a previously started MII read (non-blocking) */
emilmont 4:d827a085afd9 178 u32_t lpc_mii_read_data(void)
emilmont 4:d827a085afd9 179 {
emilmont 4:d827a085afd9 180 u32_t data = LPC_EMAC->MRDD;
emilmont 4:d827a085afd9 181 LPC_EMAC->MCMD = 0;
emilmont 4:d827a085afd9 182
emilmont 4:d827a085afd9 183 return data;
emilmont 4:d827a085afd9 184 }
emilmont 4:d827a085afd9 185
emilmont 4:d827a085afd9 186 /* Starts a read operation via the MII link (non-blocking) */
emilmont 4:d827a085afd9 187 void lpc_mii_read_noblock(u32_t PhyReg)
emilmont 4:d827a085afd9 188 {
emilmont 4:d827a085afd9 189 /* Read value at PHY address and register */
emilmont 4:d827a085afd9 190 LPC_EMAC->MADR = (LPC_PHYDEF_PHYADDR << 8) | PhyReg;
emilmont 4:d827a085afd9 191 LPC_EMAC->MCMD = EMAC_MCMD_READ;
emilmont 4:d827a085afd9 192 }
emilmont 4:d827a085afd9 193
emilmont 4:d827a085afd9 194 /* Read a value via the MII link (blocking) */
emilmont 4:d827a085afd9 195 err_t lpc_mii_read(u32_t PhyReg, u32_t *data)
emilmont 4:d827a085afd9 196 {
emilmont 4:d827a085afd9 197 u32_t mst = 250;
emilmont 4:d827a085afd9 198 err_t sts = ERR_OK;
emilmont 4:d827a085afd9 199
emilmont 4:d827a085afd9 200 /* Read value at PHY address and register */
emilmont 4:d827a085afd9 201 lpc_mii_read_noblock(PhyReg);
emilmont 4:d827a085afd9 202
emilmont 4:d827a085afd9 203 /* Wait for unbusy status */
emilmont 4:d827a085afd9 204 while (mst > 0) {
emilmont 4:d827a085afd9 205 sts = LPC_EMAC->MIND & ~EMAC_MIND_MII_LINK_FAIL;
emilmont 4:d827a085afd9 206 if ((sts & EMAC_MIND_BUSY) == 0) {
emilmont 4:d827a085afd9 207 mst = 0;
emilmont 4:d827a085afd9 208 *data = LPC_EMAC->MRDD;
emilmont 4:d827a085afd9 209 } else {
emilmont 4:d827a085afd9 210 mst--;
emilmont 4:d827a085afd9 211 osDelay(1);
emilmont 4:d827a085afd9 212 }
emilmont 4:d827a085afd9 213 }
emilmont 4:d827a085afd9 214
emilmont 4:d827a085afd9 215 LPC_EMAC->MCMD = 0;
emilmont 4:d827a085afd9 216
emilmont 4:d827a085afd9 217 if (sts != 0)
emilmont 4:d827a085afd9 218 sts = ERR_TIMEOUT;
emilmont 4:d827a085afd9 219
emilmont 4:d827a085afd9 220 return sts;
emilmont 4:d827a085afd9 221 }
emilmont 4:d827a085afd9 222
emilmont 4:d827a085afd9 223 /** \brief Queues a pbuf into the RX descriptor list
emilmont 4:d827a085afd9 224 *
emilmont 4:d827a085afd9 225 * \param[in] lpc_enetif Pointer to the driver data structure
emilmont 4:d827a085afd9 226 * \param[in] p Pointer to pbuf to queue
emilmont 4:d827a085afd9 227 */
emilmont 4:d827a085afd9 228 static void lpc_rxqueue_pbuf(struct lpc_enetdata *lpc_enetif, struct pbuf *p)
emilmont 4:d827a085afd9 229 {
emilmont 4:d827a085afd9 230 u32_t idx;
emilmont 4:d827a085afd9 231
emilmont 4:d827a085afd9 232 /* Get next free descriptor index */
emilmont 4:d827a085afd9 233 idx = lpc_enetif->rx_fill_desc_index;
emilmont 4:d827a085afd9 234
emilmont 4:d827a085afd9 235 /* Setup descriptor and clear statuses */
emilmont 4:d827a085afd9 236 lpc_enetif->prxd[idx].control = EMAC_RCTRL_INT | ((u32_t) (p->len - 1));
emilmont 4:d827a085afd9 237 lpc_enetif->prxd[idx].packet = (u32_t) p->payload;
emilmont 4:d827a085afd9 238 lpc_enetif->prxs[idx].statusinfo = 0xFFFFFFFF;
emilmont 4:d827a085afd9 239 lpc_enetif->prxs[idx].statushashcrc = 0xFFFFFFFF;
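    /* Note: the descriptor size field is written as (buffer size - 1); the
       receive path adds 1 back when it reads the frame length. The statuses
       are pre-set to 0xFFFFFFFF so a completed reception is detectable. */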
emilmont 4:d827a085afd9 240
emilmont 4:d827a085afd9 241 /* Save pbuf pointer for push to network layer later */
emilmont 4:d827a085afd9 242 lpc_enetif->rxb[idx] = p;
emilmont 4:d827a085afd9 243
emilmont 4:d827a085afd9 244 /* Wrap at end of descriptor list */
emilmont 4:d827a085afd9 245 idx++;
emilmont 4:d827a085afd9 246 if (idx >= LPC_NUM_BUFF_RXDESCS)
emilmont 4:d827a085afd9 247 idx = 0;
emilmont 4:d827a085afd9 248
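    /* Advancing RxConsumeIndex below hands the freshly filled descriptor back
       to the EMAC DMA engine so it can receive into it. */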
emilmont 4:d827a085afd9 249 /* Queue descriptor(s) */
emilmont 4:d827a085afd9 250 lpc_enetif->rx_free_descs -= 1;
emilmont 4:d827a085afd9 251 lpc_enetif->rx_fill_desc_index = idx;
emilmont 4:d827a085afd9 252 LPC_EMAC->RxConsumeIndex = idx;
emilmont 4:d827a085afd9 253
emilmont 4:d827a085afd9 254 LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
emilmont 4:d827a085afd9 255 ("lpc_rxqueue_pbuf: pbuf packet queued: %p (free desc=%d)\n", p,
emilmont 4:d827a085afd9 256 lpc_enetif->rx_free_descs));
emilmont 4:d827a085afd9 257 }
emilmont 4:d827a085afd9 258
emilmont 4:d827a085afd9 259 /** \brief Attempt to allocate and requeue a new pbuf for RX
emilmont 4:d827a085afd9 260 *
emilmont 4:d827a085afd9 261 * \param[in] netif Pointer to the netif structure
emilmont 4:d827a085afd9 262 * \returns 1 if a packet was allocated and requeued, otherwise 0
emilmont 4:d827a085afd9 263 */
emilmont 4:d827a085afd9 264 s32_t lpc_rx_queue(struct netif *netif)
emilmont 4:d827a085afd9 265 {
emilmont 4:d827a085afd9 266 struct lpc_enetdata *lpc_enetif = netif->state;
emilmont 4:d827a085afd9 267 struct pbuf *p;
emilmont 4:d827a085afd9 268 s32_t queued = 0;
emilmont 4:d827a085afd9 269
emilmont 4:d827a085afd9 270 /* Attempt to requeue as many packets as possible */
emilmont 4:d827a085afd9 271 while (lpc_enetif->rx_free_descs > 0) {
emilmont 4:d827a085afd9 272 /* Allocate a pbuf from the pool. We need to allocate at the
emilmont 4:d827a085afd9 273 maximum size as we don't know the size of the yet to be
emilmont 4:d827a085afd9 274 received packet. */
emilmont 4:d827a085afd9 275 p = pbuf_alloc(PBUF_RAW, (u16_t) EMAC_ETH_MAX_FLEN, PBUF_RAM);
emilmont 4:d827a085afd9 276 if (p == NULL) {
emilmont 4:d827a085afd9 277 LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
emilmont 4:d827a085afd9 278 ("lpc_rx_queue: could not allocate RX pbuf (free desc=%d)\n",
emilmont 4:d827a085afd9 279 lpc_enetif->rx_free_descs));
emilmont 4:d827a085afd9 280 return queued;
emilmont 4:d827a085afd9 281 }
emilmont 4:d827a085afd9 282
emilmont 4:d827a085afd9 283 /* pbufs allocated from the RAM pool should be non-chained. */
emilmont 4:d827a085afd9 284 LWIP_ASSERT("lpc_rx_queue: pbuf is not contiguous (chained)",
emilmont 4:d827a085afd9 285 pbuf_clen(p) <= 1);
emilmont 4:d827a085afd9 286
emilmont 4:d827a085afd9 287 /* Queue packet */
emilmont 4:d827a085afd9 288 lpc_rxqueue_pbuf(lpc_enetif, p);
emilmont 4:d827a085afd9 289
emilmont 4:d827a085afd9 290 /* Update queued count */
emilmont 4:d827a085afd9 291 queued++;
emilmont 4:d827a085afd9 292 }
emilmont 4:d827a085afd9 293
emilmont 4:d827a085afd9 294 return queued;
emilmont 4:d827a085afd9 295 }
emilmont 4:d827a085afd9 296
emilmont 4:d827a085afd9 297 /** \brief Sets up the RX descriptor ring buffers.
emilmont 4:d827a085afd9 298 *
emilmont 4:d827a085afd9 299 * This function sets up the descriptor list used for receive packets.
emilmont 4:d827a085afd9 300 *
emilmont 4:d827a085afd9 301 * \param[in] lpc_enetif Pointer to driver data structure
emilmont 4:d827a085afd9 302 * \returns Always returns ERR_OK
emilmont 4:d827a085afd9 303 */
emilmont 4:d827a085afd9 304 static err_t lpc_rx_setup(struct lpc_enetdata *lpc_enetif)
emilmont 4:d827a085afd9 305 {
emilmont 4:d827a085afd9 306 /* Setup pointers to RX structures */
emilmont 4:d827a085afd9 307 LPC_EMAC->RxDescriptor = (u32_t) &lpc_enetif->prxd[0];
emilmont 4:d827a085afd9 308 LPC_EMAC->RxStatus = (u32_t) &lpc_enetif->prxs[0];
emilmont 4:d827a085afd9 309 LPC_EMAC->RxDescriptorNumber = LPC_NUM_BUFF_RXDESCS - 1;
emilmont 4:d827a085afd9 310
emilmont 4:d827a085afd9 311 lpc_enetif->rx_free_descs = LPC_NUM_BUFF_RXDESCS;
emilmont 4:d827a085afd9 312 lpc_enetif->rx_fill_desc_index = 0;
emilmont 4:d827a085afd9 313
emilmont 4:d827a085afd9 314 /* Build RX buffer and descriptors */
emilmont 4:d827a085afd9 315 lpc_rx_queue(lpc_enetif->netif);
emilmont 4:d827a085afd9 316
emilmont 4:d827a085afd9 317 return ERR_OK;
emilmont 4:d827a085afd9 318 }
emilmont 4:d827a085afd9 319
emilmont 4:d827a085afd9 320 /** \brief Gets the pbuf containing the data from the incoming packet.
emilmont 4:d827a085afd9 321 *
emilmont 4:d827a085afd9 322 * \param[in] netif the lwip network interface structure for this lpc_enetif
emilmont 4:d827a085afd9 323 * \return a pbuf filled with the received packet (including MAC header)
emilmont 4:d827a085afd9 324 * NULL on memory error
emilmont 4:d827a085afd9 325 */
emilmont 4:d827a085afd9 326 static struct pbuf *lpc_low_level_input(struct netif *netif)
emilmont 4:d827a085afd9 327 {
emilmont 4:d827a085afd9 328 struct lpc_enetdata *lpc_enetif = netif->state;
emilmont 4:d827a085afd9 329 struct pbuf *p = NULL;
emilmont 4:d827a085afd9 330 u32_t idx, length;
emilmont 4:d827a085afd9 331
emilmont 4:d827a085afd9 332 #ifdef LOCK_RX_THREAD
emilmont 4:d827a085afd9 333 #if NO_SYS == 0
emilmont 4:d827a085afd9 334 /* Get exclusive access */
emilmont 4:d827a085afd9 335 sys_mutex_lock(&lpc_enetif->TXLockMutex);
emilmont 4:d827a085afd9 336 #endif
emilmont 4:d827a085afd9 337 #endif
emilmont 4:d827a085afd9 338
emilmont 4:d827a085afd9 339 /* Monitor RX overrun status. This should never happen unless
emilmont 4:d827a085afd9 340 (possibly) the internal bus is being held up by something.
emilmont 4:d827a085afd9 341 Unless your system is running at a very low clock speed or
emilmont 4:d827a085afd9 342 there are possibilities that the internal buses may be held
emilmont 4:d827a085afd9 343 up for a long time, this can probably safely be removed. */
emilmont 4:d827a085afd9 344 if (LPC_EMAC->IntStatus & EMAC_INT_RX_OVERRUN) {
emilmont 4:d827a085afd9 345 LINK_STATS_INC(link.err);
emilmont 4:d827a085afd9 346 LINK_STATS_INC(link.drop);
emilmont 4:d827a085afd9 347
emilmont 4:d827a085afd9 348 /* Temporarily disable RX */
emilmont 4:d827a085afd9 349 LPC_EMAC->MAC1 &= ~EMAC_MAC1_REC_EN;
emilmont 4:d827a085afd9 350
emilmont 4:d827a085afd9 351 /* Reset the RX side */
emilmont 4:d827a085afd9 352 LPC_EMAC->MAC1 |= EMAC_MAC1_RES_RX;
emilmont 4:d827a085afd9 353 LPC_EMAC->IntClear = EMAC_INT_RX_OVERRUN;
emilmont 4:d827a085afd9 354
emilmont 4:d827a085afd9 355 /* De-allocate all queued RX pbufs */
emilmont 4:d827a085afd9 356 for (idx = 0; idx < LPC_NUM_BUFF_RXDESCS; idx++) {
emilmont 4:d827a085afd9 357 if (lpc_enetif->rxb[idx] != NULL) {
emilmont 4:d827a085afd9 358 pbuf_free(lpc_enetif->rxb[idx]);
emilmont 4:d827a085afd9 359 lpc_enetif->rxb[idx] = NULL;
emilmont 4:d827a085afd9 360 }
emilmont 4:d827a085afd9 361 }
emilmont 4:d827a085afd9 362
emilmont 4:d827a085afd9 363 /* Start RX side again */
emilmont 4:d827a085afd9 364 lpc_rx_setup(lpc_enetif);
emilmont 4:d827a085afd9 365
emilmont 4:d827a085afd9 366 /* Re-enable RX */
emilmont 4:d827a085afd9 367 LPC_EMAC->MAC1 |= EMAC_MAC1_REC_EN;
emilmont 4:d827a085afd9 368
emilmont 4:d827a085afd9 369 #ifdef LOCK_RX_THREAD
emilmont 4:d827a085afd9 370 #if NO_SYS == 0
emilmont 4:d827a085afd9 371 sys_mutex_unlock(&lpc_enetif->TXLockMutex);
emilmont 4:d827a085afd9 372 #endif
emilmont 4:d827a085afd9 373 #endif
emilmont 4:d827a085afd9 374
emilmont 4:d827a085afd9 375 return NULL;
emilmont 4:d827a085afd9 376 }
emilmont 4:d827a085afd9 377
emilmont 4:d827a085afd9 378 /* Determine if a frame has been received */
emilmont 4:d827a085afd9 379 length = 0;
emilmont 4:d827a085afd9 380 idx = LPC_EMAC->RxConsumeIndex;
emilmont 4:d827a085afd9 381 if (LPC_EMAC->RxProduceIndex != idx) {
emilmont 4:d827a085afd9 382 /* Handle errors */
emilmont 4:d827a085afd9 383 if (lpc_enetif->prxs[idx].statusinfo & (EMAC_RINFO_CRC_ERR |
emilmont 4:d827a085afd9 384 EMAC_RINFO_SYM_ERR | EMAC_RINFO_ALIGN_ERR | EMAC_RINFO_LEN_ERR)) {
emilmont 4:d827a085afd9 385 #if LINK_STATS
emilmont 4:d827a085afd9 386 if (lpc_enetif->prxs[idx].statusinfo & (EMAC_RINFO_CRC_ERR |
emilmont 4:d827a085afd9 387 EMAC_RINFO_SYM_ERR | EMAC_RINFO_ALIGN_ERR))
emilmont 4:d827a085afd9 388 LINK_STATS_INC(link.chkerr);
emilmont 4:d827a085afd9 389 if (lpc_enetif->prxs[idx].statusinfo & EMAC_RINFO_LEN_ERR)
emilmont 4:d827a085afd9 390 LINK_STATS_INC(link.lenerr);
emilmont 4:d827a085afd9 391 #endif
emilmont 4:d827a085afd9 392
emilmont 4:d827a085afd9 393 /* Drop the frame */
emilmont 4:d827a085afd9 394 LINK_STATS_INC(link.drop);
emilmont 4:d827a085afd9 395
emilmont 4:d827a085afd9 396 /* Re-queue the pbuf for receive */
emilmont 4:d827a085afd9 397 lpc_enetif->rx_free_descs++;
emilmont 4:d827a085afd9 398 p = lpc_enetif->rxb[idx];
emilmont 4:d827a085afd9 399 lpc_enetif->rxb[idx] = NULL;
emilmont 4:d827a085afd9 400 lpc_rxqueue_pbuf(lpc_enetif, p);
emilmont 4:d827a085afd9 401
emilmont 4:d827a085afd9 402 LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
emilmont 4:d827a085afd9 403 ("lpc_low_level_input: Packet dropped with errors (0x%x)\n",
emilmont 4:d827a085afd9 404 lpc_enetif->prxs[idx].statusinfo));
emilmont 4:d827a085afd9 405
emilmont 4:d827a085afd9 406 p = NULL;
emilmont 4:d827a085afd9 407 } else {
emilmont 4:d827a085afd9 408 /* A packet is waiting, get length */
emilmont 4:d827a085afd9 409 length = (lpc_enetif->prxs[idx].statusinfo & 0x7FF) + 1;
emilmont 4:d827a085afd9 410
emilmont 4:d827a085afd9 411 /* Zero-copy */
emilmont 4:d827a085afd9 412 p = lpc_enetif->rxb[idx];
emilmont 4:d827a085afd9 413 p->len = (u16_t) length;
emilmont 4:d827a085afd9 414
emilmont 4:d827a085afd9 415 /* Free pbuf from descriptor */
emilmont 4:d827a085afd9 416 lpc_enetif->rxb[idx] = NULL;
emilmont 4:d827a085afd9 417 lpc_enetif->rx_free_descs++;
emilmont 4:d827a085afd9 418
emilmont 4:d827a085afd9 419 LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
emilmont 4:d827a085afd9 420 ("lpc_low_level_input: Packet received: %p, size %d (index=%d)\n",
emilmont 4:d827a085afd9 421 p, length, idx));
emilmont 4:d827a085afd9 422
emilmont 4:d827a085afd9 423 /* Save size */
emilmont 4:d827a085afd9 424 p->tot_len = (u16_t) length;
emilmont 4:d827a085afd9 425 LINK_STATS_INC(link.recv);
emilmont 4:d827a085afd9 426
emilmont 4:d827a085afd9 427 /* Queue new buffer(s) */
emilmont 4:d827a085afd9 428 lpc_rx_queue(lpc_enetif->netif);
emilmont 4:d827a085afd9 429 }
emilmont 4:d827a085afd9 430 }
emilmont 4:d827a085afd9 431
emilmont 4:d827a085afd9 432 #ifdef LOCK_RX_THREAD
emilmont 4:d827a085afd9 433 #if NO_SYS == 0
emilmont 4:d827a085afd9 434 sys_mutex_unlock(&lpc_enetif->TXLockMutex);
emilmont 4:d827a085afd9 435 #endif
emilmont 4:d827a085afd9 436 #endif
emilmont 4:d827a085afd9 437
emilmont 4:d827a085afd9 438 return p;
emilmont 4:d827a085afd9 439 }
emilmont 4:d827a085afd9 440
emilmont 4:d827a085afd9 441 /** \brief Attempt to read a packet from the EMAC interface.
emilmont 4:d827a085afd9 442 *
emilmont 4:d827a085afd9 443 * \param[in] netif the lwip network interface structure for this lpc_enetif
emilmont 4:d827a085afd9 444 */
emilmont 4:d827a085afd9 445 void lpc_enetif_input(struct netif *netif)
emilmont 4:d827a085afd9 446 {
emilmont 4:d827a085afd9 447 struct eth_hdr *ethhdr;
emilmont 4:d827a085afd9 448 struct pbuf *p;
emilmont 4:d827a085afd9 449
emilmont 4:d827a085afd9 450 /* move received packet into a new pbuf */
emilmont 4:d827a085afd9 451 p = lpc_low_level_input(netif);
emilmont 4:d827a085afd9 452 if (p == NULL)
emilmont 4:d827a085afd9 453 return;
emilmont 4:d827a085afd9 454
emilmont 4:d827a085afd9 455 /* points to packet payload, which starts with an Ethernet header */
emilmont 4:d827a085afd9 456 ethhdr = p->payload;
emilmont 4:d827a085afd9 457
emilmont 4:d827a085afd9 458 switch (htons(ethhdr->type)) {
emilmont 4:d827a085afd9 459 case ETHTYPE_IP:
emilmont 4:d827a085afd9 460 case ETHTYPE_ARP:
emilmont 4:d827a085afd9 461 #if PPPOE_SUPPORT
emilmont 4:d827a085afd9 462 case ETHTYPE_PPPOEDISC:
emilmont 4:d827a085afd9 463 case ETHTYPE_PPPOE:
emilmont 4:d827a085afd9 464 #endif /* PPPOE_SUPPORT */
emilmont 4:d827a085afd9 465 /* full packet is sent to tcpip_thread to process */
emilmont 4:d827a085afd9 466 if (netif->input(p, netif) != ERR_OK) {
emilmont 4:d827a085afd9 467 LWIP_DEBUGF(NETIF_DEBUG, ("lpc_enetif_input: IP input error\n"));
emilmont 4:d827a085afd9 468 /* Free buffer */
emilmont 4:d827a085afd9 469 pbuf_free(p);
emilmont 4:d827a085afd9 470 }
emilmont 4:d827a085afd9 471 break;
emilmont 4:d827a085afd9 472
emilmont 4:d827a085afd9 473 default:
emilmont 4:d827a085afd9 474 /* Return buffer */
emilmont 4:d827a085afd9 475 pbuf_free(p);
emilmont 4:d827a085afd9 476 break;
emilmont 4:d827a085afd9 477 }
emilmont 4:d827a085afd9 478 }
emilmont 4:d827a085afd9 479
emilmont 4:d827a085afd9 480 /** \brief Determine if the passed address is usable for the ethernet
emilmont 4:d827a085afd9 481 * DMA controller.
emilmont 4:d827a085afd9 482 *
emilmont 4:d827a085afd9 483 * \param[in] addr Address of packet to check for DMA safe operation
emilmont 4:d827a085afd9 484 * \return 1 if the packet address is not safe, otherwise 0
emilmont 4:d827a085afd9 485 */
emilmont 4:d827a085afd9 486 static s32_t lpc_packet_addr_notsafe(void *addr) {
emilmont 4:d827a085afd9 487 /* Check for legal address ranges */
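    /* 0x2007C000 - 0x20083FFF is the LPC17xx 32 KB AHB SRAM (two 16 KB banks);
       the driver treats only this region as DMA safe, so buffers anywhere
       else (main SRAM, flash) get bounced or rejected by the callers. */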
emilmont 4:d827a085afd9 488 if ((((u32_t) addr >= 0x2007C000) && ((u32_t) addr < 0x20083FFF))) {
emilmont 4:d827a085afd9 489 return 0;
emilmont 4:d827a085afd9 490 }
emilmont 4:d827a085afd9 491 return 1;
emilmont 4:d827a085afd9 492 }
emilmont 4:d827a085afd9 493
emilmont 4:d827a085afd9 494 /** \brief Sets up the TX descriptor ring buffers.
emilmont 4:d827a085afd9 495 *
emilmont 4:d827a085afd9 496 * This function sets up the descriptor list used for transmit packets.
emilmont 4:d827a085afd9 497 *
emilmont 4:d827a085afd9 498 * \param[in] lpc_enetif Pointer to driver data structure
emilmont 4:d827a085afd9 499 */
emilmont 4:d827a085afd9 500 static err_t lpc_tx_setup(struct lpc_enetdata *lpc_enetif)
emilmont 4:d827a085afd9 501 {
emilmont 4:d827a085afd9 502 s32_t idx;
emilmont 4:d827a085afd9 503
emilmont 4:d827a085afd9 504 /* Build TX descriptors for local buffers */
emilmont 4:d827a085afd9 505 for (idx = 0; idx < LPC_NUM_BUFF_TXDESCS; idx++) {
emilmont 4:d827a085afd9 506 lpc_enetif->ptxd[idx].control = 0;
emilmont 4:d827a085afd9 507 lpc_enetif->ptxs[idx].statusinfo = 0xFFFFFFFF;
emilmont 4:d827a085afd9 508 }
emilmont 4:d827a085afd9 509
emilmont 4:d827a085afd9 510 /* Setup pointers to TX structures */
emilmont 4:d827a085afd9 511 LPC_EMAC->TxDescriptor = (u32_t) &lpc_enetif->ptxd[0];
emilmont 4:d827a085afd9 512 LPC_EMAC->TxStatus = (u32_t) &lpc_enetif->ptxs[0];
emilmont 4:d827a085afd9 513 LPC_EMAC->TxDescriptorNumber = LPC_NUM_BUFF_TXDESCS - 1;
emilmont 4:d827a085afd9 514
emilmont 4:d827a085afd9 515 lpc_enetif->lpc_last_tx_idx = 0;
emilmont 4:d827a085afd9 516
emilmont 4:d827a085afd9 517 return ERR_OK;
emilmont 4:d827a085afd9 518 }
emilmont 4:d827a085afd9 519
emilmont 4:d827a085afd9 520 /** \brief Free TX buffers that are complete
emilmont 4:d827a085afd9 521 *
emilmont 4:d827a085afd9 522 * \param[in] lpc_enetif Pointer to driver data structure
emilmont 4:d827a085afd9 523 * \param[in] cidx EMAC current descriptor consumer index
emilmont 4:d827a085afd9 524 */
emilmont 4:d827a085afd9 525 static void lpc_tx_reclaim_st(struct lpc_enetdata *lpc_enetif, u32_t cidx)
emilmont 4:d827a085afd9 526 {
emilmont 4:d827a085afd9 527 #if NO_SYS == 0
emilmont 4:d827a085afd9 528 /* Get exclusive access */
emilmont 4:d827a085afd9 529 sys_mutex_lock(&lpc_enetif->TXLockMutex);
emilmont 4:d827a085afd9 530 #endif
emilmont 4:d827a085afd9 531
emilmont 4:d827a085afd9 532 while (cidx != lpc_enetif->lpc_last_tx_idx) {
emilmont 4:d827a085afd9 533 if (lpc_enetif->txb[lpc_enetif->lpc_last_tx_idx] != NULL) {
emilmont 4:d827a085afd9 534 LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
emilmont 4:d827a085afd9 535 ("lpc_tx_reclaim_st: Freeing packet %p (index %d)\n",
emilmont 4:d827a085afd9 536 lpc_enetif->txb[lpc_enetif->lpc_last_tx_idx],
emilmont 4:d827a085afd9 537 lpc_enetif->lpc_last_tx_idx));
emilmont 4:d827a085afd9 538 pbuf_free(lpc_enetif->txb[lpc_enetif->lpc_last_tx_idx]);
emilmont 4:d827a085afd9 539 lpc_enetif->txb[lpc_enetif->lpc_last_tx_idx] = NULL;
emilmont 4:d827a085afd9 540 }
emilmont 4:d827a085afd9 541
emilmont 4:d827a085afd9 542 #if NO_SYS == 0
emilmont 4:d827a085afd9 543 osSemaphoreRelease(lpc_enetif->xTXDCountSem.id);
emilmont 4:d827a085afd9 544 #endif
emilmont 4:d827a085afd9 545 lpc_enetif->lpc_last_tx_idx++;
emilmont 4:d827a085afd9 546 if (lpc_enetif->lpc_last_tx_idx >= LPC_NUM_BUFF_TXDESCS)
emilmont 4:d827a085afd9 547 lpc_enetif->lpc_last_tx_idx = 0;
emilmont 4:d827a085afd9 548 }
emilmont 4:d827a085afd9 549
emilmont 4:d827a085afd9 550 #if NO_SYS == 0
emilmont 4:d827a085afd9 551 /* Restore access */
emilmont 4:d827a085afd9 552 sys_mutex_unlock(&lpc_enetif->TXLockMutex);
emilmont 4:d827a085afd9 553 #endif
emilmont 4:d827a085afd9 554 }
emilmont 4:d827a085afd9 555
emilmont 4:d827a085afd9 556 /** \brief User call for freeing TX buffers that are complete
emilmont 4:d827a085afd9 557 *
emilmont 4:d827a085afd9 558 * \param[in] netif the lwip network interface structure for this lpc_enetif
emilmont 4:d827a085afd9 559 */
emilmont 4:d827a085afd9 560 void lpc_tx_reclaim(struct netif *netif)
emilmont 4:d827a085afd9 561 {
emilmont 4:d827a085afd9 562 lpc_tx_reclaim_st((struct lpc_enetdata *) netif->state,
emilmont 4:d827a085afd9 563 LPC_EMAC->TxConsumeIndex);
emilmont 4:d827a085afd9 564 }
emilmont 4:d827a085afd9 565
emilmont 4:d827a085afd9 566 /** \brief Polls if an available TX descriptor is ready. Can be used to
emilmont 4:d827a085afd9 567 * determine if the low level transmit function will block.
emilmont 4:d827a085afd9 568 *
emilmont 4:d827a085afd9 569 * \param[in] netif the lwip network interface structure for this lpc_enetif
emilmont 4:d827a085afd9 570 * \return Number of free TX descriptors, or 0 if none are available
emilmont 4:d827a085afd9 571 */
emilmont 4:d827a085afd9 572 s32_t lpc_tx_ready(struct netif *netif)
emilmont 4:d827a085afd9 573 {
emilmont 4:d827a085afd9 574 s32_t fb;
emilmont 4:d827a085afd9 575 u32_t idx, cidx;
emilmont 4:d827a085afd9 576
emilmont 4:d827a085afd9 577 cidx = LPC_EMAC->TxConsumeIndex;
emilmont 4:d827a085afd9 578 idx = LPC_EMAC->TxProduceIndex;
emilmont 4:d827a085afd9 579
emilmont 4:d827a085afd9 580 /* Determine number of free buffers */
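    /* The EMAC treats the TX ring as full when the produce index is one
       behind the consume index, so one descriptor always stays unused;
       hence the (LPC_NUM_BUFF_TXDESCS - 1) terms below. */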
emilmont 4:d827a085afd9 581 if (idx == cidx)
emilmont 4:d827a085afd9 582 fb = LPC_NUM_BUFF_TXDESCS;
emilmont 4:d827a085afd9 583 else if (cidx > idx)
emilmont 4:d827a085afd9 584 fb = (LPC_NUM_BUFF_TXDESCS - 1) -
emilmont 4:d827a085afd9 585 ((idx + LPC_NUM_BUFF_TXDESCS) - cidx);
emilmont 4:d827a085afd9 586 else
emilmont 4:d827a085afd9 587 fb = (LPC_NUM_BUFF_TXDESCS - 1) - (cidx - idx);
emilmont 4:d827a085afd9 588
emilmont 4:d827a085afd9 589 return fb;
emilmont 4:d827a085afd9 590 }
emilmont 4:d827a085afd9 591
emilmont 4:d827a085afd9 592 /** \brief Low level output of a packet. Never call this from an
emilmont 4:d827a085afd9 593 * interrupt context, as it may block until TX descriptors
emilmont 4:d827a085afd9 594 * become available.
emilmont 4:d827a085afd9 595 *
emilmont 4:d827a085afd9 596 * \param[in] netif the lwip network interface structure for this lpc_enetif
emilmont 4:d827a085afd9 597 * \param[in] p the MAC packet to send (e.g. IP packet including MAC addresses and type)
emilmont 4:d827a085afd9 598 * \return ERR_OK if the packet could be sent or an err_t value if the packet couldn't be sent
emilmont 4:d827a085afd9 599 */
emilmont 4:d827a085afd9 600 static err_t lpc_low_level_output(struct netif *netif, struct pbuf *p)
emilmont 4:d827a085afd9 601 {
emilmont 4:d827a085afd9 602 struct lpc_enetdata *lpc_enetif = netif->state;
emilmont 4:d827a085afd9 603 struct pbuf *q;
emilmont 4:d827a085afd9 604 u8_t *dst;
emilmont 4:d827a085afd9 605 u32_t idx;
emilmont 4:d827a085afd9 606 struct pbuf *np;
emilmont 4:d827a085afd9 607 u32_t dn, notdmasafe = 0;
emilmont 4:d827a085afd9 608
emilmont 4:d827a085afd9 609 /* Zero-copy TX buffers may be fragmented across multiple payload
emilmont 4:d827a085afd9 610 chains. Determine the number of descriptors needed for the
emilmont 4:d827a085afd9 611 transfer. The pbuf chaining can be a mess! */
emilmont 4:d827a085afd9 612 dn = (u32_t) pbuf_clen(p);
emilmont 4:d827a085afd9 613
emilmont 4:d827a085afd9 614 /* Test to make sure packet addresses are DMA safe. A DMA safe
emilmont 4:d827a085afd9 615 address is one that uses external memory or peripheral RAM.
emilmont 4:d827a085afd9 616 IRAM and FLASH are not safe! */
emilmont 4:d827a085afd9 617 for (q = p; q != NULL; q = q->next)
emilmont 4:d827a085afd9 618 notdmasafe += lpc_packet_addr_notsafe(q->payload);
emilmont 4:d827a085afd9 619
emilmont 4:d827a085afd9 620 #if LPC_TX_PBUF_BOUNCE_EN==1
emilmont 4:d827a085afd9 621 /* If the pbuf is not DMA safe, a new bounce buffer (pbuf) will be
emilmont 4:d827a085afd9 622 created that will be used instead. This requires a copy from the
emilmont 4:d827a085afd9 623 non-safe DMA region to the new pbuf */
emilmont 4:d827a085afd9 624 if (notdmasafe) {
emilmont 4:d827a085afd9 625 /* Allocate a pbuf in DMA memory */
emilmont 4:d827a085afd9 626 np = pbuf_alloc(PBUF_RAW, p->tot_len, PBUF_RAM);
emilmont 4:d827a085afd9 627 if (np == NULL)
emilmont 4:d827a085afd9 628 return ERR_MEM;
emilmont 4:d827a085afd9 629
emilmont 4:d827a085afd9 630 /* This buffer better be contiguous! */
emilmont 4:d827a085afd9 631 LWIP_ASSERT("lpc_low_level_output: New transmit pbuf is chained",
emilmont 4:d827a085afd9 632 (pbuf_clen(np) == 1));
emilmont 4:d827a085afd9 633
emilmont 4:d827a085afd9 634 /* Copy to DMA safe pbuf */
emilmont 4:d827a085afd9 635 dst = (u8_t *) np->payload;
emilmont 4:d827a085afd9 636 for(q = p; q != NULL; q = q->next) {
emilmont 4:d827a085afd9 637 /* Copy the buffer to the descriptor's buffer */
emilmont 4:d827a085afd9 638 MEMCPY(dst, (u8_t *) q->payload, q->len);
emilmont 4:d827a085afd9 639 dst += q->len;
emilmont 4:d827a085afd9 640 }
emilmont 4:d827a085afd9 641 np->len = p->tot_len;
emilmont 4:d827a085afd9 642
emilmont 4:d827a085afd9 643 LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
emilmont 4:d827a085afd9 644 ("lpc_low_level_output: Switched to DMA safe buffer, old=%p, new=%p\n",
emilmont 4:d827a085afd9 645 q, np));
emilmont 4:d827a085afd9 646
emilmont 4:d827a085afd9 647 /* use the new buffer for descriptor queueing. The original pbuf will
emilmont 4:d827a085afd9 648 be de-allocated outside this driver. */
emilmont 4:d827a085afd9 649 p = np;
emilmont 4:d827a085afd9 650 dn = 1;
emilmont 4:d827a085afd9 651 }
emilmont 4:d827a085afd9 652 #else
emilmont 4:d827a085afd9 653 if (notdmasafe)
emilmont 4:d827a085afd9 654 LWIP_ASSERT("lpc_low_level_output: Not a DMA safe pbuf",
emilmont 4:d827a085afd9 655 (notdmasafe == 0));
emilmont 4:d827a085afd9 656 #endif
emilmont 4:d827a085afd9 657
emilmont 4:d827a085afd9 658 /* Wait until enough descriptors are available for the transfer. */
emilmont 4:d827a085afd9 659 /* THIS WILL BLOCK UNTIL THERE ARE ENOUGH DESCRIPTORS AVAILABLE */
emilmont 4:d827a085afd9 660 while (dn > lpc_tx_ready(netif))
emilmont 4:d827a085afd9 661 #if NO_SYS == 0
emilmont 4:d827a085afd9 662 osSemaphoreWait(lpc_enetif->xTXDCountSem.id, osWaitForever);
emilmont 4:d827a085afd9 663 #else
emilmont 4:d827a085afd9 664 osDelay(1);
emilmont 4:d827a085afd9 665 #endif
emilmont 4:d827a085afd9 666
emilmont 4:d827a085afd9 667 /* Get free TX buffer index */
emilmont 4:d827a085afd9 668 idx = LPC_EMAC->TxProduceIndex;
emilmont 4:d827a085afd9 669
emilmont 4:d827a085afd9 670 #if NO_SYS == 0
emilmont 4:d827a085afd9 671 /* Get exclusive access */
emilmont 4:d827a085afd9 672 sys_mutex_lock(&lpc_enetif->TXLockMutex);
emilmont 4:d827a085afd9 673 #endif
emilmont 4:d827a085afd9 674
emilmont 4:d827a085afd9 675 /* Prevent LWIP from de-allocating this pbuf. The driver will
emilmont 4:d827a085afd9 676 free it once it's been transmitted. */
emilmont 4:d827a085afd9 677 if (!notdmasafe)
emilmont 4:d827a085afd9 678 pbuf_ref(p);
emilmont 4:d827a085afd9 679
emilmont 4:d827a085afd9 680 /* Setup transfers */
emilmont 4:d827a085afd9 681 q = p;
emilmont 4:d827a085afd9 682 while (dn > 0) {
emilmont 4:d827a085afd9 683 dn--;
emilmont 4:d827a085afd9 684
emilmont 4:d827a085afd9 685 /* Only save pointer to free on last descriptor */
emilmont 4:d827a085afd9 686 if (dn == 0) {
emilmont 4:d827a085afd9 687 /* Save size of packet and signal it's ready */
emilmont 4:d827a085afd9 688 lpc_enetif->ptxd[idx].control = (q->len - 1) | EMAC_TCTRL_INT |
emilmont 4:d827a085afd9 689 EMAC_TCTRL_LAST;
emilmont 4:d827a085afd9 690 lpc_enetif->txb[idx] = p;
emilmont 4:d827a085afd9 691 }
emilmont 4:d827a085afd9 692 else {
emilmont 4:d827a085afd9 693 /* Save size of packet, descriptor is not last */
emilmont 4:d827a085afd9 694 lpc_enetif->ptxd[idx].control = (q->len - 1) | EMAC_TCTRL_INT;
emilmont 4:d827a085afd9 695 lpc_enetif->txb[idx] = NULL;
emilmont 4:d827a085afd9 696 }
emilmont 4:d827a085afd9 697
emilmont 4:d827a085afd9 698 LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
emilmont 4:d827a085afd9 699 ("lpc_low_level_output: pbuf packet(%p) sent, chain#=%d,"
emilmont 4:d827a085afd9 700 " size = %d (index=%d)\n", q->payload, dn, q->len, idx));
emilmont 4:d827a085afd9 701
emilmont 4:d827a085afd9 702 lpc_enetif->ptxd[idx].packet = (u32_t) q->payload;
emilmont 4:d827a085afd9 703
emilmont 4:d827a085afd9 704 q = q->next;
emilmont 4:d827a085afd9 705
emilmont 4:d827a085afd9 706 idx++;
emilmont 4:d827a085afd9 707 if (idx >= LPC_NUM_BUFF_TXDESCS)
emilmont 4:d827a085afd9 708 idx = 0;
emilmont 4:d827a085afd9 709 }
emilmont 4:d827a085afd9 710
emilmont 4:d827a085afd9 711 LPC_EMAC->TxProduceIndex = idx;
emilmont 4:d827a085afd9 712
emilmont 4:d827a085afd9 713 LINK_STATS_INC(link.xmit);
emilmont 4:d827a085afd9 714
emilmont 4:d827a085afd9 715 #if NO_SYS == 0
emilmont 4:d827a085afd9 716 /* Restore access */
emilmont 4:d827a085afd9 717 sys_mutex_unlock(&lpc_enetif->TXLockMutex);
emilmont 4:d827a085afd9 718 #endif
emilmont 4:d827a085afd9 719
emilmont 4:d827a085afd9 720 return ERR_OK;
emilmont 4:d827a085afd9 721 }
emilmont 4:d827a085afd9 722
emilmont 4:d827a085afd9 723 /** \brief LPC EMAC interrupt handler.
emilmont 4:d827a085afd9 724 *
emilmont 4:d827a085afd9 725 * This function handles the transmit, receive, and error interrupt of
emilmont 4:d827a085afd9 726 * the LPC177x_8x. This is meant to be used when NO_SYS=0.
emilmont 4:d827a085afd9 727 */
emilmont 4:d827a085afd9 728 void ENET_IRQHandler(void)
emilmont 4:d827a085afd9 729 {
emilmont 4:d827a085afd9 730 #if NO_SYS == 1
emilmont 4:d827a085afd9 731 /* Interrupts are not used without an RTOS */
emilmont 4:d827a085afd9 732 NVIC_DisableIRQ(ENET_IRQn);
emilmont 4:d827a085afd9 733 #else
emilmont 4:d827a085afd9 734 uint32_t ints;
emilmont 4:d827a085afd9 735
emilmont 4:d827a085afd9 736 /* Interrupts are of 2 groups - transmit or receive. Based on the
emilmont 4:d827a085afd9 737 interrupt, kick off the receive or transmit (cleanup) task */
emilmont 4:d827a085afd9 738
emilmont 4:d827a085afd9 739 /* Get pending interrupts */
emilmont 4:d827a085afd9 740 ints = LPC_EMAC->IntStatus;
emilmont 4:d827a085afd9 741
emilmont 4:d827a085afd9 742 if (ints & RXINTGROUP) {
emilmont 4:d827a085afd9 743 /* RX group interrupt(s): Give semaphore to wakeup RX receive task.*/
emilmont 4:d827a085afd9 744 sys_sem_signal(&lpc_enetdata.RxSem);
emilmont 4:d827a085afd9 745 }
emilmont 4:d827a085afd9 746
emilmont 4:d827a085afd9 747 if (ints & TXINTGROUP) {
emilmont 4:d827a085afd9 748 /* TX group interrupt(s): Give semaphore to wakeup TX cleanup task. */
emilmont 4:d827a085afd9 749 sys_sem_signal(&lpc_enetdata.TxCleanSem);
emilmont 4:d827a085afd9 750 }
emilmont 4:d827a085afd9 751
emilmont 4:d827a085afd9 752 /* Clear pending interrupts */
emilmont 4:d827a085afd9 753 LPC_EMAC->IntClear = ints;
emilmont 4:d827a085afd9 754 #endif
emilmont 4:d827a085afd9 755 }
emilmont 4:d827a085afd9 756
emilmont 4:d827a085afd9 757 #if NO_SYS == 0
emilmont 4:d827a085afd9 758 /** \brief Packet reception task
emilmont 4:d827a085afd9 759 *
emilmont 4:d827a085afd9 760 * This task is called when a packet is received. It will
emilmont 4:d827a085afd9 761 * pass the packet to the LWIP core.
emilmont 4:d827a085afd9 762 *
emilmont 4:d827a085afd9 763 * \param[in] pvParameters Not used yet
emilmont 4:d827a085afd9 764 */
emilmont 4:d827a085afd9 765 static void packet_rx(void* pvParameters) {
emilmont 4:d827a085afd9 766 struct lpc_enetdata *lpc_enetif = pvParameters;
emilmont 4:d827a085afd9 767
emilmont 4:d827a085afd9 768 while (1) {
emilmont 4:d827a085afd9 769 /* Wait for receive task to wakeup */
emilmont 4:d827a085afd9 770 sys_arch_sem_wait(&lpc_enetif->RxSem, 0);
emilmont 4:d827a085afd9 771
emilmont 4:d827a085afd9 772 /* Process packets until all empty */
emilmont 4:d827a085afd9 773 while (LPC_EMAC->RxConsumeIndex != LPC_EMAC->RxProduceIndex)
emilmont 4:d827a085afd9 774 lpc_enetif_input(lpc_enetif->netif);
emilmont 4:d827a085afd9 775 }
emilmont 4:d827a085afd9 776 }
emilmont 4:d827a085afd9 777
emilmont 4:d827a085afd9 778 /** \brief Transmit cleanup task
emilmont 4:d827a085afd9 779 *
emilmont 4:d827a085afd9 780 * This task is called when a transmit interrupt occurs and
emilmont 4:d827a085afd9 781 * reclaims the pbuf and descriptor used for the packet once
emilmont 4:d827a085afd9 782 * the packet has been transferred.
emilmont 4:d827a085afd9 783 *
emilmont 4:d827a085afd9 784 * \param[in] pvParameters Not used yet
emilmont 4:d827a085afd9 785 */
emilmont 4:d827a085afd9 786 static void packet_tx(void* pvParameters) {
emilmont 4:d827a085afd9 787 struct lpc_enetdata *lpc_enetif = pvParameters;
emilmont 4:d827a085afd9 788 s32_t idx;
emilmont 4:d827a085afd9 789
emilmont 4:d827a085afd9 790 while (1) {
emilmont 4:d827a085afd9 791 /* Wait for transmit cleanup task to wakeup */
emilmont 4:d827a085afd9 792 sys_arch_sem_wait(&lpc_enetif->TxCleanSem, 0);
emilmont 4:d827a085afd9 793
emilmont 4:d827a085afd9 794 /* Error handling for TX underruns. This should never happen unless
emilmont 4:d827a085afd9 795 something is holding the bus or the clocks are going too slow. It
emilmont 4:d827a085afd9 796 can probably be safely removed. */
emilmont 4:d827a085afd9 797 if (LPC_EMAC->IntStatus & EMAC_INT_TX_UNDERRUN) {
emilmont 4:d827a085afd9 798 LINK_STATS_INC(link.err);
emilmont 4:d827a085afd9 799 LINK_STATS_INC(link.drop);
emilmont 4:d827a085afd9 800
emilmont 4:d827a085afd9 801 #if NO_SYS == 0
emilmont 4:d827a085afd9 802 /* Get exclusive access */
emilmont 4:d827a085afd9 803 sys_mutex_lock(&lpc_enetif->TXLockMutex);
emilmont 4:d827a085afd9 804 #endif
emilmont 4:d827a085afd9 805 /* Reset the TX side */
emilmont 4:d827a085afd9 806 LPC_EMAC->MAC1 |= EMAC_MAC1_RES_TX;
emilmont 4:d827a085afd9 807 LPC_EMAC->IntClear = EMAC_INT_TX_UNDERRUN;
emilmont 4:d827a085afd9 808
emilmont 4:d827a085afd9 809 /* De-allocate all queued TX pbufs */
emilmont 4:d827a085afd9 810 for (idx = 0; idx < LPC_NUM_BUFF_TXDESCS; idx++) {
emilmont 4:d827a085afd9 811 if (lpc_enetif->txb[idx] != NULL) {
emilmont 4:d827a085afd9 812 pbuf_free(lpc_enetif->txb[idx]);
emilmont 4:d827a085afd9 813 lpc_enetif->txb[idx] = NULL;
emilmont 4:d827a085afd9 814 }
emilmont 4:d827a085afd9 815 }
emilmont 4:d827a085afd9 816
emilmont 4:d827a085afd9 817 #if NO_SYS == 0
emilmont 4:d827a085afd9 818 /* Restore access */
emilmont 4:d827a085afd9 819 sys_mutex_unlock(&lpc_enetif->TXLockMutex);
emilmont 4:d827a085afd9 820 #endif
emilmont 4:d827a085afd9 821 /* Start TX side again */
emilmont 4:d827a085afd9 822 lpc_tx_setup(lpc_enetif);
emilmont 4:d827a085afd9 823 } else {
emilmont 4:d827a085afd9 824 /* Free TX buffers that are done sending */
emilmont 4:d827a085afd9 825 lpc_tx_reclaim(lpc_enetdata.netif);
emilmont 4:d827a085afd9 826 }
emilmont 4:d827a085afd9 827 }
emilmont 4:d827a085afd9 828 }
emilmont 4:d827a085afd9 829 #endif
emilmont 4:d827a085afd9 830
emilmont 4:d827a085afd9 831 /** \brief Low level init of the MAC and PHY.
emilmont 4:d827a085afd9 832 *
emilmont 4:d827a085afd9 833 * \param[in] netif Pointer to LWIP netif structure
emilmont 4:d827a085afd9 834 */
emilmont 4:d827a085afd9 835 static err_t low_level_init(struct netif *netif)
emilmont 4:d827a085afd9 836 {
emilmont 4:d827a085afd9 837 struct lpc_enetdata *lpc_enetif = netif->state;
emilmont 4:d827a085afd9 838 err_t err = ERR_OK;
emilmont 4:d827a085afd9 839
emilmont 4:d827a085afd9 840 /* Enable MII clocking */
emilmont 4:d827a085afd9 841 LPC_SC->PCONP |= CLKPWR_PCONP_PCENET;
emilmont 4:d827a085afd9 842
emilmont 4:d827a085afd9 843 LPC_PINCON->PINSEL2 = 0x50150105; /* Enable P1 Ethernet Pins. */
emilmont 4:d827a085afd9 844 LPC_PINCON->PINSEL3 = (LPC_PINCON->PINSEL3 & ~0x0000000F) | 0x00000005;
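    /* These PINSEL values select the Ethernet functions on P1.0/P1.1 (TXD),
       P1.4 (TX_EN), P1.8 (CRS), P1.9/P1.10 (RXD), P1.14 (RX_ER) and
       P1.15 (REF_CLK); PINSEL3 adds P1.16 (MDC) and P1.17 (MDIO), i.e. the
       standard LPC176x RMII pinout. */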
emilmont 4:d827a085afd9 845
emilmont 4:d827a085afd9 846 /* Reset all MAC logic */
emilmont 4:d827a085afd9 847 LPC_EMAC->MAC1 = EMAC_MAC1_RES_TX | EMAC_MAC1_RES_MCS_TX |
emilmont 4:d827a085afd9 848 EMAC_MAC1_RES_RX | EMAC_MAC1_RES_MCS_RX | EMAC_MAC1_SIM_RES |
emilmont 4:d827a085afd9 849 EMAC_MAC1_SOFT_RES;
emilmont 4:d827a085afd9 850 LPC_EMAC->Command = EMAC_CR_REG_RES | EMAC_CR_TX_RES | EMAC_CR_RX_RES |
emilmont 4:d827a085afd9 851 EMAC_CR_PASS_RUNT_FRM;
emilmont 4:d827a085afd9 852 osDelay(10);
emilmont 4:d827a085afd9 853
emilmont 4:d827a085afd9 854 /* Initial MAC initialization */
emilmont 4:d827a085afd9 855 LPC_EMAC->MAC1 = EMAC_MAC1_PASS_ALL;
emilmont 4:d827a085afd9 856 LPC_EMAC->MAC2 = EMAC_MAC2_CRC_EN | EMAC_MAC2_PAD_EN |
emilmont 4:d827a085afd9 857 EMAC_MAC2_VLAN_PAD_EN;
emilmont 4:d827a085afd9 858 LPC_EMAC->MAXF = EMAC_ETH_MAX_FLEN;
emilmont 4:d827a085afd9 859
emilmont 4:d827a085afd9 860 /* Set RMII management clock rate to lowest speed */
emilmont 4:d827a085afd9 861 LPC_EMAC->MCFG = EMAC_MCFG_CLK_SEL(11) | EMAC_MCFG_RES_MII;
emilmont 4:d827a085afd9 862 LPC_EMAC->MCFG &= ~EMAC_MCFG_RES_MII;
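    /* The first write selects the slowest MDC divider while holding the MII
       management block in reset; the second write releases that reset. */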
emilmont 4:d827a085afd9 863
emilmont 4:d827a085afd9 864 /* Maximum number of retries, 0x37 collision window, gap */
emilmont 4:d827a085afd9 865 LPC_EMAC->CLRT = EMAC_CLRT_DEF;
emilmont 4:d827a085afd9 866 LPC_EMAC->IPGR = EMAC_IPGR_P1_DEF | EMAC_IPGR_P2_DEF;
emilmont 4:d827a085afd9 867
emilmont 4:d827a085afd9 868 #if LPC_EMAC_RMII
emilmont 4:d827a085afd9 869 /* RMII setup */
emilmont 4:d827a085afd9 870 LPC_EMAC->Command = EMAC_CR_PASS_RUNT_FRM | EMAC_CR_RMII;
emilmont 4:d827a085afd9 871 #else
emilmont 4:d827a085afd9 872 /* MII setup */
emilmont 4:d827a085afd9 873 LPC_EMAC->Command = EMAC_CR_PASS_RUNT_FRM;
emilmont 4:d827a085afd9 874 #endif
emilmont 4:d827a085afd9 875
emilmont 4:d827a085afd9 876 /* Initialize the PHY and reset */
emilmont 4:d827a085afd9 877 err = lpc_phy_init(netif, LPC_EMAC_RMII);
emilmont 4:d827a085afd9 878 if (err != ERR_OK)
emilmont 4:d827a085afd9 879 return err;
emilmont 4:d827a085afd9 880
emilmont 4:d827a085afd9 881 /* Save station address */
emilmont 4:d827a085afd9 882 LPC_EMAC->SA2 = (u32_t) netif->hwaddr[0] |
emilmont 4:d827a085afd9 883 (((u32_t) netif->hwaddr[1]) << 8);
emilmont 4:d827a085afd9 884 LPC_EMAC->SA1 = (u32_t) netif->hwaddr[2] |
emilmont 4:d827a085afd9 885 (((u32_t) netif->hwaddr[3]) << 8);
emilmont 4:d827a085afd9 886 LPC_EMAC->SA0 = (u32_t) netif->hwaddr[4] |
emilmont 4:d827a085afd9 887 (((u32_t) netif->hwaddr[5]) << 8);
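    /* Note the registers are loaded in reverse order: SA2 holds the first two
       bytes of the MAC address and SA0 holds the last two. */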
emilmont 4:d827a085afd9 888
emilmont 4:d827a085afd9 889 /* Setup transmit and receive descriptors */
emilmont 4:d827a085afd9 890 if (lpc_tx_setup(lpc_enetif) != ERR_OK)
emilmont 4:d827a085afd9 891 return ERR_BUF;
emilmont 4:d827a085afd9 892 if (lpc_rx_setup(lpc_enetif) != ERR_OK)
emilmont 4:d827a085afd9 893 return ERR_BUF;
emilmont 4:d827a085afd9 894
emilmont 4:d827a085afd9 895 /* Enable packet reception */
emilmont 4:d827a085afd9 896 #if IP_SOF_BROADCAST_RECV
emilmont 4:d827a085afd9 897 LPC_EMAC->RxFilterCtrl = EMAC_RFC_PERFECT_EN | EMAC_RFC_BCAST_EN;
emilmont 4:d827a085afd9 898 #else
emilmont 4:d827a085afd9 899 LPC_EMAC->RxFilterCtrl = EMAC_RFC_PERFECT_EN;
emilmont 4:d827a085afd9 900 #endif
emilmont 4:d827a085afd9 901
emilmont 4:d827a085afd9 902 /* Clear and enable rx/tx interrupts */
emilmont 4:d827a085afd9 903 LPC_EMAC->IntClear = 0xFFFF;
emilmont 4:d827a085afd9 904 LPC_EMAC->IntEnable = RXINTGROUP | TXINTGROUP;
emilmont 4:d827a085afd9 905
emilmont 4:d827a085afd9 906 /* Enable RX and TX */
emilmont 4:d827a085afd9 907 LPC_EMAC->Command |= EMAC_CR_RX_EN | EMAC_CR_TX_EN;
emilmont 4:d827a085afd9 908 LPC_EMAC->MAC1 |= EMAC_MAC1_REC_EN;
emilmont 4:d827a085afd9 909
emilmont 4:d827a085afd9 910 return err;
emilmont 4:d827a085afd9 911 }
emilmont 4:d827a085afd9 912
emilmont 4:d827a085afd9 913 /* This function provides a method for the PHY to setup the EMAC
emilmont 4:d827a085afd9 914 for the PHY negotiated duplex mode */
emilmont 4:d827a085afd9 915 void lpc_emac_set_duplex(int full_duplex)
emilmont 4:d827a085afd9 916 {
emilmont 4:d827a085afd9 917 if (full_duplex) {
emilmont 4:d827a085afd9 918 LPC_EMAC->MAC2 |= EMAC_MAC2_FULL_DUP;
emilmont 4:d827a085afd9 919 LPC_EMAC->Command |= EMAC_CR_FULL_DUP;
emilmont 4:d827a085afd9 920 LPC_EMAC->IPGT = EMAC_IPGT_FULL_DUP;
emilmont 4:d827a085afd9 921 } else {
emilmont 4:d827a085afd9 922 LPC_EMAC->MAC2 &= ~EMAC_MAC2_FULL_DUP;
emilmont 4:d827a085afd9 923 LPC_EMAC->Command &= ~EMAC_CR_FULL_DUP;
emilmont 4:d827a085afd9 924 LPC_EMAC->IPGT = EMAC_IPGT_HALF_DUP;
emilmont 4:d827a085afd9 925 }
emilmont 4:d827a085afd9 926 }
emilmont 4:d827a085afd9 927
emilmont 4:d827a085afd9 928 /* This function provides a method for the PHY to setup the EMAC
emilmont 4:d827a085afd9 929 for the PHY negotiated bit rate */
emilmont 4:d827a085afd9 930 void lpc_emac_set_speed(int mbs_100)
emilmont 4:d827a085afd9 931 {
emilmont 4:d827a085afd9 932 if (mbs_100)
emilmont 4:d827a085afd9 933 LPC_EMAC->SUPP = EMAC_SUPP_SPEED;
emilmont 4:d827a085afd9 934 else
emilmont 4:d827a085afd9 935 LPC_EMAC->SUPP = 0;
emilmont 4:d827a085afd9 936 }
emilmont 4:d827a085afd9 937
emilmont 4:d827a085afd9 938 /**
emilmont 4:d827a085afd9 939 * This function is the ethernet packet send function. It calls
emilmont 4:d827a085afd9 940 * etharp_output after checking link status.
emilmont 4:d827a085afd9 941 *
emilmont 4:d827a085afd9 942 * \param[in] netif the lwip network interface structure for this lpc_enetif
emilmont 4:d827a085afd9 943 * \param[in] q Pointer to pbuf to send
emilmont 4:d827a085afd9 944 * \param[in] ipaddr IP address
emilmont 4:d827a085afd9 945 * \return ERR_OK or error code
emilmont 4:d827a085afd9 946 */
emilmont 4:d827a085afd9 947 err_t lpc_etharp_output(struct netif *netif, struct pbuf *q,
emilmont 4:d827a085afd9 948 ip_addr_t *ipaddr)
emilmont 4:d827a085afd9 949 {
emilmont 4:d827a085afd9 950 /* Only send the packet if the link is up */
emilmont 4:d827a085afd9 951 if (netif->flags & NETIF_FLAG_LINK_UP)
emilmont 4:d827a085afd9 952 return etharp_output(netif, q, ipaddr);
emilmont 4:d827a085afd9 953
emilmont 4:d827a085afd9 954 return ERR_CONN;
emilmont 4:d827a085afd9 955 }
emilmont 4:d827a085afd9 956
emilmont 4:d827a085afd9 957 #if NO_SYS == 0
emilmont 4:d827a085afd9 958 /* periodic PHY status update */
emilmont 4:d827a085afd9 959 void phy_update(void const *nif) {
emilmont 4:d827a085afd9 960 lpc_phy_sts_sm((struct netif*)nif);
emilmont 4:d827a085afd9 961 }
emilmont 4:d827a085afd9 962 osTimerDef(phy_update, phy_update);
emilmont 4:d827a085afd9 963 #endif
emilmont 4:d827a085afd9 964
emilmont 4:d827a085afd9 965 /**
emilmont 4:d827a085afd9 966 * Should be called at the beginning of the program to set up the
emilmont 4:d827a085afd9 967 * network interface.
emilmont 4:d827a085afd9 968 *
emilmont 4:d827a085afd9 969 * This function should be passed as a parameter to netif_add().
emilmont 4:d827a085afd9 970 *
emilmont 4:d827a085afd9 971 * @param[in] netif the lwip network interface structure for this lpc_enetif
emilmont 4:d827a085afd9 972 * @return ERR_OK if the loopif is initialized
emilmont 4:d827a085afd9 973 * ERR_MEM if private data couldn't be allocated
emilmont 4:d827a085afd9 974 * any other err_t on error
emilmont 4:d827a085afd9 975 */
emilmont 4:d827a085afd9 976 err_t lpc_enetif_init(struct netif *netif)
emilmont 4:d827a085afd9 977 {
emilmont 4:d827a085afd9 978 err_t err;
emilmont 4:d827a085afd9 979
emilmont 4:d827a085afd9 980 LWIP_ASSERT("netif != NULL", (netif != NULL));
emilmont 4:d827a085afd9 981
emilmont 4:d827a085afd9 982 lpc_enetdata.netif = netif;
emilmont 4:d827a085afd9 983
emilmont 4:d827a085afd9 984 /* set MAC hardware address */
emilmont 4:d827a085afd9 985 mbed_mac_address((char *)netif->hwaddr);
emilmont 4:d827a085afd9 986 netif->hwaddr_len = ETHARP_HWADDR_LEN;
emilmont 4:d827a085afd9 987
emilmont 4:d827a085afd9 988 /* maximum transfer unit */
emilmont 4:d827a085afd9 989 netif->mtu = 1500;
emilmont 4:d827a085afd9 990
emilmont 4:d827a085afd9 991 /* device capabilities */
emilmont 4:d827a085afd9 992 netif->flags = NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET;
emilmont 4:d827a085afd9 993
emilmont 4:d827a085afd9 994 /* Initialize the hardware */
emilmont 4:d827a085afd9 995 netif->state = &lpc_enetdata;
emilmont 4:d827a085afd9 996 err = low_level_init(netif);
emilmont 4:d827a085afd9 997 if (err != ERR_OK)
emilmont 4:d827a085afd9 998 return err;
emilmont 4:d827a085afd9 999
emilmont 4:d827a085afd9 1000 #if LWIP_NETIF_HOSTNAME
emilmont 4:d827a085afd9 1001 /* Initialize interface hostname */
emilmont 4:d827a085afd9 1002 netif->hostname = "lwiplpc";
emilmont 4:d827a085afd9 1003 #endif /* LWIP_NETIF_HOSTNAME */
emilmont 4:d827a085afd9 1004
emilmont 4:d827a085afd9 1005 netif->name[0] = 'e';
emilmont 4:d827a085afd9 1006 netif->name[1] = 'n';
emilmont 4:d827a085afd9 1007
emilmont 4:d827a085afd9 1008 netif->output = lpc_etharp_output;
emilmont 4:d827a085afd9 1009 netif->linkoutput = lpc_low_level_output;
emilmont 4:d827a085afd9 1010
emilmont 4:d827a085afd9 1011 /* CMSIS-RTOS, start tasks */
emilmont 4:d827a085afd9 1012 #if NO_SYS == 0
emilmont 4:d827a085afd9 1013 #ifdef CMSIS_OS_RTX
emilmont 4:d827a085afd9 1014 memset(lpc_enetdata.xTXDCountSem.data, 0, sizeof(lpc_enetdata.xTXDCountSem.data));
emilmont 4:d827a085afd9 1015 lpc_enetdata.xTXDCountSem.def.semaphore = lpc_enetdata.xTXDCountSem.data;
emilmont 4:d827a085afd9 1016 #endif
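    /* Counting semaphore initialised with one token per TX descriptor;
       lpc_tx_reclaim_st() releases a token for every descriptor it frees and
       lpc_low_level_output() blocks on it while too few descriptors are free. */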
emilmont 4:d827a085afd9 1017 lpc_enetdata.xTXDCountSem.id = osSemaphoreCreate(&lpc_enetdata.xTXDCountSem.def, LPC_NUM_BUFF_TXDESCS);
emilmont 4:d827a085afd9 1018 LWIP_ASSERT("xTXDCountSem creation error", (lpc_enetdata.xTXDCountSem.id != NULL));
emilmont 4:d827a085afd9 1019
emilmont 4:d827a085afd9 1020 err = sys_mutex_new(&lpc_enetdata.TXLockMutex);
emilmont 4:d827a085afd9 1021 LWIP_ASSERT("TXLockMutex creation error", (err == ERR_OK));
emilmont 4:d827a085afd9 1022
emilmont 4:d827a085afd9 1023 /* Packet receive task */
emilmont 4:d827a085afd9 1024 err = sys_sem_new(&lpc_enetdata.RxSem, 0);
emilmont 4:d827a085afd9 1025 LWIP_ASSERT("RxSem creation error", (err == ERR_OK));
emilmont 4:d827a085afd9 1026 sys_thread_new("receive_thread", packet_rx, netif->state, DEFAULT_THREAD_STACKSIZE, RX_PRIORITY);
emilmont 4:d827a085afd9 1027
emilmont 4:d827a085afd9 1028 /* Transmit cleanup task */
emilmont 4:d827a085afd9 1029 err = sys_sem_new(&lpc_enetdata.TxCleanSem, 0);
emilmont 4:d827a085afd9 1030 LWIP_ASSERT("TxCleanSem creation error", (err == ERR_OK));
emilmont 4:d827a085afd9 1031 sys_thread_new("txclean_thread", packet_tx, netif->state, DEFAULT_THREAD_STACKSIZE, TX_PRIORITY);
emilmont 4:d827a085afd9 1032
emilmont 4:d827a085afd9 1033 /* periodic PHY status update */
emilmont 4:d827a085afd9 1034 osTimerId phy_timer = osTimerCreate(osTimer(phy_update), osTimerPeriodic, (void *)netif);
emilmont 4:d827a085afd9 1035 osTimerStart(phy_timer, 250);
emilmont 4:d827a085afd9 1036 #endif
emilmont 4:d827a085afd9 1037
emilmont 4:d827a085afd9 1038 return ERR_OK;
emilmont 4:d827a085afd9 1039 }
emilmont 4:d827a085afd9 1040
emilmont 4:d827a085afd9 1041 /**
emilmont 4:d827a085afd9 1042 * @}
emilmont 4:d827a085afd9 1043 */
emilmont 4:d827a085afd9 1044
emilmont 4:d827a085afd9 1045 /* --------------------------------- End Of File ------------------------------ */
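
For reference, a minimal sketch of how lpc_enetif_init() is normally handed to lwIP's netif_add(), as the comment above lpc_enetif_init() describes. It assumes lwIP 1.4-style APIs (netif_add(), tcpip_input(), IP4_ADDR()), that tcpip_init() has already been called, and that lpc17_emac.h declares lpc_enetif_init(); the names ethernet_bringup, eth_netif and the addresses are purely illustrative.

#include "lwip/netif.h"
#include "lwip/tcpip.h"
#include "lpc17_emac.h"

static struct netif eth_netif;

void ethernet_bringup(void)
{
    ip_addr_t ipaddr, netmask, gw;

    /* Illustrative static addressing; DHCP could be used instead. */
    IP4_ADDR(&ipaddr,  192, 168, 1, 10);
    IP4_ADDR(&netmask, 255, 255, 255, 0);
    IP4_ADDR(&gw,      192, 168, 1, 1);

    /* Register this driver with lwIP: lpc_enetif_init() performs the
       low-level MAC/PHY setup above, and tcpip_input() is the NO_SYS == 0
       input function that forwards received frames to the tcpip thread. */
    netif_add(&eth_netif, &ipaddr, &netmask, &gw, NULL,
              lpc_enetif_init, tcpip_input);
    netif_set_default(&eth_netif);
    netif_set_up(&eth_netif);
}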