KEIS

Fork of lwip-eth by mbed official

Committer: bogdanm
Date: Mon Aug 19 18:39:00 2013 +0300
Revision: 8:5754e05385b8
Parent: 6:59b01b9349d5
Child: 9:59490137c7a7
Sync with official mbed library release 66

Who changed what in which revision?

User  Revision  Line number  New contents of line
emilmont 4:d827a085afd9 1 /**********************************************************************
emilmont 5:698d868a5285 2 * $Id$ lpc17_emac.c 2011-11-20
emilmont 4:d827a085afd9 3 *//**
emilmont 5:698d868a5285 4 * @file lpc17_emac.c
emilmont 5:698d868a5285 5 * @brief LPC17 ethernet driver for LWIP
emilmont 5:698d868a5285 6 * @version 1.0
emilmont 5:698d868a5285 7 * @date 20. Nov. 2011
emilmont 5:698d868a5285 8 * @author NXP MCU SW Application Team
bogdanm 8:5754e05385b8 9 *
emilmont 4:d827a085afd9 10 * Copyright(C) 2011, NXP Semiconductor
emilmont 4:d827a085afd9 11 * All rights reserved.
emilmont 4:d827a085afd9 12 *
emilmont 4:d827a085afd9 13 ***********************************************************************
emilmont 4:d827a085afd9 14 * Software that is described herein is for illustrative purposes only
emilmont 4:d827a085afd9 15 * which provides customers with programming information regarding the
emilmont 4:d827a085afd9 16 * products. This software is supplied "AS IS" without any warranties.
emilmont 4:d827a085afd9 17 * NXP Semiconductors assumes no responsibility or liability for the
emilmont 4:d827a085afd9 18 * use of the software, conveys no license or title under any patent,
emilmont 4:d827a085afd9 19 * copyright, or mask work right to the product. NXP Semiconductors
emilmont 4:d827a085afd9 20 * reserves the right to make changes in the software without
emilmont 4:d827a085afd9 21 * notification. NXP Semiconductors also make no representation or
emilmont 4:d827a085afd9 22 * warranty that such application will be suitable for the specified
emilmont 4:d827a085afd9 23 * use without further testing or modification.
emilmont 4:d827a085afd9 24 **********************************************************************/
emilmont 4:d827a085afd9 25
emilmont 4:d827a085afd9 26 #include "lwip/opt.h"
emilmont 4:d827a085afd9 27 #include "lwip/sys.h"
emilmont 4:d827a085afd9 28 #include "lwip/def.h"
emilmont 4:d827a085afd9 29 #include "lwip/mem.h"
emilmont 4:d827a085afd9 30 #include "lwip/pbuf.h"
emilmont 4:d827a085afd9 31 #include "lwip/stats.h"
emilmont 4:d827a085afd9 32 #include "lwip/snmp.h"
emilmont 4:d827a085afd9 33 #include "netif/etharp.h"
emilmont 4:d827a085afd9 34 #include "netif/ppp_oe.h"
emilmont 4:d827a085afd9 35
emilmont 4:d827a085afd9 36 #include "lpc17xx_emac.h"
emilmont 4:d827a085afd9 37 #include "lpc17_emac.h"
emilmont 4:d827a085afd9 38 #include "lpc_emac_config.h"
emilmont 4:d827a085afd9 39 #include "lpc_phy.h"
emilmont 4:d827a085afd9 40 #include "sys_arch.h"
emilmont 4:d827a085afd9 41
emilmont 4:d827a085afd9 42 #include "mbed_interface.h"
emilmont 4:d827a085afd9 43 #include <string.h>
emilmont 4:d827a085afd9 44
emilmont 4:d827a085afd9 45 #ifndef LPC_EMAC_RMII
emilmont 4:d827a085afd9 46 #error LPC_EMAC_RMII is not defined!
emilmont 4:d827a085afd9 47 #endif
emilmont 4:d827a085afd9 48
emilmont 4:d827a085afd9 49 #if LPC_NUM_BUFF_TXDESCS < 2
emilmont 4:d827a085afd9 50 #error LPC_NUM_BUFF_TXDESCS must be at least 2
emilmont 4:d827a085afd9 51 #endif
emilmont 4:d827a085afd9 52
emilmont 4:d827a085afd9 53 #if LPC_NUM_BUFF_RXDESCS < 3
emilmont 4:d827a085afd9 54 #error LPC_NUM_BUFF_RXDESCS must be at least 3
emilmont 4:d827a085afd9 55 #endif
emilmont 4:d827a085afd9 56
emilmont 5:698d868a5285 57 /** @defgroup lwip17xx_emac_DRIVER lpc17 EMAC driver for LWIP
emilmont 4:d827a085afd9 58 * @ingroup lwip_emac
emilmont 4:d827a085afd9 59 *
emilmont 4:d827a085afd9 60 * @{
emilmont 4:d827a085afd9 61 */
emilmont 4:d827a085afd9 62
emilmont 4:d827a085afd9 63 #if NO_SYS == 0
emilmont 4:d827a085afd9 64 /** \brief Driver transmit and receive thread priorities
bogdanm 8:5754e05385b8 65 *
emilmont 4:d827a085afd9 66 * Thread priorities for receive thread and TX cleanup thread. Alter
emilmont 4:d827a085afd9 67 * to prioritize receive or transmit bandwidth. In a heavily loaded
emilmont 4:d827a085afd9 68 * system or with LWIP_DEBUG enabled, the priorities might be better
emilmont 4:d827a085afd9 69 * left the same. */
emilmont 4:d827a085afd9 70 #define RX_PRIORITY (osPriorityNormal)
emilmont 4:d827a085afd9 71 #define TX_PRIORITY (osPriorityNormal)
emilmont 4:d827a085afd9 72
emilmont 4:d827a085afd9 73 /** \brief Debug output formatter lock define
bogdanm 8:5754e05385b8 74 *
emilmont 4:d827a085afd9 75 * When using FreeRTOS and with LWIP_DEBUG enabled, enabling this
emilmont 4:d827a085afd9 76 * define will allow RX debug messages to not interleave with the
emilmont 4:d827a085afd9 77 * TX messages (so they are actually readable). Not enabling this
emilmont 4:d827a085afd9 78 * define when the system is under load will cause the output to
emilmont 4:d827a085afd9 79 * be unreadable. There is a small tradeoff in performance for this
emilmont 4:d827a085afd9 80 * so use it only for debug. */
emilmont 4:d827a085afd9 81 //#define LOCK_RX_THREAD
emilmont 4:d827a085afd9 82
emilmont 4:d827a085afd9 83 /** \brief Receive group interrupts
emilmont 4:d827a085afd9 84 */
emilmont 4:d827a085afd9 85 #define RXINTGROUP (EMAC_INT_RX_OVERRUN | EMAC_INT_RX_ERR | EMAC_INT_RX_DONE)
emilmont 4:d827a085afd9 86
emilmont 4:d827a085afd9 87 /** \brief Transmit group interrupts
emilmont 4:d827a085afd9 88 */
emilmont 4:d827a085afd9 89 #define TXINTGROUP (EMAC_INT_TX_UNDERRUN | EMAC_INT_TX_ERR | EMAC_INT_TX_DONE)
emilmont 4:d827a085afd9 90
emilmont 4:d827a085afd9 91 #else
emilmont 4:d827a085afd9 92 #define RXINTGROUP 0
emilmont 4:d827a085afd9 93 #define TXINTGROUP 0
emilmont 4:d827a085afd9 94 #endif
emilmont 4:d827a085afd9 95
emilmont 4:d827a085afd9 96 /** \brief Structure of a TX/RX descriptor
emilmont 4:d827a085afd9 97 */
emilmont 4:d827a085afd9 98 typedef struct
emilmont 4:d827a085afd9 99 {
emilmont 5:698d868a5285 100 volatile u32_t packet; /**< Pointer to buffer */
emilmont 5:698d868a5285 101 volatile u32_t control; /**< Control word */
emilmont 4:d827a085afd9 102 } LPC_TXRX_DESC_T;
emilmont 4:d827a085afd9 103
emilmont 4:d827a085afd9 104 /** \brief Structure of a RX status entry
emilmont 4:d827a085afd9 105 */
emilmont 4:d827a085afd9 106 typedef struct
emilmont 4:d827a085afd9 107 {
emilmont 5:698d868a5285 108 volatile u32_t statusinfo; /**< RX status word */
emilmont 5:698d868a5285 109 volatile u32_t statushashcrc; /**< RX hash CRC */
emilmont 4:d827a085afd9 110 } LPC_TXRX_STATUS_T;
emilmont 4:d827a085afd9 111
emilmont 4:d827a085afd9 112 /* LPC EMAC driver data structure */
emilmont 4:d827a085afd9 113 struct lpc_enetdata {
emilmont 4:d827a085afd9 114 /* prxs must be 8 byte aligned! */
emilmont 5:698d868a5285 115 LPC_TXRX_STATUS_T prxs[LPC_NUM_BUFF_RXDESCS]; /**< Pointer to RX statuses */
emilmont 5:698d868a5285 116 struct netif *netif; /**< Reference back to LWIP parent netif */
emilmont 5:698d868a5285 117 LPC_TXRX_DESC_T ptxd[LPC_NUM_BUFF_TXDESCS]; /**< Pointer to TX descriptor list */
emilmont 5:698d868a5285 118 LPC_TXRX_STATUS_T ptxs[LPC_NUM_BUFF_TXDESCS]; /**< Pointer to TX statuses */
emilmont 5:698d868a5285 119 LPC_TXRX_DESC_T prxd[LPC_NUM_BUFF_RXDESCS]; /**< Pointer to RX descriptor list */
emilmont 5:698d868a5285 120 struct pbuf *rxb[LPC_NUM_BUFF_RXDESCS]; /**< RX pbuf pointer list, zero-copy mode */
emilmont 5:698d868a5285 121 u32_t rx_fill_desc_index; /**< RX descriptor next available index */
emilmont 5:698d868a5285 122 volatile u32_t rx_free_descs; /**< Count of free RX descriptors */
emilmont 5:698d868a5285 123 struct pbuf *txb[LPC_NUM_BUFF_TXDESCS]; /**< TX pbuf pointer list, zero-copy mode */
emilmont 5:698d868a5285 124 u32_t lpc_last_tx_idx; /**< TX last descriptor index, zero-copy mode */
emilmont 4:d827a085afd9 125 #if NO_SYS == 0
emilmont 5:698d868a5285 126 sys_sem_t RxSem; /**< RX receive thread wakeup semaphore */
emilmont 5:698d868a5285 127 sys_sem_t TxCleanSem; /**< TX cleanup thread wakeup semaphore */
emilmont 5:698d868a5285 128 sys_mutex_t TXLockMutex; /**< TX critical section mutex */
emilmont 5:698d868a5285 129 sys_sem_t xTXDCountSem; /**< TX free buffer counting semaphore */
emilmont 4:d827a085afd9 130 #endif
emilmont 4:d827a085afd9 131 };
emilmont 4:d827a085afd9 132
emilmont 5:698d868a5285 133 #if defined(TARGET_LPC4088)
emilmont 5:698d868a5285 134 # if defined (__ICCARM__)
emilmont 5:698d868a5285 135 # define ETHMEM_SECTION
emilmont 5:698d868a5285 136 # elif defined(TOOLCHAIN_GCC_CR)
emilmont 5:698d868a5285 137 # define ETHMEM_SECTION __attribute__((section(".data.$RamPeriph32")))
emilmont 5:698d868a5285 138 # else
emilmont 5:698d868a5285 139 # define ETHMEM_SECTION __attribute__((section("AHBSRAM1"),aligned))
emilmont 5:698d868a5285 140 # endif
bogdanm 8:5754e05385b8 141 #elif defined(TARGET_LPC1768)
bogdanm 8:5754e05385b8 142 # if defined(TOOLCHAIN_GCC_ARM)
bogdanm 8:5754e05385b8 143 # define ETHMEM_SECTION __attribute__((section("AHBSRAM1"),aligned))
bogdanm 8:5754e05385b8 144 # endif
bogdanm 8:5754e05385b8 145 #endif
bogdanm 8:5754e05385b8 146
bogdanm 8:5754e05385b8 147 #ifndef ETHMEM_SECTION
bogdanm 8:5754e05385b8 148 #define ETHMEM_SECTION ALIGNED(8)
emilmont 5:698d868a5285 149 #endif
emilmont 5:698d868a5285 150
emilmont 4:d827a085afd9 151 /** \brief LPC EMAC driver work data
emilmont 4:d827a085afd9 152 */
bogdanm 8:5754e05385b8 153 ETHMEM_SECTION struct lpc_enetdata lpc_enetdata;
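
For example, when building for TARGET_LPC1768 with TOOLCHAIN_GCC_ARM, the declaration above effectively expands to the line below (a preprocessor-expansion sketch, not extra driver code), placing the descriptor and status arrays in the AHB SRAM bank that the EMAC DMA engine can address:

    __attribute__((section("AHBSRAM1"),aligned)) struct lpc_enetdata lpc_enetdata;
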
emilmont 4:d827a085afd9 154
emilmont 4:d827a085afd9 155 /* Write a value via the MII link (non-blocking) */
emilmont 4:d827a085afd9 156 void lpc_mii_write_noblock(u32_t PhyReg, u32_t Value)
emilmont 4:d827a085afd9 157 {
emilmont 5:698d868a5285 158 /* Write value at PHY address and register */
emilmont 5:698d868a5285 159 LPC_EMAC->MADR = (LPC_PHYDEF_PHYADDR << 8) | PhyReg;
emilmont 5:698d868a5285 160 LPC_EMAC->MWTD = Value;
emilmont 4:d827a085afd9 161 }
emilmont 4:d827a085afd9 162
emilmont 4:d827a085afd9 163 /* Write a value via the MII link (blocking) */
emilmont 4:d827a085afd9 164 err_t lpc_mii_write(u32_t PhyReg, u32_t Value)
emilmont 4:d827a085afd9 165 {
emilmont 5:698d868a5285 166 u32_t mst = 250;
emilmont 5:698d868a5285 167 err_t sts = ERR_OK;
emilmont 4:d827a085afd9 168
emilmont 5:698d868a5285 169 /* Write value at PHY address and register */
emilmont 5:698d868a5285 170 lpc_mii_write_noblock(PhyReg, Value);
emilmont 4:d827a085afd9 171
emilmont 5:698d868a5285 172 /* Wait for unbusy status */
emilmont 5:698d868a5285 173 while (mst > 0) {
emilmont 5:698d868a5285 174 sts = LPC_EMAC->MIND;
emilmont 5:698d868a5285 175 if ((sts & EMAC_MIND_BUSY) == 0)
emilmont 5:698d868a5285 176 mst = 0;
emilmont 5:698d868a5285 177 else {
emilmont 5:698d868a5285 178 mst--;
emilmont 5:698d868a5285 179 osDelay(1);
emilmont 5:698d868a5285 180 }
emilmont 5:698d868a5285 181 }
emilmont 4:d827a085afd9 182
emilmont 5:698d868a5285 183 if (sts != 0)
emilmont 5:698d868a5285 184 sts = ERR_TIMEOUT;
emilmont 4:d827a085afd9 185
emilmont 5:698d868a5285 186 return sts;
emilmont 4:d827a085afd9 187 }
emilmont 4:d827a085afd9 188
emilmont 4:d827a085afd9 189 /* Reads current MII link busy status */
emilmont 4:d827a085afd9 190 u32_t lpc_mii_is_busy(void)
emilmont 4:d827a085afd9 191 {
emilmont 5:698d868a5285 192 return (u32_t) (LPC_EMAC->MIND & EMAC_MIND_BUSY);
emilmont 4:d827a085afd9 193 }
emilmont 4:d827a085afd9 194
emilmont 4:d827a085afd9 195 /* Starts a read operation via the MII link (non-blocking) */
emilmont 4:d827a085afd9 196 u32_t lpc_mii_read_data(void)
emilmont 4:d827a085afd9 197 {
emilmont 5:698d868a5285 198 u32_t data = LPC_EMAC->MRDD;
emilmont 5:698d868a5285 199 LPC_EMAC->MCMD = 0;
emilmont 4:d827a085afd9 200
emilmont 5:698d868a5285 201 return data;
emilmont 4:d827a085afd9 202 }
emilmont 4:d827a085afd9 203
emilmont 4:d827a085afd9 204 /* Starts a read operation via the MII link (non-blocking) */
bogdanm 8:5754e05385b8 205 void lpc_mii_read_noblock(u32_t PhyReg)
emilmont 4:d827a085afd9 206 {
emilmont 5:698d868a5285 207 /* Read value at PHY address and register */
emilmont 5:698d868a5285 208 LPC_EMAC->MADR = (LPC_PHYDEF_PHYADDR << 8) | PhyReg;
emilmont 5:698d868a5285 209 LPC_EMAC->MCMD = EMAC_MCMD_READ;
emilmont 4:d827a085afd9 210 }
emilmont 4:d827a085afd9 211
emilmont 4:d827a085afd9 212 /* Read a value via the MII link (blocking) */
bogdanm 8:5754e05385b8 213 err_t lpc_mii_read(u32_t PhyReg, u32_t *data)
emilmont 4:d827a085afd9 214 {
emilmont 5:698d868a5285 215 u32_t mst = 250;
emilmont 5:698d868a5285 216 err_t sts = ERR_OK;
emilmont 4:d827a085afd9 217
emilmont 5:698d868a5285 218 /* Read value at PHY address and register */
emilmont 5:698d868a5285 219 lpc_mii_read_noblock(PhyReg);
emilmont 4:d827a085afd9 220
emilmont 5:698d868a5285 221 /* Wait for unbusy status */
emilmont 5:698d868a5285 222 while (mst > 0) {
emilmont 5:698d868a5285 223 sts = LPC_EMAC->MIND & ~EMAC_MIND_MII_LINK_FAIL;
emilmont 5:698d868a5285 224 if ((sts & EMAC_MIND_BUSY) == 0) {
emilmont 5:698d868a5285 225 mst = 0;
emilmont 5:698d868a5285 226 *data = LPC_EMAC->MRDD;
emilmont 5:698d868a5285 227 } else {
emilmont 5:698d868a5285 228 mst--;
emilmont 5:698d868a5285 229 osDelay(1);
emilmont 5:698d868a5285 230 }
emilmont 5:698d868a5285 231 }
emilmont 4:d827a085afd9 232
emilmont 5:698d868a5285 233 LPC_EMAC->MCMD = 0;
emilmont 4:d827a085afd9 234
emilmont 5:698d868a5285 235 if (sts != 0)
emilmont 5:698d868a5285 236 sts = ERR_TIMEOUT;
emilmont 4:d827a085afd9 237
emilmont 5:698d868a5285 238 return sts;
emilmont 4:d827a085afd9 239 }
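
A minimal caller sketch (not part of the driver) using the blocking read above to poll the standard MII basic status register — register address 1, link-status bit 2 per IEEE 802.3; in practice the PHY driver behind lpc_phy.h does this for you:

    u32_t bmsr;
    if (lpc_mii_read(1, &bmsr) == ERR_OK) {
        if (bmsr & (1 << 2)) {
            /* BMSR link-status bit set: PHY reports link up */
        }
    }
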
emilmont 4:d827a085afd9 240
emilmont 4:d827a085afd9 241 /** \brief Queues a pbuf into the RX descriptor list
emilmont 4:d827a085afd9 242 *
emilmont 4:d827a085afd9 243 * \param[in] lpc_enetif Pointer to the driver data structure
emilmont 4:d827a085afd9 244 * \param[in] p Pointer to pbuf to queue
emilmont 4:d827a085afd9 245 */
emilmont 4:d827a085afd9 246 static void lpc_rxqueue_pbuf(struct lpc_enetdata *lpc_enetif, struct pbuf *p)
emilmont 4:d827a085afd9 247 {
emilmont 5:698d868a5285 248 u32_t idx;
emilmont 4:d827a085afd9 249
emilmont 5:698d868a5285 250 /* Get next free descriptor index */
emilmont 5:698d868a5285 251 idx = lpc_enetif->rx_fill_desc_index;
emilmont 4:d827a085afd9 252
emilmont 5:698d868a5285 253 /* Setup descriptor and clear statuses */
emilmont 5:698d868a5285 254 lpc_enetif->prxd[idx].control = EMAC_RCTRL_INT | ((u32_t) (p->len - 1));
emilmont 5:698d868a5285 255 lpc_enetif->prxd[idx].packet = (u32_t) p->payload;
emilmont 5:698d868a5285 256 lpc_enetif->prxs[idx].statusinfo = 0xFFFFFFFF;
emilmont 5:698d868a5285 257 lpc_enetif->prxs[idx].statushashcrc = 0xFFFFFFFF;
emilmont 4:d827a085afd9 258
emilmont 5:698d868a5285 259 /* Save pbuf pointer for push to network layer later */
emilmont 5:698d868a5285 260 lpc_enetif->rxb[idx] = p;
emilmont 4:d827a085afd9 261
emilmont 5:698d868a5285 262 /* Wrap at end of descriptor list */
emilmont 5:698d868a5285 263 idx++;
emilmont 5:698d868a5285 264 if (idx >= LPC_NUM_BUFF_RXDESCS)
emilmont 5:698d868a5285 265 idx = 0;
emilmont 4:d827a085afd9 266
emilmont 5:698d868a5285 267 /* Queue descriptor(s) */
emilmont 5:698d868a5285 268 lpc_enetif->rx_free_descs -= 1;
emilmont 5:698d868a5285 269 lpc_enetif->rx_fill_desc_index = idx;
emilmont 5:698d868a5285 270 LPC_EMAC->RxConsumeIndex = idx;
emilmont 4:d827a085afd9 271
emilmont 5:698d868a5285 272 LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
emilmont 5:698d868a5285 273 ("lpc_rxqueue_pbuf: pbuf packet queued: %p (free desc=%d)\n", p,
emilmont 5:698d868a5285 274 lpc_enetif->rx_free_descs));
emilmont 4:d827a085afd9 275 }
emilmont 4:d827a085afd9 276
emilmont 4:d827a085afd9 277 /** \brief Attempt to allocate and requeue a new pbuf for RX
emilmont 4:d827a085afd9 278 *
emilmont 4:d827a085afd9 279 * \param[in] netif Pointer to the netif structure
emilmont 4:d827a085afd9 280 * \returns The number of pbufs allocated and requeued, or 0 if none could be queued
emilmont 4:d827a085afd9 281 */
emilmont 4:d827a085afd9 282 s32_t lpc_rx_queue(struct netif *netif)
emilmont 4:d827a085afd9 283 {
emilmont 5:698d868a5285 284 struct lpc_enetdata *lpc_enetif = netif->state;
emilmont 5:698d868a5285 285 struct pbuf *p;
emilmont 5:698d868a5285 286 s32_t queued = 0;
emilmont 4:d827a085afd9 287
emilmont 5:698d868a5285 288 /* Attempt to requeue as many packets as possible */
emilmont 5:698d868a5285 289 while (lpc_enetif->rx_free_descs > 0) {
emilmont 5:698d868a5285 290 /* Allocate a pbuf from the pool. We need to allocate at the
emilmont 5:698d868a5285 291 maximum size as we don't know the size of the yet to be
emilmont 5:698d868a5285 292 received packet. */
emilmont 5:698d868a5285 293 p = pbuf_alloc(PBUF_RAW, (u16_t) EMAC_ETH_MAX_FLEN, PBUF_RAM);
emilmont 5:698d868a5285 294 if (p == NULL) {
emilmont 5:698d868a5285 295 LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
emilmont 5:698d868a5285 296 ("lpc_rx_queue: could not allocate RX pbuf (free desc=%d)\n",
emilmont 5:698d868a5285 297 lpc_enetif->rx_free_descs));
emilmont 5:698d868a5285 298 return queued;
emilmont 5:698d868a5285 299 }
emilmont 4:d827a085afd9 300
emilmont 5:698d868a5285 301 /* pbufs allocated from the RAM pool should be non-chained. */
emilmont 5:698d868a5285 302 LWIP_ASSERT("lpc_rx_queue: pbuf is not contiguous (chained)",
emilmont 5:698d868a5285 303 pbuf_clen(p) <= 1);
emilmont 4:d827a085afd9 304
emilmont 5:698d868a5285 305 /* Queue packet */
emilmont 5:698d868a5285 306 lpc_rxqueue_pbuf(lpc_enetif, p);
emilmont 4:d827a085afd9 307
emilmont 5:698d868a5285 308 /* Update queued count */
emilmont 5:698d868a5285 309 queued++;
emilmont 5:698d868a5285 310 }
emilmont 4:d827a085afd9 311
emilmont 5:698d868a5285 312 return queued;
emilmont 4:d827a085afd9 313 }
emilmont 4:d827a085afd9 314
emilmont 4:d827a085afd9 315 /** \brief Sets up the RX descriptor ring buffers.
bogdanm 8:5754e05385b8 316 *
emilmont 4:d827a085afd9 317 * This function sets up the descriptor list used for receive packets.
emilmont 4:d827a085afd9 318 *
emilmont 4:d827a085afd9 319 * \param[in] lpc_enetif Pointer to driver data structure
emilmont 4:d827a085afd9 320 * \returns Always returns ERR_OK
emilmont 4:d827a085afd9 321 */
emilmont 4:d827a085afd9 322 static err_t lpc_rx_setup(struct lpc_enetdata *lpc_enetif)
emilmont 4:d827a085afd9 323 {
emilmont 5:698d868a5285 324 /* Setup pointers to RX structures */
emilmont 5:698d868a5285 325 LPC_EMAC->RxDescriptor = (u32_t) &lpc_enetif->prxd[0];
emilmont 5:698d868a5285 326 LPC_EMAC->RxStatus = (u32_t) &lpc_enetif->prxs[0];
emilmont 5:698d868a5285 327 LPC_EMAC->RxDescriptorNumber = LPC_NUM_BUFF_RXDESCS - 1;
emilmont 4:d827a085afd9 328
emilmont 5:698d868a5285 329 lpc_enetif->rx_free_descs = LPC_NUM_BUFF_RXDESCS;
emilmont 5:698d868a5285 330 lpc_enetif->rx_fill_desc_index = 0;
emilmont 4:d827a085afd9 331
emilmont 5:698d868a5285 332 /* Build RX buffer and descriptors */
emilmont 5:698d868a5285 333 lpc_rx_queue(lpc_enetif->netif);
emilmont 4:d827a085afd9 334
emilmont 5:698d868a5285 335 return ERR_OK;
emilmont 4:d827a085afd9 336 }
emilmont 4:d827a085afd9 337
emilmont 4:d827a085afd9 338 /** \brief Allocates a pbuf and returns the data from the incoming packet.
emilmont 4:d827a085afd9 339 *
emilmont 4:d827a085afd9 340 * \param[in] netif the lwip network interface structure for this lpc_enetif
emilmont 4:d827a085afd9 341 * \return a pbuf filled with the received packet (including MAC header)
emilmont 4:d827a085afd9 342 * NULL on memory error
emilmont 4:d827a085afd9 343 */
emilmont 4:d827a085afd9 344 static struct pbuf *lpc_low_level_input(struct netif *netif)
emilmont 4:d827a085afd9 345 {
emilmont 5:698d868a5285 346 struct lpc_enetdata *lpc_enetif = netif->state;
emilmont 5:698d868a5285 347 struct pbuf *p = NULL;
emilmont 5:698d868a5285 348 u32_t idx, length;
emilmont 4:d827a085afd9 349
emilmont 4:d827a085afd9 350 #ifdef LOCK_RX_THREAD
emilmont 4:d827a085afd9 351 #if NO_SYS == 0
emilmont 5:698d868a5285 352 /* Get exclusive access */
emilmont 5:698d868a5285 353 sys_mutex_lock(&lpc_enetif->TXLockMutex);
emilmont 4:d827a085afd9 354 #endif
emilmont 4:d827a085afd9 355 #endif
emilmont 4:d827a085afd9 356
emilmont 5:698d868a5285 357 /* Monitor RX overrun status. This should never happen unless
emilmont 5:698d868a5285 358 (possibly) the internal bus is being held up by something.
emilmont 5:698d868a5285 359 Unless your system is running at a very low clock speed or
emilmont 5:698d868a5285 360 the internal buses can be held up for long periods, this
emilmont 5:698d868a5285 361 check can probably be removed safely. */
emilmont 5:698d868a5285 362 if (LPC_EMAC->IntStatus & EMAC_INT_RX_OVERRUN) {
emilmont 5:698d868a5285 363 LINK_STATS_INC(link.err);
emilmont 5:698d868a5285 364 LINK_STATS_INC(link.drop);
emilmont 4:d827a085afd9 365
emilmont 5:698d868a5285 366 /* Temporarily disable RX */
emilmont 5:698d868a5285 367 LPC_EMAC->MAC1 &= ~EMAC_MAC1_REC_EN;
emilmont 4:d827a085afd9 368
emilmont 5:698d868a5285 369 /* Reset the RX side */
emilmont 5:698d868a5285 370 LPC_EMAC->MAC1 |= EMAC_MAC1_RES_RX;
emilmont 5:698d868a5285 371 LPC_EMAC->IntClear = EMAC_INT_RX_OVERRUN;
emilmont 4:d827a085afd9 372
emilmont 5:698d868a5285 373 /* De-allocate all queued RX pbufs */
emilmont 5:698d868a5285 374 for (idx = 0; idx < LPC_NUM_BUFF_RXDESCS; idx++) {
emilmont 5:698d868a5285 375 if (lpc_enetif->rxb[idx] != NULL) {
emilmont 5:698d868a5285 376 pbuf_free(lpc_enetif->rxb[idx]);
emilmont 5:698d868a5285 377 lpc_enetif->rxb[idx] = NULL;
emilmont 5:698d868a5285 378 }
emilmont 5:698d868a5285 379 }
emilmont 4:d827a085afd9 380
emilmont 5:698d868a5285 381 /* Start RX side again */
emilmont 5:698d868a5285 382 lpc_rx_setup(lpc_enetif);
emilmont 4:d827a085afd9 383
emilmont 5:698d868a5285 384 /* Re-enable RX */
emilmont 5:698d868a5285 385 LPC_EMAC->MAC1 |= EMAC_MAC1_REC_EN;
emilmont 4:d827a085afd9 386
emilmont 4:d827a085afd9 387 #ifdef LOCK_RX_THREAD
emilmont 4:d827a085afd9 388 #if NO_SYS == 0
emilmont 5:698d868a5285 389 sys_mutex_unlock(&lpc_enetif->TXLockMutex);
emilmont 4:d827a085afd9 390 #endif
emilmont 4:d827a085afd9 391 #endif
emilmont 4:d827a085afd9 392
emilmont 5:698d868a5285 393 return NULL;
emilmont 5:698d868a5285 394 }
emilmont 4:d827a085afd9 395
emilmont 5:698d868a5285 396 /* Determine if a frame has been received */
emilmont 5:698d868a5285 397 length = 0;
emilmont 5:698d868a5285 398 idx = LPC_EMAC->RxConsumeIndex;
emilmont 5:698d868a5285 399 if (LPC_EMAC->RxProduceIndex != idx) {
emilmont 5:698d868a5285 400 /* Handle errors */
emilmont 5:698d868a5285 401 if (lpc_enetif->prxs[idx].statusinfo & (EMAC_RINFO_CRC_ERR |
emilmont 5:698d868a5285 402 EMAC_RINFO_SYM_ERR | EMAC_RINFO_ALIGN_ERR | EMAC_RINFO_LEN_ERR)) {
emilmont 4:d827a085afd9 403 #if LINK_STATS
emilmont 5:698d868a5285 404 if (lpc_enetif->prxs[idx].statusinfo & (EMAC_RINFO_CRC_ERR |
emilmont 5:698d868a5285 405 EMAC_RINFO_SYM_ERR | EMAC_RINFO_ALIGN_ERR))
emilmont 5:698d868a5285 406 LINK_STATS_INC(link.chkerr);
emilmont 5:698d868a5285 407 if (lpc_enetif->prxs[idx].statusinfo & EMAC_RINFO_LEN_ERR)
emilmont 5:698d868a5285 408 LINK_STATS_INC(link.lenerr);
emilmont 4:d827a085afd9 409 #endif
emilmont 4:d827a085afd9 410
emilmont 5:698d868a5285 411 /* Drop the frame */
emilmont 5:698d868a5285 412 LINK_STATS_INC(link.drop);
emilmont 4:d827a085afd9 413
emilmont 5:698d868a5285 414 /* Re-queue the pbuf for receive */
emilmont 5:698d868a5285 415 lpc_enetif->rx_free_descs++;
emilmont 5:698d868a5285 416 p = lpc_enetif->rxb[idx];
emilmont 5:698d868a5285 417 lpc_enetif->rxb[idx] = NULL;
emilmont 5:698d868a5285 418 lpc_rxqueue_pbuf(lpc_enetif, p);
emilmont 4:d827a085afd9 419
emilmont 5:698d868a5285 420 LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
emilmont 5:698d868a5285 421 ("lpc_low_level_input: Packet dropped with errors (0x%x)\n",
emilmont 5:698d868a5285 422 lpc_enetif->prxs[idx].statusinfo));
bogdanm 8:5754e05385b8 423
emilmont 5:698d868a5285 424 p = NULL;
emilmont 5:698d868a5285 425 } else {
emilmont 5:698d868a5285 426 /* A packet is waiting, get length */
emilmont 5:698d868a5285 427 length = (lpc_enetif->prxs[idx].statusinfo & 0x7FF) + 1;
emilmont 4:d827a085afd9 428
emilmont 5:698d868a5285 429 /* Zero-copy */
emilmont 5:698d868a5285 430 p = lpc_enetif->rxb[idx];
emilmont 5:698d868a5285 431 p->len = (u16_t) length;
emilmont 4:d827a085afd9 432
bogdanm 8:5754e05385b8 433 /* Free pbuf from descriptor */
emilmont 5:698d868a5285 434 lpc_enetif->rxb[idx] = NULL;
emilmont 5:698d868a5285 435 lpc_enetif->rx_free_descs++;
emilmont 4:d827a085afd9 436
bogdanm 8:5754e05385b8 437 /* Attempt to queue new buffer(s) */
bogdanm 8:5754e05385b8 438 if (lpc_rx_queue(lpc_enetif->netif) == 0) {
bogdanm 8:5754e05385b8 439 /* Drop the frame due to OOM. */
bogdanm 8:5754e05385b8 440 LINK_STATS_INC(link.drop);
bogdanm 8:5754e05385b8 441
bogdanm 8:5754e05385b8 442 /* Re-queue the pbuf for receive */
bogdanm 8:5754e05385b8 443 lpc_rxqueue_pbuf(lpc_enetif, p);
bogdanm 8:5754e05385b8 444
bogdanm 8:5754e05385b8 445 LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
bogdanm 8:5754e05385b8 446 ("lpc_low_level_input: Packet index %d dropped for OOM\n",
bogdanm 8:5754e05385b8 447 idx));
bogdanm 8:5754e05385b8 448
bogdanm 8:5754e05385b8 449 #ifdef LOCK_RX_THREAD
bogdanm 8:5754e05385b8 450 #if NO_SYS == 0
bogdanm 8:5754e05385b8 451 sys_mutex_unlock(&lpc_enetif->TXLockMutex);
bogdanm 8:5754e05385b8 452 #endif
bogdanm 8:5754e05385b8 453 #endif
bogdanm 8:5754e05385b8 454
bogdanm 8:5754e05385b8 455 return NULL;
bogdanm 8:5754e05385b8 456 }
bogdanm 8:5754e05385b8 457
emilmont 5:698d868a5285 458 LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
emilmont 5:698d868a5285 459 ("lpc_low_level_input: Packet received: %p, size %d (index=%d)\n",
emilmont 5:698d868a5285 460 p, length, idx));
emilmont 4:d827a085afd9 461
emilmont 5:698d868a5285 462 /* Save size */
emilmont 5:698d868a5285 463 p->tot_len = (u16_t) length;
emilmont 5:698d868a5285 464 LINK_STATS_INC(link.recv);
emilmont 5:698d868a5285 465 }
emilmont 5:698d868a5285 466 }
emilmont 4:d827a085afd9 467
emilmont 4:d827a085afd9 468 #ifdef LOCK_RX_THREAD
emilmont 4:d827a085afd9 469 #if NO_SYS == 0
emilmont 5:698d868a5285 470 sys_mutex_unlock(&lpc_enetif->TXLockMutex);
emilmont 4:d827a085afd9 471 #endif
emilmont 4:d827a085afd9 472 #endif
emilmont 4:d827a085afd9 473
bogdanm 8:5754e05385b8 474 return p;
emilmont 4:d827a085afd9 475 }
emilmont 4:d827a085afd9 476
emilmont 4:d827a085afd9 477 /** \brief Attempt to read a packet from the EMAC interface.
emilmont 4:d827a085afd9 478 *
emilmont 4:d827a085afd9 479 * \param[in] netif the lwip network interface structure for this lpc_enetif
emilmont 4:d827a085afd9 480 */
emilmont 4:d827a085afd9 481 void lpc_enetif_input(struct netif *netif)
emilmont 4:d827a085afd9 482 {
emilmont 5:698d868a5285 483 struct eth_hdr *ethhdr;
emilmont 5:698d868a5285 484 struct pbuf *p;
emilmont 4:d827a085afd9 485
emilmont 5:698d868a5285 486 /* move received packet into a new pbuf */
emilmont 5:698d868a5285 487 p = lpc_low_level_input(netif);
emilmont 5:698d868a5285 488 if (p == NULL)
emilmont 5:698d868a5285 489 return;
emilmont 4:d827a085afd9 490
emilmont 5:698d868a5285 491 /* points to packet payload, which starts with an Ethernet header */
emilmont 5:698d868a5285 492 ethhdr = p->payload;
emilmont 4:d827a085afd9 493
emilmont 5:698d868a5285 494 switch (htons(ethhdr->type)) {
emilmont 5:698d868a5285 495 case ETHTYPE_IP:
emilmont 5:698d868a5285 496 case ETHTYPE_ARP:
emilmont 4:d827a085afd9 497 #if PPPOE_SUPPORT
emilmont 5:698d868a5285 498 case ETHTYPE_PPPOEDISC:
emilmont 5:698d868a5285 499 case ETHTYPE_PPPOE:
emilmont 4:d827a085afd9 500 #endif /* PPPOE_SUPPORT */
emilmont 5:698d868a5285 501 /* full packet is sent to tcpip_thread for processing */
emilmont 5:698d868a5285 502 if (netif->input(p, netif) != ERR_OK) {
emilmont 5:698d868a5285 503 LWIP_DEBUGF(NETIF_DEBUG, ("lpc_enetif_input: IP input error\n"));
emilmont 5:698d868a5285 504 /* Free buffer */
emilmont 5:698d868a5285 505 pbuf_free(p);
emilmont 5:698d868a5285 506 }
emilmont 5:698d868a5285 507 break;
emilmont 4:d827a085afd9 508
emilmont 5:698d868a5285 509 default:
emilmont 5:698d868a5285 510 /* Return buffer */
emilmont 5:698d868a5285 511 pbuf_free(p);
emilmont 5:698d868a5285 512 break;
emilmont 5:698d868a5285 513 }
emilmont 4:d827a085afd9 514 }
emilmont 4:d827a085afd9 515
emilmont 4:d827a085afd9 516 /** \brief Determine if the passed address is usable for the ethernet
emilmont 4:d827a085afd9 517 * DMA controller.
emilmont 4:d827a085afd9 518 *
emilmont 4:d827a085afd9 519 * \param[in] addr Address of packet to check for DMA safe operation
emilmont 4:d827a085afd9 520 * \return 1 if the packet address is not safe, otherwise 0
emilmont 4:d827a085afd9 521 */
emilmont 4:d827a085afd9 522 static s32_t lpc_packet_addr_notsafe(void *addr) {
emilmont 5:698d868a5285 523 /* Check for legal address ranges */
bogdanm 8:5754e05385b8 524 #if defined(TARGET_LPC1768)
emilmont 5:698d868a5285 525 if ((((u32_t) addr >= 0x2007C000) && ((u32_t) addr < 0x20083FFF))) {
emilmont 5:698d868a5285 526 #elif defined(TARGET_LPC4088)
emilmont 5:698d868a5285 527 if ((((u32_t) addr >= 0x20000000) && ((u32_t) addr < 0x20007FFF))) {
bogdanm 8:5754e05385b8 528 #endif
emilmont 5:698d868a5285 529 return 0;
emilmont 5:698d868a5285 530 }
emilmont 5:698d868a5285 531 return 1;
emilmont 4:d827a085afd9 532 }
emilmont 4:d827a085afd9 533
emilmont 4:d827a085afd9 534 /** \brief Sets up the TX descriptor ring buffers.
emilmont 4:d827a085afd9 535 *
emilmont 4:d827a085afd9 536 * This function sets up the descriptor list used for transmit packets.
emilmont 4:d827a085afd9 537 *
emilmont 4:d827a085afd9 538 * \param[in] lpc_enetif Pointer to driver data structure
emilmont 4:d827a085afd9 539 */
emilmont 4:d827a085afd9 540 static err_t lpc_tx_setup(struct lpc_enetdata *lpc_enetif)
emilmont 4:d827a085afd9 541 {
emilmont 5:698d868a5285 542 s32_t idx;
emilmont 4:d827a085afd9 543
emilmont 5:698d868a5285 544 /* Build TX descriptors for local buffers */
emilmont 5:698d868a5285 545 for (idx = 0; idx < LPC_NUM_BUFF_TXDESCS; idx++) {
emilmont 5:698d868a5285 546 lpc_enetif->ptxd[idx].control = 0;
emilmont 5:698d868a5285 547 lpc_enetif->ptxs[idx].statusinfo = 0xFFFFFFFF;
emilmont 5:698d868a5285 548 }
emilmont 4:d827a085afd9 549
emilmont 5:698d868a5285 550 /* Setup pointers to TX structures */
emilmont 5:698d868a5285 551 LPC_EMAC->TxDescriptor = (u32_t) &lpc_enetif->ptxd[0];
emilmont 5:698d868a5285 552 LPC_EMAC->TxStatus = (u32_t) &lpc_enetif->ptxs[0];
emilmont 5:698d868a5285 553 LPC_EMAC->TxDescriptorNumber = LPC_NUM_BUFF_TXDESCS - 1;
emilmont 4:d827a085afd9 554
emilmont 5:698d868a5285 555 lpc_enetif->lpc_last_tx_idx = 0;
emilmont 4:d827a085afd9 556
emilmont 5:698d868a5285 557 return ERR_OK;
emilmont 4:d827a085afd9 558 }
emilmont 4:d827a085afd9 559
emilmont 4:d827a085afd9 560 /** \brief Free TX buffers that are complete
emilmont 4:d827a085afd9 561 *
emilmont 4:d827a085afd9 562 * \param[in] lpc_enetif Pointer to driver data structure
emilmont 4:d827a085afd9 563 * \param[in] cidx EMAC current descriptor consumer index
emilmont 4:d827a085afd9 564 */
emilmont 4:d827a085afd9 565 static void lpc_tx_reclaim_st(struct lpc_enetdata *lpc_enetif, u32_t cidx)
emilmont 4:d827a085afd9 566 {
emilmont 4:d827a085afd9 567 #if NO_SYS == 0
emilmont 5:698d868a5285 568 /* Get exclusive access */
emilmont 5:698d868a5285 569 sys_mutex_lock(&lpc_enetif->TXLockMutex);
emilmont 4:d827a085afd9 570 #endif
emilmont 4:d827a085afd9 571
emilmont 5:698d868a5285 572 while (cidx != lpc_enetif->lpc_last_tx_idx) {
emilmont 5:698d868a5285 573 if (lpc_enetif->txb[lpc_enetif->lpc_last_tx_idx] != NULL) {
emilmont 5:698d868a5285 574 LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
emilmont 5:698d868a5285 575 ("lpc_tx_reclaim_st: Freeing packet %p (index %d)\n",
emilmont 5:698d868a5285 576 lpc_enetif->txb[lpc_enetif->lpc_last_tx_idx],
emilmont 5:698d868a5285 577 lpc_enetif->lpc_last_tx_idx));
emilmont 5:698d868a5285 578 pbuf_free(lpc_enetif->txb[lpc_enetif->lpc_last_tx_idx]);
emilmont 5:698d868a5285 579 lpc_enetif->txb[lpc_enetif->lpc_last_tx_idx] = NULL;
emilmont 5:698d868a5285 580 }
emilmont 4:d827a085afd9 581
emilmont 4:d827a085afd9 582 #if NO_SYS == 0
emilmont 5:698d868a5285 583 osSemaphoreRelease(lpc_enetif->xTXDCountSem.id);
emilmont 4:d827a085afd9 584 #endif
emilmont 5:698d868a5285 585 lpc_enetif->lpc_last_tx_idx++;
emilmont 5:698d868a5285 586 if (lpc_enetif->lpc_last_tx_idx >= LPC_NUM_BUFF_TXDESCS)
emilmont 5:698d868a5285 587 lpc_enetif->lpc_last_tx_idx = 0;
emilmont 5:698d868a5285 588 }
emilmont 4:d827a085afd9 589
emilmont 4:d827a085afd9 590 #if NO_SYS == 0
emilmont 5:698d868a5285 591 /* Restore access */
emilmont 5:698d868a5285 592 sys_mutex_unlock(&lpc_enetif->TXLockMutex);
emilmont 4:d827a085afd9 593 #endif
emilmont 4:d827a085afd9 594 }
emilmont 4:d827a085afd9 595
emilmont 4:d827a085afd9 596 /** \brief User call for freeing TX buffers that are complete
emilmont 4:d827a085afd9 597 *
emilmont 4:d827a085afd9 598 * \param[in] netif the lwip network interface structure for this lpc_enetif
emilmont 4:d827a085afd9 599 */
emilmont 4:d827a085afd9 600 void lpc_tx_reclaim(struct netif *netif)
emilmont 4:d827a085afd9 601 {
emilmont 5:698d868a5285 602 lpc_tx_reclaim_st((struct lpc_enetdata *) netif->state,
emilmont 5:698d868a5285 603 LPC_EMAC->TxConsumeIndex);
emilmont 4:d827a085afd9 604 }
emilmont 4:d827a085afd9 605
emilmont 4:d827a085afd9 606 /** \brief Polls if an available TX descriptor is ready. Can be used to
emilmont 4:d827a085afd9 607 * determine if the low level transmit function will block.
emilmont 4:d827a085afd9 608 *
emilmont 4:d827a085afd9 609 * \param[in] netif the lwip network interface structure for this lpc_enetif
emilmont 4:d827a085afd9 610 * \return 0 if no descriptors are ready, otherwise the number of free descriptors (>0)
emilmont 4:d827a085afd9 611 */
emilmont 4:d827a085afd9 612 s32_t lpc_tx_ready(struct netif *netif)
emilmont 4:d827a085afd9 613 {
emilmont 5:698d868a5285 614 s32_t fb;
emilmont 5:698d868a5285 615 u32_t idx, cidx;
emilmont 4:d827a085afd9 616
emilmont 5:698d868a5285 617 cidx = LPC_EMAC->TxConsumeIndex;
emilmont 5:698d868a5285 618 idx = LPC_EMAC->TxProduceIndex;
emilmont 4:d827a085afd9 619
emilmont 5:698d868a5285 620 /* Determine number of free buffers */
emilmont 5:698d868a5285 621 if (idx == cidx)
emilmont 5:698d868a5285 622 fb = LPC_NUM_BUFF_TXDESCS;
emilmont 5:698d868a5285 623 else if (cidx > idx)
emilmont 5:698d868a5285 624 fb = (LPC_NUM_BUFF_TXDESCS - 1) -
emilmont 5:698d868a5285 625 ((idx + LPC_NUM_BUFF_TXDESCS) - cidx);
emilmont 5:698d868a5285 626 else
emilmont 5:698d868a5285 627 fb = (LPC_NUM_BUFF_TXDESCS - 1) - (cidx - idx);
emilmont 4:d827a085afd9 628
emilmont 5:698d868a5285 629 return fb;
emilmont 4:d827a085afd9 630 }
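
As a quick numeric check of the wrapped branch above (illustrative only): with LPC_NUM_BUFF_TXDESCS = 4, TxProduceIndex = 0 and TxConsumeIndex = 3, one descriptor is still in flight and fb = (4 - 1) - ((0 + 4) - 3) = 2 free descriptors; when the two indices are equal the ring is empty and the full LPC_NUM_BUFF_TXDESCS is reported.
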
emilmont 4:d827a085afd9 631
emilmont 4:d827a085afd9 632 /** \brief Low level output of a packet. Never call this from an
emilmont 4:d827a085afd9 633 * interrupt context, as it may block until TX descriptors
emilmont 4:d827a085afd9 634 * become available.
emilmont 4:d827a085afd9 635 *
emilmont 4:d827a085afd9 636 * \param[in] netif the lwip network interface structure for this lpc_enetif
emilmont 4:d827a085afd9 637 * \param[in] p the MAC packet to send (e.g. IP packet including MAC addresses and type)
emilmont 4:d827a085afd9 638 * \return ERR_OK if the packet could be sent or an err_t value if the packet couldn't be sent
emilmont 4:d827a085afd9 639 */
emilmont 4:d827a085afd9 640 static err_t lpc_low_level_output(struct netif *netif, struct pbuf *p)
emilmont 4:d827a085afd9 641 {
emilmont 5:698d868a5285 642 struct lpc_enetdata *lpc_enetif = netif->state;
emilmont 5:698d868a5285 643 struct pbuf *q;
emilmont 5:698d868a5285 644 u8_t *dst;
bogdanm 8:5754e05385b8 645 u32_t idx, notdmasafe = 0;
emilmont 5:698d868a5285 646 struct pbuf *np;
bogdanm 8:5754e05385b8 647 s32_t dn;
emilmont 4:d827a085afd9 648
emilmont 5:698d868a5285 649 /* Zero-copy TX buffers may be fragmented across multiple payload
emilmont 5:698d868a5285 650 chains. Determine the number of descriptors needed for the
emilmont 5:698d868a5285 651 transfer. The pbuf chaining can be a mess! */
bogdanm 8:5754e05385b8 652 dn = (s32_t) pbuf_clen(p);
emilmont 4:d827a085afd9 653
emilmont 5:698d868a5285 654 /* Test to make sure packet addresses are DMA safe. A DMA safe
emilmont 5:698d868a5285 655 address is one that uses external memory or peripheral RAM.
emilmont 5:698d868a5285 656 IRAM and FLASH are not safe! */
emilmont 5:698d868a5285 657 for (q = p; q != NULL; q = q->next)
emilmont 5:698d868a5285 658 notdmasafe += lpc_packet_addr_notsafe(q->payload);
emilmont 4:d827a085afd9 659
emilmont 4:d827a085afd9 660 #if LPC_TX_PBUF_BOUNCE_EN==1
emilmont 5:698d868a5285 661 /* If the pbuf is not DMA safe, a new bounce buffer (pbuf) will be
emilmont 5:698d868a5285 662 created that will be used instead. This requires a copy from the
emilmont 5:698d868a5285 663 non-safe DMA region to the new pbuf */
emilmont 5:698d868a5285 664 if (notdmasafe) {
emilmont 5:698d868a5285 665 /* Allocate a pbuf in DMA memory */
emilmont 5:698d868a5285 666 np = pbuf_alloc(PBUF_RAW, p->tot_len, PBUF_RAM);
emilmont 5:698d868a5285 667 if (np == NULL)
bogdanm 8:5754e05385b8 668 return ERR_MEM;
emilmont 4:d827a085afd9 669
emilmont 5:698d868a5285 670 /* This buffer better be contiguous! */
emilmont 5:698d868a5285 671 LWIP_ASSERT("lpc_low_level_output: New transmit pbuf is chained",
emilmont 5:698d868a5285 672 (pbuf_clen(np) == 1));
emilmont 4:d827a085afd9 673
emilmont 5:698d868a5285 674 /* Copy to DMA safe pbuf */
emilmont 5:698d868a5285 675 dst = (u8_t *) np->payload;
emilmont 5:698d868a5285 676 for(q = p; q != NULL; q = q->next) {
emilmont 5:698d868a5285 677 /* Copy the buffer to the descriptor's buffer */
emilmont 5:698d868a5285 678 MEMCPY(dst, (u8_t *) q->payload, q->len);
emilmont 5:698d868a5285 679 dst += q->len;
emilmont 5:698d868a5285 680 }
bogdanm 8:5754e05385b8 681 np->len = p->tot_len;
emilmont 4:d827a085afd9 682
emilmont 5:698d868a5285 683 LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
emilmont 5:698d868a5285 684 ("lpc_low_level_output: Switched to DMA safe buffer, old=%p, new=%p\n",
emilmont 5:698d868a5285 685 q, np));
emilmont 4:d827a085afd9 686
emilmont 5:698d868a5285 687 /* use the new buffer for descriptor queueing. The original pbuf will
emilmont 5:698d868a5285 688 be de-allocated outside this driver. */
emilmont 5:698d868a5285 689 p = np;
emilmont 5:698d868a5285 690 dn = 1;
emilmont 5:698d868a5285 691 }
emilmont 4:d827a085afd9 692 #else
emilmont 5:698d868a5285 693 if (notdmasafe)
emilmont 5:698d868a5285 694 LWIP_ASSERT("lpc_low_level_output: Not a DMA safe pbuf",
emilmont 5:698d868a5285 695 (notdmasafe == 0));
emilmont 4:d827a085afd9 696 #endif
emilmont 4:d827a085afd9 697
emilmont 5:698d868a5285 698 /* Wait until enough descriptors are available for the transfer. */
emilmont 5:698d868a5285 699 /* THIS WILL BLOCK UNTIL THERE ARE ENOUGH DESCRIPTORS AVAILABLE */
emilmont 5:698d868a5285 700 while (dn > lpc_tx_ready(netif))
emilmont 4:d827a085afd9 701 #if NO_SYS == 0
emilmont 5:698d868a5285 702 osSemaphoreWait(lpc_enetif->xTXDCountSem.id, osWaitForever);
emilmont 4:d827a085afd9 703 #else
emilmont 5:698d868a5285 704 osDelay(1);
emilmont 4:d827a085afd9 705 #endif
emilmont 4:d827a085afd9 706
emilmont 5:698d868a5285 707 /* Get free TX buffer index */
emilmont 5:698d868a5285 708 idx = LPC_EMAC->TxProduceIndex;
emilmont 4:d827a085afd9 709
emilmont 4:d827a085afd9 710 #if NO_SYS == 0
emilmont 5:698d868a5285 711 /* Get exclusive access */
emilmont 5:698d868a5285 712 sys_mutex_lock(&lpc_enetif->TXLockMutex);
emilmont 4:d827a085afd9 713 #endif
emilmont 4:d827a085afd9 714
emilmont 5:698d868a5285 715 /* Prevent LWIP from de-allocating this pbuf. The driver will
emilmont 5:698d868a5285 716 free it once it's been transmitted. */
emilmont 5:698d868a5285 717 if (!notdmasafe)
emilmont 5:698d868a5285 718 pbuf_ref(p);
emilmont 4:d827a085afd9 719
emilmont 5:698d868a5285 720 /* Setup transfers */
emilmont 5:698d868a5285 721 q = p;
emilmont 5:698d868a5285 722 while (dn > 0) {
emilmont 5:698d868a5285 723 dn--;
emilmont 4:d827a085afd9 724
emilmont 5:698d868a5285 725 /* Only save pointer to free on last descriptor */
emilmont 5:698d868a5285 726 if (dn == 0) {
emilmont 5:698d868a5285 727 /* Save size of packet and signal it's ready */
emilmont 5:698d868a5285 728 lpc_enetif->ptxd[idx].control = (q->len - 1) | EMAC_TCTRL_INT |
emilmont 5:698d868a5285 729 EMAC_TCTRL_LAST;
emilmont 4:d827a085afd9 730 lpc_enetif->txb[idx] = p;
emilmont 5:698d868a5285 731 }
emilmont 5:698d868a5285 732 else {
emilmont 5:698d868a5285 733 /* Save size of packet, descriptor is not last */
emilmont 5:698d868a5285 734 lpc_enetif->ptxd[idx].control = (q->len - 1) | EMAC_TCTRL_INT;
emilmont 5:698d868a5285 735 lpc_enetif->txb[idx] = NULL;
emilmont 5:698d868a5285 736 }
emilmont 4:d827a085afd9 737
emilmont 5:698d868a5285 738 LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
emilmont 5:698d868a5285 739 ("lpc_low_level_output: pbuf packet(%p) sent, chain#=%d,"
emilmont 5:698d868a5285 740 " size = %d (index=%d)\n", q->payload, dn, q->len, idx));
emilmont 4:d827a085afd9 741
emilmont 5:698d868a5285 742 lpc_enetif->ptxd[idx].packet = (u32_t) q->payload;
emilmont 4:d827a085afd9 743
emilmont 5:698d868a5285 744 q = q->next;
emilmont 4:d827a085afd9 745
emilmont 5:698d868a5285 746 idx++;
emilmont 5:698d868a5285 747 if (idx >= LPC_NUM_BUFF_TXDESCS)
emilmont 5:698d868a5285 748 idx = 0;
emilmont 5:698d868a5285 749 }
emilmont 4:d827a085afd9 750
emilmont 5:698d868a5285 751 LPC_EMAC->TxProduceIndex = idx;
emilmont 4:d827a085afd9 752
emilmont 5:698d868a5285 753 LINK_STATS_INC(link.xmit);
emilmont 4:d827a085afd9 754
emilmont 4:d827a085afd9 755 #if NO_SYS == 0
emilmont 5:698d868a5285 756 /* Restore access */
emilmont 5:698d868a5285 757 sys_mutex_unlock(&lpc_enetif->TXLockMutex);
emilmont 4:d827a085afd9 758 #endif
emilmont 4:d827a085afd9 759
emilmont 5:698d868a5285 760 return ERR_OK;
emilmont 4:d827a085afd9 761 }
emilmont 4:d827a085afd9 762
emilmont 4:d827a085afd9 763 /** \brief LPC EMAC interrupt handler.
emilmont 4:d827a085afd9 764 *
emilmont 4:d827a085afd9 765 * This function handles the transmit, receive, and error interrupt of
emilmont 4:d827a085afd9 766 * the LPC177x_8x. This is meant to be used when NO_SYS=0.
emilmont 4:d827a085afd9 767 */
emilmont 4:d827a085afd9 768 void ENET_IRQHandler(void)
emilmont 4:d827a085afd9 769 {
emilmont 4:d827a085afd9 770 #if NO_SYS == 1
emilmont 5:698d868a5285 771 /* Interrupts are not used without an RTOS */
emilmont 4:d827a085afd9 772 NVIC_DisableIRQ(ENET_IRQn);
emilmont 4:d827a085afd9 773 #else
emilmont 5:698d868a5285 774 uint32_t ints;
emilmont 4:d827a085afd9 775
emilmont 5:698d868a5285 776 /* Interrupts are of 2 groups - transmit or receive. Based on the
emilmont 5:698d868a5285 777 interrupt, kick off the receive or transmit (cleanup) task */
emilmont 4:d827a085afd9 778
emilmont 5:698d868a5285 779 /* Get pending interrupts */
emilmont 5:698d868a5285 780 ints = LPC_EMAC->IntStatus;
emilmont 4:d827a085afd9 781
emilmont 5:698d868a5285 782 if (ints & RXINTGROUP) {
emilmont 4:d827a085afd9 783 /* RX group interrupt(s): Give semaphore to wakeup RX receive task.*/
emilmont 4:d827a085afd9 784 sys_sem_signal(&lpc_enetdata.RxSem);
emilmont 4:d827a085afd9 785 }
bogdanm 8:5754e05385b8 786
emilmont 4:d827a085afd9 787 if (ints & TXINTGROUP) {
emilmont 4:d827a085afd9 788 /* TX group interrupt(s): Give semaphore to wakeup TX cleanup task. */
emilmont 4:d827a085afd9 789 sys_sem_signal(&lpc_enetdata.TxCleanSem);
emilmont 4:d827a085afd9 790 }
bogdanm 8:5754e05385b8 791
emilmont 5:698d868a5285 792 /* Clear pending interrupts */
emilmont 5:698d868a5285 793 LPC_EMAC->IntClear = ints;
emilmont 4:d827a085afd9 794 #endif
emilmont 4:d827a085afd9 795 }
emilmont 4:d827a085afd9 796
emilmont 4:d827a085afd9 797 #if NO_SYS == 0
emilmont 4:d827a085afd9 798 /** \brief Packet reception task
emilmont 4:d827a085afd9 799 *
emilmont 4:d827a085afd9 800 * This task is called when a packet is received. It will
emilmont 4:d827a085afd9 801 * pass the packet to the LWIP core.
emilmont 4:d827a085afd9 802 *
emilmont 4:d827a085afd9 803 * \param[in] pvParameters Not used yet
emilmont 4:d827a085afd9 804 */
emilmont 4:d827a085afd9 805 static void packet_rx(void* pvParameters) {
emilmont 4:d827a085afd9 806 struct lpc_enetdata *lpc_enetif = pvParameters;
bogdanm 8:5754e05385b8 807
emilmont 4:d827a085afd9 808 while (1) {
emilmont 4:d827a085afd9 809 /* Wait for receive task to wakeup */
emilmont 4:d827a085afd9 810 sys_arch_sem_wait(&lpc_enetif->RxSem, 0);
bogdanm 8:5754e05385b8 811
emilmont 4:d827a085afd9 812 /* Process packets until all empty */
emilmont 4:d827a085afd9 813 while (LPC_EMAC->RxConsumeIndex != LPC_EMAC->RxProduceIndex)
emilmont 4:d827a085afd9 814 lpc_enetif_input(lpc_enetif->netif);
emilmont 4:d827a085afd9 815 }
emilmont 4:d827a085afd9 816 }
emilmont 4:d827a085afd9 817
emilmont 4:d827a085afd9 818 /** \brief Transmit cleanup task
emilmont 4:d827a085afd9 819 *
emilmont 4:d827a085afd9 820 * This task is called when a transmit interrupt occurs and
emilmont 4:d827a085afd9 821 * reclaims the pbuf and descriptor used for the packet once
emilmont 4:d827a085afd9 822 * the packet has been transferred.
emilmont 4:d827a085afd9 823 *
emilmont 4:d827a085afd9 824 * \param[in] pvParameters Not used yet
emilmont 4:d827a085afd9 825 */
emilmont 4:d827a085afd9 826 static void packet_tx(void* pvParameters) {
emilmont 4:d827a085afd9 827 struct lpc_enetdata *lpc_enetif = pvParameters;
emilmont 4:d827a085afd9 828 s32_t idx;
bogdanm 8:5754e05385b8 829
emilmont 4:d827a085afd9 830 while (1) {
emilmont 4:d827a085afd9 831 /* Wait for transmit cleanup task to wakeup */
emilmont 4:d827a085afd9 832 sys_arch_sem_wait(&lpc_enetif->TxCleanSem, 0);
bogdanm 8:5754e05385b8 833
emilmont 4:d827a085afd9 834 /* Error handling for TX underruns. This should never happen unless
emilmont 4:d827a085afd9 835 something is holding the bus or the clocks are going too slow. It
emilmont 4:d827a085afd9 836 can probably be safely removed. */
emilmont 4:d827a085afd9 837 if (LPC_EMAC->IntStatus & EMAC_INT_TX_UNDERRUN) {
emilmont 4:d827a085afd9 838 LINK_STATS_INC(link.err);
emilmont 4:d827a085afd9 839 LINK_STATS_INC(link.drop);
bogdanm 8:5754e05385b8 840
emilmont 4:d827a085afd9 841 #if NO_SYS == 0
emilmont 4:d827a085afd9 842 /* Get exclusive access */
emilmont 4:d827a085afd9 843 sys_mutex_lock(&lpc_enetif->TXLockMutex);
emilmont 4:d827a085afd9 844 #endif
emilmont 4:d827a085afd9 845 /* Reset the TX side */
emilmont 4:d827a085afd9 846 LPC_EMAC->MAC1 |= EMAC_MAC1_RES_TX;
emilmont 4:d827a085afd9 847 LPC_EMAC->IntClear = EMAC_INT_TX_UNDERRUN;
bogdanm 8:5754e05385b8 848
emilmont 4:d827a085afd9 849 /* De-allocate all queued TX pbufs */
emilmont 6:59b01b9349d5 850 for (idx = 0; idx < LPC_NUM_BUFF_TXDESCS; idx++) {
emilmont 4:d827a085afd9 851 if (lpc_enetif->txb[idx] != NULL) {
emilmont 4:d827a085afd9 852 pbuf_free(lpc_enetif->txb[idx]);
emilmont 4:d827a085afd9 853 lpc_enetif->txb[idx] = NULL;
emilmont 4:d827a085afd9 854 }
emilmont 4:d827a085afd9 855 }
bogdanm 8:5754e05385b8 856
emilmont 4:d827a085afd9 857 #if NO_SYS == 0
emilmont 4:d827a085afd9 858 /* Restore access */
emilmont 4:d827a085afd9 859 sys_mutex_unlock(&lpc_enetif->TXLockMutex);
emilmont 4:d827a085afd9 860 #endif
emilmont 4:d827a085afd9 861 /* Start TX side again */
emilmont 4:d827a085afd9 862 lpc_tx_setup(lpc_enetif);
emilmont 4:d827a085afd9 863 } else {
emilmont 4:d827a085afd9 864 /* Free TX buffers that are done sending */
emilmont 4:d827a085afd9 865 lpc_tx_reclaim(lpc_enetdata.netif);
emilmont 4:d827a085afd9 866 }
emilmont 4:d827a085afd9 867 }
emilmont 4:d827a085afd9 868 }
emilmont 4:d827a085afd9 869 #endif
emilmont 4:d827a085afd9 870
emilmont 4:d827a085afd9 871 /** \brief Low level init of the MAC and PHY.
emilmont 4:d827a085afd9 872 *
emilmont 4:d827a085afd9 873 * \param[in] netif Pointer to LWIP netif structure
emilmont 4:d827a085afd9 874 */
emilmont 4:d827a085afd9 875 static err_t low_level_init(struct netif *netif)
emilmont 4:d827a085afd9 876 {
emilmont 5:698d868a5285 877 struct lpc_enetdata *lpc_enetif = netif->state;
emilmont 5:698d868a5285 878 err_t err = ERR_OK;
emilmont 4:d827a085afd9 879
emilmont 5:698d868a5285 880 /* Enable MII clocking */
emilmont 5:698d868a5285 881 LPC_SC->PCONP |= CLKPWR_PCONP_PCENET;
bogdanm 8:5754e05385b8 882
bogdanm 8:5754e05385b8 883 #if defined(TARGET_LPC1768)
emilmont 5:698d868a5285 884 LPC_PINCON->PINSEL2 = 0x50150105; /* Enable P1 Ethernet Pins. */
emilmont 5:698d868a5285 885 LPC_PINCON->PINSEL3 = (LPC_PINCON->PINSEL3 & ~0x0000000F) | 0x00000005;
emilmont 5:698d868a5285 886 #elif defined(TARGET_LPC4088)
emilmont 5:698d868a5285 887 LPC_IOCON->P1_0 &= ~0x07; /* ENET I/O config */
emilmont 5:698d868a5285 888 LPC_IOCON->P1_0 |= 0x01; /* ENET_TXD0 */
emilmont 5:698d868a5285 889 LPC_IOCON->P1_1 &= ~0x07;
emilmont 5:698d868a5285 890 LPC_IOCON->P1_1 |= 0x01; /* ENET_TXD1 */
emilmont 5:698d868a5285 891 LPC_IOCON->P1_4 &= ~0x07;
emilmont 5:698d868a5285 892 LPC_IOCON->P1_4 |= 0x01; /* ENET_TXEN */
emilmont 5:698d868a5285 893 LPC_IOCON->P1_8 &= ~0x07;
emilmont 5:698d868a5285 894 LPC_IOCON->P1_8 |= 0x01; /* ENET_CRS */
emilmont 5:698d868a5285 895 LPC_IOCON->P1_9 &= ~0x07;
emilmont 5:698d868a5285 896 LPC_IOCON->P1_9 |= 0x01; /* ENET_RXD0 */
emilmont 5:698d868a5285 897 LPC_IOCON->P1_10 &= ~0x07;
emilmont 5:698d868a5285 898 LPC_IOCON->P1_10 |= 0x01; /* ENET_RXD1 */
emilmont 5:698d868a5285 899 LPC_IOCON->P1_14 &= ~0x07;
emilmont 5:698d868a5285 900 LPC_IOCON->P1_14 |= 0x01; /* ENET_RX_ER */
emilmont 5:698d868a5285 901 LPC_IOCON->P1_15 &= ~0x07;
emilmont 5:698d868a5285 902 LPC_IOCON->P1_15 |= 0x01; /* ENET_REF_CLK */
emilmont 5:698d868a5285 903 LPC_IOCON->P1_16 &= ~0x07; /* ENET/PHY I/O config */
emilmont 5:698d868a5285 904 LPC_IOCON->P1_16 |= 0x01; /* ENET_MDC */
emilmont 5:698d868a5285 905 LPC_IOCON->P1_17 &= ~0x07;
emilmont 5:698d868a5285 906 LPC_IOCON->P1_17 |= 0x01; /* ENET_MDIO */
bogdanm 8:5754e05385b8 907 #endif
bogdanm 8:5754e05385b8 908
emilmont 5:698d868a5285 909 /* Reset all MAC logic */
emilmont 5:698d868a5285 910 LPC_EMAC->MAC1 = EMAC_MAC1_RES_TX | EMAC_MAC1_RES_MCS_TX |
emilmont 5:698d868a5285 911 EMAC_MAC1_RES_RX | EMAC_MAC1_RES_MCS_RX | EMAC_MAC1_SIM_RES |
emilmont 5:698d868a5285 912 EMAC_MAC1_SOFT_RES;
emilmont 5:698d868a5285 913 LPC_EMAC->Command = EMAC_CR_REG_RES | EMAC_CR_TX_RES | EMAC_CR_RX_RES |
emilmont 5:698d868a5285 914 EMAC_CR_PASS_RUNT_FRM;
emilmont 5:698d868a5285 915 osDelay(10);
bogdanm 8:5754e05385b8 916
emilmont 5:698d868a5285 917 /* Initial MAC initialization */
emilmont 5:698d868a5285 918 LPC_EMAC->MAC1 = EMAC_MAC1_PASS_ALL;
emilmont 5:698d868a5285 919 LPC_EMAC->MAC2 = EMAC_MAC2_CRC_EN | EMAC_MAC2_PAD_EN |
emilmont 5:698d868a5285 920 EMAC_MAC2_VLAN_PAD_EN;
emilmont 5:698d868a5285 921 LPC_EMAC->MAXF = EMAC_ETH_MAX_FLEN;
emilmont 4:d827a085afd9 922
emilmont 5:698d868a5285 923 /* Set RMII management clock rate to lowest speed */
emilmont 5:698d868a5285 924 LPC_EMAC->MCFG = EMAC_MCFG_CLK_SEL(11) | EMAC_MCFG_RES_MII;
emilmont 5:698d868a5285 925 LPC_EMAC->MCFG &= ~EMAC_MCFG_RES_MII;
emilmont 4:d827a085afd9 926
emilmont 5:698d868a5285 927 /* Maximum number of retries, 0x37 collision window, gap */
emilmont 5:698d868a5285 928 LPC_EMAC->CLRT = EMAC_CLRT_DEF;
emilmont 5:698d868a5285 929 LPC_EMAC->IPGR = EMAC_IPGR_P1_DEF | EMAC_IPGR_P2_DEF;
emilmont 4:d827a085afd9 930
emilmont 4:d827a085afd9 931 #if LPC_EMAC_RMII
emilmont 5:698d868a5285 932 /* RMII setup */
emilmont 5:698d868a5285 933 LPC_EMAC->Command = EMAC_CR_PASS_RUNT_FRM | EMAC_CR_RMII;
emilmont 4:d827a085afd9 934 #else
emilmont 5:698d868a5285 935 /* MII setup */
emilmont 5:698d868a5285 936 LPC_EMAC->CR = EMAC_CR_PASS_RUNT_FRM;
emilmont 4:d827a085afd9 937 #endif
emilmont 4:d827a085afd9 938
emilmont 5:698d868a5285 939 /* Initialize the PHY and reset */
emilmont 4:d827a085afd9 940 err = lpc_phy_init(netif, LPC_EMAC_RMII);
emilmont 5:698d868a5285 941 if (err != ERR_OK)
emilmont 5:698d868a5285 942 return err;
emilmont 4:d827a085afd9 943
emilmont 5:698d868a5285 944 /* Save station address */
emilmont 5:698d868a5285 945 LPC_EMAC->SA2 = (u32_t) netif->hwaddr[0] |
emilmont 5:698d868a5285 946 (((u32_t) netif->hwaddr[1]) << 8);
emilmont 5:698d868a5285 947 LPC_EMAC->SA1 = (u32_t) netif->hwaddr[2] |
emilmont 5:698d868a5285 948 (((u32_t) netif->hwaddr[3]) << 8);
emilmont 5:698d868a5285 949 LPC_EMAC->SA0 = (u32_t) netif->hwaddr[4] |
emilmont 5:698d868a5285 950 (((u32_t) netif->hwaddr[5]) << 8);
emilmont 4:d827a085afd9 951
emilmont 5:698d868a5285 952 /* Setup transmit and receive descriptors */
emilmont 5:698d868a5285 953 if (lpc_tx_setup(lpc_enetif) != ERR_OK)
emilmont 5:698d868a5285 954 return ERR_BUF;
emilmont 5:698d868a5285 955 if (lpc_rx_setup(lpc_enetif) != ERR_OK)
emilmont 5:698d868a5285 956 return ERR_BUF;
emilmont 4:d827a085afd9 957
emilmont 5:698d868a5285 958 /* Enable packet reception */
emilmont 4:d827a085afd9 959 #if IP_SOF_BROADCAST_RECV
emilmont 5:698d868a5285 960 LPC_EMAC->RxFilterCtrl = EMAC_RFC_PERFECT_EN | EMAC_RFC_BCAST_EN | EMAC_RFC_MCAST_EN;
emilmont 4:d827a085afd9 961 #else
emilmont 5:698d868a5285 962 LPC_EMAC->RxFilterCtrl = EMAC_RFC_PERFECT_EN;
emilmont 4:d827a085afd9 963 #endif
emilmont 4:d827a085afd9 964
emilmont 5:698d868a5285 965 /* Clear and enable rx/tx interrupts */
emilmont 5:698d868a5285 966 LPC_EMAC->IntClear = 0xFFFF;
emilmont 5:698d868a5285 967 LPC_EMAC->IntEnable = RXINTGROUP | TXINTGROUP;
emilmont 4:d827a085afd9 968
emilmont 5:698d868a5285 969 /* Enable RX and TX */
emilmont 5:698d868a5285 970 LPC_EMAC->Command |= EMAC_CR_RX_EN | EMAC_CR_TX_EN;
emilmont 5:698d868a5285 971 LPC_EMAC->MAC1 |= EMAC_MAC1_REC_EN;
emilmont 4:d827a085afd9 972
emilmont 5:698d868a5285 973 return err;
emilmont 4:d827a085afd9 974 }
emilmont 4:d827a085afd9 975
emilmont 4:d827a085afd9 976 /* This function provides a method for the PHY to set up the EMAC
emilmont 4:d827a085afd9 977 for the PHY-negotiated duplex mode */
emilmont 4:d827a085afd9 978 void lpc_emac_set_duplex(int full_duplex)
emilmont 4:d827a085afd9 979 {
emilmont 5:698d868a5285 980 if (full_duplex) {
emilmont 5:698d868a5285 981 LPC_EMAC->MAC2 |= EMAC_MAC2_FULL_DUP;
emilmont 5:698d868a5285 982 LPC_EMAC->Command |= EMAC_CR_FULL_DUP;
emilmont 5:698d868a5285 983 LPC_EMAC->IPGT = EMAC_IPGT_FULL_DUP;
emilmont 5:698d868a5285 984 } else {
emilmont 5:698d868a5285 985 LPC_EMAC->MAC2 &= ~EMAC_MAC2_FULL_DUP;
emilmont 5:698d868a5285 986 LPC_EMAC->Command &= ~EMAC_CR_FULL_DUP;
emilmont 5:698d868a5285 987 LPC_EMAC->IPGT = EMAC_IPGT_HALF_DUP;
emilmont 5:698d868a5285 988 }
emilmont 4:d827a085afd9 989 }
emilmont 4:d827a085afd9 990
emilmont 4:d827a085afd9 991 /* This function provides a method for the PHY to set up the EMAC
emilmont 4:d827a085afd9 992 for the PHY-negotiated bit rate */
emilmont 4:d827a085afd9 993 void lpc_emac_set_speed(int mbs_100)
emilmont 4:d827a085afd9 994 {
emilmont 5:698d868a5285 995 if (mbs_100)
emilmont 5:698d868a5285 996 LPC_EMAC->SUPP = EMAC_SUPP_SPEED;
emilmont 5:698d868a5285 997 else
emilmont 5:698d868a5285 998 LPC_EMAC->SUPP = 0;
emilmont 4:d827a085afd9 999 }
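
A sketch of how a PHY status routine might apply the negotiated link parameters through the two callbacks above; the PHY_STS_* masks are placeholders for whatever status bits the real PHY driver behind lpc_phy.h decodes:

    void example_apply_link(u32_t phy_sts)
    {
        lpc_emac_set_speed((phy_sts & PHY_STS_SPEED_100) != 0);    /* 1 = 100 Mbps    */
        lpc_emac_set_duplex((phy_sts & PHY_STS_FULL_DUPLEX) != 0); /* 1 = full duplex */
    }
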
emilmont 4:d827a085afd9 1000
emilmont 4:d827a085afd9 1001 /**
emilmont 4:d827a085afd9 1002 * This function is the ethernet packet send function. It calls
emilmont 4:d827a085afd9 1003 * etharp_output after checking link status.
emilmont 4:d827a085afd9 1004 *
emilmont 4:d827a085afd9 1005 * \param[in] netif the lwip network interface structure for this lpc_enetif
emilmont 4:d827a085afd9 1006 * \param[in] q Pointer to pbuf to send
bogdanm 8:5754e05385b8 1007 * \param[in] ipaddr IP address
emilmont 4:d827a085afd9 1008 * \return ERR_OK or error code
emilmont 4:d827a085afd9 1009 */
emilmont 4:d827a085afd9 1010 err_t lpc_etharp_output(struct netif *netif, struct pbuf *q,
emilmont 5:698d868a5285 1011 ip_addr_t *ipaddr)
emilmont 4:d827a085afd9 1012 {
emilmont 5:698d868a5285 1013 /* Only send packet if link is up */
emilmont 5:698d868a5285 1014 if (netif->flags & NETIF_FLAG_LINK_UP)
emilmont 5:698d868a5285 1015 return etharp_output(netif, q, ipaddr);
emilmont 4:d827a085afd9 1016
emilmont 5:698d868a5285 1017 return ERR_CONN;
emilmont 4:d827a085afd9 1018 }
emilmont 4:d827a085afd9 1019
emilmont 4:d827a085afd9 1020 #if NO_SYS == 0
emilmont 4:d827a085afd9 1021 /* periodic PHY status update */
emilmont 4:d827a085afd9 1022 void phy_update(void const *nif) {
emilmont 4:d827a085afd9 1023 lpc_phy_sts_sm((struct netif*)nif);
emilmont 4:d827a085afd9 1024 }
emilmont 4:d827a085afd9 1025 osTimerDef(phy_update, phy_update);
emilmont 4:d827a085afd9 1026 #endif
emilmont 4:d827a085afd9 1027
emilmont 4:d827a085afd9 1028 /**
emilmont 4:d827a085afd9 1029 * Should be called at the beginning of the program to set up the
emilmont 4:d827a085afd9 1030 * network interface.
emilmont 4:d827a085afd9 1031 *
emilmont 4:d827a085afd9 1032 * This function should be passed as a parameter to netif_add().
emilmont 4:d827a085afd9 1033 *
emilmont 4:d827a085afd9 1034 * @param[in] netif the lwip network interface structure for this lpc_enetif
emilmont 4:d827a085afd9 1035 * @return ERR_OK if the interface is initialized
emilmont 4:d827a085afd9 1036 * ERR_MEM if private data couldn't be allocated
emilmont 4:d827a085afd9 1037 * any other err_t on error
emilmont 4:d827a085afd9 1038 */
emilmont 4:d827a085afd9 1039 err_t lpc_enetif_init(struct netif *netif)
emilmont 4:d827a085afd9 1040 {
emilmont 5:698d868a5285 1041 err_t err;
emilmont 4:d827a085afd9 1042
emilmont 5:698d868a5285 1043 LWIP_ASSERT("netif != NULL", (netif != NULL));
bogdanm 8:5754e05385b8 1044
emilmont 5:698d868a5285 1045 lpc_enetdata.netif = netif;
emilmont 4:d827a085afd9 1046
emilmont 5:698d868a5285 1047 /* set MAC hardware address */
bogdanm 8:5754e05385b8 1048 #if (MBED_MAC_ADDRESS_SUM != MBED_MAC_ADDR_INTERFACE)
bogdanm 8:5754e05385b8 1049 netif->hwaddr[0] = MBED_MAC_ADDR_0;
bogdanm 8:5754e05385b8 1050 netif->hwaddr[1] = MBED_MAC_ADDR_1;
bogdanm 8:5754e05385b8 1051 netif->hwaddr[2] = MBED_MAC_ADDR_2;
bogdanm 8:5754e05385b8 1052 netif->hwaddr[3] = MBED_MAC_ADDR_3;
bogdanm 8:5754e05385b8 1053 netif->hwaddr[4] = MBED_MAC_ADDR_4;
bogdanm 8:5754e05385b8 1054 netif->hwaddr[5] = MBED_MAC_ADDR_5;
bogdanm 8:5754e05385b8 1055 #else
emilmont 5:698d868a5285 1056 mbed_mac_address((char *)netif->hwaddr);
bogdanm 8:5754e05385b8 1057 #endif
emilmont 5:698d868a5285 1058 netif->hwaddr_len = ETHARP_HWADDR_LEN;
emilmont 4:d827a085afd9 1059
emilmont 5:698d868a5285 1060 /* maximum transfer unit */
emilmont 5:698d868a5285 1061 netif->mtu = 1500;
emilmont 4:d827a085afd9 1062
emilmont 5:698d868a5285 1063 /* device capabilities */
emilmont 5:698d868a5285 1064 netif->flags = NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET | NETIF_FLAG_IGMP;
emilmont 4:d827a085afd9 1065
emilmont 5:698d868a5285 1066 /* Initialize the hardware */
emilmont 5:698d868a5285 1067 netif->state = &lpc_enetdata;
emilmont 5:698d868a5285 1068 err = low_level_init(netif);
emilmont 5:698d868a5285 1069 if (err != ERR_OK)
emilmont 5:698d868a5285 1070 return err;
emilmont 4:d827a085afd9 1071
emilmont 4:d827a085afd9 1072 #if LWIP_NETIF_HOSTNAME
emilmont 5:698d868a5285 1073 /* Initialize interface hostname */
emilmont 5:698d868a5285 1074 netif->hostname = "lwiplpc";
emilmont 4:d827a085afd9 1075 #endif /* LWIP_NETIF_HOSTNAME */
emilmont 4:d827a085afd9 1076
emilmont 5:698d868a5285 1077 netif->name[0] = 'e';
emilmont 5:698d868a5285 1078 netif->name[1] = 'n';
emilmont 4:d827a085afd9 1079
emilmont 5:698d868a5285 1080 netif->output = lpc_etharp_output;
emilmont 5:698d868a5285 1081 netif->linkoutput = lpc_low_level_output;
emilmont 4:d827a085afd9 1082
emilmont 4:d827a085afd9 1083 /* CMSIS-RTOS, start tasks */
emilmont 4:d827a085afd9 1084 #if NO_SYS == 0
emilmont 4:d827a085afd9 1085 #ifdef CMSIS_OS_RTX
emilmont 4:d827a085afd9 1086 memset(lpc_enetdata.xTXDCountSem.data, 0, sizeof(lpc_enetdata.xTXDCountSem.data));
emilmont 4:d827a085afd9 1087 lpc_enetdata.xTXDCountSem.def.semaphore = lpc_enetdata.xTXDCountSem.data;
emilmont 4:d827a085afd9 1088 #endif
emilmont 4:d827a085afd9 1089 lpc_enetdata.xTXDCountSem.id = osSemaphoreCreate(&lpc_enetdata.xTXDCountSem.def, LPC_NUM_BUFF_TXDESCS);
emilmont 5:698d868a5285 1090 LWIP_ASSERT("xTXDCountSem creation error", (lpc_enetdata.xTXDCountSem.id != NULL));
emilmont 4:d827a085afd9 1091
emilmont 5:698d868a5285 1092 err = sys_mutex_new(&lpc_enetdata.TXLockMutex);
emilmont 5:698d868a5285 1093 LWIP_ASSERT("TXLockMutex creation error", (err == ERR_OK));
emilmont 4:d827a085afd9 1094
emilmont 5:698d868a5285 1095 /* Packet receive task */
emilmont 5:698d868a5285 1096 err = sys_sem_new(&lpc_enetdata.RxSem, 0);
emilmont 5:698d868a5285 1097 LWIP_ASSERT("RxSem creation error", (err == ERR_OK));
emilmont 5:698d868a5285 1098 sys_thread_new("receive_thread", packet_rx, netif->state, DEFAULT_THREAD_STACKSIZE, RX_PRIORITY);
emilmont 4:d827a085afd9 1099
emilmont 5:698d868a5285 1100 /* Transmit cleanup task */
emilmont 5:698d868a5285 1101 err = sys_sem_new(&lpc_enetdata.TxCleanSem, 0);
emilmont 5:698d868a5285 1102 LWIP_ASSERT("TxCleanSem creation error", (err == ERR_OK));
emilmont 5:698d868a5285 1103 sys_thread_new("txclean_thread", packet_tx, netif->state, DEFAULT_THREAD_STACKSIZE, TX_PRIORITY);
bogdanm 8:5754e05385b8 1104
emilmont 5:698d868a5285 1105 /* periodic PHY status update */
emilmont 5:698d868a5285 1106 osTimerId phy_timer = osTimerCreate(osTimer(phy_update), osTimerPeriodic, (void *)netif);
emilmont 5:698d868a5285 1107 osTimerStart(phy_timer, 250);
emilmont 4:d827a085afd9 1108 #endif
bogdanm 8:5754e05385b8 1109
emilmont 4:d827a085afd9 1110 return ERR_OK;
emilmont 4:d827a085afd9 1111 }
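
A minimal bring-up sketch, assuming NO_SYS == 0, that tcpip_init() has already run, and static addressing (the IP values are placeholders), showing how lpc_enetif_init is handed to netif_add() as described above:

    struct netif lpc_netif;
    ip_addr_t ipaddr, netmask, gw;

    IP4_ADDR(&ipaddr, 192, 168, 0, 10);
    IP4_ADDR(&netmask, 255, 255, 255, 0);
    IP4_ADDR(&gw, 192, 168, 0, 1);

    /* lpc_enetif_init wires up the driver state, MAC address and low-level I/O;
       tcpip_input passes received frames to the tcpip_thread */
    netif_add(&lpc_netif, &ipaddr, &netmask, &gw, NULL, lpc_enetif_init, tcpip_input);
    netif_set_default(&lpc_netif);
    netif_set_up(&lpc_netif);
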
emilmont 4:d827a085afd9 1112
emilmont 4:d827a085afd9 1113 /**
emilmont 4:d827a085afd9 1114 * @}
emilmont 4:d827a085afd9 1115 */
emilmont 4:d827a085afd9 1116
emilmont 4:d827a085afd9 1117 /* --------------------------------- End Of File ------------------------------ */