mbed library sources. Supersedes mbed-src.

Dependents:   Nucleo_Hello_Encoder BLE_iBeaconScan AM1805_DEMO DISCO-F429ZI_ExportTemplate1 ...

Committer: Anna Bridge
Date: Fri Apr 28 14:04:18 2017 +0100
Revision: 163:74e0ce7f98e8
Parent: 150:02e0a0aed4ec
Child: 167:e84263d55307
This updates the lib to the mbed lib v141

Who changed what in which revision?

User | Revision | Line number | New contents of line
<> 144:ef7eb2e8f9f7 1 /***************************************************************************//**
<> 144:ef7eb2e8f9f7 2 * @file spi_api.c
<> 144:ef7eb2e8f9f7 3 *******************************************************************************
<> 144:ef7eb2e8f9f7 4 * @section License
<> 144:ef7eb2e8f9f7 5 * <b>(C) Copyright 2015 Silicon Labs, http://www.silabs.com</b>
<> 144:ef7eb2e8f9f7 6 *******************************************************************************
<> 144:ef7eb2e8f9f7 7 *
<> 144:ef7eb2e8f9f7 8 * SPDX-License-Identifier: Apache-2.0
<> 144:ef7eb2e8f9f7 9 *
<> 144:ef7eb2e8f9f7 10 * Licensed under the Apache License, Version 2.0 (the "License"); you may
<> 144:ef7eb2e8f9f7 11 * not use this file except in compliance with the License.
<> 144:ef7eb2e8f9f7 12 * You may obtain a copy of the License at
<> 144:ef7eb2e8f9f7 13 *
<> 144:ef7eb2e8f9f7 14 * http://www.apache.org/licenses/LICENSE-2.0
<> 144:ef7eb2e8f9f7 15 *
<> 144:ef7eb2e8f9f7 16 * Unless required by applicable law or agreed to in writing, software
<> 144:ef7eb2e8f9f7 17 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
<> 144:ef7eb2e8f9f7 18 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
<> 144:ef7eb2e8f9f7 19 * See the License for the specific language governing permissions and
<> 144:ef7eb2e8f9f7 20 * limitations under the License.
<> 144:ef7eb2e8f9f7 21 *
<> 144:ef7eb2e8f9f7 22 ******************************************************************************/
<> 144:ef7eb2e8f9f7 23
<> 144:ef7eb2e8f9f7 24 #include "device.h"
<> 144:ef7eb2e8f9f7 25 #include "clocking.h"
<> 144:ef7eb2e8f9f7 26 #if DEVICE_SPI
<> 144:ef7eb2e8f9f7 27
<> 144:ef7eb2e8f9f7 28 #include "mbed_assert.h"
<> 144:ef7eb2e8f9f7 29 #include "PeripheralPins.h"
<> 144:ef7eb2e8f9f7 30 #include "pinmap.h"
<> 144:ef7eb2e8f9f7 31 #include "pinmap_function.h"
<> 150:02e0a0aed4ec 32 #include "mbed_error.h"
<> 144:ef7eb2e8f9f7 33
<> 144:ef7eb2e8f9f7 34 #include "dma_api.h"
<> 144:ef7eb2e8f9f7 35 #include "dma_api_HAL.h"
<> 144:ef7eb2e8f9f7 36 #include "serial_api_HAL.h"
<> 144:ef7eb2e8f9f7 37 #include "spi_api.h"
<> 144:ef7eb2e8f9f7 38 #include "em_usart.h"
<> 144:ef7eb2e8f9f7 39 #include "em_cmu.h"
<> 144:ef7eb2e8f9f7 40 #include "em_dma.h"
<> 144:ef7eb2e8f9f7 41 #include "sleep_api.h"
<> 144:ef7eb2e8f9f7 42 #include "sleepmodes.h"
<> 144:ef7eb2e8f9f7 43
<> 144:ef7eb2e8f9f7 44 static uint16_t fill_word = SPI_FILL_WORD;
<> 144:ef7eb2e8f9f7 45
<> 144:ef7eb2e8f9f7 46 #define SPI_LEAST_ACTIVE_SLEEPMODE EM1
<> 144:ef7eb2e8f9f7 47
<> 144:ef7eb2e8f9f7 48 static inline CMU_Clock_TypeDef spi_get_clock_tree(spi_t *obj)
<> 144:ef7eb2e8f9f7 49 {
<> 144:ef7eb2e8f9f7 50 switch ((int)obj->spi.spi) {
<> 144:ef7eb2e8f9f7 51 #ifdef USART0
<> 144:ef7eb2e8f9f7 52 case SPI_0:
<> 144:ef7eb2e8f9f7 53 return cmuClock_USART0;
<> 144:ef7eb2e8f9f7 54 #endif
<> 144:ef7eb2e8f9f7 55 #ifdef USART1
<> 144:ef7eb2e8f9f7 56 case SPI_1:
<> 144:ef7eb2e8f9f7 57 return cmuClock_USART1;
<> 144:ef7eb2e8f9f7 58 #endif
<> 144:ef7eb2e8f9f7 59 #ifdef USART2
<> 144:ef7eb2e8f9f7 60 case SPI_2:
<> 144:ef7eb2e8f9f7 61 return cmuClock_USART2;
<> 144:ef7eb2e8f9f7 62 #endif
<> 144:ef7eb2e8f9f7 63 default:
<> 144:ef7eb2e8f9f7 64 error("SPI module not available. Out-of-bounds access.");
<> 144:ef7eb2e8f9f7 65 return cmuClock_HFPER;
<> 144:ef7eb2e8f9f7 66 }
<> 144:ef7eb2e8f9f7 67 }
<> 144:ef7eb2e8f9f7 68
<> 144:ef7eb2e8f9f7 69 static inline uint8_t spi_get_index(spi_t *obj)
<> 144:ef7eb2e8f9f7 70 {
<> 144:ef7eb2e8f9f7 71 uint8_t index = 0;
<> 144:ef7eb2e8f9f7 72 switch ((int)obj->spi.spi) {
<> 144:ef7eb2e8f9f7 73 #ifdef USART0
<> 144:ef7eb2e8f9f7 74 case SPI_0:
<> 144:ef7eb2e8f9f7 75 index = 0;
<> 144:ef7eb2e8f9f7 76 break;
<> 144:ef7eb2e8f9f7 77 #endif
<> 144:ef7eb2e8f9f7 78 #ifdef USART1
<> 144:ef7eb2e8f9f7 79 case SPI_1:
<> 144:ef7eb2e8f9f7 80 index = 1;
<> 144:ef7eb2e8f9f7 81 break;
<> 144:ef7eb2e8f9f7 82 #endif
<> 144:ef7eb2e8f9f7 83 #ifdef USART2
<> 144:ef7eb2e8f9f7 84 case SPI_2:
<> 144:ef7eb2e8f9f7 85 index = 2;
<> 144:ef7eb2e8f9f7 86 break;
<> 144:ef7eb2e8f9f7 87 #endif
<> 144:ef7eb2e8f9f7 88 default:
<> 144:ef7eb2e8f9f7 89 error("SPI module not available. Out-of-bounds access.");
<> 144:ef7eb2e8f9f7 90 break;
<> 144:ef7eb2e8f9f7 91 }
<> 144:ef7eb2e8f9f7 92 return index;
<> 144:ef7eb2e8f9f7 93 }
<> 144:ef7eb2e8f9f7 94
<> 144:ef7eb2e8f9f7 95 uint8_t spi_get_module(spi_t *obj)
<> 144:ef7eb2e8f9f7 96 {
<> 144:ef7eb2e8f9f7 97 return spi_get_index(obj);
<> 144:ef7eb2e8f9f7 98 }
<> 144:ef7eb2e8f9f7 99
<> 144:ef7eb2e8f9f7 100 static void usart_init(spi_t *obj, uint32_t baudrate, USART_Databits_TypeDef databits, bool master, USART_ClockMode_TypeDef clockMode )
<> 144:ef7eb2e8f9f7 101 {
<> 144:ef7eb2e8f9f7 102 USART_InitSync_TypeDef init = USART_INITSYNC_DEFAULT;
<> 144:ef7eb2e8f9f7 103 init.enable = usartDisable;
<> 144:ef7eb2e8f9f7 104 init.baudrate = baudrate;
<> 144:ef7eb2e8f9f7 105 init.databits = databits;
<> 144:ef7eb2e8f9f7 106 init.master = master;
<> 144:ef7eb2e8f9f7 107 init.msbf = 1;
<> 144:ef7eb2e8f9f7 108 init.clockMode = clockMode;
<> 144:ef7eb2e8f9f7 109
<> 144:ef7eb2e8f9f7 110 /* Determine the reference clock, because the correct clock may not be set up at init time (e.g. before main()) */
<> 144:ef7eb2e8f9f7 111 init.refFreq = REFERENCE_FREQUENCY;
<> 144:ef7eb2e8f9f7 112
<> 144:ef7eb2e8f9f7 113 USART_InitSync(obj->spi.spi, &init);
<> 144:ef7eb2e8f9f7 114 }
<> 144:ef7eb2e8f9f7 115
<> 144:ef7eb2e8f9f7 116 void spi_preinit(spi_t *obj, PinName mosi, PinName miso, PinName clk, PinName cs)
<> 144:ef7eb2e8f9f7 117 {
<> 144:ef7eb2e8f9f7 118 SPIName spi_mosi = (SPIName) pinmap_peripheral(mosi, PinMap_SPI_MOSI);
<> 144:ef7eb2e8f9f7 119 SPIName spi_miso = (SPIName) pinmap_peripheral(miso, PinMap_SPI_MISO);
<> 144:ef7eb2e8f9f7 120 SPIName spi_clk = (SPIName) pinmap_peripheral(clk, PinMap_SPI_CLK);
<> 144:ef7eb2e8f9f7 121 SPIName spi_cs = (SPIName) pinmap_peripheral(cs, PinMap_SPI_CS);
<> 144:ef7eb2e8f9f7 122 SPIName spi_data = (SPIName) pinmap_merge(spi_mosi, spi_miso);
<> 144:ef7eb2e8f9f7 123 SPIName spi_ctrl = (SPIName) pinmap_merge(spi_clk, spi_cs);
<> 144:ef7eb2e8f9f7 124
<> 144:ef7eb2e8f9f7 125 obj->spi.spi = (USART_TypeDef *) pinmap_merge(spi_data, spi_ctrl);
<> 144:ef7eb2e8f9f7 126 MBED_ASSERT((int) obj->spi.spi != NC);
<> 144:ef7eb2e8f9f7 127
<> 144:ef7eb2e8f9f7 128 if (cs != NC) { /* Slave mode */
<> 144:ef7eb2e8f9f7 129 obj->spi.master = false;
<> 144:ef7eb2e8f9f7 130 } else {
<> 144:ef7eb2e8f9f7 131 obj->spi.master = true;
<> 144:ef7eb2e8f9f7 132 }
<> 144:ef7eb2e8f9f7 133
<> 144:ef7eb2e8f9f7 134 #if defined(_SILICON_LABS_32B_PLATFORM_1)
<> 144:ef7eb2e8f9f7 135 // On P1, we need to ensure all pins are on the same location
<> 144:ef7eb2e8f9f7 136 uint32_t loc_mosi = pin_location(mosi, PinMap_SPI_MOSI);
<> 144:ef7eb2e8f9f7 137 uint32_t loc_miso = pin_location(miso, PinMap_SPI_MISO);
<> 144:ef7eb2e8f9f7 138 uint32_t loc_clk = pin_location(clk, PinMap_SPI_CLK);
<> 144:ef7eb2e8f9f7 139 uint32_t loc_cs = pin_location(cs, PinMap_SPI_CS);
<> 144:ef7eb2e8f9f7 140 uint32_t loc_data = pinmap_merge(loc_mosi, loc_miso);
<> 144:ef7eb2e8f9f7 141 uint32_t loc_ctrl = pinmap_merge(loc_clk, loc_cs);
<> 144:ef7eb2e8f9f7 142 obj->spi.location = pinmap_merge(loc_data, loc_ctrl);
<> 144:ef7eb2e8f9f7 143 MBED_ASSERT(obj->spi.location != NC);
<> 144:ef7eb2e8f9f7 144 #endif
<> 144:ef7eb2e8f9f7 145
<> 144:ef7eb2e8f9f7 146 obj->spi.dmaOptionsTX.dmaUsageState = DMA_USAGE_OPPORTUNISTIC;
<> 144:ef7eb2e8f9f7 147 }
<> 144:ef7eb2e8f9f7 148
<> 144:ef7eb2e8f9f7 149 void spi_enable_pins(spi_t *obj, uint8_t enable, PinName mosi, PinName miso, PinName clk, PinName cs)
<> 144:ef7eb2e8f9f7 150 {
<> 144:ef7eb2e8f9f7 151 if (enable) {
<> 144:ef7eb2e8f9f7 152 if (obj->spi.master) { /* Master mode */
<> 144:ef7eb2e8f9f7 153 /* Either mosi or miso can be NC */
<> 144:ef7eb2e8f9f7 154 if (mosi != NC) {
<> 144:ef7eb2e8f9f7 155 pin_mode(mosi, PushPull);
<> 144:ef7eb2e8f9f7 156 }
<> 144:ef7eb2e8f9f7 157 if (miso != NC) {
<> 144:ef7eb2e8f9f7 158 pin_mode(miso, Input);
<> 144:ef7eb2e8f9f7 159 }
<> 144:ef7eb2e8f9f7 160 pin_mode(clk, PushPull);
<> 144:ef7eb2e8f9f7 161 /* Don't set cs pin, since we toggle it manually */
<> 144:ef7eb2e8f9f7 162 } else { /* Slave mode */
<> 144:ef7eb2e8f9f7 163 if (mosi != NC) {
<> 144:ef7eb2e8f9f7 164 pin_mode(mosi, Input);
<> 144:ef7eb2e8f9f7 165 }
<> 144:ef7eb2e8f9f7 166 if (miso != NC) {
<> 144:ef7eb2e8f9f7 167 pin_mode(miso, PushPull);
<> 144:ef7eb2e8f9f7 168 }
<> 144:ef7eb2e8f9f7 169 pin_mode(clk, Input);
<> 144:ef7eb2e8f9f7 170 pin_mode(cs, Input);
<> 144:ef7eb2e8f9f7 171 }
<> 144:ef7eb2e8f9f7 172 } else {
<> 144:ef7eb2e8f9f7 173 // TODO_LP return PinMode to the previous state
<> 144:ef7eb2e8f9f7 174 if (obj->spi.master) { /* Master mode */
<> 144:ef7eb2e8f9f7 175 /* Either mosi or miso can be NC */
<> 144:ef7eb2e8f9f7 176 if (mosi != NC) {
<> 144:ef7eb2e8f9f7 177 pin_mode(mosi, Disabled);
<> 144:ef7eb2e8f9f7 178 }
<> 144:ef7eb2e8f9f7 179 if (miso != NC) {
<> 144:ef7eb2e8f9f7 180 pin_mode(miso, Disabled);
<> 144:ef7eb2e8f9f7 181 }
<> 144:ef7eb2e8f9f7 182 pin_mode(clk, Disabled);
<> 144:ef7eb2e8f9f7 183 /* Don't set cs pin, since we toggle it manually */
<> 144:ef7eb2e8f9f7 184 } else { /* Slave mode */
<> 144:ef7eb2e8f9f7 185 if (mosi != NC) {
<> 144:ef7eb2e8f9f7 186 pin_mode(mosi, Disabled);
<> 144:ef7eb2e8f9f7 187 }
<> 144:ef7eb2e8f9f7 188 if (miso != NC) {
<> 144:ef7eb2e8f9f7 189 pin_mode(miso, Disabled);
<> 144:ef7eb2e8f9f7 190 }
<> 144:ef7eb2e8f9f7 191 pin_mode(clk, Disabled);
<> 144:ef7eb2e8f9f7 192 pin_mode(cs, Disabled);
<> 144:ef7eb2e8f9f7 193 }
<> 144:ef7eb2e8f9f7 194 }
<> 144:ef7eb2e8f9f7 195
<> 144:ef7eb2e8f9f7 196 /* Enabling pins and setting location */
<> 144:ef7eb2e8f9f7 197 #ifdef _USART_ROUTEPEN_RESETVALUE
<> 144:ef7eb2e8f9f7 198 uint32_t route = USART_ROUTEPEN_CLKPEN;
<> 144:ef7eb2e8f9f7 199 obj->spi.spi->ROUTELOC0 &= ~_USART_ROUTELOC0_CLKLOC_MASK;
<> 144:ef7eb2e8f9f7 200 obj->spi.spi->ROUTELOC0 |= pin_location(clk, PinMap_SPI_CLK)<<_USART_ROUTELOC0_CLKLOC_SHIFT;
<> 144:ef7eb2e8f9f7 201 if (mosi != NC) {
<> 144:ef7eb2e8f9f7 202 route |= USART_ROUTEPEN_TXPEN;
<> 144:ef7eb2e8f9f7 203 obj->spi.spi->ROUTELOC0 &= ~_USART_ROUTELOC0_TXLOC_MASK;
<> 144:ef7eb2e8f9f7 204 obj->spi.spi->ROUTELOC0 |= pin_location(mosi, PinMap_SPI_MOSI)<<_USART_ROUTELOC0_TXLOC_SHIFT;
<> 144:ef7eb2e8f9f7 205 }
<> 144:ef7eb2e8f9f7 206 if (miso != NC) {
<> 144:ef7eb2e8f9f7 207 route |= USART_ROUTEPEN_RXPEN;
<> 144:ef7eb2e8f9f7 208 obj->spi.spi->ROUTELOC0 &= ~_USART_ROUTELOC0_RXLOC_MASK;
<> 144:ef7eb2e8f9f7 209 obj->spi.spi->ROUTELOC0 |= pin_location(miso, PinMap_SPI_MISO)<<_USART_ROUTELOC0_RXLOC_SHIFT;
<> 144:ef7eb2e8f9f7 210 }
<> 144:ef7eb2e8f9f7 211 if (!obj->spi.master) {
<> 144:ef7eb2e8f9f7 212 route |= USART_ROUTEPEN_CSPEN;
<> 144:ef7eb2e8f9f7 213 obj->spi.spi->ROUTELOC0 &= ~_USART_ROUTELOC0_CSLOC_MASK;
<> 144:ef7eb2e8f9f7 214 obj->spi.spi->ROUTELOC0 |= pin_location(cs, PinMap_SPI_CS)<<_USART_ROUTELOC0_CSLOC_SHIFT;
<> 144:ef7eb2e8f9f7 215 }
Anna Bridge 163:74e0ce7f98e8 216 obj->spi.location = obj->spi.spi->ROUTELOC0;
Anna Bridge 163:74e0ce7f98e8 217 obj->spi.route = route;
<> 144:ef7eb2e8f9f7 218 obj->spi.spi->ROUTEPEN = route;
<> 144:ef7eb2e8f9f7 219 }
<> 144:ef7eb2e8f9f7 220 #else
Anna Bridge 163:74e0ce7f98e8 221 uint32_t route = USART_ROUTE_CLKPEN;
<> 144:ef7eb2e8f9f7 222
<> 144:ef7eb2e8f9f7 223 if (mosi != NC) {
<> 144:ef7eb2e8f9f7 224 route |= USART_ROUTE_TXPEN;
<> 144:ef7eb2e8f9f7 225 }
<> 144:ef7eb2e8f9f7 226 if (miso != NC) {
<> 144:ef7eb2e8f9f7 227 route |= USART_ROUTE_RXPEN;
<> 144:ef7eb2e8f9f7 228 }
<> 144:ef7eb2e8f9f7 229 if (!obj->spi.master) {
<> 144:ef7eb2e8f9f7 230 route |= USART_ROUTE_CSPEN;
<> 144:ef7eb2e8f9f7 231 }
Anna Bridge 163:74e0ce7f98e8 232 route |= obj->spi.location << _USART_ROUTE_LOCATION_SHIFT;
<> 144:ef7eb2e8f9f7 233 obj->spi.spi->ROUTE = route;
Anna Bridge 163:74e0ce7f98e8 234 obj->spi.route = route;
<> 144:ef7eb2e8f9f7 235 }
<> 144:ef7eb2e8f9f7 236 #endif
<> 144:ef7eb2e8f9f7 237 void spi_enable(spi_t *obj, uint8_t enable)
<> 144:ef7eb2e8f9f7 238 {
<> 144:ef7eb2e8f9f7 239 USART_Enable(obj->spi.spi, (enable ? usartEnable : usartDisable));
<> 144:ef7eb2e8f9f7 240 }
<> 144:ef7eb2e8f9f7 241
<> 144:ef7eb2e8f9f7 242 void spi_init(spi_t *obj, PinName mosi, PinName miso, PinName clk, PinName cs)
<> 144:ef7eb2e8f9f7 243 {
<> 144:ef7eb2e8f9f7 244 CMU_ClockEnable(cmuClock_HFPER, true);
<> 144:ef7eb2e8f9f7 245 spi_preinit(obj, mosi, miso, clk, cs);
<> 144:ef7eb2e8f9f7 246 CMU_ClockEnable(spi_get_clock_tree(obj), true);
<> 144:ef7eb2e8f9f7 247 usart_init(obj, 100000, usartDatabits8, true, usartClockMode0);
<> 144:ef7eb2e8f9f7 248
<> 144:ef7eb2e8f9f7 249 spi_enable_pins(obj, true, mosi, miso, clk, cs);
<> 144:ef7eb2e8f9f7 250 spi_enable(obj, true);
<> 144:ef7eb2e8f9f7 251 }
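
/* Illustrative usage sketch, not part of the revision above: a typical
 * master-mode bring-up through this HAL. The example_spi_master_setup()
 * helper and its pin parameters are hypothetical; valid PinName values
 * are target-specific. */
static void example_spi_master_setup(spi_t *obj, PinName mosi, PinName miso, PinName clk)
{
    spi_init(obj, mosi, miso, clk, NC); /* cs == NC selects master mode in spi_preinit() */
    spi_format(obj, 8, 0, 0);           /* 8-bit frames, clock mode 0, master */
    spi_frequency(obj, 1000000);        /* ~1 MHz SCLK derived from the reference clock */
}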
<> 144:ef7eb2e8f9f7 252
<> 144:ef7eb2e8f9f7 253 void spi_enable_event(spi_t *obj, uint32_t event, uint8_t enable)
<> 144:ef7eb2e8f9f7 254 {
<> 144:ef7eb2e8f9f7 255 if(enable) obj->spi.event |= event;
<> 144:ef7eb2e8f9f7 256 else obj->spi.event &= ~event;
<> 144:ef7eb2e8f9f7 257 }
<> 144:ef7eb2e8f9f7 258
<> 144:ef7eb2e8f9f7 259 /****************************************************************************
<> 144:ef7eb2e8f9f7 260 * void spi_enable_interrupt(spi_t *obj, uint32_t handler, uint8_t enable)
<> 144:ef7eb2e8f9f7 261 *
<> 144:ef7eb2e8f9f7 262 * This will enable the interrupt in NVIC for the associated USART RX channel
<> 144:ef7eb2e8f9f7 263 *
<> 144:ef7eb2e8f9f7 264 * * obj: pointer to spi object
<> 144:ef7eb2e8f9f7 265 * * handler: pointer to interrupt handler for this channel
<> 144:ef7eb2e8f9f7 266 * * enable: Whether to enable (true) or disable (false) the interrupt
<> 144:ef7eb2e8f9f7 267 *
<> 144:ef7eb2e8f9f7 268 ****************************************************************************/
<> 144:ef7eb2e8f9f7 269 void spi_enable_interrupt(spi_t *obj, uint32_t handler, uint8_t enable)
<> 144:ef7eb2e8f9f7 270 {
<> 144:ef7eb2e8f9f7 271 IRQn_Type IRQvector;
<> 144:ef7eb2e8f9f7 272
<> 144:ef7eb2e8f9f7 273 switch ((uint32_t)obj->spi.spi) {
<> 144:ef7eb2e8f9f7 274 #ifdef USART0
<> 144:ef7eb2e8f9f7 275 case USART_0:
<> 144:ef7eb2e8f9f7 276 IRQvector = USART0_RX_IRQn;
<> 144:ef7eb2e8f9f7 277 break;
<> 144:ef7eb2e8f9f7 278 #endif
<> 144:ef7eb2e8f9f7 279 #ifdef USART1
<> 144:ef7eb2e8f9f7 280 case USART_1:
<> 144:ef7eb2e8f9f7 281 IRQvector = USART1_RX_IRQn;
<> 144:ef7eb2e8f9f7 282 break;
<> 144:ef7eb2e8f9f7 283 #endif
<> 144:ef7eb2e8f9f7 284 #ifdef USART2
<> 144:ef7eb2e8f9f7 285 case USART_2:
<> 144:ef7eb2e8f9f7 286 IRQvector = USART2_RX_IRQn;
<> 144:ef7eb2e8f9f7 287 break;
<> 144:ef7eb2e8f9f7 288 #endif
<> 144:ef7eb2e8f9f7 289 default:
<> 144:ef7eb2e8f9f7 290 error("Undefined SPI peripheral");
<> 144:ef7eb2e8f9f7 291 return;
<> 144:ef7eb2e8f9f7 292 }
<> 144:ef7eb2e8f9f7 293
<> 144:ef7eb2e8f9f7 294 if (enable == true) {
<> 144:ef7eb2e8f9f7 295 NVIC_SetVector(IRQvector, handler);
<> 144:ef7eb2e8f9f7 296 USART_IntEnable(obj->spi.spi, USART_IEN_RXDATAV);
<> 144:ef7eb2e8f9f7 297 NVIC_EnableIRQ(IRQvector);
<> 144:ef7eb2e8f9f7 298 } else {
<> 144:ef7eb2e8f9f7 299 NVIC_SetVector(IRQvector, handler);
<> 144:ef7eb2e8f9f7 300 USART_IntDisable(obj->spi.spi, USART_IEN_RXDATAV);
<> 144:ef7eb2e8f9f7 301 NVIC_DisableIRQ(IRQvector);
<> 144:ef7eb2e8f9f7 302 }
<> 144:ef7eb2e8f9f7 303 }
<> 144:ef7eb2e8f9f7 304
<> 144:ef7eb2e8f9f7 305 void spi_format(spi_t *obj, int bits, int mode, int slave)
<> 144:ef7eb2e8f9f7 306 {
<> 144:ef7eb2e8f9f7 307 /* Bits: values between 4 and 16 are valid */
<> 144:ef7eb2e8f9f7 308 MBED_ASSERT(bits >= 4 && bits <= 16);
<> 144:ef7eb2e8f9f7 309 obj->spi.bits = bits;
<> 144:ef7eb2e8f9f7 310 /* 0x01 = usartDatabits4, etc, up to 0x0D = usartDatabits16 */
<> 144:ef7eb2e8f9f7 311 USART_Databits_TypeDef databits = (USART_Databits_TypeDef) (bits - 3);
<> 144:ef7eb2e8f9f7 312
<> 144:ef7eb2e8f9f7 313 USART_ClockMode_TypeDef clockMode;
<> 144:ef7eb2e8f9f7 314 MBED_ASSERT(mode >= 0 && mode <= 3);
<> 144:ef7eb2e8f9f7 315 switch (mode) {
<> 144:ef7eb2e8f9f7 316 case 0:
<> 144:ef7eb2e8f9f7 317 clockMode = usartClockMode0;
<> 144:ef7eb2e8f9f7 318 break;
<> 144:ef7eb2e8f9f7 319 case 1:
<> 144:ef7eb2e8f9f7 320 clockMode = usartClockMode1;
<> 144:ef7eb2e8f9f7 321 break;
<> 144:ef7eb2e8f9f7 322 case 2:
<> 144:ef7eb2e8f9f7 323 clockMode = usartClockMode2;
<> 144:ef7eb2e8f9f7 324 break;
<> 144:ef7eb2e8f9f7 325 case 3:
<> 144:ef7eb2e8f9f7 326 clockMode = usartClockMode3;
<> 144:ef7eb2e8f9f7 327 break;
<> 144:ef7eb2e8f9f7 328 default:
<> 144:ef7eb2e8f9f7 329 clockMode = usartClockMode0;
<> 144:ef7eb2e8f9f7 330 }
<> 144:ef7eb2e8f9f7 331 uint32_t iflags = obj->spi.spi->IEN;
<> 144:ef7eb2e8f9f7 332 bool enabled = (obj->spi.spi->STATUS & (USART_STATUS_RXENS | USART_STATUS_TXENS)) != 0;
<> 144:ef7eb2e8f9f7 333
<> 144:ef7eb2e8f9f7 334 usart_init(obj, 100000, databits, (slave ? false : true), clockMode);
<> 144:ef7eb2e8f9f7 335
<> 144:ef7eb2e8f9f7 336 //restore state
<> 144:ef7eb2e8f9f7 337 #ifdef _USART_ROUTEPEN_RESETVALUE
Anna Bridge 163:74e0ce7f98e8 338 obj->spi.spi->ROUTEPEN = obj->spi.route;
Anna Bridge 163:74e0ce7f98e8 339 obj->spi.spi->ROUTELOC0 = obj->spi.location;
<> 144:ef7eb2e8f9f7 340 #else
Anna Bridge 163:74e0ce7f98e8 341 obj->spi.spi->ROUTE = obj->spi.route;
<> 144:ef7eb2e8f9f7 342 #endif
<> 144:ef7eb2e8f9f7 343 obj->spi.spi->IEN = iflags;
<> 144:ef7eb2e8f9f7 344
<> 144:ef7eb2e8f9f7 345 if(enabled) spi_enable(obj, enabled);
<> 144:ef7eb2e8f9f7 346 }
<> 144:ef7eb2e8f9f7 347
<> 144:ef7eb2e8f9f7 348 void spi_frequency(spi_t *obj, int hz)
<> 144:ef7eb2e8f9f7 349 {
<> 144:ef7eb2e8f9f7 350 USART_BaudrateSyncSet(obj->spi.spi, REFERENCE_FREQUENCY, hz);
<> 144:ef7eb2e8f9f7 351 }
<> 144:ef7eb2e8f9f7 352
<> 144:ef7eb2e8f9f7 353 /* Read/Write */
<> 144:ef7eb2e8f9f7 354
<> 144:ef7eb2e8f9f7 355 void spi_write(spi_t *obj, int value)
<> 144:ef7eb2e8f9f7 356 {
<> 144:ef7eb2e8f9f7 357 if (obj->spi.bits <= 8) {
<> 144:ef7eb2e8f9f7 358 USART_Tx(obj->spi.spi, (uint8_t) value);
<> 144:ef7eb2e8f9f7 359 } else if (obj->spi.bits == 9) {
<> 144:ef7eb2e8f9f7 360 USART_TxExt(obj->spi.spi, (uint16_t) value & 0x1FF);
<> 144:ef7eb2e8f9f7 361 } else {
<> 144:ef7eb2e8f9f7 362 USART_TxDouble(obj->spi.spi, (uint16_t) value);
<> 144:ef7eb2e8f9f7 363 }
<> 144:ef7eb2e8f9f7 364 }
<> 144:ef7eb2e8f9f7 365
<> 144:ef7eb2e8f9f7 366 int spi_read(spi_t *obj)
<> 144:ef7eb2e8f9f7 367 {
<> 144:ef7eb2e8f9f7 368 if (obj->spi.bits <= 8) {
<> 144:ef7eb2e8f9f7 369 return (int) obj->spi.spi->RXDATA;
<> 144:ef7eb2e8f9f7 370 } else if (obj->spi.bits == 9) {
<> 144:ef7eb2e8f9f7 371 return (int) obj->spi.spi->RXDATAX & 0x1FF;
<> 144:ef7eb2e8f9f7 372 } else {
<> 144:ef7eb2e8f9f7 373 return (int) obj->spi.spi->RXDOUBLE;
<> 144:ef7eb2e8f9f7 374 }
<> 144:ef7eb2e8f9f7 375 }
<> 144:ef7eb2e8f9f7 376
<> 144:ef7eb2e8f9f7 377 int spi_read_asynch(spi_t *obj)
<> 144:ef7eb2e8f9f7 378 {
<> 144:ef7eb2e8f9f7 379 return spi_read(obj);
<> 144:ef7eb2e8f9f7 380 }
<> 144:ef7eb2e8f9f7 381
<> 144:ef7eb2e8f9f7 382 int spi_master_write(spi_t *obj, int value)
<> 144:ef7eb2e8f9f7 383 {
<> 144:ef7eb2e8f9f7 384 spi_write(obj, value);
<> 144:ef7eb2e8f9f7 385
<> 144:ef7eb2e8f9f7 386 /* Wait for transmission of last byte */
<> 144:ef7eb2e8f9f7 387 while (!(obj->spi.spi->STATUS & USART_STATUS_TXC)) {
<> 144:ef7eb2e8f9f7 388 }
<> 144:ef7eb2e8f9f7 389
<> 144:ef7eb2e8f9f7 390 return spi_read(obj);
<> 144:ef7eb2e8f9f7 391 }
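
/* Illustrative usage sketch, not part of the revision above: a blocking
 * full-duplex exchange built on spi_master_write(), which shifts one frame
 * out and returns the frame clocked in at the same time. The helper name
 * and parameters are hypothetical. */
static void example_spi_exchange(spi_t *obj, const uint8_t *tx, uint8_t *rx, int length)
{
    for (int i = 0; i < length; i++) {
        rx[i] = (uint8_t) spi_master_write(obj, tx[i]);
    }
}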
<> 144:ef7eb2e8f9f7 392
<> 144:ef7eb2e8f9f7 393 inline uint8_t spi_master_tx_ready(spi_t *obj)
<> 144:ef7eb2e8f9f7 394 {
<> 144:ef7eb2e8f9f7 395 return (obj->spi.spi->STATUS & USART_STATUS_TXBL) ? true : false;
<> 144:ef7eb2e8f9f7 396 }
<> 144:ef7eb2e8f9f7 397
<> 144:ef7eb2e8f9f7 398 uint8_t spi_master_rx_ready(spi_t *obj)
<> 144:ef7eb2e8f9f7 399 {
<> 144:ef7eb2e8f9f7 400 return (obj->spi.spi->STATUS & USART_STATUS_RXDATAV) ? true : false;
<> 144:ef7eb2e8f9f7 401 }
<> 144:ef7eb2e8f9f7 402
<> 144:ef7eb2e8f9f7 403 uint8_t spi_master_tx_int_flag(spi_t *obj)
<> 144:ef7eb2e8f9f7 404 {
<> 144:ef7eb2e8f9f7 405 return (obj->spi.spi->IF & USART_IF_TXBL) ? true : false;
<> 144:ef7eb2e8f9f7 406 }
<> 144:ef7eb2e8f9f7 407
<> 144:ef7eb2e8f9f7 408 uint8_t spi_master_rx_int_flag(spi_t *obj)
<> 144:ef7eb2e8f9f7 409 {
<> 144:ef7eb2e8f9f7 410 return (obj->spi.spi->IF & (USART_IF_RXDATAV | USART_IF_RXFULL)) ? true : false;
<> 144:ef7eb2e8f9f7 411 }
<> 144:ef7eb2e8f9f7 412
<> 144:ef7eb2e8f9f7 413 void spi_master_read_asynch_complete(spi_t *obj)
<> 144:ef7eb2e8f9f7 414 {
<> 144:ef7eb2e8f9f7 415 obj->spi.spi->IFC = USART_IFC_RXFULL; // in case it got full
<> 144:ef7eb2e8f9f7 416 }
<> 144:ef7eb2e8f9f7 417
<> 144:ef7eb2e8f9f7 418 void spi_master_write_asynch_complete(spi_t *obj)
<> 144:ef7eb2e8f9f7 419 {
<> 144:ef7eb2e8f9f7 420 obj->spi.spi->IFC = USART_IFC_TXC;
<> 144:ef7eb2e8f9f7 421 }
<> 144:ef7eb2e8f9f7 422
<> 144:ef7eb2e8f9f7 423 void spi_irq_handler(spi_t *obj)
<> 144:ef7eb2e8f9f7 424 {
<> 144:ef7eb2e8f9f7 425 spi_read(obj); //TODO_LP store data to the object?
<> 144:ef7eb2e8f9f7 426 }
<> 144:ef7eb2e8f9f7 427
<> 144:ef7eb2e8f9f7 428 uint8_t spi_active(spi_t *obj)
<> 144:ef7eb2e8f9f7 429 {
<> 144:ef7eb2e8f9f7 430 switch(obj->spi.dmaOptionsTX.dmaUsageState) {
<> 144:ef7eb2e8f9f7 431 case DMA_USAGE_TEMPORARY_ALLOCATED:
<> 144:ef7eb2e8f9f7 432 return true;
<> 144:ef7eb2e8f9f7 433 case DMA_USAGE_ALLOCATED:
<> 144:ef7eb2e8f9f7 434 /* Check whether the allocated DMA channel is active */
<> 144:ef7eb2e8f9f7 435 #ifdef LDMA_PRESENT
<> 144:ef7eb2e8f9f7 436 return(LDMAx_ChannelEnabled(obj->spi.dmaOptionsTX.dmaChannel) || LDMAx_ChannelEnabled(obj->spi.dmaOptionsRX.dmaChannel));
<> 144:ef7eb2e8f9f7 437 #else
<> 144:ef7eb2e8f9f7 438 return(DMA_ChannelEnabled(obj->spi.dmaOptionsTX.dmaChannel) || DMA_ChannelEnabled(obj->spi.dmaOptionsRX.dmaChannel));
<> 144:ef7eb2e8f9f7 439 #endif
<> 144:ef7eb2e8f9f7 440 default:
<> 144:ef7eb2e8f9f7 441 /* Check whether interrupt for spi is enabled */
<> 144:ef7eb2e8f9f7 442 return (obj->spi.spi->IEN & (USART_IEN_RXDATAV | USART_IEN_TXBL)) ? true : false;
<> 144:ef7eb2e8f9f7 443 }
<> 144:ef7eb2e8f9f7 444 }
<> 144:ef7eb2e8f9f7 445
<> 144:ef7eb2e8f9f7 446 void spi_buffer_set(spi_t *obj, const void *tx, uint32_t tx_length, void *rx, uint32_t rx_length, uint8_t bit_width)
<> 144:ef7eb2e8f9f7 447 {
<> 144:ef7eb2e8f9f7 448 uint32_t i;
<> 144:ef7eb2e8f9f7 449 uint16_t *tx_ptr = (uint16_t *) tx;
<> 144:ef7eb2e8f9f7 450
<> 144:ef7eb2e8f9f7 451 obj->tx_buff.buffer = (void *)tx;
<> 144:ef7eb2e8f9f7 452 obj->rx_buff.buffer = rx;
<> 144:ef7eb2e8f9f7 453 obj->tx_buff.length = tx_length;
<> 144:ef7eb2e8f9f7 454 obj->rx_buff.length = rx_length;
<> 144:ef7eb2e8f9f7 455 obj->tx_buff.pos = 0;
<> 144:ef7eb2e8f9f7 456 obj->rx_buff.pos = 0;
<> 144:ef7eb2e8f9f7 457 obj->tx_buff.width = bit_width;
<> 144:ef7eb2e8f9f7 458 obj->rx_buff.width = bit_width;
<> 144:ef7eb2e8f9f7 459
<> 144:ef7eb2e8f9f7 460 if((obj->spi.bits == 9) && (tx != 0)) {
<> 144:ef7eb2e8f9f7 461 // Make sure we don't have inadvertent non-zero bits outside 9-bit frames which could trigger unwanted operation
<> 144:ef7eb2e8f9f7 462 for(i = 0; i < (tx_length / 2); i++) {
<> 144:ef7eb2e8f9f7 463 tx_ptr[i] &= 0x1FF;
<> 144:ef7eb2e8f9f7 464 }
<> 144:ef7eb2e8f9f7 465 }
<> 144:ef7eb2e8f9f7 466 }
<> 144:ef7eb2e8f9f7 467
<> 144:ef7eb2e8f9f7 468 static void spi_buffer_tx_write(spi_t *obj)
<> 144:ef7eb2e8f9f7 469 {
<> 144:ef7eb2e8f9f7 470 uint32_t data = 0;
<> 144:ef7eb2e8f9f7 471
<> 144:ef7eb2e8f9f7 472 // Interpret buffer according to declared width
<> 144:ef7eb2e8f9f7 473 if (!obj->tx_buff.buffer) {
<> 144:ef7eb2e8f9f7 474 data = SPI_FILL_WORD;
<> 144:ef7eb2e8f9f7 475 } else if (obj->tx_buff.width == 32) {
<> 144:ef7eb2e8f9f7 476 uint32_t * tx = (uint32_t *)obj->tx_buff.buffer;
<> 144:ef7eb2e8f9f7 477 data = tx[obj->tx_buff.pos];
<> 144:ef7eb2e8f9f7 478 } else if (obj->tx_buff.width == 16) {
<> 144:ef7eb2e8f9f7 479 uint16_t * tx = (uint16_t *)obj->tx_buff.buffer;
<> 144:ef7eb2e8f9f7 480 data = tx[obj->tx_buff.pos];
<> 144:ef7eb2e8f9f7 481 } else {
<> 144:ef7eb2e8f9f7 482 uint8_t * tx = (uint8_t *)obj->tx_buff.buffer;
<> 144:ef7eb2e8f9f7 483 data = tx[obj->tx_buff.pos];
<> 144:ef7eb2e8f9f7 484 }
<> 144:ef7eb2e8f9f7 485 obj->tx_buff.pos++;
<> 144:ef7eb2e8f9f7 486
<> 144:ef7eb2e8f9f7 487 // Send buffer
<> 144:ef7eb2e8f9f7 488 if (obj->spi.bits > 9) {
<> 144:ef7eb2e8f9f7 489 obj->spi.spi->TXDOUBLE = data;
<> 144:ef7eb2e8f9f7 490 } else if (obj->spi.bits == 9) {
<> 144:ef7eb2e8f9f7 491 obj->spi.spi->TXDATAX = data;
<> 144:ef7eb2e8f9f7 492 } else {
<> 144:ef7eb2e8f9f7 493 obj->spi.spi->TXDATA = data;
<> 144:ef7eb2e8f9f7 494 }
<> 144:ef7eb2e8f9f7 495 }
<> 144:ef7eb2e8f9f7 496
<> 144:ef7eb2e8f9f7 497 static void spi_buffer_rx_read(spi_t *obj)
<> 144:ef7eb2e8f9f7 498 {
<> 144:ef7eb2e8f9f7 499 uint32_t data;
<> 144:ef7eb2e8f9f7 500
<> 144:ef7eb2e8f9f7 501 if (obj->spi.spi->STATUS & USART_STATUS_RXDATAV) {
<> 144:ef7eb2e8f9f7 502 // Read from the FIFO
<> 144:ef7eb2e8f9f7 503 if (obj->spi.bits > 9) {
<> 144:ef7eb2e8f9f7 504 data = obj->spi.spi->RXDOUBLE;
<> 144:ef7eb2e8f9f7 505 } else if (obj->spi.bits == 9) {
<> 144:ef7eb2e8f9f7 506 data = obj->spi.spi->RXDATAX;
<> 144:ef7eb2e8f9f7 507 } else {
<> 144:ef7eb2e8f9f7 508 data = obj->spi.spi->RXDATA;
<> 144:ef7eb2e8f9f7 509 }
<> 144:ef7eb2e8f9f7 510
<> 144:ef7eb2e8f9f7 511 // If there is room in the buffer, store the data
<> 144:ef7eb2e8f9f7 512 if (obj->rx_buff.buffer && obj->rx_buff.pos < obj->rx_buff.length) {
<> 144:ef7eb2e8f9f7 513 if (obj->rx_buff.width == 32) {
<> 144:ef7eb2e8f9f7 514 uint32_t * rx = (uint32_t *)(obj->rx_buff.buffer);
<> 144:ef7eb2e8f9f7 515 rx[obj->rx_buff.pos] = data;
<> 144:ef7eb2e8f9f7 516 } else if (obj->rx_buff.width == 16) {
<> 144:ef7eb2e8f9f7 517 uint16_t * rx = (uint16_t *)(obj->rx_buff.buffer);
<> 144:ef7eb2e8f9f7 518 rx[obj->rx_buff.pos] = data;
<> 144:ef7eb2e8f9f7 519 } else {
<> 144:ef7eb2e8f9f7 520 uint8_t * rx = (uint8_t *)(obj->rx_buff.buffer);
<> 144:ef7eb2e8f9f7 521 rx[obj->rx_buff.pos] = data;
<> 144:ef7eb2e8f9f7 522 }
<> 144:ef7eb2e8f9f7 523 obj->rx_buff.pos++;
<> 144:ef7eb2e8f9f7 524 }
<> 144:ef7eb2e8f9f7 525 }
<> 144:ef7eb2e8f9f7 526 }
<> 144:ef7eb2e8f9f7 527
<> 144:ef7eb2e8f9f7 528 int spi_master_write_asynch(spi_t *obj)
<> 144:ef7eb2e8f9f7 529 {
<> 144:ef7eb2e8f9f7 530 int ndata = 0;
<> 144:ef7eb2e8f9f7 531 while ((obj->tx_buff.pos < obj->tx_buff.length) && (obj->spi.spi->STATUS & USART_STATUS_TXBL)) {
<> 144:ef7eb2e8f9f7 532 spi_buffer_tx_write(obj);
<> 144:ef7eb2e8f9f7 533 ndata++;
<> 144:ef7eb2e8f9f7 534 }
<> 144:ef7eb2e8f9f7 535 return ndata;
<> 144:ef7eb2e8f9f7 536 }
<> 144:ef7eb2e8f9f7 537
<> 144:ef7eb2e8f9f7 538 int spi_master_read_asynch(spi_t *obj)
<> 144:ef7eb2e8f9f7 539 {
<> 144:ef7eb2e8f9f7 540 int ndata = 0;
<> 144:ef7eb2e8f9f7 541 while ((obj->rx_buff.pos < obj->rx_buff.length) && (obj->spi.spi->STATUS & (USART_STATUS_RXDATAV | USART_STATUS_RXFULL))) {
<> 144:ef7eb2e8f9f7 542 spi_buffer_rx_read(obj);
<> 144:ef7eb2e8f9f7 543 ndata++;
<> 144:ef7eb2e8f9f7 544 }
<> 144:ef7eb2e8f9f7 545 // all sent but still more to receive? need to align tx buffer
<> 144:ef7eb2e8f9f7 546 if ((obj->tx_buff.pos >= obj->tx_buff.length) && (obj->rx_buff.pos < obj->rx_buff.length)) {
<> 144:ef7eb2e8f9f7 547 obj->tx_buff.buffer = (void *)0;
<> 144:ef7eb2e8f9f7 548 obj->tx_buff.length = obj->rx_buff.length;
<> 144:ef7eb2e8f9f7 549 }
<> 144:ef7eb2e8f9f7 550
<> 144:ef7eb2e8f9f7 551 return ndata;
<> 144:ef7eb2e8f9f7 552 }
<> 144:ef7eb2e8f9f7 553
<> 144:ef7eb2e8f9f7 554 uint8_t spi_buffer_rx_empty(spi_t *obj)
<> 144:ef7eb2e8f9f7 555 {
<> 144:ef7eb2e8f9f7 556 return (obj->rx_buff.pos >= obj->rx_buff.length ? true : false );
<> 144:ef7eb2e8f9f7 557 }
<> 144:ef7eb2e8f9f7 558
<> 144:ef7eb2e8f9f7 559 uint8_t spi_buffer_tx_empty(spi_t *obj)
<> 144:ef7eb2e8f9f7 560 {
<> 144:ef7eb2e8f9f7 561 return (obj->tx_buff.pos >= obj->tx_buff.length ? true : false );
<> 144:ef7eb2e8f9f7 562 }
<> 144:ef7eb2e8f9f7 563
<> 144:ef7eb2e8f9f7 564 //TODO_LP implement slave
<> 144:ef7eb2e8f9f7 565
<> 144:ef7eb2e8f9f7 566 int spi_slave_receive(spi_t *obj)
<> 144:ef7eb2e8f9f7 567 {
<> 144:ef7eb2e8f9f7 568 if (obj->spi.bits <= 9) {
<> 144:ef7eb2e8f9f7 569 return (obj->spi.spi->STATUS & USART_STATUS_RXDATAV) ? 1 : 0;
<> 144:ef7eb2e8f9f7 570 } else {
<> 144:ef7eb2e8f9f7 571 return (obj->spi.spi->STATUS & USART_STATUS_RXFULL) ? 1 : 0;
<> 144:ef7eb2e8f9f7 572 }
<> 144:ef7eb2e8f9f7 573 }
<> 144:ef7eb2e8f9f7 574
<> 144:ef7eb2e8f9f7 575 int spi_slave_read(spi_t *obj)
<> 144:ef7eb2e8f9f7 576 {
<> 144:ef7eb2e8f9f7 577 return spi_read(obj);
<> 144:ef7eb2e8f9f7 578 }
<> 144:ef7eb2e8f9f7 579
<> 144:ef7eb2e8f9f7 580 void spi_slave_write(spi_t *obj, int value)
<> 144:ef7eb2e8f9f7 581 {
<> 144:ef7eb2e8f9f7 582 spi_write(obj, value);
<> 144:ef7eb2e8f9f7 583 }
<> 144:ef7eb2e8f9f7 584
<> 144:ef7eb2e8f9f7 585 uint32_t spi_event_check(spi_t *obj)
<> 144:ef7eb2e8f9f7 586 {
<> 144:ef7eb2e8f9f7 587 uint32_t requestedEvent = obj->spi.event;
<> 144:ef7eb2e8f9f7 588 uint32_t event = 0;
<> 144:ef7eb2e8f9f7 589 uint8_t quit = spi_buffer_rx_empty(obj) & spi_buffer_tx_empty(obj);
<> 144:ef7eb2e8f9f7 590 if (((requestedEvent & SPI_EVENT_COMPLETE) != 0) && (quit == true)) {
<> 144:ef7eb2e8f9f7 591 event |= SPI_EVENT_COMPLETE;
<> 144:ef7eb2e8f9f7 592 }
<> 144:ef7eb2e8f9f7 593
<> 144:ef7eb2e8f9f7 594 if(quit == true) {
<> 144:ef7eb2e8f9f7 595 event |= SPI_EVENT_INTERNAL_TRANSFER_COMPLETE;
<> 144:ef7eb2e8f9f7 596 }
<> 144:ef7eb2e8f9f7 597
<> 144:ef7eb2e8f9f7 598 return event;
<> 144:ef7eb2e8f9f7 599 }
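
/* Illustrative sketch, not part of the revision above: how the asynchronous
 * helpers are intended to be pumped from the transfer interrupt. The helper
 * name is hypothetical; the upper (C++) layer performs the equivalent steps. */
static uint32_t example_spi_irq_pump(spi_t *obj)
{
    spi_master_read_asynch(obj);  /* drain received frames into rx_buff */
    spi_master_write_asynch(obj); /* refill the TX FIFO from tx_buff */
    return spi_event_check(obj);  /* non-zero once a requested event has occurred */
}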
<> 144:ef7eb2e8f9f7 600 /******************************************
<> 144:ef7eb2e8f9f7 601 * void transferComplete(uint channel, bool primary, void* user)
<> 144:ef7eb2e8f9f7 602 *
<> 144:ef7eb2e8f9f7 603 * Callback function which gets called upon DMA transfer completion.
<> 144:ef7eb2e8f9f7 604 * The user-defined pointer points to the CPP-land thunk.
<> 144:ef7eb2e8f9f7 605 ******************************************/
<> 144:ef7eb2e8f9f7 606 void transferComplete(unsigned int channel, bool primary, void *user)
<> 144:ef7eb2e8f9f7 607 {
<> 144:ef7eb2e8f9f7 608 (void) channel;
<> 144:ef7eb2e8f9f7 609 (void) primary;
<> 144:ef7eb2e8f9f7 610
<> 144:ef7eb2e8f9f7 611 /* User pointer should be a thunk to CPP land */
<> 144:ef7eb2e8f9f7 612 if (user != NULL) {
<> 144:ef7eb2e8f9f7 613 ((DMACallback)user)();
<> 144:ef7eb2e8f9f7 614 }
<> 144:ef7eb2e8f9f7 615 }
<> 144:ef7eb2e8f9f7 616
<> 144:ef7eb2e8f9f7 617 /******************************************
<> 144:ef7eb2e8f9f7 618 * bool spi_allocate_dma(spi_t *obj);
<> 144:ef7eb2e8f9f7 619 * (helper function for spi_enable_dma)
<> 144:ef7eb2e8f9f7 620 *
<> 144:ef7eb2e8f9f7 621 * This function will request two DMA channels from the DMA API if needed
<> 144:ef7eb2e8f9f7 622 * by the hint provided. They will be allocated to the SPI object pointed to.
<> 144:ef7eb2e8f9f7 623 *
<> 144:ef7eb2e8f9f7 624 * return value: whether the channels were acquired successfully (true) or not.
<> 144:ef7eb2e8f9f7 625 ******************************************/
<> 144:ef7eb2e8f9f7 626 bool spi_allocate_dma(spi_t *obj)
<> 144:ef7eb2e8f9f7 627 {
<> 144:ef7eb2e8f9f7 628 int dmaChannelIn, dmaChannelOut;
<> 144:ef7eb2e8f9f7 629 dmaChannelIn = dma_channel_allocate(DMA_CAP_NONE);
<> 144:ef7eb2e8f9f7 630 if (dmaChannelIn == DMA_ERROR_OUT_OF_CHANNELS) {
<> 144:ef7eb2e8f9f7 631 return false;
<> 144:ef7eb2e8f9f7 632 }
<> 144:ef7eb2e8f9f7 633 dmaChannelOut = dma_channel_allocate(DMA_CAP_NONE);
<> 144:ef7eb2e8f9f7 634 if (dmaChannelOut == DMA_ERROR_OUT_OF_CHANNELS) {
<> 144:ef7eb2e8f9f7 635 dma_channel_free(dmaChannelIn);
<> 144:ef7eb2e8f9f7 636 return false;
<> 144:ef7eb2e8f9f7 637 }
<> 144:ef7eb2e8f9f7 638
<> 144:ef7eb2e8f9f7 639 obj->spi.dmaOptionsTX.dmaChannel = dmaChannelOut;
<> 144:ef7eb2e8f9f7 640 obj->spi.dmaOptionsRX.dmaChannel = dmaChannelIn;
<> 144:ef7eb2e8f9f7 641 return true;
<> 144:ef7eb2e8f9f7 642 }
<> 144:ef7eb2e8f9f7 643
<> 144:ef7eb2e8f9f7 644 /******************************************
<> 144:ef7eb2e8f9f7 645 * void spi_enable_dma(spi_t *obj, DMAUsage state)
<> 144:ef7eb2e8f9f7 646 *
<> 144:ef7eb2e8f9f7 647 * This function tries to allocate DMA as indicated by the hint (state).
<> 144:ef7eb2e8f9f7 648 * There are three possibilities:
<> 144:ef7eb2e8f9f7 649 * * state = NEVER:
<> 144:ef7eb2e8f9f7 650 * if there were channels allocated by state = ALWAYS, they will be released
<> 144:ef7eb2e8f9f7 651 * * state = OPPORTUNISTIC:
<> 144:ef7eb2e8f9f7 652 * if there are channels available, they will get used, but freed upon transfer completion
<> 144:ef7eb2e8f9f7 653 * * state = ALWAYS
<> 144:ef7eb2e8f9f7 654 * if there are channels available, they will get allocated and not be freed until state changes
<> 144:ef7eb2e8f9f7 655 ******************************************/
<> 144:ef7eb2e8f9f7 656 void spi_enable_dma(spi_t *obj, DMAUsage state)
<> 144:ef7eb2e8f9f7 657 {
<> 144:ef7eb2e8f9f7 658 if (state == DMA_USAGE_ALWAYS && obj->spi.dmaOptionsTX.dmaUsageState != DMA_USAGE_ALLOCATED) {
<> 144:ef7eb2e8f9f7 659 /* Try to allocate channels */
<> 144:ef7eb2e8f9f7 660 if (spi_allocate_dma(obj)) {
<> 144:ef7eb2e8f9f7 661 obj->spi.dmaOptionsTX.dmaUsageState = DMA_USAGE_ALLOCATED;
<> 144:ef7eb2e8f9f7 662 } else {
<> 144:ef7eb2e8f9f7 663 obj->spi.dmaOptionsTX.dmaUsageState = state;
<> 144:ef7eb2e8f9f7 664 }
<> 144:ef7eb2e8f9f7 665 } else if (state == DMA_USAGE_OPPORTUNISTIC) {
<> 144:ef7eb2e8f9f7 666 if (obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_ALLOCATED) {
<> 144:ef7eb2e8f9f7 667 /* Channels have already been allocated previously by an ALWAYS state, so after this transfer, we will release them */
<> 144:ef7eb2e8f9f7 668 obj->spi.dmaOptionsTX.dmaUsageState = DMA_USAGE_TEMPORARY_ALLOCATED;
<> 144:ef7eb2e8f9f7 669 } else {
<> 144:ef7eb2e8f9f7 670 /* Try to allocate channels */
<> 144:ef7eb2e8f9f7 671 if (spi_allocate_dma(obj)) {
<> 144:ef7eb2e8f9f7 672 obj->spi.dmaOptionsTX.dmaUsageState = DMA_USAGE_TEMPORARY_ALLOCATED;
<> 144:ef7eb2e8f9f7 673 } else {
<> 144:ef7eb2e8f9f7 674 obj->spi.dmaOptionsTX.dmaUsageState = state;
<> 144:ef7eb2e8f9f7 675 }
<> 144:ef7eb2e8f9f7 676 }
<> 144:ef7eb2e8f9f7 677 } else if (state == DMA_USAGE_NEVER) {
<> 144:ef7eb2e8f9f7 678 /* If channels are allocated, get rid of them */
<> 144:ef7eb2e8f9f7 679 if (obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_ALLOCATED) {
<> 144:ef7eb2e8f9f7 680 dma_channel_free(obj->spi.dmaOptionsTX.dmaChannel);
<> 144:ef7eb2e8f9f7 681 dma_channel_free(obj->spi.dmaOptionsRX.dmaChannel);
<> 144:ef7eb2e8f9f7 682 }
<> 144:ef7eb2e8f9f7 683 obj->spi.dmaOptionsTX.dmaUsageState = DMA_USAGE_NEVER;
<> 144:ef7eb2e8f9f7 684 }
<> 144:ef7eb2e8f9f7 685 }
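
/* Illustrative sketch, not part of the revision above: the three DMA usage
 * hints handled by spi_enable_dma(). The helper name is hypothetical. */
static void example_spi_dma_hints(spi_t *obj)
{
    spi_enable_dma(obj, DMA_USAGE_ALWAYS);        /* allocate two channels and keep them */
    spi_enable_dma(obj, DMA_USAGE_OPPORTUNISTIC); /* reuse or allocate channels, freed after the next transfer */
    spi_enable_dma(obj, DMA_USAGE_NEVER);         /* release any channels still allocated */
}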
<> 144:ef7eb2e8f9f7 686
<> 144:ef7eb2e8f9f7 687 #ifdef LDMA_PRESENT
<> 144:ef7eb2e8f9f7 688 /************************************************************************************
<> 144:ef7eb2e8f9f7 689 * DMA helper functions *
<> 144:ef7eb2e8f9f7 690 ************************************************************************************/
<> 144:ef7eb2e8f9f7 691 /******************************************
<> 144:ef7eb2e8f9f7 692 * static void serial_dmaTransferComplete(uint channel, bool primary, void* user)
<> 144:ef7eb2e8f9f7 693 *
<> 144:ef7eb2e8f9f7 694 * Callback function which gets called upon DMA transfer completion.
<> 144:ef7eb2e8f9f7 695 * The user-defined pointer points to the CPP-land thunk.
<> 144:ef7eb2e8f9f7 696 ******************************************/
<> 144:ef7eb2e8f9f7 697 static void serial_dmaTransferComplete(unsigned int channel, bool primary, void *user)
<> 144:ef7eb2e8f9f7 698 {
<> 144:ef7eb2e8f9f7 699
<> 144:ef7eb2e8f9f7 700 /* User pointer should be a thunk to CPP land */
<> 144:ef7eb2e8f9f7 701 if (user != NULL) {
<> 144:ef7eb2e8f9f7 702 ((DMACallback)user)();
<> 144:ef7eb2e8f9f7 703 }
<> 144:ef7eb2e8f9f7 704 }
<> 144:ef7eb2e8f9f7 705 static void spi_master_dma_channel_setup(spi_t *obj, void* callback)
<> 144:ef7eb2e8f9f7 706 {
<> 144:ef7eb2e8f9f7 707 obj->spi.dmaOptionsRX.dmaCallback.userPtr = callback;
<> 144:ef7eb2e8f9f7 708 }
<> 144:ef7eb2e8f9f7 709 #else
<> 144:ef7eb2e8f9f7 710 /******************************************
<> 144:ef7eb2e8f9f7 711 * void spi_master_dma_channel_setup(spi_t *obj)
<> 144:ef7eb2e8f9f7 712 *
<> 144:ef7eb2e8f9f7 713 * This function will setup the DMA configuration for SPI transfers
<> 144:ef7eb2e8f9f7 714 *
<> 144:ef7eb2e8f9f7 715 * The channel numbers are fetched from the SPI instance, so this function
<> 144:ef7eb2e8f9f7 716 * should only be called when those channels have actually been allocated.
<> 144:ef7eb2e8f9f7 717 ******************************************/
<> 144:ef7eb2e8f9f7 718 static void spi_master_dma_channel_setup(spi_t *obj, void* callback)
<> 144:ef7eb2e8f9f7 719 {
<> 144:ef7eb2e8f9f7 720 DMA_CfgChannel_TypeDef rxChnlCfg;
<> 144:ef7eb2e8f9f7 721 DMA_CfgChannel_TypeDef txChnlCfg;
<> 144:ef7eb2e8f9f7 722
<> 144:ef7eb2e8f9f7 723 /* Setting up channel for rx. */
<> 144:ef7eb2e8f9f7 724 obj->spi.dmaOptionsRX.dmaCallback.cbFunc = transferComplete;
<> 144:ef7eb2e8f9f7 725 obj->spi.dmaOptionsRX.dmaCallback.userPtr = callback;
<> 144:ef7eb2e8f9f7 726
<> 144:ef7eb2e8f9f7 727 rxChnlCfg.highPri = false;
<> 144:ef7eb2e8f9f7 728 rxChnlCfg.enableInt = true;
<> 144:ef7eb2e8f9f7 729 rxChnlCfg.cb = &(obj->spi.dmaOptionsRX.dmaCallback);
<> 144:ef7eb2e8f9f7 730
<> 144:ef7eb2e8f9f7 731 /* Setting up channel for tx. */
<> 144:ef7eb2e8f9f7 732 obj->spi.dmaOptionsTX.dmaCallback.cbFunc = transferComplete;
<> 144:ef7eb2e8f9f7 733 obj->spi.dmaOptionsTX.dmaCallback.userPtr = callback;
<> 144:ef7eb2e8f9f7 734
<> 144:ef7eb2e8f9f7 735 txChnlCfg.highPri = false;
<> 144:ef7eb2e8f9f7 736 txChnlCfg.enableInt = true;
<> 144:ef7eb2e8f9f7 737 txChnlCfg.cb = &(obj->spi.dmaOptionsTX.dmaCallback);
<> 144:ef7eb2e8f9f7 738
<> 144:ef7eb2e8f9f7 739 switch ((int)obj->spi.spi) {
<> 144:ef7eb2e8f9f7 740 #ifdef USART0
<> 144:ef7eb2e8f9f7 741 case SPI_0:
<> 144:ef7eb2e8f9f7 742 rxChnlCfg.select = DMAREQ_USART0_RXDATAV;
<> 144:ef7eb2e8f9f7 743 txChnlCfg.select = DMAREQ_USART0_TXEMPTY;
<> 144:ef7eb2e8f9f7 744 break;
<> 144:ef7eb2e8f9f7 745 #endif
<> 144:ef7eb2e8f9f7 746 #ifdef USART1
<> 144:ef7eb2e8f9f7 747 case SPI_1:
<> 144:ef7eb2e8f9f7 748 rxChnlCfg.select = DMAREQ_USART1_RXDATAV;
<> 144:ef7eb2e8f9f7 749 txChnlCfg.select = DMAREQ_USART1_TXEMPTY;
<> 144:ef7eb2e8f9f7 750 break;
<> 144:ef7eb2e8f9f7 751 #endif
<> 144:ef7eb2e8f9f7 752 #ifdef USART2
<> 144:ef7eb2e8f9f7 753 case SPI_2:
<> 144:ef7eb2e8f9f7 754 rxChnlCfg.select = DMAREQ_USART2_RXDATAV;
<> 144:ef7eb2e8f9f7 755 txChnlCfg.select = DMAREQ_USART2_TXEMPTY;
<> 144:ef7eb2e8f9f7 756 break;
<> 144:ef7eb2e8f9f7 757 #endif
<> 144:ef7eb2e8f9f7 758 default:
<> 144:ef7eb2e8f9f7 759 error("SPI module not available. Out-of-bounds access.");
<> 144:ef7eb2e8f9f7 760 break;
<> 144:ef7eb2e8f9f7 761 }
<> 144:ef7eb2e8f9f7 762 DMA_CfgChannel(obj->spi.dmaOptionsRX.dmaChannel, &rxChnlCfg);
<> 144:ef7eb2e8f9f7 763 DMA_CfgChannel(obj->spi.dmaOptionsTX.dmaChannel, &txChnlCfg);
<> 144:ef7eb2e8f9f7 764 }
<> 144:ef7eb2e8f9f7 765 #endif // LDMA_PRESENT
<> 144:ef7eb2e8f9f7 766 /******************************************
<> 144:ef7eb2e8f9f7 767 * void spi_activate_dma(spi_t *obj, void* rxdata, void* txdata, int length)
<> 144:ef7eb2e8f9f7 768 *
<> 144:ef7eb2e8f9f7 769 * This function will start the DMA engine for SPI transfers
<> 144:ef7eb2e8f9f7 770 *
<> 144:ef7eb2e8f9f7 771 * * rxdata: pointer to RX buffer, if needed.
<> 144:ef7eb2e8f9f7 772 * * txdata: pointer to TX buffer, if needed. Else FF's.
<> 144:ef7eb2e8f9f7 773 * * tx_length: how many bytes will get sent.
<> 144:ef7eb2e8f9f7 774 * * rx_length: how many bytes will get received. If > tx_length, TX will get padded with n lower bits of SPI_FILL_WORD.
<> 144:ef7eb2e8f9f7 775 ******************************************/
<> 144:ef7eb2e8f9f7 776 #ifdef LDMA_PRESENT
<> 144:ef7eb2e8f9f7 777 static void spi_activate_dma(spi_t *obj, void* rxdata, const void* txdata, int tx_length, int rx_length)
<> 144:ef7eb2e8f9f7 778 {
<> 144:ef7eb2e8f9f7 779 LDMA_PeripheralSignal_t dma_periph;
<> 144:ef7eb2e8f9f7 780
<> 144:ef7eb2e8f9f7 781 if(rxdata) {
<> 144:ef7eb2e8f9f7 782 volatile const void *source_addr;
<> 144:ef7eb2e8f9f7 783 /* Select the RX source address. A 9-bit frame length requires the extended register;
<> 144:ef7eb2e8f9f7 784 10-bit and larger frames require the RXDOUBLE register. */
<> 144:ef7eb2e8f9f7 785 switch((int)obj->spi.spi) {
<> 144:ef7eb2e8f9f7 786 case USART_0:
<> 144:ef7eb2e8f9f7 787 dma_periph = ldmaPeripheralSignal_USART0_RXDATAV;
<> 144:ef7eb2e8f9f7 788 break;
<> 144:ef7eb2e8f9f7 789 case USART_1:
<> 144:ef7eb2e8f9f7 790 dma_periph = ldmaPeripheralSignal_USART1_RXDATAV;
<> 144:ef7eb2e8f9f7 791 break;
<> 144:ef7eb2e8f9f7 792 default:
<> 144:ef7eb2e8f9f7 793 EFM_ASSERT(0);
<> 144:ef7eb2e8f9f7 794 while(1);
<> 144:ef7eb2e8f9f7 795 break;
<> 144:ef7eb2e8f9f7 796 }
<> 144:ef7eb2e8f9f7 797
<> 144:ef7eb2e8f9f7 798 if (obj->spi.bits <= 8) {
<> 144:ef7eb2e8f9f7 799 source_addr = &obj->spi.spi->RXDATA;
<> 144:ef7eb2e8f9f7 800 } else if (obj->spi.bits == 9) {
<> 144:ef7eb2e8f9f7 801 source_addr = &obj->spi.spi->RXDATAX;
<> 144:ef7eb2e8f9f7 802 } else {
<> 144:ef7eb2e8f9f7 803 source_addr = &obj->spi.spi->RXDOUBLE;
<> 144:ef7eb2e8f9f7 804 }
<> 144:ef7eb2e8f9f7 805
<> 144:ef7eb2e8f9f7 806 LDMA_TransferCfg_t xferConf = LDMA_TRANSFER_CFG_PERIPHERAL(dma_periph);
<> 144:ef7eb2e8f9f7 807 LDMA_Descriptor_t desc = LDMA_DESCRIPTOR_SINGLE_P2M_BYTE(source_addr, rxdata, rx_length);
<> 144:ef7eb2e8f9f7 808
<> 144:ef7eb2e8f9f7 809 if(obj->spi.bits >= 9){
<> 144:ef7eb2e8f9f7 810 desc.xfer.size = ldmaCtrlSizeHalf;
<> 144:ef7eb2e8f9f7 811 }
<> 144:ef7eb2e8f9f7 812
<> 144:ef7eb2e8f9f7 813 if (obj->tx_buff.width == 32) {
<> 144:ef7eb2e8f9f7 814 if (obj->spi.bits >= 9) {
<> 144:ef7eb2e8f9f7 815 desc.xfer.dstInc = ldmaCtrlDstIncTwo;
<> 144:ef7eb2e8f9f7 816 } else {
<> 144:ef7eb2e8f9f7 817 desc.xfer.dstInc = ldmaCtrlDstIncFour;
<> 144:ef7eb2e8f9f7 818 }
<> 144:ef7eb2e8f9f7 819 } else if (obj->tx_buff.width == 16) {
<> 144:ef7eb2e8f9f7 820 if (obj->spi.bits >= 9) {
<> 144:ef7eb2e8f9f7 821 desc.xfer.dstInc = ldmaCtrlDstIncOne;
<> 144:ef7eb2e8f9f7 822 } else {
<> 144:ef7eb2e8f9f7 823 desc.xfer.dstInc = ldmaCtrlDstIncTwo;
<> 144:ef7eb2e8f9f7 824 }
<> 144:ef7eb2e8f9f7 825 } else {
<> 144:ef7eb2e8f9f7 826 desc.xfer.dstInc = ldmaCtrlDstIncOne;
<> 144:ef7eb2e8f9f7 827 }
<> 144:ef7eb2e8f9f7 828
<> 144:ef7eb2e8f9f7 829 LDMAx_StartTransfer(obj->spi.dmaOptionsRX.dmaChannel, &xferConf, &desc, serial_dmaTransferComplete,obj->spi.dmaOptionsRX.dmaCallback.userPtr);
<> 144:ef7eb2e8f9f7 830 }
<> 144:ef7eb2e8f9f7 831
<> 144:ef7eb2e8f9f7 832 volatile void *target_addr;
<> 144:ef7eb2e8f9f7 833
<> 144:ef7eb2e8f9f7 834 /* Select the TX target address. A 9-bit frame length requires the extended register;
<> 144:ef7eb2e8f9f7 835 10-bit and larger frames require the TXDOUBLE register. */
<> 144:ef7eb2e8f9f7 836 switch ((int)obj->spi.spi) {
<> 144:ef7eb2e8f9f7 837 case USART_0:
<> 144:ef7eb2e8f9f7 838 dma_periph = ldmaPeripheralSignal_USART0_TXBL;
<> 144:ef7eb2e8f9f7 839 break;
<> 144:ef7eb2e8f9f7 840 case USART_1:
<> 144:ef7eb2e8f9f7 841 dma_periph = ldmaPeripheralSignal_USART1_TXBL;
<> 144:ef7eb2e8f9f7 842 break;
<> 144:ef7eb2e8f9f7 843 default:
<> 144:ef7eb2e8f9f7 844 EFM_ASSERT(0);
<> 144:ef7eb2e8f9f7 845 while(1);
<> 144:ef7eb2e8f9f7 846 break;
<> 144:ef7eb2e8f9f7 847 }
<> 144:ef7eb2e8f9f7 848
<> 144:ef7eb2e8f9f7 849 if (obj->spi.bits <= 8) {
<> 144:ef7eb2e8f9f7 850 target_addr = &obj->spi.spi->TXDATA;
<> 144:ef7eb2e8f9f7 851 } else if (obj->spi.bits == 9) {
<> 144:ef7eb2e8f9f7 852 target_addr = &obj->spi.spi->TXDATAX;
<> 144:ef7eb2e8f9f7 853 } else {
<> 144:ef7eb2e8f9f7 854 target_addr = &obj->spi.spi->TXDOUBLE;
<> 144:ef7eb2e8f9f7 855 }
<> 144:ef7eb2e8f9f7 856
<> 144:ef7eb2e8f9f7 857 /* Check the transmit length, and split long transfers to smaller ones */
<> 144:ef7eb2e8f9f7 858 int max_length = 1024;
<> 144:ef7eb2e8f9f7 859 #ifdef _LDMA_CH_CTRL_XFERCNT_MASK
<> 144:ef7eb2e8f9f7 860 max_length = (_LDMA_CH_CTRL_XFERCNT_MASK>>_LDMA_CH_CTRL_XFERCNT_SHIFT)+1;
<> 144:ef7eb2e8f9f7 861 #endif
<> 144:ef7eb2e8f9f7 862 if (tx_length > max_length) {
<> 144:ef7eb2e8f9f7 863 tx_length = max_length;
<> 144:ef7eb2e8f9f7 864 }
<> 144:ef7eb2e8f9f7 865
<> 144:ef7eb2e8f9f7 866 /* Save amount of TX done by DMA */
<> 144:ef7eb2e8f9f7 867 obj->tx_buff.pos += tx_length;
<> 144:ef7eb2e8f9f7 868
<> 144:ef7eb2e8f9f7 869 LDMA_TransferCfg_t xferConf = LDMA_TRANSFER_CFG_PERIPHERAL(dma_periph);
<> 144:ef7eb2e8f9f7 870 LDMA_Descriptor_t desc = LDMA_DESCRIPTOR_SINGLE_M2P_BYTE((txdata ? txdata : &fill_word), target_addr, tx_length);
<> 144:ef7eb2e8f9f7 871
<> 144:ef7eb2e8f9f7 872 if (obj->spi.bits >= 9) {
<> 144:ef7eb2e8f9f7 873 desc.xfer.size = ldmaCtrlSizeHalf;
<> 144:ef7eb2e8f9f7 874 }
<> 144:ef7eb2e8f9f7 875
<> 144:ef7eb2e8f9f7 876 if (!txdata) {
<> 144:ef7eb2e8f9f7 877 desc.xfer.srcInc = ldmaCtrlSrcIncNone;
<> 144:ef7eb2e8f9f7 878 } else if (obj->tx_buff.width == 32) {
<> 144:ef7eb2e8f9f7 879 if (obj->spi.bits >= 9) {
<> 144:ef7eb2e8f9f7 880 desc.xfer.srcInc = ldmaCtrlSrcIncTwo;
<> 144:ef7eb2e8f9f7 881 } else {
<> 144:ef7eb2e8f9f7 882 desc.xfer.srcInc = ldmaCtrlSrcIncFour;
<> 144:ef7eb2e8f9f7 883 }
<> 144:ef7eb2e8f9f7 884 } else if (obj->tx_buff.width == 16) {
<> 144:ef7eb2e8f9f7 885 if (obj->spi.bits >= 9) {
<> 144:ef7eb2e8f9f7 886 desc.xfer.srcInc = ldmaCtrlSrcIncOne;
<> 144:ef7eb2e8f9f7 887 } else {
<> 144:ef7eb2e8f9f7 888 desc.xfer.srcInc = ldmaCtrlSrcIncTwo;
<> 144:ef7eb2e8f9f7 889 }
<> 144:ef7eb2e8f9f7 890 } else {
<> 144:ef7eb2e8f9f7 891 desc.xfer.srcInc = ldmaCtrlSrcIncOne;
<> 144:ef7eb2e8f9f7 892 }
<> 144:ef7eb2e8f9f7 893
<> 144:ef7eb2e8f9f7 894 // Kick off DMA TX
<> 144:ef7eb2e8f9f7 895 LDMAx_StartTransfer(obj->spi.dmaOptionsTX.dmaChannel, &xferConf, &desc, serial_dmaTransferComplete,obj->spi.dmaOptionsTX.dmaCallback.userPtr);
<> 144:ef7eb2e8f9f7 896 }
<> 144:ef7eb2e8f9f7 897
<> 144:ef7eb2e8f9f7 898 #else
<> 144:ef7eb2e8f9f7 899 /******************************************
<> 144:ef7eb2e8f9f7 900 * void spi_activate_dma(spi_t *obj, void* rxdata, void* txdata, int length)
<> 144:ef7eb2e8f9f7 901 *
<> 144:ef7eb2e8f9f7 902 * This function will start the DMA engine for SPI transfers
<> 144:ef7eb2e8f9f7 903 *
<> 144:ef7eb2e8f9f7 904 * * rxdata: pointer to RX buffer, if needed.
<> 144:ef7eb2e8f9f7 905 * * txdata: pointer to TX buffer, if needed. Else FF's.
<> 144:ef7eb2e8f9f7 906 * * tx_length: how many bytes will get sent.
<> 144:ef7eb2e8f9f7 907 * * rx_length: how many bytes will get received. If > tx_length, TX will get padded with n lower bits of SPI_FILL_WORD.
<> 144:ef7eb2e8f9f7 908 ******************************************/
<> 144:ef7eb2e8f9f7 909 static void spi_activate_dma(spi_t *obj, void* rxdata, const void* txdata, int tx_length, int rx_length)
<> 144:ef7eb2e8f9f7 910 {
<> 144:ef7eb2e8f9f7 911 /* DMA descriptors */
<> 144:ef7eb2e8f9f7 912 DMA_CfgDescr_TypeDef rxDescrCfg;
<> 144:ef7eb2e8f9f7 913 DMA_CfgDescr_TypeDef txDescrCfg;
<> 144:ef7eb2e8f9f7 914
<> 144:ef7eb2e8f9f7 915 /* Split up transfers if the length is larger than what the DMA supports. */
<> 144:ef7eb2e8f9f7 916 const int DMA_MAX_TRANSFER = (_DMA_CTRL_N_MINUS_1_MASK >> _DMA_CTRL_N_MINUS_1_SHIFT);
<> 144:ef7eb2e8f9f7 917
<> 144:ef7eb2e8f9f7 918 if (tx_length > DMA_MAX_TRANSFER) {
<> 144:ef7eb2e8f9f7 919 tx_length = DMA_MAX_TRANSFER;
<> 144:ef7eb2e8f9f7 920 }
<> 144:ef7eb2e8f9f7 921 if (rx_length > DMA_MAX_TRANSFER) {
<> 144:ef7eb2e8f9f7 922 rx_length = DMA_MAX_TRANSFER;
<> 144:ef7eb2e8f9f7 923 }
<> 144:ef7eb2e8f9f7 924
<> 144:ef7eb2e8f9f7 925 /* Save amount of TX done by DMA */
<> 144:ef7eb2e8f9f7 926 obj->tx_buff.pos += tx_length;
<> 144:ef7eb2e8f9f7 927 obj->rx_buff.pos += rx_length;
<> 144:ef7eb2e8f9f7 928
<> 144:ef7eb2e8f9f7 929 /* Only activate RX DMA if a receive buffer is specified */
<> 144:ef7eb2e8f9f7 930 if (rxdata != NULL) {
<> 144:ef7eb2e8f9f7 931 // Setting up channel descriptor
<> 144:ef7eb2e8f9f7 932 if (obj->rx_buff.width == 32) {
<> 144:ef7eb2e8f9f7 933 rxDescrCfg.dstInc = dmaDataInc4;
<> 144:ef7eb2e8f9f7 934 } else if (obj->rx_buff.width == 16) {
<> 144:ef7eb2e8f9f7 935 rxDescrCfg.dstInc = dmaDataInc2;
<> 144:ef7eb2e8f9f7 936 } else {
<> 144:ef7eb2e8f9f7 937 rxDescrCfg.dstInc = dmaDataInc1;
<> 144:ef7eb2e8f9f7 938 }
<> 144:ef7eb2e8f9f7 939 rxDescrCfg.srcInc = dmaDataIncNone;
<> 144:ef7eb2e8f9f7 940 rxDescrCfg.size = (obj->spi.bits <= 8 ? dmaDataSize1 : dmaDataSize2); //When frame size >= 9, use RXDOUBLE
<> 144:ef7eb2e8f9f7 941 rxDescrCfg.arbRate = dmaArbitrate1;
<> 144:ef7eb2e8f9f7 942 rxDescrCfg.hprot = 0;
<> 144:ef7eb2e8f9f7 943 DMA_CfgDescr(obj->spi.dmaOptionsRX.dmaChannel, true, &rxDescrCfg);
<> 144:ef7eb2e8f9f7 944
<> 144:ef7eb2e8f9f7 945 void * rx_reg;
<> 144:ef7eb2e8f9f7 946 if (obj->spi.bits > 9) {
<> 144:ef7eb2e8f9f7 947 rx_reg = (void *)&obj->spi.spi->RXDOUBLE;
<> 144:ef7eb2e8f9f7 948 } else if (obj->spi.bits == 9) {
<> 144:ef7eb2e8f9f7 949 rx_reg = (void *)&obj->spi.spi->RXDATAX;
<> 144:ef7eb2e8f9f7 950 } else {
<> 144:ef7eb2e8f9f7 951 rx_reg = (void *)&obj->spi.spi->RXDATA;
<> 144:ef7eb2e8f9f7 952 }
<> 144:ef7eb2e8f9f7 953
<> 144:ef7eb2e8f9f7 954 /* Activate RX channel */
<> 144:ef7eb2e8f9f7 955 DMA_ActivateBasic(obj->spi.dmaOptionsRX.dmaChannel,
<> 144:ef7eb2e8f9f7 956 true,
<> 144:ef7eb2e8f9f7 957 false,
<> 144:ef7eb2e8f9f7 958 rxdata,
<> 144:ef7eb2e8f9f7 959 rx_reg,
<> 144:ef7eb2e8f9f7 960 rx_length - 1);
<> 144:ef7eb2e8f9f7 961 }
<> 144:ef7eb2e8f9f7 962
<> 144:ef7eb2e8f9f7 963 // When there is no transmit buffer, the static fill word (all FFs) is sent instead.
<> 144:ef7eb2e8f9f7 964 /* Setting up channel descriptor */
<> 144:ef7eb2e8f9f7 965 txDescrCfg.dstInc = dmaDataIncNone;
<> 144:ef7eb2e8f9f7 966 if (txdata == 0) {
<> 144:ef7eb2e8f9f7 967 // Don't increment source when there is no transmit buffer
<> 144:ef7eb2e8f9f7 968 txDescrCfg.srcInc = dmaDataIncNone;
<> 144:ef7eb2e8f9f7 969 } else {
<> 144:ef7eb2e8f9f7 970 if (obj->tx_buff.width == 32) {
<> 144:ef7eb2e8f9f7 971 txDescrCfg.srcInc = dmaDataInc4;
<> 144:ef7eb2e8f9f7 972 } else if (obj->tx_buff.width == 16) {
<> 144:ef7eb2e8f9f7 973 txDescrCfg.srcInc = dmaDataInc2;
<> 144:ef7eb2e8f9f7 974 } else {
<> 144:ef7eb2e8f9f7 975 txDescrCfg.srcInc = dmaDataInc1;
<> 144:ef7eb2e8f9f7 976 }
<> 144:ef7eb2e8f9f7 977 }
<> 144:ef7eb2e8f9f7 978 txDescrCfg.size = (obj->spi.bits <= 8 ? dmaDataSize1 : dmaDataSize2); //When frame size >= 9, use TXDOUBLE
<> 144:ef7eb2e8f9f7 979 txDescrCfg.arbRate = dmaArbitrate1;
<> 144:ef7eb2e8f9f7 980 txDescrCfg.hprot = 0;
<> 144:ef7eb2e8f9f7 981 DMA_CfgDescr(obj->spi.dmaOptionsTX.dmaChannel, true, &txDescrCfg);
<> 144:ef7eb2e8f9f7 982
<> 144:ef7eb2e8f9f7 983 void * tx_reg;
<> 144:ef7eb2e8f9f7 984 if (obj->spi.bits > 9) {
<> 144:ef7eb2e8f9f7 985 tx_reg = (void *)&obj->spi.spi->TXDOUBLE;
<> 144:ef7eb2e8f9f7 986 } else if (obj->spi.bits == 9) {
<> 144:ef7eb2e8f9f7 987 tx_reg = (void *)&obj->spi.spi->TXDATAX;
<> 144:ef7eb2e8f9f7 988 } else {
<> 144:ef7eb2e8f9f7 989 tx_reg = (void *)&obj->spi.spi->TXDATA;
<> 144:ef7eb2e8f9f7 990 }
<> 144:ef7eb2e8f9f7 991
<> 144:ef7eb2e8f9f7 992 /* Activate TX channel */
<> 144:ef7eb2e8f9f7 993 DMA_ActivateBasic(obj->spi.dmaOptionsTX.dmaChannel,
<> 144:ef7eb2e8f9f7 994 true,
<> 144:ef7eb2e8f9f7 995 false,
<> 144:ef7eb2e8f9f7 996 tx_reg,
<> 144:ef7eb2e8f9f7 997 (txdata == 0 ? &fill_word : (void *)txdata), // When there is nothing to transmit, point to static fill word
<> 144:ef7eb2e8f9f7 998 (tx_length - 1));
<> 144:ef7eb2e8f9f7 999 }
<> 144:ef7eb2e8f9f7 1000 #endif //LDMA_PRESENT
<> 144:ef7eb2e8f9f7 1001 /********************************************************************
<> 144:ef7eb2e8f9f7 1002 * spi_master_transfer_dma(spi_t *obj, void *rxdata, void *txdata, int length, DMACallback cb, DMAUsage hint)
<> 144:ef7eb2e8f9f7 1003 *
<> 144:ef7eb2e8f9f7 1004 * Start an SPI transfer by using DMA and the supplied hint for DMA usage
<> 144:ef7eb2e8f9f7 1005 *
<> 144:ef7eb2e8f9f7 1006 * * obj: pointer to specific SPI instance
<> 144:ef7eb2e8f9f7 1007 * * rxdata: pointer to rx buffer. If null, we will assume only TX is relevant, and RX will be ignored.
<> 144:ef7eb2e8f9f7 1008 * * txdata: pointer to TX buffer. If null, we will assume only the read is relevant, and will send FF's for reading back.
<> 144:ef7eb2e8f9f7 1009 * * length: How many bytes should be written/read.
<> 144:ef7eb2e8f9f7 1010 * * cb: thunk pointer into CPP-land to get the spi object
<> 144:ef7eb2e8f9f7 1011 * * hint: hint for the requested DMA usage.
<> 144:ef7eb2e8f9f7 1012 * * NEVER: do not use DMA, but use IRQ instead
<> 144:ef7eb2e8f9f7 1013 * * OPPORTUNISTIC: use DMA if there are channels available, but return them after the transfer.
<> 144:ef7eb2e8f9f7 1014 * * ALWAYS: use DMA if channels are available, and hold on to the channels after the transfer.
<> 144:ef7eb2e8f9f7 1015 * If the previous transfer has kept the channel, that channel will continue to get used.
<> 144:ef7eb2e8f9f7 1016 *
<> 144:ef7eb2e8f9f7 1017 ********************************************************************/
<> 144:ef7eb2e8f9f7 1018 void spi_master_transfer_dma(spi_t *obj, const void *txdata, void *rxdata, int tx_length, int rx_length, void* cb, DMAUsage hint)
<> 144:ef7eb2e8f9f7 1019 {
<> 144:ef7eb2e8f9f7 1020 /* Init DMA here to include it in the power figure */
<> 144:ef7eb2e8f9f7 1021 dma_init();
<> 144:ef7eb2e8f9f7 1022 /* Clear TX and RX registers */
<> 144:ef7eb2e8f9f7 1023 obj->spi.spi->CMD = USART_CMD_CLEARTX;
<> 144:ef7eb2e8f9f7 1024 obj->spi.spi->CMD = USART_CMD_CLEARRX;
<> 144:ef7eb2e8f9f7 1025 /* If the DMA channels are already allocated, we can assume they have been setup already */
<> 144:ef7eb2e8f9f7 1026 if (hint != DMA_USAGE_NEVER && obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_ALLOCATED) {
<> 144:ef7eb2e8f9f7 1027 /* setup has already been done, so just activate the transfer */
<> 144:ef7eb2e8f9f7 1028 spi_activate_dma(obj, rxdata, txdata, tx_length, rx_length);
<> 144:ef7eb2e8f9f7 1029 } else if (hint == DMA_USAGE_NEVER) {
<> 144:ef7eb2e8f9f7 1030 /* use IRQ */
<> 144:ef7eb2e8f9f7 1031 obj->spi.spi->IFC = 0xFFFFFFFF;
<> 144:ef7eb2e8f9f7 1032 spi_master_write_asynch(obj);
<> 144:ef7eb2e8f9f7 1033 spi_enable_interrupt(obj, (uint32_t)cb, true);
<> 144:ef7eb2e8f9f7 1034 } else {
<> 144:ef7eb2e8f9f7 1035 /* try to acquire channels */
<> 144:ef7eb2e8f9f7 1036 dma_init();
<> 144:ef7eb2e8f9f7 1037 spi_enable_dma(obj, hint);
<> 144:ef7eb2e8f9f7 1038
<> 144:ef7eb2e8f9f7 1039 /* decide between DMA and IRQ */
<> 144:ef7eb2e8f9f7 1040 if (obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_ALLOCATED || obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_TEMPORARY_ALLOCATED) {
<> 144:ef7eb2e8f9f7 1041 /* disable the interrupts that may have been left open previously */
<> 144:ef7eb2e8f9f7 1042 spi_enable_interrupt(obj, (uint32_t)cb, false);
<> 144:ef7eb2e8f9f7 1043
<> 144:ef7eb2e8f9f7 1044 /* DMA channels are allocated, so do their setup */
<> 144:ef7eb2e8f9f7 1045 spi_master_dma_channel_setup(obj, cb);
<> 144:ef7eb2e8f9f7 1046 /* and activate the transfer */
<> 144:ef7eb2e8f9f7 1047 spi_activate_dma(obj, rxdata, txdata, tx_length, rx_length);
<> 144:ef7eb2e8f9f7 1048 } else {
<> 144:ef7eb2e8f9f7 1049 /* DMA is unavailable, so fall back to IRQ */
<> 144:ef7eb2e8f9f7 1050 obj->spi.spi->IFC = 0xFFFFFFFF;
<> 144:ef7eb2e8f9f7 1051 spi_master_write_asynch(obj);
<> 144:ef7eb2e8f9f7 1052 spi_enable_interrupt(obj, (uint32_t)cb, true);
<> 144:ef7eb2e8f9f7 1053 }
<> 144:ef7eb2e8f9f7 1054 }
<> 144:ef7eb2e8f9f7 1055 }
<> 144:ef7eb2e8f9f7 1056
<> 144:ef7eb2e8f9f7 1057 /** Begin the SPI transfer. Buffer pointers and lengths are specified in tx_buff and rx_buff
<> 144:ef7eb2e8f9f7 1058 *
<> 144:ef7eb2e8f9f7 1059 * @param[in] obj The SPI object which holds the transfer information
<> 144:ef7eb2e8f9f7 1060 * @param[in] tx The buffer to send
<> 144:ef7eb2e8f9f7 1061 * @param[in] tx_length The number of words to transmit
<> 144:ef7eb2e8f9f7 1062 * @param[in] rx The buffer to receive
<> 144:ef7eb2e8f9f7 1063 * @param[in] rx_length The number of words to receive
<> 144:ef7eb2e8f9f7 1064 * @param[in] bit_width The bit width of buffer words
<> 144:ef7eb2e8f9f7 1065 * @param[in] event The logical OR of events to be registered
<> 144:ef7eb2e8f9f7 1066 * @param[in] handler SPI interrupt handler
<> 144:ef7eb2e8f9f7 1067 * @param[in] hint A suggestion for how to use DMA with this transfer
<> 144:ef7eb2e8f9f7 1068 */
<> 144:ef7eb2e8f9f7 1069 void spi_master_transfer(spi_t *obj, const void *tx, size_t tx_length, void *rx, size_t rx_length, uint8_t bit_width, uint32_t handler, uint32_t event, DMAUsage hint)
<> 144:ef7eb2e8f9f7 1070 {
<> 144:ef7eb2e8f9f7 1071 if( spi_active(obj) ) return;
<> 144:ef7eb2e8f9f7 1072
<> 144:ef7eb2e8f9f7 1073 /* update fill word if on 9-bit frame size */
<> 144:ef7eb2e8f9f7 1074 if(obj->spi.bits == 9) fill_word = SPI_FILL_WORD & 0x1FF;
<> 144:ef7eb2e8f9f7 1075 else fill_word = SPI_FILL_WORD;
<> 144:ef7eb2e8f9f7 1076
<> 144:ef7eb2e8f9f7 1077 /* check corner case */
<> 144:ef7eb2e8f9f7 1078 if(tx_length == 0) {
<> 144:ef7eb2e8f9f7 1079 tx_length = rx_length;
<> 144:ef7eb2e8f9f7 1080 tx = (void*) 0;
<> 144:ef7eb2e8f9f7 1081 }
<> 144:ef7eb2e8f9f7 1082
<> 144:ef7eb2e8f9f7 1083 /* First, set the buffer */
<> 144:ef7eb2e8f9f7 1084 spi_buffer_set(obj, tx, tx_length, rx, rx_length, bit_width);
<> 144:ef7eb2e8f9f7 1085
<> 144:ef7eb2e8f9f7 1086 /* Then, enable the events */
<> 144:ef7eb2e8f9f7 1087 spi_enable_event(obj, SPI_EVENT_ALL, false);
<> 144:ef7eb2e8f9f7 1088 spi_enable_event(obj, event, true);
<> 144:ef7eb2e8f9f7 1089
<> 144:ef7eb2e8f9f7 1090 // Block sleep below the least active sleep mode for the duration of the transfer
<> 144:ef7eb2e8f9f7 1091 blockSleepMode(SPI_LEAST_ACTIVE_SLEEPMODE);
<> 144:ef7eb2e8f9f7 1092
<> 144:ef7eb2e8f9f7 1093 /* And kick off the transfer */
<> 144:ef7eb2e8f9f7 1094 spi_master_transfer_dma(obj, tx, rx, tx_length, rx_length, (void*)handler, hint);
<> 144:ef7eb2e8f9f7 1095 }
<> 144:ef7eb2e8f9f7 1096
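Illustrative usage of the asynchronous API above, as a minimal sketch rather than code taken from this file: the names spi0, tx_buf, rx_buf, my_spi_irq and start_transfer are hypothetical, spi_init()/spi_format()/spi_frequency() are assumed to have already been run on spi0, and the handler address is passed as a uint32_t as the signature above requires.

#include <stdbool.h>
#include <stdint.h>
#include "spi_api.h"
#include "dma_api.h"

static spi_t spi0;                                 /* configured elsewhere via spi_init/spi_format/spi_frequency */
static uint8_t tx_buf[4] = { 0x9F, 0x00, 0x00, 0x00 };
static uint8_t rx_buf[4];
static volatile bool spi_done = false;

/* Passed as 'handler': installed as the interrupt vector, so it runs in IRQ context */
static void my_spi_irq(void)
{
    if (spi_irq_handler_asynch(&spi0) & SPI_EVENT_COMPLETE) {
        spi_done = true;
    }
}

static void start_transfer(void)
{
    spi_done = false;
    /* 8-bit frames, report completion and errors, take DMA channels if any are free */
    spi_master_transfer(&spi0, tx_buf, sizeof(tx_buf), rx_buf, sizeof(rx_buf),
                        8, (uint32_t)my_spi_irq,
                        SPI_EVENT_COMPLETE | SPI_EVENT_ERROR,
                        DMA_USAGE_OPPORTUNISTIC);
}

Since the handler runs in interrupt context, it should stay short and defer any real work to thread context, as sketched with the spi_done flag here.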
<> 144:ef7eb2e8f9f7 1097
<> 144:ef7eb2e8f9f7 1098 /********************************************************************
<> 144:ef7eb2e8f9f7 1099 * uint32_t spi_irq_handler_asynch(spi_t* obj)
<> 144:ef7eb2e8f9f7 1100 *
<> 144:ef7eb2e8f9f7 1101 * Handler that should be called by CPP-land when either a DMA or SPI IRQ fires for an SPI transaction.
<> 144:ef7eb2e8f9f7 1102 *
<> 144:ef7eb2e8f9f7 1103 * * obj: pointer to the specific SPI instance
<> 144:ef7eb2e8f9f7 1104 *
<> 144:ef7eb2e8f9f7 1105 * return: event mask; 0 while the transfer is still in progress, otherwise the detected events (SPI_EVENT_COMPLETE upon completion).
<> 144:ef7eb2e8f9f7 1106 *
<> 144:ef7eb2e8f9f7 1107 ********************************************************************/
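
As the block above describes, spi_irq_handler_asynch() only reports events; it does not call back into user code itself. The sketch below shows the kind of dispatcher a higher layer might install as the 'handler' argument of spi_master_transfer(); registered_events and user_callback are hypothetical stand-ins for that layer's bookkeeping, and a return value of 0 simply means the transfer is still in progress.

#include <stddef.h>
#include <stdint.h>
#include "spi_api.h"

static spi_t bus;                               /* hypothetical instance, set up elsewhere */
static uint32_t registered_events;              /* events requested in spi_master_transfer() */
static void (*user_callback)(uint32_t events);  /* hypothetical completion callback */

/* Installed via the 'handler' argument of spi_master_transfer(); runs in interrupt context */
static void spi_dispatch_irq(void)
{
    uint32_t event = spi_irq_handler_asynch(&bus);
    if (event == 0) {
        /* Transfer still in progress: the HAL has re-armed DMA or refilled the
         * FIFO as needed, so there is nothing to report yet */
        return;
    }
    /* Forward only the events the caller asked for */
    if ((event & registered_events) != 0 && user_callback != NULL) {
        user_callback(event & registered_events);
    }
}
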
<> 144:ef7eb2e8f9f7 1108 #ifdef LDMA_PRESENT
<> 144:ef7eb2e8f9f7 1109 uint32_t spi_irq_handler_asynch(spi_t* obj)
<> 144:ef7eb2e8f9f7 1110 {
<> 144:ef7eb2e8f9f7 1111 if (obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_ALLOCATED || obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_TEMPORARY_ALLOCATED) {
<> 144:ef7eb2e8f9f7 1112 /* DMA implementation */
<> 144:ef7eb2e8f9f7 1113 /* If there is still data in the TX buffer, setup a new transfer. */
<> 144:ef7eb2e8f9f7 1114 if (obj->tx_buff.pos < obj->tx_buff.length) {
<> 144:ef7eb2e8f9f7 1115 /* Find position and remaining length without modifying tx_buff. */
<> 144:ef7eb2e8f9f7 1116 void* tx_pointer = (char*)obj->tx_buff.buffer + obj->tx_buff.pos;
<> 144:ef7eb2e8f9f7 1117 uint32_t tx_length = obj->tx_buff.length - obj->tx_buff.pos;
<> 144:ef7eb2e8f9f7 1118
<> 144:ef7eb2e8f9f7 1119 /* Begin transfer. Rely on spi_activate_dma to split up the transfer further. */
<> 144:ef7eb2e8f9f7 1120 spi_activate_dma(obj, obj->rx_buff.buffer, tx_pointer, tx_length, obj->rx_buff.length);
<> 144:ef7eb2e8f9f7 1121
<> 144:ef7eb2e8f9f7 1122 return 0;
<> 144:ef7eb2e8f9f7 1123 }
<> 144:ef7eb2e8f9f7 1124 /* If there is an RX transfer ongoing, wait for it to finish */
<> 144:ef7eb2e8f9f7 1125 if (LDMAx_ChannelEnabled(obj->spi.dmaOptionsRX.dmaChannel)) {
<> 144:ef7eb2e8f9f7 1126 /* Check if we need to kick off TX transfer again to force more incoming data. */
<> 144:ef7eb2e8f9f7 1127 if (LDMA_TransferDone(obj->spi.dmaOptionsTX.dmaChannel) && (obj->tx_buff.pos < obj->rx_buff.length)) {
<> 144:ef7eb2e8f9f7 1128 void* tx_pointer = (char*)obj->tx_buff.buffer + obj->tx_buff.pos;
<> 144:ef7eb2e8f9f7 1129 uint32_t tx_length = obj->tx_buff.length - obj->tx_buff.pos;
<> 144:ef7eb2e8f9f7 1130 /* Begin transfer. Rely on spi_activate_dma to split up the transfer further. */
<> 144:ef7eb2e8f9f7 1131 spi_activate_dma(obj, obj->rx_buff.buffer, tx_pointer, tx_length, obj->rx_buff.length);
<> 144:ef7eb2e8f9f7 1132 } else return 0;
<> 144:ef7eb2e8f9f7 1133 }
<> 144:ef7eb2e8f9f7 1134 /* If there is still a TX transfer ongoing (tx_length > rx_length), wait for it to finish */
<> 144:ef7eb2e8f9f7 1135 if (!LDMA_TransferDone(obj->spi.dmaOptionsTX.dmaChannel)) {
<> 144:ef7eb2e8f9f7 1136 return 0;
<> 144:ef7eb2e8f9f7 1137 }
<> 144:ef7eb2e8f9f7 1138 /* Release the dma channels if they were opportunistically allocated */
<> 144:ef7eb2e8f9f7 1139 if (obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_TEMPORARY_ALLOCATED) {
<> 144:ef7eb2e8f9f7 1140 dma_channel_free(obj->spi.dmaOptionsTX.dmaChannel);
<> 144:ef7eb2e8f9f7 1141 dma_channel_free(obj->spi.dmaOptionsRX.dmaChannel);
<> 144:ef7eb2e8f9f7 1142 obj->spi.dmaOptionsTX.dmaUsageState = DMA_USAGE_OPPORTUNISTIC;
<> 144:ef7eb2e8f9f7 1143 }
<> 144:ef7eb2e8f9f7 1144
<> 144:ef7eb2e8f9f7 1145 /* Wait for transmit to complete before indicating completion to user code */
<> 144:ef7eb2e8f9f7 1146 while(!(obj->spi.spi->STATUS & USART_STATUS_TXC));
<> 144:ef7eb2e8f9f7 1147 unblockSleepMode(SPI_LEAST_ACTIVE_SLEEPMODE);
<> 144:ef7eb2e8f9f7 1148 /* return to CPP land to say we're finished */
<> 144:ef7eb2e8f9f7 1149 return SPI_EVENT_COMPLETE;
<> 144:ef7eb2e8f9f7 1150 } else {
<> 144:ef7eb2e8f9f7 1151 /* IRQ implementation */
<> 144:ef7eb2e8f9f7 1152 if (spi_master_rx_int_flag(obj)) {
<> 144:ef7eb2e8f9f7 1153 spi_master_read_asynch(obj);
<> 144:ef7eb2e8f9f7 1154 }
<> 144:ef7eb2e8f9f7 1155
<> 144:ef7eb2e8f9f7 1156 if (spi_master_tx_int_flag(obj)) {
<> 144:ef7eb2e8f9f7 1157 spi_master_write_asynch(obj);
<> 144:ef7eb2e8f9f7 1158 }
<> 144:ef7eb2e8f9f7 1159
<> 144:ef7eb2e8f9f7 1160 uint32_t event = spi_event_check(obj);
<> 144:ef7eb2e8f9f7 1161 if (event & SPI_EVENT_INTERNAL_TRANSFER_COMPLETE) {
<> 144:ef7eb2e8f9f7 1162 /* disable interrupts */
<> 144:ef7eb2e8f9f7 1163 spi_enable_interrupt(obj, (uint32_t)NULL, false);
<> 144:ef7eb2e8f9f7 1164
<> 144:ef7eb2e8f9f7 1165 unblockSleepMode(SPI_LEAST_ACTIVE_SLEEPMODE);
<> 144:ef7eb2e8f9f7 1166 /* Return the event back to userland */
<> 144:ef7eb2e8f9f7 1167 return event;
<> 144:ef7eb2e8f9f7 1168 }
<> 144:ef7eb2e8f9f7 1169
<> 144:ef7eb2e8f9f7 1170 return 0;
<> 144:ef7eb2e8f9f7 1171 }
<> 144:ef7eb2e8f9f7 1172 }
<> 144:ef7eb2e8f9f7 1173 #else
<> 144:ef7eb2e8f9f7 1174 uint32_t spi_irq_handler_asynch(spi_t* obj)
<> 144:ef7eb2e8f9f7 1175 {
<> 144:ef7eb2e8f9f7 1176
<> 144:ef7eb2e8f9f7 1177 /* Determine whether the current scenario is DMA or IRQ, and act accordingly */
<> 144:ef7eb2e8f9f7 1178
<> 144:ef7eb2e8f9f7 1179 if (obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_ALLOCATED || obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_TEMPORARY_ALLOCATED) {
<> 144:ef7eb2e8f9f7 1180 /* DMA implementation */
<> 144:ef7eb2e8f9f7 1181
<> 144:ef7eb2e8f9f7 1182 /* If there is still data in the TX buffer, setup a new transfer. */
<> 144:ef7eb2e8f9f7 1183 if (obj->tx_buff.pos < obj->tx_buff.length) {
<> 144:ef7eb2e8f9f7 1184 /* If there is still a TX transfer ongoing, let it finish
<> 144:ef7eb2e8f9f7 1185 * before (if necessary) kicking off a new transfer */
<> 144:ef7eb2e8f9f7 1186 if (DMA_ChannelEnabled(obj->spi.dmaOptionsTX.dmaChannel)) {
<> 144:ef7eb2e8f9f7 1187 return 0;
<> 144:ef7eb2e8f9f7 1188 }
<> 144:ef7eb2e8f9f7 1189 /* Find position and remaining length without modifying tx_buff. */
<> 144:ef7eb2e8f9f7 1190 void * tx_pointer;
<> 144:ef7eb2e8f9f7 1191 if (obj->tx_buff.width == 32) {
<> 144:ef7eb2e8f9f7 1192 tx_pointer = ((uint32_t *)obj->tx_buff.buffer) + obj->tx_buff.pos;
<> 144:ef7eb2e8f9f7 1193 } else if (obj->tx_buff.width == 16) {
<> 144:ef7eb2e8f9f7 1194 tx_pointer = ((uint16_t *)obj->tx_buff.buffer) + obj->tx_buff.pos;
<> 144:ef7eb2e8f9f7 1195 } else {
<> 144:ef7eb2e8f9f7 1196 tx_pointer = ((uint8_t *)obj->tx_buff.buffer) + obj->tx_buff.pos;
<> 144:ef7eb2e8f9f7 1197 }
<> 144:ef7eb2e8f9f7 1198 uint32_t tx_length = obj->tx_buff.length - obj->tx_buff.pos;
<> 144:ef7eb2e8f9f7 1199
<> 144:ef7eb2e8f9f7 1200 /* Refresh RX transfer too if it exists */
<> 144:ef7eb2e8f9f7 1201 void * rx_pointer = NULL;
<> 144:ef7eb2e8f9f7 1202 if (obj->rx_buff.pos < obj->rx_buff.length) {
<> 144:ef7eb2e8f9f7 1203 if (obj->rx_buff.width == 32) {
<> 144:ef7eb2e8f9f7 1204 rx_pointer = ((uint32_t *)obj->rx_buff.buffer) + obj->rx_buff.pos;
<> 144:ef7eb2e8f9f7 1205 } else if (obj->rx_buff.width == 16) {
<> 144:ef7eb2e8f9f7 1206 rx_pointer = ((uint16_t *)obj->rx_buff.buffer) + obj->rx_buff.pos;
<> 144:ef7eb2e8f9f7 1207 } else {
<> 144:ef7eb2e8f9f7 1208 rx_pointer = ((uint8_t *)obj->rx_buff.buffer) + obj->rx_buff.pos;
<> 144:ef7eb2e8f9f7 1209 }
<> 144:ef7eb2e8f9f7 1210 }
<> 144:ef7eb2e8f9f7 1211 uint32_t rx_length = obj->rx_buff.length - obj->rx_buff.pos;
<> 144:ef7eb2e8f9f7 1212
<> 144:ef7eb2e8f9f7 1213 /* Wait for the previous transfer to complete. */
<> 144:ef7eb2e8f9f7 1214 while(!(obj->spi.spi->STATUS & USART_STATUS_TXC));
<> 144:ef7eb2e8f9f7 1215
<> 144:ef7eb2e8f9f7 1216 /* Begin transfer. Rely on spi_activate_dma to split up the transfer further. */
<> 144:ef7eb2e8f9f7 1217 spi_activate_dma(obj, rx_pointer, tx_pointer, tx_length, rx_length);
<> 144:ef7eb2e8f9f7 1218
<> 144:ef7eb2e8f9f7 1219 return 0;
<> 144:ef7eb2e8f9f7 1220 }
<> 144:ef7eb2e8f9f7 1221
<> 144:ef7eb2e8f9f7 1222 /* If an RX transfer is ongoing, continue processing RX data */
<> 144:ef7eb2e8f9f7 1223 if (DMA_ChannelEnabled(obj->spi.dmaOptionsRX.dmaChannel)) {
<> 144:ef7eb2e8f9f7 1224 /* Check if we need to kick off TX transfer again to force more incoming data. */
<> 144:ef7eb2e8f9f7 1225 if (!DMA_ChannelEnabled(obj->spi.dmaOptionsTX.dmaChannel) && (obj->rx_buff.pos < obj->rx_buff.length)) {
<> 144:ef7eb2e8f9f7 1226 //Record how many fill frames are still needed to clock in the remaining RX data, and mark TX as fully consumed
<> 144:ef7eb2e8f9f7 1227 int length_diff = obj->rx_buff.length - obj->rx_buff.pos;
<> 144:ef7eb2e8f9f7 1228 obj->tx_buff.pos = obj->rx_buff.length;
<> 144:ef7eb2e8f9f7 1229
<> 144:ef7eb2e8f9f7 1230 //Kick off a new DMA transfer
<> 144:ef7eb2e8f9f7 1231 DMA_CfgDescr_TypeDef txDescrCfg;
<> 144:ef7eb2e8f9f7 1232
<> 144:ef7eb2e8f9f7 1233 fill_word = SPI_FILL_WORD;
<> 144:ef7eb2e8f9f7 1234 /* Setting up channel descriptor */
<> 144:ef7eb2e8f9f7 1235 txDescrCfg.dstInc = dmaDataIncNone;
<> 144:ef7eb2e8f9f7 1236 txDescrCfg.srcInc = dmaDataIncNone; //Do not increment source pointer when there is no transmit buffer
<> 144:ef7eb2e8f9f7 1237 txDescrCfg.size = (obj->spi.bits <= 8 ? dmaDataSize1 : dmaDataSize2); //Frames wider than 8 bits need 2-byte DMA writes (TXDOUBLE, or TXDATAX for 9-bit frames)
<> 144:ef7eb2e8f9f7 1238 txDescrCfg.arbRate = dmaArbitrate1;
<> 144:ef7eb2e8f9f7 1239 txDescrCfg.hprot = 0;
<> 144:ef7eb2e8f9f7 1240 DMA_CfgDescr(obj->spi.dmaOptionsTX.dmaChannel, true, &txDescrCfg);
<> 144:ef7eb2e8f9f7 1241
<> 144:ef7eb2e8f9f7 1242 void * tx_reg;
<> 144:ef7eb2e8f9f7 1243 if (obj->spi.bits > 9) {
<> 144:ef7eb2e8f9f7 1244 tx_reg = (void *)&obj->spi.spi->TXDOUBLE;
<> 144:ef7eb2e8f9f7 1245 } else if (obj->spi.bits == 9) {
<> 144:ef7eb2e8f9f7 1246 tx_reg = (void *)&obj->spi.spi->TXDATAX;
<> 144:ef7eb2e8f9f7 1247 } else {
<> 144:ef7eb2e8f9f7 1248 tx_reg = (void *)&obj->spi.spi->TXDATA;
<> 144:ef7eb2e8f9f7 1249 }
<> 144:ef7eb2e8f9f7 1250
<> 144:ef7eb2e8f9f7 1251 /* Activate TX channel */
<> 144:ef7eb2e8f9f7 1252 DMA_ActivateBasic(obj->spi.dmaOptionsTX.dmaChannel,
<> 144:ef7eb2e8f9f7 1253 true,
<> 144:ef7eb2e8f9f7 1254 false,
<> 144:ef7eb2e8f9f7 1255 tx_reg, //When frame size > 9, point to TXDOUBLE
<> 144:ef7eb2e8f9f7 1256 &fill_word, // When there is nothing to transmit, point to static fill word
<> 144:ef7eb2e8f9f7 1257 length_diff - 1);
<> 144:ef7eb2e8f9f7 1258 } else {
<> 144:ef7eb2e8f9f7 1259 /* Nothing to do */
<> 144:ef7eb2e8f9f7 1260 return 0;
<> 144:ef7eb2e8f9f7 1261 }
<> 144:ef7eb2e8f9f7 1262 }
<> 144:ef7eb2e8f9f7 1263
<> 144:ef7eb2e8f9f7 1264 /* Release the dma channels if they were opportunistically allocated */
<> 144:ef7eb2e8f9f7 1265 if (obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_TEMPORARY_ALLOCATED) {
<> 144:ef7eb2e8f9f7 1266 dma_channel_free(obj->spi.dmaOptionsTX.dmaChannel);
<> 144:ef7eb2e8f9f7 1267 dma_channel_free(obj->spi.dmaOptionsRX.dmaChannel);
<> 144:ef7eb2e8f9f7 1268 obj->spi.dmaOptionsTX.dmaUsageState = DMA_USAGE_OPPORTUNISTIC;
<> 144:ef7eb2e8f9f7 1269 }
<> 144:ef7eb2e8f9f7 1270
<> 144:ef7eb2e8f9f7 1271 /* Wait for transmit to complete before indicating completion to user code */
<> 144:ef7eb2e8f9f7 1272 while(!(obj->spi.spi->STATUS & USART_STATUS_TXC));
<> 144:ef7eb2e8f9f7 1273 unblockSleepMode(SPI_LEAST_ACTIVE_SLEEPMODE);
<> 144:ef7eb2e8f9f7 1274
<> 144:ef7eb2e8f9f7 1275 /* return to CPP land to say we're finished */
<> 144:ef7eb2e8f9f7 1276 return SPI_EVENT_COMPLETE;
<> 144:ef7eb2e8f9f7 1277 } else {
<> 144:ef7eb2e8f9f7 1278 /* IRQ implementation */
<> 144:ef7eb2e8f9f7 1279 if (spi_master_rx_int_flag(obj)) {
<> 144:ef7eb2e8f9f7 1280 spi_master_read_asynch(obj);
<> 144:ef7eb2e8f9f7 1281 }
<> 144:ef7eb2e8f9f7 1282
<> 144:ef7eb2e8f9f7 1283 if (spi_master_tx_int_flag(obj)) {
<> 144:ef7eb2e8f9f7 1284 spi_master_write_asynch(obj);
<> 144:ef7eb2e8f9f7 1285 }
<> 144:ef7eb2e8f9f7 1286
<> 144:ef7eb2e8f9f7 1287 uint32_t event = spi_event_check(obj);
<> 144:ef7eb2e8f9f7 1288 if (event & SPI_EVENT_INTERNAL_TRANSFER_COMPLETE) {
<> 144:ef7eb2e8f9f7 1289 /* disable interrupts */
<> 144:ef7eb2e8f9f7 1290 spi_enable_interrupt(obj, (uint32_t)NULL, false);
<> 144:ef7eb2e8f9f7 1291
<> 144:ef7eb2e8f9f7 1292 /* Wait for transmit to complete before indicating completion to user code */
<> 144:ef7eb2e8f9f7 1293 while(!(obj->spi.spi->STATUS & USART_STATUS_TXC));
<> 144:ef7eb2e8f9f7 1294 unblockSleepMode(SPI_LEAST_ACTIVE_SLEEPMODE);
<> 144:ef7eb2e8f9f7 1295
<> 144:ef7eb2e8f9f7 1296 /* Return the event back to userland */
<> 144:ef7eb2e8f9f7 1297 return event;
<> 144:ef7eb2e8f9f7 1298 }
<> 144:ef7eb2e8f9f7 1299
<> 144:ef7eb2e8f9f7 1300 return 0;
<> 144:ef7eb2e8f9f7 1301 }
<> 144:ef7eb2e8f9f7 1302 }
<> 144:ef7eb2e8f9f7 1303 #endif // LDMA_PRESENT
<> 144:ef7eb2e8f9f7 1304 /** Abort an SPI transfer
<> 144:ef7eb2e8f9f7 1305 *
<> 144:ef7eb2e8f9f7 1306 * @param obj The SPI peripheral to stop
<> 144:ef7eb2e8f9f7 1307 */
<> 144:ef7eb2e8f9f7 1308 void spi_abort_asynch(spi_t *obj)
<> 144:ef7eb2e8f9f7 1309 {
<> 144:ef7eb2e8f9f7 1310 // If we're not currently transferring, then there's nothing to do here
<> 144:ef7eb2e8f9f7 1311 if(spi_active(obj) == 0) return;
<> 144:ef7eb2e8f9f7 1312
<> 144:ef7eb2e8f9f7 1313 // Determine whether we're running DMA or interrupt
<> 144:ef7eb2e8f9f7 1314 if (obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_ALLOCATED || obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_TEMPORARY_ALLOCATED) {
<> 144:ef7eb2e8f9f7 1315 // Cancel the DMA transfers
<> 144:ef7eb2e8f9f7 1316 #ifdef LDMA_PRESENT
<> 144:ef7eb2e8f9f7 1317 LDMA_StopTransfer(obj->spi.dmaOptionsTX.dmaChannel);
<> 144:ef7eb2e8f9f7 1318 LDMA_StopTransfer(obj->spi.dmaOptionsRX.dmaChannel);
<> 144:ef7eb2e8f9f7 1319 #else
<> 144:ef7eb2e8f9f7 1320 DMA_ChannelEnable(obj->spi.dmaOptionsTX.dmaChannel, false);
<> 144:ef7eb2e8f9f7 1321 DMA_ChannelEnable(obj->spi.dmaOptionsRX.dmaChannel, false);
<> 144:ef7eb2e8f9f7 1322 #endif
<> 144:ef7eb2e8f9f7 1323 /* Release the dma channels if they were opportunistically allocated */
<> 144:ef7eb2e8f9f7 1324 if (obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_TEMPORARY_ALLOCATED) {
<> 144:ef7eb2e8f9f7 1325 dma_channel_free(obj->spi.dmaOptionsTX.dmaChannel);
<> 144:ef7eb2e8f9f7 1326 dma_channel_free(obj->spi.dmaOptionsRX.dmaChannel);
<> 144:ef7eb2e8f9f7 1327 obj->spi.dmaOptionsTX.dmaUsageState = DMA_USAGE_OPPORTUNISTIC;
<> 144:ef7eb2e8f9f7 1328 }
<> 144:ef7eb2e8f9f7 1329
<> 144:ef7eb2e8f9f7 1330 } else {
<> 144:ef7eb2e8f9f7 1331 // Interrupt implementation: switch off interrupts
<> 144:ef7eb2e8f9f7 1332 spi_enable_interrupt(obj, (uint32_t)NULL, false);
<> 144:ef7eb2e8f9f7 1333 }
<> 144:ef7eb2e8f9f7 1334
<> 144:ef7eb2e8f9f7 1335 // Release sleep mode block
<> 144:ef7eb2e8f9f7 1336 unblockSleepMode(SPI_LEAST_ACTIVE_SLEEPMODE);
<> 144:ef7eb2e8f9f7 1337 }
<> 144:ef7eb2e8f9f7 1338
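One way application code might combine spi_active() and spi_abort_asynch() to recover from a transfer that never finishes, for example when the slave stops clocking data. This is a hedged sketch: wait_for_transfer and the 10 microsecond polling step are hypothetical, while spi_active(), spi_abort_asynch() and wait_us() are existing mbed HAL calls.

#include <stdbool.h>
#include <stdint.h>
#include "spi_api.h"
#include "wait_api.h"

/* Poll for completion with a crude timeout; abort and clean up if it expires.
 * Returns true if the transfer finished on its own, false if it was aborted. */
static bool wait_for_transfer(spi_t *obj, uint32_t timeout_us)
{
    while (spi_active(obj)) {
        if (timeout_us == 0u) {
            /* Still busy: stop the DMA channels or interrupts and release the sleep block */
            spi_abort_asynch(obj);
            return false;
        }
        wait_us(10);
        timeout_us = (timeout_us > 10u) ? (timeout_us - 10u) : 0u;
    }
    return true;
}
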
<> 144:ef7eb2e8f9f7 1339 #endif