mbed library sources. Supersedes mbed-src.

Dependents:   Nucleo_Hello_Encoder BLE_iBeaconScan AM1805_DEMO DISCO-F429ZI_ExportTemplate1 ... more

Committer: Kojto
Date: Thu Aug 03 13:13:39 2017 +0100
Revision: 170:19eb464bc2be
Parent: 167:e84263d55307
Child: 179:b0033dcd6934
This updates the lib to the mbed lib v 148

Who changed what in which revision?

User  Revision  Line number  New contents of line
<> 144:ef7eb2e8f9f7 1 /***************************************************************************//**
<> 144:ef7eb2e8f9f7 2 * @file spi_api.c
<> 144:ef7eb2e8f9f7 3 *******************************************************************************
<> 144:ef7eb2e8f9f7 4 * @section License
<> 144:ef7eb2e8f9f7 5 * <b>(C) Copyright 2015 Silicon Labs, http://www.silabs.com</b>
<> 144:ef7eb2e8f9f7 6 *******************************************************************************
<> 144:ef7eb2e8f9f7 7 *
<> 144:ef7eb2e8f9f7 8 * SPDX-License-Identifier: Apache-2.0
<> 144:ef7eb2e8f9f7 9 *
<> 144:ef7eb2e8f9f7 10 * Licensed under the Apache License, Version 2.0 (the "License"); you may
<> 144:ef7eb2e8f9f7 11 * not use this file except in compliance with the License.
<> 144:ef7eb2e8f9f7 12 * You may obtain a copy of the License at
<> 144:ef7eb2e8f9f7 13 *
<> 144:ef7eb2e8f9f7 14 * http://www.apache.org/licenses/LICENSE-2.0
<> 144:ef7eb2e8f9f7 15 *
<> 144:ef7eb2e8f9f7 16 * Unless required by applicable law or agreed to in writing, software
<> 144:ef7eb2e8f9f7 17 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
<> 144:ef7eb2e8f9f7 18 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
<> 144:ef7eb2e8f9f7 19 * See the License for the specific language governing permissions and
<> 144:ef7eb2e8f9f7 20 * limitations under the License.
<> 144:ef7eb2e8f9f7 21 *
<> 144:ef7eb2e8f9f7 22 ******************************************************************************/
<> 144:ef7eb2e8f9f7 23
<> 144:ef7eb2e8f9f7 24 #include "device.h"
<> 144:ef7eb2e8f9f7 25 #include "clocking.h"
<> 144:ef7eb2e8f9f7 26 #if DEVICE_SPI
<> 144:ef7eb2e8f9f7 27
<> 144:ef7eb2e8f9f7 28 #include "mbed_assert.h"
<> 144:ef7eb2e8f9f7 29 #include "PeripheralPins.h"
<> 144:ef7eb2e8f9f7 30 #include "pinmap.h"
<> 144:ef7eb2e8f9f7 31 #include "pinmap_function.h"
<> 150:02e0a0aed4ec 32 #include "mbed_error.h"
<> 144:ef7eb2e8f9f7 33
<> 144:ef7eb2e8f9f7 34 #include "dma_api.h"
<> 144:ef7eb2e8f9f7 35 #include "dma_api_HAL.h"
<> 144:ef7eb2e8f9f7 36 #include "serial_api_HAL.h"
<> 144:ef7eb2e8f9f7 37 #include "spi_api.h"
<> 144:ef7eb2e8f9f7 38 #include "em_usart.h"
<> 144:ef7eb2e8f9f7 39 #include "em_cmu.h"
<> 144:ef7eb2e8f9f7 40 #include "em_dma.h"
<> 144:ef7eb2e8f9f7 41 #include "sleep_api.h"
<> 144:ef7eb2e8f9f7 42 #include "sleepmodes.h"
<> 144:ef7eb2e8f9f7 43
<> 144:ef7eb2e8f9f7 44 static uint16_t fill_word = SPI_FILL_WORD;
<> 144:ef7eb2e8f9f7 45
<> 144:ef7eb2e8f9f7 46 #define SPI_LEAST_ACTIVE_SLEEPMODE EM1
<> 144:ef7eb2e8f9f7 47
<> 144:ef7eb2e8f9f7 48 static inline CMU_Clock_TypeDef spi_get_clock_tree(spi_t *obj)
<> 144:ef7eb2e8f9f7 49 {
<> 144:ef7eb2e8f9f7 50 switch ((int)obj->spi.spi) {
<> 144:ef7eb2e8f9f7 51 #ifdef USART0
<> 144:ef7eb2e8f9f7 52 case SPI_0:
<> 144:ef7eb2e8f9f7 53 return cmuClock_USART0;
<> 144:ef7eb2e8f9f7 54 #endif
<> 144:ef7eb2e8f9f7 55 #ifdef USART1
<> 144:ef7eb2e8f9f7 56 case SPI_1:
<> 144:ef7eb2e8f9f7 57 return cmuClock_USART1;
<> 144:ef7eb2e8f9f7 58 #endif
<> 144:ef7eb2e8f9f7 59 #ifdef USART2
<> 144:ef7eb2e8f9f7 60 case SPI_2:
<> 144:ef7eb2e8f9f7 61 return cmuClock_USART2;
<> 144:ef7eb2e8f9f7 62 #endif
<> 144:ef7eb2e8f9f7 63 default:
<> 144:ef7eb2e8f9f7 64 error("Spi module not available.. Out of bound access.");
<> 144:ef7eb2e8f9f7 65 return cmuClock_HFPER;
<> 144:ef7eb2e8f9f7 66 }
<> 144:ef7eb2e8f9f7 67 }
<> 144:ef7eb2e8f9f7 68
<> 144:ef7eb2e8f9f7 69 static inline uint8_t spi_get_index(spi_t *obj)
<> 144:ef7eb2e8f9f7 70 {
<> 144:ef7eb2e8f9f7 71 uint8_t index = 0;
<> 144:ef7eb2e8f9f7 72 switch ((int)obj->spi.spi) {
<> 144:ef7eb2e8f9f7 73 #ifdef USART0
<> 144:ef7eb2e8f9f7 74 case SPI_0:
<> 144:ef7eb2e8f9f7 75 index = 0;
<> 144:ef7eb2e8f9f7 76 break;
<> 144:ef7eb2e8f9f7 77 #endif
<> 144:ef7eb2e8f9f7 78 #ifdef USART1
<> 144:ef7eb2e8f9f7 79 case SPI_1:
<> 144:ef7eb2e8f9f7 80 index = 1;
<> 144:ef7eb2e8f9f7 81 break;
<> 144:ef7eb2e8f9f7 82 #endif
<> 144:ef7eb2e8f9f7 83 #ifdef USART2
<> 144:ef7eb2e8f9f7 84 case SPI_2:
<> 144:ef7eb2e8f9f7 85 index = 2;
<> 144:ef7eb2e8f9f7 86 break;
<> 144:ef7eb2e8f9f7 87 #endif
<> 144:ef7eb2e8f9f7 88 default:
<> 144:ef7eb2e8f9f7 89 error("Spi module not available.. Out of bound access.");
<> 144:ef7eb2e8f9f7 90 break;
<> 144:ef7eb2e8f9f7 91 }
<> 144:ef7eb2e8f9f7 92 return index;
<> 144:ef7eb2e8f9f7 93 }
<> 144:ef7eb2e8f9f7 94
<> 144:ef7eb2e8f9f7 95 uint8_t spi_get_module(spi_t *obj)
<> 144:ef7eb2e8f9f7 96 {
<> 144:ef7eb2e8f9f7 97 return spi_get_index(obj);
<> 144:ef7eb2e8f9f7 98 }
<> 144:ef7eb2e8f9f7 99
<> 144:ef7eb2e8f9f7 100 static void usart_init(spi_t *obj, uint32_t baudrate, USART_Databits_TypeDef databits, bool master, USART_ClockMode_TypeDef clockMode )
<> 144:ef7eb2e8f9f7 101 {
<> 144:ef7eb2e8f9f7 102 USART_InitSync_TypeDef init = USART_INITSYNC_DEFAULT;
<> 144:ef7eb2e8f9f7 103 init.enable = usartDisable;
<> 144:ef7eb2e8f9f7 104 init.baudrate = baudrate;
<> 144:ef7eb2e8f9f7 105 init.databits = databits;
<> 144:ef7eb2e8f9f7 106 init.master = master;
<> 144:ef7eb2e8f9f7 107 init.msbf = 1;
<> 144:ef7eb2e8f9f7 108 init.clockMode = clockMode;
<> 144:ef7eb2e8f9f7 109
<> 144:ef7eb2e8f9f7 110 /* Determine the reference clock, because the correct clock may not be set up at init time (e.g. before main()) */
<> 144:ef7eb2e8f9f7 111 init.refFreq = REFERENCE_FREQUENCY;
<> 144:ef7eb2e8f9f7 112
<> 144:ef7eb2e8f9f7 113 USART_InitSync(obj->spi.spi, &init);
<> 144:ef7eb2e8f9f7 114 }
<> 144:ef7eb2e8f9f7 115
<> 144:ef7eb2e8f9f7 116 void spi_preinit(spi_t *obj, PinName mosi, PinName miso, PinName clk, PinName cs)
<> 144:ef7eb2e8f9f7 117 {
<> 144:ef7eb2e8f9f7 118 SPIName spi_mosi = (SPIName) pinmap_peripheral(mosi, PinMap_SPI_MOSI);
<> 144:ef7eb2e8f9f7 119 SPIName spi_miso = (SPIName) pinmap_peripheral(miso, PinMap_SPI_MISO);
<> 144:ef7eb2e8f9f7 120 SPIName spi_clk = (SPIName) pinmap_peripheral(clk, PinMap_SPI_CLK);
<> 144:ef7eb2e8f9f7 121 SPIName spi_cs = (SPIName) pinmap_peripheral(cs, PinMap_SPI_CS);
<> 144:ef7eb2e8f9f7 122 SPIName spi_data = (SPIName) pinmap_merge(spi_mosi, spi_miso);
<> 144:ef7eb2e8f9f7 123 SPIName spi_ctrl = (SPIName) pinmap_merge(spi_clk, spi_cs);
<> 144:ef7eb2e8f9f7 124
<> 144:ef7eb2e8f9f7 125 obj->spi.spi = (USART_TypeDef *) pinmap_merge(spi_data, spi_ctrl);
<> 144:ef7eb2e8f9f7 126 MBED_ASSERT((int) obj->spi.spi != NC);
<> 144:ef7eb2e8f9f7 127
<> 144:ef7eb2e8f9f7 128 if (cs != NC) { /* Slave mode */
<> 144:ef7eb2e8f9f7 129 obj->spi.master = false;
<> 144:ef7eb2e8f9f7 130 } else {
<> 144:ef7eb2e8f9f7 131 obj->spi.master = true;
<> 144:ef7eb2e8f9f7 132 }
<> 144:ef7eb2e8f9f7 133
<> 144:ef7eb2e8f9f7 134 #if defined(_SILICON_LABS_32B_PLATFORM_1)
<> 144:ef7eb2e8f9f7 135 // On P1, we need to ensure all pins are on the same location
<> 144:ef7eb2e8f9f7 136 uint32_t loc_mosi = pin_location(mosi, PinMap_SPI_MOSI);
<> 144:ef7eb2e8f9f7 137 uint32_t loc_miso = pin_location(miso, PinMap_SPI_MISO);
<> 144:ef7eb2e8f9f7 138 uint32_t loc_clk = pin_location(clk, PinMap_SPI_CLK);
<> 144:ef7eb2e8f9f7 139 uint32_t loc_cs = pin_location(cs, PinMap_SPI_CS);
<> 144:ef7eb2e8f9f7 140 uint32_t loc_data = pinmap_merge(loc_mosi, loc_miso);
<> 144:ef7eb2e8f9f7 141 uint32_t loc_ctrl = pinmap_merge(loc_clk, loc_cs);
<> 144:ef7eb2e8f9f7 142 obj->spi.location = pinmap_merge(loc_data, loc_ctrl);
<> 144:ef7eb2e8f9f7 143 MBED_ASSERT(obj->spi.location != NC);
<> 144:ef7eb2e8f9f7 144 #endif
<> 144:ef7eb2e8f9f7 145
<> 144:ef7eb2e8f9f7 146 obj->spi.dmaOptionsTX.dmaUsageState = DMA_USAGE_OPPORTUNISTIC;
<> 144:ef7eb2e8f9f7 147 }
<> 144:ef7eb2e8f9f7 148
<> 144:ef7eb2e8f9f7 149 void spi_enable_pins(spi_t *obj, uint8_t enable, PinName mosi, PinName miso, PinName clk, PinName cs)
<> 144:ef7eb2e8f9f7 150 {
<> 144:ef7eb2e8f9f7 151 if (enable) {
<> 144:ef7eb2e8f9f7 152 if (obj->spi.master) { /* Master mode */
<> 144:ef7eb2e8f9f7 153 /* Either mosi or miso can be NC */
<> 144:ef7eb2e8f9f7 154 if (mosi != NC) {
<> 144:ef7eb2e8f9f7 155 pin_mode(mosi, PushPull);
<> 144:ef7eb2e8f9f7 156 }
<> 144:ef7eb2e8f9f7 157 if (miso != NC) {
<> 144:ef7eb2e8f9f7 158 pin_mode(miso, Input);
<> 144:ef7eb2e8f9f7 159 }
<> 144:ef7eb2e8f9f7 160 pin_mode(clk, PushPull);
<> 144:ef7eb2e8f9f7 161 /* Don't set cs pin, since we toggle it manually */
<> 144:ef7eb2e8f9f7 162 } else { /* Slave mode */
<> 144:ef7eb2e8f9f7 163 if (mosi != NC) {
<> 144:ef7eb2e8f9f7 164 pin_mode(mosi, Input);
<> 144:ef7eb2e8f9f7 165 }
<> 144:ef7eb2e8f9f7 166 if (miso != NC) {
<> 144:ef7eb2e8f9f7 167 pin_mode(miso, PushPull);
<> 144:ef7eb2e8f9f7 168 }
<> 144:ef7eb2e8f9f7 169 pin_mode(clk, Input);
<> 144:ef7eb2e8f9f7 170 pin_mode(cs, Input);
<> 144:ef7eb2e8f9f7 171 }
<> 144:ef7eb2e8f9f7 172 } else {
<> 144:ef7eb2e8f9f7 173 // TODO_LP return PinMode to the previous state
<> 144:ef7eb2e8f9f7 174 if (obj->spi.master) { /* Master mode */
<> 144:ef7eb2e8f9f7 175 /* Either mosi or miso can be NC */
<> 144:ef7eb2e8f9f7 176 if (mosi != NC) {
<> 144:ef7eb2e8f9f7 177 pin_mode(mosi, Disabled);
<> 144:ef7eb2e8f9f7 178 }
<> 144:ef7eb2e8f9f7 179 if (miso != NC) {
<> 144:ef7eb2e8f9f7 180 pin_mode(miso, Disabled);
<> 144:ef7eb2e8f9f7 181 }
<> 144:ef7eb2e8f9f7 182 pin_mode(clk, Disabled);
<> 144:ef7eb2e8f9f7 183 /* Don't set cs pin, since we toggle it manually */
<> 144:ef7eb2e8f9f7 184 } else { /* Slave mode */
<> 144:ef7eb2e8f9f7 185 if (mosi != NC) {
<> 144:ef7eb2e8f9f7 186 pin_mode(mosi, Disabled);
<> 144:ef7eb2e8f9f7 187 }
<> 144:ef7eb2e8f9f7 188 if (miso != NC) {
<> 144:ef7eb2e8f9f7 189 pin_mode(miso, Disabled);
<> 144:ef7eb2e8f9f7 190 }
<> 144:ef7eb2e8f9f7 191 pin_mode(clk, Disabled);
<> 144:ef7eb2e8f9f7 192 pin_mode(cs, Disabled);
<> 144:ef7eb2e8f9f7 193 }
<> 144:ef7eb2e8f9f7 194 }
<> 144:ef7eb2e8f9f7 195
<> 144:ef7eb2e8f9f7 196 /* Enabling pins and setting location */
<> 144:ef7eb2e8f9f7 197 #ifdef _USART_ROUTEPEN_RESETVALUE
<> 144:ef7eb2e8f9f7 198 uint32_t route = USART_ROUTEPEN_CLKPEN;
AnnaBridge 167:e84263d55307 199
<> 144:ef7eb2e8f9f7 200 obj->spi.spi->ROUTELOC0 &= ~_USART_ROUTELOC0_CLKLOC_MASK;
<> 144:ef7eb2e8f9f7 201 obj->spi.spi->ROUTELOC0 |= pin_location(clk, PinMap_SPI_CLK)<<_USART_ROUTELOC0_CLKLOC_SHIFT;
<> 144:ef7eb2e8f9f7 202 if (mosi != NC) {
<> 144:ef7eb2e8f9f7 203 route |= USART_ROUTEPEN_TXPEN;
<> 144:ef7eb2e8f9f7 204 obj->spi.spi->ROUTELOC0 &= ~_USART_ROUTELOC0_TXLOC_MASK;
<> 144:ef7eb2e8f9f7 205 obj->spi.spi->ROUTELOC0 |= pin_location(mosi, PinMap_SPI_MOSI)<<_USART_ROUTELOC0_TXLOC_SHIFT;
<> 144:ef7eb2e8f9f7 206 }
<> 144:ef7eb2e8f9f7 207 if (miso != NC) {
<> 144:ef7eb2e8f9f7 208 route |= USART_ROUTEPEN_RXPEN;
<> 144:ef7eb2e8f9f7 209 obj->spi.spi->ROUTELOC0 &= ~_USART_ROUTELOC0_RXLOC_MASK;
AnnaBridge 167:e84263d55307 210 obj->spi.spi->ROUTELOC0 |= pin_location(miso, PinMap_SPI_MISO)<<_USART_ROUTELOC0_RXLOC_SHIFT;
<> 144:ef7eb2e8f9f7 211 }
<> 144:ef7eb2e8f9f7 212 if (!obj->spi.master) {
<> 144:ef7eb2e8f9f7 213 route |= USART_ROUTEPEN_CSPEN;
<> 144:ef7eb2e8f9f7 214 obj->spi.spi->ROUTELOC0 &= ~_USART_ROUTELOC0_CSLOC_MASK;
AnnaBridge 167:e84263d55307 215 obj->spi.spi->ROUTELOC0 |= pin_location(cs, PinMap_SPI_CS)<<_USART_ROUTELOC0_CSLOC_SHIFT;
<> 144:ef7eb2e8f9f7 216 }
Anna Bridge 163:74e0ce7f98e8 217 obj->spi.location = obj->spi.spi->ROUTELOC0;
Anna Bridge 163:74e0ce7f98e8 218 obj->spi.route = route;
<> 144:ef7eb2e8f9f7 219 obj->spi.spi->ROUTEPEN = route;
<> 144:ef7eb2e8f9f7 220 }
<> 144:ef7eb2e8f9f7 221 #else
Anna Bridge 163:74e0ce7f98e8 222 uint32_t route = USART_ROUTE_CLKPEN;
<> 144:ef7eb2e8f9f7 223
<> 144:ef7eb2e8f9f7 224 if (mosi != NC) {
<> 144:ef7eb2e8f9f7 225 route |= USART_ROUTE_TXPEN;
<> 144:ef7eb2e8f9f7 226 }
<> 144:ef7eb2e8f9f7 227 if (miso != NC) {
<> 144:ef7eb2e8f9f7 228 route |= USART_ROUTE_RXPEN;
<> 144:ef7eb2e8f9f7 229 }
<> 144:ef7eb2e8f9f7 230 if (!obj->spi.master) {
<> 144:ef7eb2e8f9f7 231 route |= USART_ROUTE_CSPEN;
<> 144:ef7eb2e8f9f7 232 }
Anna Bridge 163:74e0ce7f98e8 233 route |= obj->spi.location << _USART_ROUTE_LOCATION_SHIFT;
<> 144:ef7eb2e8f9f7 234 obj->spi.spi->ROUTE = route;
Anna Bridge 163:74e0ce7f98e8 235 obj->spi.route = route;
<> 144:ef7eb2e8f9f7 236 }
<> 144:ef7eb2e8f9f7 237 #endif
<> 144:ef7eb2e8f9f7 238 void spi_enable(spi_t *obj, uint8_t enable)
<> 144:ef7eb2e8f9f7 239 {
<> 144:ef7eb2e8f9f7 240 USART_Enable(obj->spi.spi, (enable ? usartEnable : usartDisable));
<> 144:ef7eb2e8f9f7 241 }
<> 144:ef7eb2e8f9f7 242
<> 144:ef7eb2e8f9f7 243 void spi_init(spi_t *obj, PinName mosi, PinName miso, PinName clk, PinName cs)
<> 144:ef7eb2e8f9f7 244 {
<> 144:ef7eb2e8f9f7 245 CMU_ClockEnable(cmuClock_HFPER, true);
<> 144:ef7eb2e8f9f7 246 spi_preinit(obj, mosi, miso, clk, cs);
<> 144:ef7eb2e8f9f7 247 CMU_ClockEnable(spi_get_clock_tree(obj), true);
<> 144:ef7eb2e8f9f7 248 usart_init(obj, 100000, usartDatabits8, true, usartClockMode0);
<> 144:ef7eb2e8f9f7 249
<> 144:ef7eb2e8f9f7 250 spi_enable_pins(obj, true, mosi, miso, clk, cs);
<> 144:ef7eb2e8f9f7 251 spi_enable(obj, true);
<> 144:ef7eb2e8f9f7 252 }
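/*------------------------------------------------------------------------------
 * Editor's note (illustrative sketch, not part of the original file): minimal
 * usage of the HAL init above from C, assuming hypothetical target pin names
 * SPI_MOSI_PIN / SPI_MISO_PIN / SPI_CLK_PIN. Passing NC as the chip-select pin
 * selects master mode; spi_init() leaves the port at the 100 kHz / 8-bit /
 * mode-0 default set by usart_init(), so format and frequency are normally
 * reconfigured right after:
 *
 *     spi_t bus;
 *     spi_init(&bus, SPI_MOSI_PIN, SPI_MISO_PIN, SPI_CLK_PIN, NC);
 *     spi_format(&bus, 8, 0, 0);      // 8 data bits, clock mode 0, master
 *     spi_frequency(&bus, 1000000);   // 1 MHz SCLK
 *----------------------------------------------------------------------------*/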
<> 144:ef7eb2e8f9f7 253
<> 144:ef7eb2e8f9f7 254 void spi_enable_event(spi_t *obj, uint32_t event, uint8_t enable)
<> 144:ef7eb2e8f9f7 255 {
<> 144:ef7eb2e8f9f7 256 if(enable) obj->spi.event |= event;
<> 144:ef7eb2e8f9f7 257 else obj->spi.event &= ~event;
<> 144:ef7eb2e8f9f7 258 }
<> 144:ef7eb2e8f9f7 259
<> 144:ef7eb2e8f9f7 260 /****************************************************************************
<> 144:ef7eb2e8f9f7 261 * void spi_enable_interrupt(spi_t *obj, uint32_t handler, uint8_t enable)
<> 144:ef7eb2e8f9f7 262 *
<> 144:ef7eb2e8f9f7 263 * This will enable the interrupt in NVIC for the associated USART RX channel
<> 144:ef7eb2e8f9f7 264 *
<> 144:ef7eb2e8f9f7 265 * * obj: pointer to spi object
<> 144:ef7eb2e8f9f7 266 * * handler: pointer to interrupt handler for this channel
<> 144:ef7eb2e8f9f7 267 * * enable: Whether to enable (true) or disable (false) the interrupt
<> 144:ef7eb2e8f9f7 268 *
<> 144:ef7eb2e8f9f7 269 ****************************************************************************/
<> 144:ef7eb2e8f9f7 270 void spi_enable_interrupt(spi_t *obj, uint32_t handler, uint8_t enable)
<> 144:ef7eb2e8f9f7 271 {
<> 144:ef7eb2e8f9f7 272 IRQn_Type IRQvector;
<> 144:ef7eb2e8f9f7 273
<> 144:ef7eb2e8f9f7 274 switch ((uint32_t)obj->spi.spi) {
<> 144:ef7eb2e8f9f7 275 #ifdef USART0
<> 144:ef7eb2e8f9f7 276 case USART_0:
<> 144:ef7eb2e8f9f7 277 IRQvector = USART0_RX_IRQn;
<> 144:ef7eb2e8f9f7 278 break;
<> 144:ef7eb2e8f9f7 279 #endif
<> 144:ef7eb2e8f9f7 280 #ifdef USART1
<> 144:ef7eb2e8f9f7 281 case USART_1:
<> 144:ef7eb2e8f9f7 282 IRQvector = USART1_RX_IRQn;
<> 144:ef7eb2e8f9f7 283 break;
<> 144:ef7eb2e8f9f7 284 #endif
<> 144:ef7eb2e8f9f7 285 #ifdef USART2
<> 144:ef7eb2e8f9f7 286 case USART_2:
<> 144:ef7eb2e8f9f7 287 IRQvector = USART2_RX_IRQn;
<> 144:ef7eb2e8f9f7 288 break;
<> 144:ef7eb2e8f9f7 289 #endif
<> 144:ef7eb2e8f9f7 290 default:
<> 144:ef7eb2e8f9f7 291 error("Undefined SPI peripheral");
<> 144:ef7eb2e8f9f7 292 return;
<> 144:ef7eb2e8f9f7 293 }
<> 144:ef7eb2e8f9f7 294
<> 144:ef7eb2e8f9f7 295 if (enable == true) {
<> 144:ef7eb2e8f9f7 296 NVIC_SetVector(IRQvector, handler);
<> 144:ef7eb2e8f9f7 297 USART_IntEnable(obj->spi.spi, USART_IEN_RXDATAV);
<> 144:ef7eb2e8f9f7 298 NVIC_EnableIRQ(IRQvector);
<> 144:ef7eb2e8f9f7 299 } else {
<> 144:ef7eb2e8f9f7 300 NVIC_SetVector(IRQvector, handler);
<> 144:ef7eb2e8f9f7 301 USART_IntDisable(obj->spi.spi, USART_IEN_RXDATAV);
<> 144:ef7eb2e8f9f7 302 NVIC_DisableIRQ(IRQvector);
<> 144:ef7eb2e8f9f7 303 }
<> 144:ef7eb2e8f9f7 304 }
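/*------------------------------------------------------------------------------
 * Editor's note (illustrative sketch, not part of the original file): the
 * 'handler' argument is the address of a plain ISR passed as uint32_t so it can
 * be written directly into the NVIC vector table, e.g.
 *
 *     static void my_spi_rx_irq(void);                            // hypothetical ISR
 *     spi_enable_interrupt(&bus, (uint32_t)my_spi_rx_irq, true);
 *
 * Enabling unmasks USART_IEN_RXDATAV and the matching USARTn_RX NVIC line;
 * disabling masks both again (the vector is written in either path).
 *----------------------------------------------------------------------------*/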
<> 144:ef7eb2e8f9f7 305
<> 144:ef7eb2e8f9f7 306 void spi_format(spi_t *obj, int bits, int mode, int slave)
<> 144:ef7eb2e8f9f7 307 {
<> 144:ef7eb2e8f9f7 308 /* Bits: values between 4 and 16 are valid */
<> 144:ef7eb2e8f9f7 309 MBED_ASSERT(bits >= 4 && bits <= 16);
<> 144:ef7eb2e8f9f7 310 obj->spi.bits = bits;
<> 144:ef7eb2e8f9f7 311 /* 0x01 = usartDatabits4, etc, up to 0x0D = usartDatabits16 */
<> 144:ef7eb2e8f9f7 312 USART_Databits_TypeDef databits = (USART_Databits_TypeDef) (bits - 3);
<> 144:ef7eb2e8f9f7 313
<> 144:ef7eb2e8f9f7 314 USART_ClockMode_TypeDef clockMode;
<> 144:ef7eb2e8f9f7 315 MBED_ASSERT(mode >= 0 && mode <= 3);
<> 144:ef7eb2e8f9f7 316 switch (mode) {
<> 144:ef7eb2e8f9f7 317 case 0:
<> 144:ef7eb2e8f9f7 318 clockMode = usartClockMode0;
<> 144:ef7eb2e8f9f7 319 break;
<> 144:ef7eb2e8f9f7 320 case 1:
<> 144:ef7eb2e8f9f7 321 clockMode = usartClockMode1;
<> 144:ef7eb2e8f9f7 322 break;
<> 144:ef7eb2e8f9f7 323 case 2:
<> 144:ef7eb2e8f9f7 324 clockMode = usartClockMode2;
<> 144:ef7eb2e8f9f7 325 break;
<> 144:ef7eb2e8f9f7 326 case 3:
<> 144:ef7eb2e8f9f7 327 clockMode = usartClockMode3;
<> 144:ef7eb2e8f9f7 328 break;
<> 144:ef7eb2e8f9f7 329 default:
<> 144:ef7eb2e8f9f7 330 clockMode = usartClockMode0;
<> 144:ef7eb2e8f9f7 331 }
<> 144:ef7eb2e8f9f7 332 uint32_t iflags = obj->spi.spi->IEN;
<> 144:ef7eb2e8f9f7 333 bool enabled = (obj->spi.spi->STATUS & (USART_STATUS_RXENS | USART_STATUS_TXENS)) != 0;
<> 144:ef7eb2e8f9f7 334
<> 144:ef7eb2e8f9f7 335 usart_init(obj, 100000, databits, (slave ? false : true), clockMode);
<> 144:ef7eb2e8f9f7 336
<> 144:ef7eb2e8f9f7 337 //restore state
<> 144:ef7eb2e8f9f7 338 #ifdef _USART_ROUTEPEN_RESETVALUE
Anna Bridge 163:74e0ce7f98e8 339 obj->spi.spi->ROUTEPEN = obj->spi.route;
Anna Bridge 163:74e0ce7f98e8 340 obj->spi.spi->ROUTELOC0 = obj->spi.location;
<> 144:ef7eb2e8f9f7 341 #else
Anna Bridge 163:74e0ce7f98e8 342 obj->spi.spi->ROUTE = obj->spi.route;
<> 144:ef7eb2e8f9f7 343 #endif
<> 144:ef7eb2e8f9f7 344 obj->spi.spi->IEN = iflags;
<> 144:ef7eb2e8f9f7 345
<> 144:ef7eb2e8f9f7 346 if(enabled) spi_enable(obj, enabled);
<> 144:ef7eb2e8f9f7 347 }
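/*------------------------------------------------------------------------------
 * Editor's note (illustrative sketch, not part of the original file): 'mode'
 * follows the usual SPI numbering (mode 0..3 = CPOL/CPHA 00, 01, 10, 11),
 * mapped onto usartClockMode0..3 above. A typical call for 16-bit frames in
 * mode 3 as bus master:
 *
 *     spi_format(&bus, 16, 3, 0);
 *
 * Because usart_init() is re-run here with its 100 kHz default baud rate,
 * spi_frequency() should be called again afterwards if a different SCLK rate
 * was configured earlier.
 *----------------------------------------------------------------------------*/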
<> 144:ef7eb2e8f9f7 348
<> 144:ef7eb2e8f9f7 349 void spi_frequency(spi_t *obj, int hz)
<> 144:ef7eb2e8f9f7 350 {
<> 144:ef7eb2e8f9f7 351 USART_BaudrateSyncSet(obj->spi.spi, REFERENCE_FREQUENCY, hz);
<> 144:ef7eb2e8f9f7 352 }
<> 144:ef7eb2e8f9f7 353
<> 144:ef7eb2e8f9f7 354 /* Read/Write */
<> 144:ef7eb2e8f9f7 355
<> 144:ef7eb2e8f9f7 356 void spi_write(spi_t *obj, int value)
<> 144:ef7eb2e8f9f7 357 {
<> 144:ef7eb2e8f9f7 358 if (obj->spi.bits <= 8) {
<> 144:ef7eb2e8f9f7 359 USART_Tx(obj->spi.spi, (uint8_t) value);
<> 144:ef7eb2e8f9f7 360 } else if (obj->spi.bits == 9) {
<> 144:ef7eb2e8f9f7 361 USART_TxExt(obj->spi.spi, (uint16_t) value & 0x1FF);
<> 144:ef7eb2e8f9f7 362 } else {
<> 144:ef7eb2e8f9f7 363 USART_TxDouble(obj->spi.spi, (uint16_t) value);
<> 144:ef7eb2e8f9f7 364 }
<> 144:ef7eb2e8f9f7 365 }
<> 144:ef7eb2e8f9f7 366
<> 144:ef7eb2e8f9f7 367 int spi_read(spi_t *obj)
<> 144:ef7eb2e8f9f7 368 {
<> 144:ef7eb2e8f9f7 369 if (obj->spi.bits <= 8) {
<> 144:ef7eb2e8f9f7 370 return (int) obj->spi.spi->RXDATA;
<> 144:ef7eb2e8f9f7 371 } else if (obj->spi.bits == 9) {
<> 144:ef7eb2e8f9f7 372 return (int) obj->spi.spi->RXDATAX & 0x1FF;
<> 144:ef7eb2e8f9f7 373 } else {
<> 144:ef7eb2e8f9f7 374 return (int) obj->spi.spi->RXDOUBLE;
<> 144:ef7eb2e8f9f7 375 }
<> 144:ef7eb2e8f9f7 376 }
<> 144:ef7eb2e8f9f7 377
<> 144:ef7eb2e8f9f7 378 int spi_read_asynch(spi_t *obj)
<> 144:ef7eb2e8f9f7 379 {
<> 144:ef7eb2e8f9f7 380 return spi_read(obj);
<> 144:ef7eb2e8f9f7 381 }
<> 144:ef7eb2e8f9f7 382
<> 144:ef7eb2e8f9f7 383 int spi_master_write(spi_t *obj, int value)
<> 144:ef7eb2e8f9f7 384 {
<> 144:ef7eb2e8f9f7 385 spi_write(obj, value);
<> 144:ef7eb2e8f9f7 386
<> 144:ef7eb2e8f9f7 387 /* Wait for transmission of last byte */
<> 144:ef7eb2e8f9f7 388 while (!(obj->spi.spi->STATUS & USART_STATUS_TXC)) {
<> 144:ef7eb2e8f9f7 389 }
<> 144:ef7eb2e8f9f7 390
<> 144:ef7eb2e8f9f7 391 return spi_read(obj);
<> 144:ef7eb2e8f9f7 392 }
<> 144:ef7eb2e8f9f7 393
Kojto 170:19eb464bc2be 394 int spi_master_block_write(spi_t *obj, const char *tx_buffer, int tx_length,
Kojto 170:19eb464bc2be 395 char *rx_buffer, int rx_length, char write_fill) {
AnnaBridge 167:e84263d55307 396 int total = (tx_length > rx_length) ? tx_length : rx_length;
AnnaBridge 167:e84263d55307 397
AnnaBridge 167:e84263d55307 398 for (int i = 0; i < total; i++) {
Kojto 170:19eb464bc2be 399 char out = (i < tx_length) ? tx_buffer[i] : write_fill;
AnnaBridge 167:e84263d55307 400 char in = spi_master_write(obj, out);
AnnaBridge 167:e84263d55307 401 if (i < rx_length) {
AnnaBridge 167:e84263d55307 402 rx_buffer[i] = in;
AnnaBridge 167:e84263d55307 403 }
AnnaBridge 167:e84263d55307 404 }
AnnaBridge 167:e84263d55307 405
AnnaBridge 167:e84263d55307 406 return total;
AnnaBridge 167:e84263d55307 407 }
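/*------------------------------------------------------------------------------
 * Editor's note (illustrative sketch, not part of the original file):
 * spi_master_block_write() clocks max(tx_length, rx_length) frames; missing TX
 * bytes are replaced by 'write_fill' and surplus RX bytes are discarded. For
 * example, sending one command byte to a hypothetical device and reading a
 * three-byte reply:
 *
 *     const char cmd[1] = { 0x9F };
 *     char resp[4];
 *     spi_master_block_write(&bus, cmd, 1, resp, 4, (char)0xFF);
 *     // resp[1..3] hold the reply; resp[0] was clocked in during the command.
 *----------------------------------------------------------------------------*/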
AnnaBridge 167:e84263d55307 408
<> 144:ef7eb2e8f9f7 409 inline uint8_t spi_master_tx_ready(spi_t *obj)
<> 144:ef7eb2e8f9f7 410 {
<> 144:ef7eb2e8f9f7 411 return (obj->spi.spi->STATUS & USART_STATUS_TXBL) ? true : false;
<> 144:ef7eb2e8f9f7 412 }
<> 144:ef7eb2e8f9f7 413
<> 144:ef7eb2e8f9f7 414 uint8_t spi_master_rx_ready(spi_t *obj)
<> 144:ef7eb2e8f9f7 415 {
<> 144:ef7eb2e8f9f7 416 return (obj->spi.spi->STATUS & USART_STATUS_RXDATAV) ? true : false;
<> 144:ef7eb2e8f9f7 417 }
<> 144:ef7eb2e8f9f7 418
<> 144:ef7eb2e8f9f7 419 uint8_t spi_master_tx_int_flag(spi_t *obj)
<> 144:ef7eb2e8f9f7 420 {
<> 144:ef7eb2e8f9f7 421 return (obj->spi.spi->IF & USART_IF_TXBL) ? true : false;
<> 144:ef7eb2e8f9f7 422 }
<> 144:ef7eb2e8f9f7 423
<> 144:ef7eb2e8f9f7 424 uint8_t spi_master_rx_int_flag(spi_t *obj)
<> 144:ef7eb2e8f9f7 425 {
<> 144:ef7eb2e8f9f7 426 return (obj->spi.spi->IF & (USART_IF_RXDATAV | USART_IF_RXFULL)) ? true : false;
<> 144:ef7eb2e8f9f7 427 }
<> 144:ef7eb2e8f9f7 428
<> 144:ef7eb2e8f9f7 429 void spi_master_read_asynch_complete(spi_t *obj)
<> 144:ef7eb2e8f9f7 430 {
<> 144:ef7eb2e8f9f7 431 obj->spi.spi->IFC = USART_IFC_RXFULL; // in case it got full
<> 144:ef7eb2e8f9f7 432 }
<> 144:ef7eb2e8f9f7 433
<> 144:ef7eb2e8f9f7 434 void spi_master_write_asynch_complete(spi_t *obj)
<> 144:ef7eb2e8f9f7 435 {
<> 144:ef7eb2e8f9f7 436 obj->spi.spi->IFC = USART_IFC_TXC;
<> 144:ef7eb2e8f9f7 437 }
<> 144:ef7eb2e8f9f7 438
<> 144:ef7eb2e8f9f7 439 void spi_irq_handler(spi_t *obj)
<> 144:ef7eb2e8f9f7 440 {
<> 144:ef7eb2e8f9f7 441 spi_read(obj); //TODO_LP store data to the object?
<> 144:ef7eb2e8f9f7 442 }
<> 144:ef7eb2e8f9f7 443
<> 144:ef7eb2e8f9f7 444 uint8_t spi_active(spi_t *obj)
<> 144:ef7eb2e8f9f7 445 {
<> 144:ef7eb2e8f9f7 446 switch(obj->spi.dmaOptionsTX.dmaUsageState) {
<> 144:ef7eb2e8f9f7 447 case DMA_USAGE_TEMPORARY_ALLOCATED:
<> 144:ef7eb2e8f9f7 448 return true;
<> 144:ef7eb2e8f9f7 449 case DMA_USAGE_ALLOCATED:
<> 144:ef7eb2e8f9f7 450 /* Check whether the allocated DMA channel is active */
<> 144:ef7eb2e8f9f7 451 #ifdef LDMA_PRESENT
<> 144:ef7eb2e8f9f7 452 return(LDMAx_ChannelEnabled(obj->spi.dmaOptionsTX.dmaChannel) || LDMAx_ChannelEnabled(obj->spi.dmaOptionsRX.dmaChannel));
<> 144:ef7eb2e8f9f7 453 #else
<> 144:ef7eb2e8f9f7 454 return(DMA_ChannelEnabled(obj->spi.dmaOptionsTX.dmaChannel) || DMA_ChannelEnabled(obj->spi.dmaOptionsRX.dmaChannel));
<> 144:ef7eb2e8f9f7 455 #endif
<> 144:ef7eb2e8f9f7 456 default:
<> 144:ef7eb2e8f9f7 457 /* Check whether interrupt for spi is enabled */
<> 144:ef7eb2e8f9f7 458 return (obj->spi.spi->IEN & (USART_IEN_RXDATAV | USART_IEN_TXBL)) ? true : false;
<> 144:ef7eb2e8f9f7 459 }
<> 144:ef7eb2e8f9f7 460 }
<> 144:ef7eb2e8f9f7 461
<> 144:ef7eb2e8f9f7 462 void spi_buffer_set(spi_t *obj, const void *tx, uint32_t tx_length, void *rx, uint32_t rx_length, uint8_t bit_width)
<> 144:ef7eb2e8f9f7 463 {
<> 144:ef7eb2e8f9f7 464 uint32_t i;
<> 144:ef7eb2e8f9f7 465 uint16_t *tx_ptr = (uint16_t *) tx;
<> 144:ef7eb2e8f9f7 466
<> 144:ef7eb2e8f9f7 467 obj->tx_buff.buffer = (void *)tx;
<> 144:ef7eb2e8f9f7 468 obj->rx_buff.buffer = rx;
<> 144:ef7eb2e8f9f7 469 obj->tx_buff.length = tx_length;
<> 144:ef7eb2e8f9f7 470 obj->rx_buff.length = rx_length;
<> 144:ef7eb2e8f9f7 471 obj->tx_buff.pos = 0;
<> 144:ef7eb2e8f9f7 472 obj->rx_buff.pos = 0;
<> 144:ef7eb2e8f9f7 473 obj->tx_buff.width = bit_width;
<> 144:ef7eb2e8f9f7 474 obj->rx_buff.width = bit_width;
<> 144:ef7eb2e8f9f7 475
<> 144:ef7eb2e8f9f7 476 if((obj->spi.bits == 9) && (tx != 0)) {
<> 144:ef7eb2e8f9f7 477 // Make sure we don't have inadvertent non-zero bits outside 9-bit frames which could trigger unwanted operation
<> 144:ef7eb2e8f9f7 478 for(i = 0; i < (tx_length / 2); i++) {
<> 144:ef7eb2e8f9f7 479 tx_ptr[i] &= 0x1FF;
<> 144:ef7eb2e8f9f7 480 }
<> 144:ef7eb2e8f9f7 481 }
<> 144:ef7eb2e8f9f7 482 }
<> 144:ef7eb2e8f9f7 483
<> 144:ef7eb2e8f9f7 484 static void spi_buffer_tx_write(spi_t *obj)
<> 144:ef7eb2e8f9f7 485 {
<> 144:ef7eb2e8f9f7 486 uint32_t data = 0;
<> 144:ef7eb2e8f9f7 487
<> 144:ef7eb2e8f9f7 488 // Interpret buffer according to declared width
<> 144:ef7eb2e8f9f7 489 if (!obj->tx_buff.buffer) {
<> 144:ef7eb2e8f9f7 490 data = SPI_FILL_WORD;
<> 144:ef7eb2e8f9f7 491 } else if (obj->tx_buff.width == 32) {
<> 144:ef7eb2e8f9f7 492 uint32_t * tx = (uint32_t *)obj->tx_buff.buffer;
<> 144:ef7eb2e8f9f7 493 data = tx[obj->tx_buff.pos];
<> 144:ef7eb2e8f9f7 494 } else if (obj->tx_buff.width == 16) {
<> 144:ef7eb2e8f9f7 495 uint16_t * tx = (uint16_t *)obj->tx_buff.buffer;
<> 144:ef7eb2e8f9f7 496 data = tx[obj->tx_buff.pos];
<> 144:ef7eb2e8f9f7 497 } else {
<> 144:ef7eb2e8f9f7 498 uint8_t * tx = (uint8_t *)obj->tx_buff.buffer;
<> 144:ef7eb2e8f9f7 499 data = tx[obj->tx_buff.pos];
<> 144:ef7eb2e8f9f7 500 }
<> 144:ef7eb2e8f9f7 501 obj->tx_buff.pos++;
<> 144:ef7eb2e8f9f7 502
<> 144:ef7eb2e8f9f7 503 // Send buffer
<> 144:ef7eb2e8f9f7 504 if (obj->spi.bits > 9) {
<> 144:ef7eb2e8f9f7 505 obj->spi.spi->TXDOUBLE = data;
<> 144:ef7eb2e8f9f7 506 } else if (obj->spi.bits == 9) {
<> 144:ef7eb2e8f9f7 507 obj->spi.spi->TXDATAX = data;
<> 144:ef7eb2e8f9f7 508 } else {
<> 144:ef7eb2e8f9f7 509 obj->spi.spi->TXDATA = data;
<> 144:ef7eb2e8f9f7 510 }
<> 144:ef7eb2e8f9f7 511 }
<> 144:ef7eb2e8f9f7 512
<> 144:ef7eb2e8f9f7 513 static void spi_buffer_rx_read(spi_t *obj)
<> 144:ef7eb2e8f9f7 514 {
<> 144:ef7eb2e8f9f7 515 uint32_t data;
<> 144:ef7eb2e8f9f7 516
<> 144:ef7eb2e8f9f7 517 if (obj->spi.spi->STATUS & USART_STATUS_RXDATAV) {
<> 144:ef7eb2e8f9f7 518 // Read from the FIFO
<> 144:ef7eb2e8f9f7 519 if (obj->spi.bits > 9) {
<> 144:ef7eb2e8f9f7 520 data = obj->spi.spi->RXDOUBLE;
<> 144:ef7eb2e8f9f7 521 } else if (obj->spi.bits == 9) {
<> 144:ef7eb2e8f9f7 522 data = obj->spi.spi->RXDATAX;
<> 144:ef7eb2e8f9f7 523 } else {
<> 144:ef7eb2e8f9f7 524 data = obj->spi.spi->RXDATA;
<> 144:ef7eb2e8f9f7 525 }
<> 144:ef7eb2e8f9f7 526
<> 144:ef7eb2e8f9f7 527 // If there is room in the buffer, store the data
<> 144:ef7eb2e8f9f7 528 if (obj->rx_buff.buffer && obj->rx_buff.pos < obj->rx_buff.length) {
<> 144:ef7eb2e8f9f7 529 if (obj->rx_buff.width == 32) {
<> 144:ef7eb2e8f9f7 530 uint32_t * rx = (uint32_t *)(obj->rx_buff.buffer);
<> 144:ef7eb2e8f9f7 531 rx[obj->rx_buff.pos] = data;
<> 144:ef7eb2e8f9f7 532 } else if (obj->rx_buff.width == 16) {
<> 144:ef7eb2e8f9f7 533 uint16_t * rx = (uint16_t *)(obj->rx_buff.buffer);
<> 144:ef7eb2e8f9f7 534 rx[obj->rx_buff.pos] = data;
<> 144:ef7eb2e8f9f7 535 } else {
<> 144:ef7eb2e8f9f7 536 uint8_t * rx = (uint8_t *)(obj->rx_buff.buffer);
<> 144:ef7eb2e8f9f7 537 rx[obj->rx_buff.pos] = data;
<> 144:ef7eb2e8f9f7 538 }
<> 144:ef7eb2e8f9f7 539 obj->rx_buff.pos++;
<> 144:ef7eb2e8f9f7 540 }
<> 144:ef7eb2e8f9f7 541 }
<> 144:ef7eb2e8f9f7 542 }
<> 144:ef7eb2e8f9f7 543
<> 144:ef7eb2e8f9f7 544 int spi_master_write_asynch(spi_t *obj)
<> 144:ef7eb2e8f9f7 545 {
<> 144:ef7eb2e8f9f7 546 int ndata = 0;
<> 144:ef7eb2e8f9f7 547 while ((obj->tx_buff.pos < obj->tx_buff.length) && (obj->spi.spi->STATUS & USART_STATUS_TXBL)) {
<> 144:ef7eb2e8f9f7 548 spi_buffer_tx_write(obj);
<> 144:ef7eb2e8f9f7 549 ndata++;
<> 144:ef7eb2e8f9f7 550 }
<> 144:ef7eb2e8f9f7 551 return ndata;
<> 144:ef7eb2e8f9f7 552 }
<> 144:ef7eb2e8f9f7 553
<> 144:ef7eb2e8f9f7 554 int spi_master_read_asynch(spi_t *obj)
<> 144:ef7eb2e8f9f7 555 {
<> 144:ef7eb2e8f9f7 556 int ndata = 0;
<> 144:ef7eb2e8f9f7 557 while ((obj->rx_buff.pos < obj->rx_buff.length) && (obj->spi.spi->STATUS & (USART_STATUS_RXDATAV | USART_STATUS_RXFULL))) {
<> 144:ef7eb2e8f9f7 558 spi_buffer_rx_read(obj);
<> 144:ef7eb2e8f9f7 559 ndata++;
<> 144:ef7eb2e8f9f7 560 }
<> 144:ef7eb2e8f9f7 561 // all sent but still more to receive? need to align tx buffer
<> 144:ef7eb2e8f9f7 562 if ((obj->tx_buff.pos >= obj->tx_buff.length) && (obj->rx_buff.pos < obj->rx_buff.length)) {
<> 144:ef7eb2e8f9f7 563 obj->tx_buff.buffer = (void *)0;
<> 144:ef7eb2e8f9f7 564 obj->tx_buff.length = obj->rx_buff.length;
<> 144:ef7eb2e8f9f7 565 }
<> 144:ef7eb2e8f9f7 566
<> 144:ef7eb2e8f9f7 567 return ndata;
<> 144:ef7eb2e8f9f7 568 }
<> 144:ef7eb2e8f9f7 569
<> 144:ef7eb2e8f9f7 570 uint8_t spi_buffer_rx_empty(spi_t *obj)
<> 144:ef7eb2e8f9f7 571 {
<> 144:ef7eb2e8f9f7 572 return (obj->rx_buff.pos >= obj->rx_buff.length ? true : false );
<> 144:ef7eb2e8f9f7 573 }
<> 144:ef7eb2e8f9f7 574
<> 144:ef7eb2e8f9f7 575 uint8_t spi_buffer_tx_empty(spi_t *obj)
<> 144:ef7eb2e8f9f7 576 {
<> 144:ef7eb2e8f9f7 577 return (obj->tx_buff.pos >= obj->tx_buff.length ? true : false );
<> 144:ef7eb2e8f9f7 578 }
<> 144:ef7eb2e8f9f7 579
<> 144:ef7eb2e8f9f7 580 //TODO_LP implement slave
<> 144:ef7eb2e8f9f7 581
<> 144:ef7eb2e8f9f7 582 int spi_slave_receive(spi_t *obj)
<> 144:ef7eb2e8f9f7 583 {
<> 144:ef7eb2e8f9f7 584 if (obj->spi.bits <= 9) {
<> 144:ef7eb2e8f9f7 585 return (obj->spi.spi->STATUS & USART_STATUS_RXDATAV) ? 1 : 0;
<> 144:ef7eb2e8f9f7 586 } else {
<> 144:ef7eb2e8f9f7 587 return (obj->spi.spi->STATUS & USART_STATUS_RXFULL) ? 1 : 0;
<> 144:ef7eb2e8f9f7 588 }
<> 144:ef7eb2e8f9f7 589 }
<> 144:ef7eb2e8f9f7 590
<> 144:ef7eb2e8f9f7 591 int spi_slave_read(spi_t *obj)
<> 144:ef7eb2e8f9f7 592 {
<> 144:ef7eb2e8f9f7 593 return spi_read(obj);
<> 144:ef7eb2e8f9f7 594 }
<> 144:ef7eb2e8f9f7 595
<> 144:ef7eb2e8f9f7 596 void spi_slave_write(spi_t *obj, int value)
<> 144:ef7eb2e8f9f7 597 {
<> 144:ef7eb2e8f9f7 598 spi_write(obj, value);
<> 144:ef7eb2e8f9f7 599 }
<> 144:ef7eb2e8f9f7 600
<> 144:ef7eb2e8f9f7 601 uint32_t spi_event_check(spi_t *obj)
<> 144:ef7eb2e8f9f7 602 {
<> 144:ef7eb2e8f9f7 603 uint32_t requestedEvent = obj->spi.event;
<> 144:ef7eb2e8f9f7 604 uint32_t event = 0;
<> 144:ef7eb2e8f9f7 605 uint8_t quit = spi_buffer_rx_empty(obj) & spi_buffer_tx_empty(obj);
<> 144:ef7eb2e8f9f7 606 if (((requestedEvent & SPI_EVENT_COMPLETE) != 0) && (quit == true)) {
<> 144:ef7eb2e8f9f7 607 event |= SPI_EVENT_COMPLETE;
<> 144:ef7eb2e8f9f7 608 }
<> 144:ef7eb2e8f9f7 609
<> 144:ef7eb2e8f9f7 610 if(quit == true) {
<> 144:ef7eb2e8f9f7 611 event |= SPI_EVENT_INTERNAL_TRANSFER_COMPLETE;
<> 144:ef7eb2e8f9f7 612 }
<> 144:ef7eb2e8f9f7 613
<> 144:ef7eb2e8f9f7 614 return event;
<> 144:ef7eb2e8f9f7 615 }
<> 144:ef7eb2e8f9f7 616 /******************************************
<> 144:ef7eb2e8f9f7 617 * void transferComplete(uint channel, bool primary, void* user)
<> 144:ef7eb2e8f9f7 618 *
<> 144:ef7eb2e8f9f7 619 * Callback function which gets called upon DMA transfer completion
<> 144:ef7eb2e8f9f7 620 * the user-defined pointer is pointing to the CPP-land thunk
<> 144:ef7eb2e8f9f7 621 ******************************************/
<> 144:ef7eb2e8f9f7 622 void transferComplete(unsigned int channel, bool primary, void *user)
<> 144:ef7eb2e8f9f7 623 {
<> 144:ef7eb2e8f9f7 624 (void) channel;
<> 144:ef7eb2e8f9f7 625 (void) primary;
<> 144:ef7eb2e8f9f7 626
<> 144:ef7eb2e8f9f7 627 /* User pointer should be a thunk to CPP land */
<> 144:ef7eb2e8f9f7 628 if (user != NULL) {
<> 144:ef7eb2e8f9f7 629 ((DMACallback)user)();
<> 144:ef7eb2e8f9f7 630 }
<> 144:ef7eb2e8f9f7 631 }
<> 144:ef7eb2e8f9f7 632
<> 144:ef7eb2e8f9f7 633 /******************************************
<> 144:ef7eb2e8f9f7 634 * bool spi_allocate_dma(spi_t *obj);
<> 144:ef7eb2e8f9f7 635 * (helper function for spi_enable_dma)
<> 144:ef7eb2e8f9f7 636 *
<> 144:ef7eb2e8f9f7 637 * This function will request two DMA channels from the DMA API if needed
<> 144:ef7eb2e8f9f7 638 * by the hint provided. They will be allocated to the SPI object pointed to.
<> 144:ef7eb2e8f9f7 639 *
<> 144:ef7eb2e8f9f7 640 * return value: whether the channels were acquired successfully (true) or not.
<> 144:ef7eb2e8f9f7 641 ******************************************/
<> 144:ef7eb2e8f9f7 642 bool spi_allocate_dma(spi_t *obj)
<> 144:ef7eb2e8f9f7 643 {
<> 144:ef7eb2e8f9f7 644 int dmaChannelIn, dmaChannelOut;
<> 144:ef7eb2e8f9f7 645 dmaChannelIn = dma_channel_allocate(DMA_CAP_NONE);
<> 144:ef7eb2e8f9f7 646 if (dmaChannelIn == DMA_ERROR_OUT_OF_CHANNELS) {
<> 144:ef7eb2e8f9f7 647 return false;
<> 144:ef7eb2e8f9f7 648 }
<> 144:ef7eb2e8f9f7 649 dmaChannelOut = dma_channel_allocate(DMA_CAP_NONE);
<> 144:ef7eb2e8f9f7 650 if (dmaChannelOut == DMA_ERROR_OUT_OF_CHANNELS) {
<> 144:ef7eb2e8f9f7 651 dma_channel_free(dmaChannelIn);
<> 144:ef7eb2e8f9f7 652 return false;
<> 144:ef7eb2e8f9f7 653 }
<> 144:ef7eb2e8f9f7 654
<> 144:ef7eb2e8f9f7 655 obj->spi.dmaOptionsTX.dmaChannel = dmaChannelOut;
<> 144:ef7eb2e8f9f7 656 obj->spi.dmaOptionsRX.dmaChannel = dmaChannelIn;
<> 144:ef7eb2e8f9f7 657 return true;
<> 144:ef7eb2e8f9f7 658 }
<> 144:ef7eb2e8f9f7 659
<> 144:ef7eb2e8f9f7 660 /******************************************
<> 144:ef7eb2e8f9f7 661 * void spi_enable_dma(spi_t *obj, DMAUsage state)
<> 144:ef7eb2e8f9f7 662 *
<> 144:ef7eb2e8f9f7 663 * This function tries to allocate DMA as indicated by the hint (state).
<> 144:ef7eb2e8f9f7 664 * There are three possibilities:
<> 144:ef7eb2e8f9f7 665 * * state = NEVER:
<> 144:ef7eb2e8f9f7 666 * if there were channels allocated by state = ALWAYS, they will be released
<> 144:ef7eb2e8f9f7 667 * * state = OPPORTUNISTIC:
<> 144:ef7eb2e8f9f7 668 * if there are channels available, they will get used, but freed upon transfer completion
<> 144:ef7eb2e8f9f7 669 * * state = ALWAYS
<> 144:ef7eb2e8f9f7 670 * if there are channels available, they will get allocated and not be freed until state changes
<> 144:ef7eb2e8f9f7 671 ******************************************/
<> 144:ef7eb2e8f9f7 672 void spi_enable_dma(spi_t *obj, DMAUsage state)
<> 144:ef7eb2e8f9f7 673 {
<> 144:ef7eb2e8f9f7 674 if (state == DMA_USAGE_ALWAYS && obj->spi.dmaOptionsTX.dmaUsageState != DMA_USAGE_ALLOCATED) {
<> 144:ef7eb2e8f9f7 675 /* Try to allocate channels */
<> 144:ef7eb2e8f9f7 676 if (spi_allocate_dma(obj)) {
<> 144:ef7eb2e8f9f7 677 obj->spi.dmaOptionsTX.dmaUsageState = DMA_USAGE_ALLOCATED;
<> 144:ef7eb2e8f9f7 678 } else {
<> 144:ef7eb2e8f9f7 679 obj->spi.dmaOptionsTX.dmaUsageState = state;
<> 144:ef7eb2e8f9f7 680 }
<> 144:ef7eb2e8f9f7 681 } else if (state == DMA_USAGE_OPPORTUNISTIC) {
<> 144:ef7eb2e8f9f7 682 if (obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_ALLOCATED) {
<> 144:ef7eb2e8f9f7 683 /* Channels have already been allocated previously by an ALWAYS state, so after this transfer, we will release them */
<> 144:ef7eb2e8f9f7 684 obj->spi.dmaOptionsTX.dmaUsageState = DMA_USAGE_TEMPORARY_ALLOCATED;
<> 144:ef7eb2e8f9f7 685 } else {
<> 144:ef7eb2e8f9f7 686 /* Try to allocate channels */
<> 144:ef7eb2e8f9f7 687 if (spi_allocate_dma(obj)) {
<> 144:ef7eb2e8f9f7 688 obj->spi.dmaOptionsTX.dmaUsageState = DMA_USAGE_TEMPORARY_ALLOCATED;
<> 144:ef7eb2e8f9f7 689 } else {
<> 144:ef7eb2e8f9f7 690 obj->spi.dmaOptionsTX.dmaUsageState = state;
<> 144:ef7eb2e8f9f7 691 }
<> 144:ef7eb2e8f9f7 692 }
<> 144:ef7eb2e8f9f7 693 } else if (state == DMA_USAGE_NEVER) {
<> 144:ef7eb2e8f9f7 694 /* If channels are allocated, get rid of them */
<> 144:ef7eb2e8f9f7 695 if (obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_ALLOCATED) {
<> 144:ef7eb2e8f9f7 696 dma_channel_free(obj->spi.dmaOptionsTX.dmaChannel);
<> 144:ef7eb2e8f9f7 697 dma_channel_free(obj->spi.dmaOptionsRX.dmaChannel);
<> 144:ef7eb2e8f9f7 698 }
<> 144:ef7eb2e8f9f7 699 obj->spi.dmaOptionsTX.dmaUsageState = DMA_USAGE_NEVER;
<> 144:ef7eb2e8f9f7 700 }
<> 144:ef7eb2e8f9f7 701 }
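/*------------------------------------------------------------------------------
 * Editor's note (illustrative sketch, not part of the original file): resulting
 * channel ownership for each hint, as implemented above:
 *
 *     spi_enable_dma(&bus, DMA_USAGE_ALWAYS);         // allocate and keep the channels
 *     spi_enable_dma(&bus, DMA_USAGE_OPPORTUNISTIC);  // allocate (or reuse) channels, free after the next transfer
 *     spi_enable_dma(&bus, DMA_USAGE_NEVER);          // free any ALWAYS allocation and fall back to IRQ
 *
 * Note that dmaOptionsTX.dmaUsageState tracks the state for both the TX and RX
 * channels.
 *----------------------------------------------------------------------------*/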
<> 144:ef7eb2e8f9f7 702
<> 144:ef7eb2e8f9f7 703 #ifdef LDMA_PRESENT
<> 144:ef7eb2e8f9f7 704 /************************************************************************************
<> 144:ef7eb2e8f9f7 705 * DMA helper functions *
<> 144:ef7eb2e8f9f7 706 ************************************************************************************/
<> 144:ef7eb2e8f9f7 707 /******************************************
<> 144:ef7eb2e8f9f7 708 * static void serial_dmaTransferComplete(uint channel, bool primary, void* user)
<> 144:ef7eb2e8f9f7 709 *
<> 144:ef7eb2e8f9f7 710 * Callback function which gets called upon DMA transfer completion
<> 144:ef7eb2e8f9f7 711 * the user-defined pointer is pointing to the CPP-land thunk
<> 144:ef7eb2e8f9f7 712 ******************************************/
<> 144:ef7eb2e8f9f7 713 static void serial_dmaTransferComplete(unsigned int channel, bool primary, void *user)
<> 144:ef7eb2e8f9f7 714 {
<> 144:ef7eb2e8f9f7 715
<> 144:ef7eb2e8f9f7 716 /* User pointer should be a thunk to CPP land */
<> 144:ef7eb2e8f9f7 717 if (user != NULL) {
<> 144:ef7eb2e8f9f7 718 ((DMACallback)user)();
<> 144:ef7eb2e8f9f7 719 }
<> 144:ef7eb2e8f9f7 720 }
<> 144:ef7eb2e8f9f7 721 static void spi_master_dma_channel_setup(spi_t *obj, void* callback)
<> 144:ef7eb2e8f9f7 722 {
<> 144:ef7eb2e8f9f7 723 obj->spi.dmaOptionsRX.dmaCallback.userPtr = callback;
<> 144:ef7eb2e8f9f7 724 }
<> 144:ef7eb2e8f9f7 725 #else
<> 144:ef7eb2e8f9f7 726 /******************************************
<> 144:ef7eb2e8f9f7 727 * void spi_master_dma_channel_setup(spi_t *obj)
<> 144:ef7eb2e8f9f7 728 *
<> 144:ef7eb2e8f9f7 729 * This function will setup the DMA configuration for SPI transfers
<> 144:ef7eb2e8f9f7 730 *
<> 144:ef7eb2e8f9f7 731 * The channel numbers are fetched from the SPI instance, so this function
<> 144:ef7eb2e8f9f7 732 * should only be called when those channels have actually been allocated.
<> 144:ef7eb2e8f9f7 733 ******************************************/
<> 144:ef7eb2e8f9f7 734 static void spi_master_dma_channel_setup(spi_t *obj, void* callback)
<> 144:ef7eb2e8f9f7 735 {
<> 144:ef7eb2e8f9f7 736 DMA_CfgChannel_TypeDef rxChnlCfg;
<> 144:ef7eb2e8f9f7 737 DMA_CfgChannel_TypeDef txChnlCfg;
<> 144:ef7eb2e8f9f7 738
<> 144:ef7eb2e8f9f7 739 /* Setting up channel for rx. */
<> 144:ef7eb2e8f9f7 740 obj->spi.dmaOptionsRX.dmaCallback.cbFunc = transferComplete;
<> 144:ef7eb2e8f9f7 741 obj->spi.dmaOptionsRX.dmaCallback.userPtr = callback;
<> 144:ef7eb2e8f9f7 742
<> 144:ef7eb2e8f9f7 743 rxChnlCfg.highPri = false;
<> 144:ef7eb2e8f9f7 744 rxChnlCfg.enableInt = true;
<> 144:ef7eb2e8f9f7 745 rxChnlCfg.cb = &(obj->spi.dmaOptionsRX.dmaCallback);
<> 144:ef7eb2e8f9f7 746
<> 144:ef7eb2e8f9f7 747 /* Setting up channel for tx. */
<> 144:ef7eb2e8f9f7 748 obj->spi.dmaOptionsTX.dmaCallback.cbFunc = transferComplete;
<> 144:ef7eb2e8f9f7 749 obj->spi.dmaOptionsTX.dmaCallback.userPtr = callback;
<> 144:ef7eb2e8f9f7 750
<> 144:ef7eb2e8f9f7 751 txChnlCfg.highPri = false;
<> 144:ef7eb2e8f9f7 752 txChnlCfg.enableInt = true;
<> 144:ef7eb2e8f9f7 753 txChnlCfg.cb = &(obj->spi.dmaOptionsTX.dmaCallback);
<> 144:ef7eb2e8f9f7 754
<> 144:ef7eb2e8f9f7 755 switch ((int)obj->spi.spi) {
<> 144:ef7eb2e8f9f7 756 #ifdef USART0
<> 144:ef7eb2e8f9f7 757 case SPI_0:
<> 144:ef7eb2e8f9f7 758 rxChnlCfg.select = DMAREQ_USART0_RXDATAV;
<> 144:ef7eb2e8f9f7 759 txChnlCfg.select = DMAREQ_USART0_TXEMPTY;
<> 144:ef7eb2e8f9f7 760 break;
<> 144:ef7eb2e8f9f7 761 #endif
<> 144:ef7eb2e8f9f7 762 #ifdef USART1
<> 144:ef7eb2e8f9f7 763 case SPI_1:
<> 144:ef7eb2e8f9f7 764 rxChnlCfg.select = DMAREQ_USART1_RXDATAV;
<> 144:ef7eb2e8f9f7 765 txChnlCfg.select = DMAREQ_USART1_TXEMPTY;
<> 144:ef7eb2e8f9f7 766 break;
<> 144:ef7eb2e8f9f7 767 #endif
<> 144:ef7eb2e8f9f7 768 #ifdef USART2
<> 144:ef7eb2e8f9f7 769 case SPI_2:
<> 144:ef7eb2e8f9f7 770 rxChnlCfg.select = DMAREQ_USART2_RXDATAV;
<> 144:ef7eb2e8f9f7 771 txChnlCfg.select = DMAREQ_USART2_TXEMPTY;
<> 144:ef7eb2e8f9f7 772 break;
<> 144:ef7eb2e8f9f7 773 #endif
<> 144:ef7eb2e8f9f7 774 default:
<> 144:ef7eb2e8f9f7 775 error("Spi module not available.. Out of bound access.");
<> 144:ef7eb2e8f9f7 776 break;
<> 144:ef7eb2e8f9f7 777 }
<> 144:ef7eb2e8f9f7 778 DMA_CfgChannel(obj->spi.dmaOptionsRX.dmaChannel, &rxChnlCfg);
<> 144:ef7eb2e8f9f7 779 DMA_CfgChannel(obj->spi.dmaOptionsTX.dmaChannel, &txChnlCfg);
<> 144:ef7eb2e8f9f7 780 }
<> 144:ef7eb2e8f9f7 781 #endif // LDMA_PRESENT
<> 144:ef7eb2e8f9f7 782 /******************************************
<> 144:ef7eb2e8f9f7 783 * void spi_activate_dma(spi_t *obj, void* rxdata, const void* txdata, int tx_length, int rx_length)
<> 144:ef7eb2e8f9f7 784 *
<> 144:ef7eb2e8f9f7 785 * This function will start the DMA engine for SPI transfers
<> 144:ef7eb2e8f9f7 786 *
<> 144:ef7eb2e8f9f7 787 * * rxdata: pointer to RX buffer, if needed.
<> 144:ef7eb2e8f9f7 788 * * txdata: pointer to TX buffer, if needed. Else FF's.
<> 144:ef7eb2e8f9f7 789 * * tx_length: how many bytes will get sent.
<> 144:ef7eb2e8f9f7 790 * * rx_length: how many bytes will get received. If > tx_length, TX will get padded with n lower bits of SPI_FILL_WORD.
<> 144:ef7eb2e8f9f7 791 ******************************************/
<> 144:ef7eb2e8f9f7 792 #ifdef LDMA_PRESENT
<> 144:ef7eb2e8f9f7 793 static void spi_activate_dma(spi_t *obj, void* rxdata, const void* txdata, int tx_length, int rx_length)
<> 144:ef7eb2e8f9f7 794 {
<> 144:ef7eb2e8f9f7 795 LDMA_PeripheralSignal_t dma_periph;
<> 144:ef7eb2e8f9f7 796
<> 144:ef7eb2e8f9f7 797 if(rxdata) {
<> 144:ef7eb2e8f9f7 798 volatile const void *source_addr;
<> 144:ef7eb2e8f9f7 799 /* Select RX source address. A 9-bit frame length requires the extended (RXDATAX) register;
<> 144:ef7eb2e8f9f7 800 10-bit and larger frames require the RXDOUBLE register. */
<> 144:ef7eb2e8f9f7 801 switch((int)obj->spi.spi) {
<> 144:ef7eb2e8f9f7 802 case USART_0:
<> 144:ef7eb2e8f9f7 803 dma_periph = ldmaPeripheralSignal_USART0_RXDATAV;
<> 144:ef7eb2e8f9f7 804 break;
<> 144:ef7eb2e8f9f7 805 case USART_1:
<> 144:ef7eb2e8f9f7 806 dma_periph = ldmaPeripheralSignal_USART1_RXDATAV;
<> 144:ef7eb2e8f9f7 807 break;
<> 144:ef7eb2e8f9f7 808 default:
<> 144:ef7eb2e8f9f7 809 EFM_ASSERT(0);
<> 144:ef7eb2e8f9f7 810 while(1);
<> 144:ef7eb2e8f9f7 811 break;
<> 144:ef7eb2e8f9f7 812 }
<> 144:ef7eb2e8f9f7 813
<> 144:ef7eb2e8f9f7 814 if (obj->spi.bits <= 8) {
<> 144:ef7eb2e8f9f7 815 source_addr = &obj->spi.spi->RXDATA;
<> 144:ef7eb2e8f9f7 816 } else if (obj->spi.bits == 9) {
<> 144:ef7eb2e8f9f7 817 source_addr = &obj->spi.spi->RXDATAX;
<> 144:ef7eb2e8f9f7 818 } else {
<> 144:ef7eb2e8f9f7 819 source_addr = &obj->spi.spi->RXDOUBLE;
<> 144:ef7eb2e8f9f7 820 }
<> 144:ef7eb2e8f9f7 821
<> 144:ef7eb2e8f9f7 822 LDMA_TransferCfg_t xferConf = LDMA_TRANSFER_CFG_PERIPHERAL(dma_periph);
<> 144:ef7eb2e8f9f7 823 LDMA_Descriptor_t desc = LDMA_DESCRIPTOR_SINGLE_P2M_BYTE(source_addr, rxdata, rx_length);
<> 144:ef7eb2e8f9f7 824
<> 144:ef7eb2e8f9f7 825 if(obj->spi.bits >= 9){
<> 144:ef7eb2e8f9f7 826 desc.xfer.size = ldmaCtrlSizeHalf;
<> 144:ef7eb2e8f9f7 827 }
<> 144:ef7eb2e8f9f7 828
<> 144:ef7eb2e8f9f7 829 if (obj->tx_buff.width == 32) {
<> 144:ef7eb2e8f9f7 830 if (obj->spi.bits >= 9) {
<> 144:ef7eb2e8f9f7 831 desc.xfer.dstInc = ldmaCtrlDstIncTwo;
<> 144:ef7eb2e8f9f7 832 } else {
<> 144:ef7eb2e8f9f7 833 desc.xfer.dstInc = ldmaCtrlDstIncFour;
<> 144:ef7eb2e8f9f7 834 }
<> 144:ef7eb2e8f9f7 835 } else if (obj->tx_buff.width == 16) {
<> 144:ef7eb2e8f9f7 836 if (obj->spi.bits >= 9) {
<> 144:ef7eb2e8f9f7 837 desc.xfer.dstInc = ldmaCtrlDstIncOne;
<> 144:ef7eb2e8f9f7 838 } else {
<> 144:ef7eb2e8f9f7 839 desc.xfer.dstInc = ldmaCtrlDstIncTwo;
<> 144:ef7eb2e8f9f7 840 }
<> 144:ef7eb2e8f9f7 841 } else {
<> 144:ef7eb2e8f9f7 842 desc.xfer.dstInc = ldmaCtrlDstIncOne;
<> 144:ef7eb2e8f9f7 843 }
<> 144:ef7eb2e8f9f7 844
<> 144:ef7eb2e8f9f7 845 LDMAx_StartTransfer(obj->spi.dmaOptionsRX.dmaChannel, &xferConf, &desc, serial_dmaTransferComplete,obj->spi.dmaOptionsRX.dmaCallback.userPtr);
<> 144:ef7eb2e8f9f7 846 }
<> 144:ef7eb2e8f9f7 847
<> 144:ef7eb2e8f9f7 848 volatile void *target_addr;
<> 144:ef7eb2e8f9f7 849
<> 144:ef7eb2e8f9f7 850 /* Select TX target address. A 9-bit frame length requires the extended (TXDATAX) register;
<> 144:ef7eb2e8f9f7 851 10-bit and larger frames require the TXDOUBLE register. */
<> 144:ef7eb2e8f9f7 852 switch ((int)obj->spi.spi) {
<> 144:ef7eb2e8f9f7 853 case USART_0:
<> 144:ef7eb2e8f9f7 854 dma_periph = ldmaPeripheralSignal_USART0_TXBL;
<> 144:ef7eb2e8f9f7 855 break;
<> 144:ef7eb2e8f9f7 856 case USART_1:
<> 144:ef7eb2e8f9f7 857 dma_periph = ldmaPeripheralSignal_USART1_TXBL;
<> 144:ef7eb2e8f9f7 858 break;
<> 144:ef7eb2e8f9f7 859 default:
<> 144:ef7eb2e8f9f7 860 EFM_ASSERT(0);
<> 144:ef7eb2e8f9f7 861 while(1);
<> 144:ef7eb2e8f9f7 862 break;
<> 144:ef7eb2e8f9f7 863 }
<> 144:ef7eb2e8f9f7 864
<> 144:ef7eb2e8f9f7 865 if (obj->spi.bits <= 8) {
<> 144:ef7eb2e8f9f7 866 target_addr = &obj->spi.spi->TXDATA;
<> 144:ef7eb2e8f9f7 867 } else if (obj->spi.bits == 9) {
<> 144:ef7eb2e8f9f7 868 target_addr = &obj->spi.spi->TXDATAX;
<> 144:ef7eb2e8f9f7 869 } else {
<> 144:ef7eb2e8f9f7 870 target_addr = &obj->spi.spi->TXDOUBLE;
<> 144:ef7eb2e8f9f7 871 }
<> 144:ef7eb2e8f9f7 872
<> 144:ef7eb2e8f9f7 873 /* Check the transmit length, and split long transfers to smaller ones */
<> 144:ef7eb2e8f9f7 874 int max_length = 1024;
<> 144:ef7eb2e8f9f7 875 #ifdef _LDMA_CH_CTRL_XFERCNT_MASK
<> 144:ef7eb2e8f9f7 876 max_length = (_LDMA_CH_CTRL_XFERCNT_MASK>>_LDMA_CH_CTRL_XFERCNT_SHIFT)+1;
<> 144:ef7eb2e8f9f7 877 #endif
<> 144:ef7eb2e8f9f7 878 if (tx_length > max_length) {
<> 144:ef7eb2e8f9f7 879 tx_length = max_length;
<> 144:ef7eb2e8f9f7 880 }
<> 144:ef7eb2e8f9f7 881
<> 144:ef7eb2e8f9f7 882 /* Save amount of TX done by DMA */
<> 144:ef7eb2e8f9f7 883 obj->tx_buff.pos += tx_length;
<> 144:ef7eb2e8f9f7 884
<> 144:ef7eb2e8f9f7 885 LDMA_TransferCfg_t xferConf = LDMA_TRANSFER_CFG_PERIPHERAL(dma_periph);
<> 144:ef7eb2e8f9f7 886 LDMA_Descriptor_t desc = LDMA_DESCRIPTOR_SINGLE_M2P_BYTE((txdata ? txdata : &fill_word), target_addr, tx_length);
<> 144:ef7eb2e8f9f7 887
<> 144:ef7eb2e8f9f7 888 if (obj->spi.bits >= 9) {
<> 144:ef7eb2e8f9f7 889 desc.xfer.size = ldmaCtrlSizeHalf;
<> 144:ef7eb2e8f9f7 890 }
<> 144:ef7eb2e8f9f7 891
<> 144:ef7eb2e8f9f7 892 if (!txdata) {
<> 144:ef7eb2e8f9f7 893 desc.xfer.srcInc = ldmaCtrlSrcIncNone;
<> 144:ef7eb2e8f9f7 894 } else if (obj->tx_buff.width == 32) {
<> 144:ef7eb2e8f9f7 895 if (obj->spi.bits >= 9) {
<> 144:ef7eb2e8f9f7 896 desc.xfer.srcInc = ldmaCtrlSrcIncTwo;
<> 144:ef7eb2e8f9f7 897 } else {
<> 144:ef7eb2e8f9f7 898 desc.xfer.srcInc = ldmaCtrlSrcIncFour;
<> 144:ef7eb2e8f9f7 899 }
<> 144:ef7eb2e8f9f7 900 } else if (obj->tx_buff.width == 16) {
<> 144:ef7eb2e8f9f7 901 if (obj->spi.bits >= 9) {
<> 144:ef7eb2e8f9f7 902 desc.xfer.srcInc = ldmaCtrlSrcIncOne;
<> 144:ef7eb2e8f9f7 903 } else {
<> 144:ef7eb2e8f9f7 904 desc.xfer.srcInc = ldmaCtrlSrcIncTwo;
<> 144:ef7eb2e8f9f7 905 }
<> 144:ef7eb2e8f9f7 906 } else {
<> 144:ef7eb2e8f9f7 907 desc.xfer.srcInc = ldmaCtrlSrcIncOne;
<> 144:ef7eb2e8f9f7 908 }
<> 144:ef7eb2e8f9f7 909
<> 144:ef7eb2e8f9f7 910 // Kick off DMA TX
<> 144:ef7eb2e8f9f7 911 LDMAx_StartTransfer(obj->spi.dmaOptionsTX.dmaChannel, &xferConf, &desc, serial_dmaTransferComplete,obj->spi.dmaOptionsTX.dmaCallback.userPtr);
<> 144:ef7eb2e8f9f7 912 }
<> 144:ef7eb2e8f9f7 913
<> 144:ef7eb2e8f9f7 914 #else
<> 144:ef7eb2e8f9f7 915 /******************************************
<> 144:ef7eb2e8f9f7 916 * void spi_activate_dma(spi_t *obj, void* rxdata, const void* txdata, int tx_length, int rx_length)
<> 144:ef7eb2e8f9f7 917 *
<> 144:ef7eb2e8f9f7 918 * This function will start the DMA engine for SPI transfers
<> 144:ef7eb2e8f9f7 919 *
<> 144:ef7eb2e8f9f7 920 * * rxdata: pointer to RX buffer, if needed.
<> 144:ef7eb2e8f9f7 921 * * txdata: pointer to TX buffer, if needed. Else FF's.
<> 144:ef7eb2e8f9f7 922 * * tx_length: how many bytes will get sent.
<> 144:ef7eb2e8f9f7 923 * * rx_length: how many bytes will get received. If > tx_length, TX will get padded with n lower bits of SPI_FILL_WORD.
<> 144:ef7eb2e8f9f7 924 ******************************************/
<> 144:ef7eb2e8f9f7 925 static void spi_activate_dma(spi_t *obj, void* rxdata, const void* txdata, int tx_length, int rx_length)
<> 144:ef7eb2e8f9f7 926 {
<> 144:ef7eb2e8f9f7 927 /* DMA descriptors */
<> 144:ef7eb2e8f9f7 928 DMA_CfgDescr_TypeDef rxDescrCfg;
<> 144:ef7eb2e8f9f7 929 DMA_CfgDescr_TypeDef txDescrCfg;
<> 144:ef7eb2e8f9f7 930
<> 144:ef7eb2e8f9f7 931 /* Split up transfers if the length is larger than what the DMA supports. */
<> 144:ef7eb2e8f9f7 932 const int DMA_MAX_TRANSFER = (_DMA_CTRL_N_MINUS_1_MASK >> _DMA_CTRL_N_MINUS_1_SHIFT);
<> 144:ef7eb2e8f9f7 933
<> 144:ef7eb2e8f9f7 934 if (tx_length > DMA_MAX_TRANSFER) {
<> 144:ef7eb2e8f9f7 935 tx_length = DMA_MAX_TRANSFER;
<> 144:ef7eb2e8f9f7 936 }
<> 144:ef7eb2e8f9f7 937 if (rx_length > DMA_MAX_TRANSFER) {
<> 144:ef7eb2e8f9f7 938 rx_length = DMA_MAX_TRANSFER;
<> 144:ef7eb2e8f9f7 939 }
<> 144:ef7eb2e8f9f7 940
<> 144:ef7eb2e8f9f7 941 /* Save amount of TX done by DMA */
<> 144:ef7eb2e8f9f7 942 obj->tx_buff.pos += tx_length;
<> 144:ef7eb2e8f9f7 943 obj->rx_buff.pos += rx_length;
<> 144:ef7eb2e8f9f7 944
<> 144:ef7eb2e8f9f7 945 /* Only activate RX DMA if a receive buffer is specified */
<> 144:ef7eb2e8f9f7 946 if (rxdata != NULL) {
<> 144:ef7eb2e8f9f7 947 // Setting up channel descriptor
<> 144:ef7eb2e8f9f7 948 if (obj->rx_buff.width == 32) {
<> 144:ef7eb2e8f9f7 949 rxDescrCfg.dstInc = dmaDataInc4;
<> 144:ef7eb2e8f9f7 950 } else if (obj->rx_buff.width == 16) {
<> 144:ef7eb2e8f9f7 951 rxDescrCfg.dstInc = dmaDataInc2;
<> 144:ef7eb2e8f9f7 952 } else {
<> 144:ef7eb2e8f9f7 953 rxDescrCfg.dstInc = dmaDataInc1;
<> 144:ef7eb2e8f9f7 954 }
<> 144:ef7eb2e8f9f7 955 rxDescrCfg.srcInc = dmaDataIncNone;
<> 144:ef7eb2e8f9f7 956 rxDescrCfg.size = (obj->spi.bits <= 8 ? dmaDataSize1 : dmaDataSize2); //When frame size >= 9, use RXDOUBLE
<> 144:ef7eb2e8f9f7 957 rxDescrCfg.arbRate = dmaArbitrate1;
<> 144:ef7eb2e8f9f7 958 rxDescrCfg.hprot = 0;
<> 144:ef7eb2e8f9f7 959 DMA_CfgDescr(obj->spi.dmaOptionsRX.dmaChannel, true, &rxDescrCfg);
<> 144:ef7eb2e8f9f7 960
<> 144:ef7eb2e8f9f7 961 void * rx_reg;
<> 144:ef7eb2e8f9f7 962 if (obj->spi.bits > 9) {
<> 144:ef7eb2e8f9f7 963 rx_reg = (void *)&obj->spi.spi->RXDOUBLE;
<> 144:ef7eb2e8f9f7 964 } else if (obj->spi.bits == 9) {
<> 144:ef7eb2e8f9f7 965 rx_reg = (void *)&obj->spi.spi->RXDATAX;
<> 144:ef7eb2e8f9f7 966 } else {
<> 144:ef7eb2e8f9f7 967 rx_reg = (void *)&obj->spi.spi->RXDATA;
<> 144:ef7eb2e8f9f7 968 }
<> 144:ef7eb2e8f9f7 969
<> 144:ef7eb2e8f9f7 970 /* Activate RX channel */
<> 144:ef7eb2e8f9f7 971 DMA_ActivateBasic(obj->spi.dmaOptionsRX.dmaChannel,
<> 144:ef7eb2e8f9f7 972 true,
<> 144:ef7eb2e8f9f7 973 false,
<> 144:ef7eb2e8f9f7 974 rxdata,
<> 144:ef7eb2e8f9f7 975 rx_reg,
<> 144:ef7eb2e8f9f7 976 rx_length - 1);
<> 144:ef7eb2e8f9f7 977 }
<> 144:ef7eb2e8f9f7 978
<> 144:ef7eb2e8f9f7 979 // buffer with all FFs.
<> 144:ef7eb2e8f9f7 980 /* Setting up channel descriptor */
<> 144:ef7eb2e8f9f7 981 txDescrCfg.dstInc = dmaDataIncNone;
<> 144:ef7eb2e8f9f7 982 if (txdata == 0) {
<> 144:ef7eb2e8f9f7 983 // Don't increment source when there is no transmit buffer
<> 144:ef7eb2e8f9f7 984 txDescrCfg.srcInc = dmaDataIncNone;
<> 144:ef7eb2e8f9f7 985 } else {
<> 144:ef7eb2e8f9f7 986 if (obj->tx_buff.width == 32) {
<> 144:ef7eb2e8f9f7 987 txDescrCfg.srcInc = dmaDataInc4;
<> 144:ef7eb2e8f9f7 988 } else if (obj->tx_buff.width == 16) {
<> 144:ef7eb2e8f9f7 989 txDescrCfg.srcInc = dmaDataInc2;
<> 144:ef7eb2e8f9f7 990 } else {
<> 144:ef7eb2e8f9f7 991 txDescrCfg.srcInc = dmaDataInc1;
<> 144:ef7eb2e8f9f7 992 }
<> 144:ef7eb2e8f9f7 993 }
<> 144:ef7eb2e8f9f7 994 txDescrCfg.size = (obj->spi.bits <= 8 ? dmaDataSize1 : dmaDataSize2); //When frame size >= 9, use TXDOUBLE
<> 144:ef7eb2e8f9f7 995 txDescrCfg.arbRate = dmaArbitrate1;
<> 144:ef7eb2e8f9f7 996 txDescrCfg.hprot = 0;
<> 144:ef7eb2e8f9f7 997 DMA_CfgDescr(obj->spi.dmaOptionsTX.dmaChannel, true, &txDescrCfg);
<> 144:ef7eb2e8f9f7 998
<> 144:ef7eb2e8f9f7 999 void * tx_reg;
<> 144:ef7eb2e8f9f7 1000 if (obj->spi.bits > 9) {
<> 144:ef7eb2e8f9f7 1001 tx_reg = (void *)&obj->spi.spi->TXDOUBLE;
<> 144:ef7eb2e8f9f7 1002 } else if (obj->spi.bits == 9) {
<> 144:ef7eb2e8f9f7 1003 tx_reg = (void *)&obj->spi.spi->TXDATAX;
<> 144:ef7eb2e8f9f7 1004 } else {
<> 144:ef7eb2e8f9f7 1005 tx_reg = (void *)&obj->spi.spi->TXDATA;
<> 144:ef7eb2e8f9f7 1006 }
<> 144:ef7eb2e8f9f7 1007
<> 144:ef7eb2e8f9f7 1008 /* Activate TX channel */
<> 144:ef7eb2e8f9f7 1009 DMA_ActivateBasic(obj->spi.dmaOptionsTX.dmaChannel,
<> 144:ef7eb2e8f9f7 1010 true,
<> 144:ef7eb2e8f9f7 1011 false,
<> 144:ef7eb2e8f9f7 1012 tx_reg,
<> 144:ef7eb2e8f9f7 1013 (txdata == 0 ? &fill_word : (void *)txdata), // When there is nothing to transmit, point to static fill word
<> 144:ef7eb2e8f9f7 1014 (tx_length - 1));
<> 144:ef7eb2e8f9f7 1015 }
<> 144:ef7eb2e8f9f7 1016 #endif //LDMA_PRESENT
<> 144:ef7eb2e8f9f7 1017 /********************************************************************
<> 144:ef7eb2e8f9f7 1018 * spi_master_transfer_dma(spi_t *obj, const void *txdata, void *rxdata, int tx_length, int rx_length, void *cb, DMAUsage hint)
<> 144:ef7eb2e8f9f7 1019 *
<> 144:ef7eb2e8f9f7 1020 * Start an SPI transfer by using DMA and the supplied hint for DMA usage
<> 144:ef7eb2e8f9f7 1021 *
<> 144:ef7eb2e8f9f7 1022 * * obj: pointer to specific SPI instance
<> 144:ef7eb2e8f9f7 1023 * * rxdata: pointer to rx buffer. If null, we will assume only TX is relevant, and RX will be ignored.
<> 144:ef7eb2e8f9f7 1024 * * txdata: pointer to TX buffer. If null, we will assume only the read is relevant, and will send FF's for reading back.
<> 144:ef7eb2e8f9f7 1025 * * tx_length / rx_length: how many bytes should be written / read.
<> 144:ef7eb2e8f9f7 1026 * * cb: thunk pointer into CPP-land to get the spi object
<> 144:ef7eb2e8f9f7 1027 * * hint: hint for the requested DMA usage.
<> 144:ef7eb2e8f9f7 1028 * * NEVER: do not use DMA, but use IRQ instead
<> 144:ef7eb2e8f9f7 1029 * * OPPORTUNISTIC: use DMA if there are channels available, but return them after the transfer.
<> 144:ef7eb2e8f9f7 1030 * * ALWAYS: use DMA if channels are available, and hold on to the channels after the transfer.
<> 144:ef7eb2e8f9f7 1031 * If the previous transfer has kept the channel, that channel will continue to get used.
<> 144:ef7eb2e8f9f7 1032 *
<> 144:ef7eb2e8f9f7 1033 ********************************************************************/
<> 144:ef7eb2e8f9f7 1034 void spi_master_transfer_dma(spi_t *obj, const void *txdata, void *rxdata, int tx_length, int rx_length, void* cb, DMAUsage hint)
<> 144:ef7eb2e8f9f7 1035 {
<> 144:ef7eb2e8f9f7 1036 /* Init DMA here to include it in the power figure */
<> 144:ef7eb2e8f9f7 1037 dma_init();
<> 144:ef7eb2e8f9f7 1038 /* Clear TX and RX registers */
<> 144:ef7eb2e8f9f7 1039 obj->spi.spi->CMD = USART_CMD_CLEARTX;
<> 144:ef7eb2e8f9f7 1040 obj->spi.spi->CMD = USART_CMD_CLEARRX;
<> 144:ef7eb2e8f9f7 1041 /* If the DMA channels are already allocated, we can assume they have been setup already */
<> 144:ef7eb2e8f9f7 1042 if (hint != DMA_USAGE_NEVER && obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_ALLOCATED) {
<> 144:ef7eb2e8f9f7 1043 /* setup has already been done, so just activate the transfer */
<> 144:ef7eb2e8f9f7 1044 spi_activate_dma(obj, rxdata, txdata, tx_length, rx_length);
<> 144:ef7eb2e8f9f7 1045 } else if (hint == DMA_USAGE_NEVER) {
<> 144:ef7eb2e8f9f7 1046 /* use IRQ */
<> 144:ef7eb2e8f9f7 1047 obj->spi.spi->IFC = 0xFFFFFFFF;
<> 144:ef7eb2e8f9f7 1048 spi_master_write_asynch(obj);
<> 144:ef7eb2e8f9f7 1049 spi_enable_interrupt(obj, (uint32_t)cb, true);
<> 144:ef7eb2e8f9f7 1050 } else {
<> 144:ef7eb2e8f9f7 1051 /* try to acquire channels */
<> 144:ef7eb2e8f9f7 1052 dma_init();
<> 144:ef7eb2e8f9f7 1053 spi_enable_dma(obj, hint);
<> 144:ef7eb2e8f9f7 1054
<> 144:ef7eb2e8f9f7 1055 /* decide between DMA and IRQ */
<> 144:ef7eb2e8f9f7 1056 if (obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_ALLOCATED || obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_TEMPORARY_ALLOCATED) {
<> 144:ef7eb2e8f9f7 1057 /* disable the interrupts that may have been left open previously */
<> 144:ef7eb2e8f9f7 1058 spi_enable_interrupt(obj, (uint32_t)cb, false);
<> 144:ef7eb2e8f9f7 1059
<> 144:ef7eb2e8f9f7 1060 /* DMA channels are allocated, so do their setup */
<> 144:ef7eb2e8f9f7 1061 spi_master_dma_channel_setup(obj, cb);
<> 144:ef7eb2e8f9f7 1062 /* and activate the transfer */
<> 144:ef7eb2e8f9f7 1063 spi_activate_dma(obj, rxdata, txdata, tx_length, rx_length);
<> 144:ef7eb2e8f9f7 1064 } else {
<> 144:ef7eb2e8f9f7 1065 /* DMA is unavailable, so fall back to IRQ */
<> 144:ef7eb2e8f9f7 1066 obj->spi.spi->IFC = 0xFFFFFFFF;
<> 144:ef7eb2e8f9f7 1067 spi_master_write_asynch(obj);
<> 144:ef7eb2e8f9f7 1068 spi_enable_interrupt(obj, (uint32_t)cb, true);
<> 144:ef7eb2e8f9f7 1069 }
<> 144:ef7eb2e8f9f7 1070 }
<> 144:ef7eb2e8f9f7 1071 }
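/*------------------------------------------------------------------------------
 * Editor's note (illustrative sketch, not part of the original file): decision
 * flow of the function above for a 16-byte full-duplex transfer, assuming 'cb'
 * is the callback thunk supplied by the C++ layer:
 *
 *     spi_master_transfer_dma(&bus, tx, rx, 16, 16, cb, DMA_USAGE_OPPORTUNISTIC);
 *       - dma_init() and the FIFO clear always run first
 *       - channels available -> spi_master_dma_channel_setup() + spi_activate_dma()
 *       - otherwise          -> IRQ fallback: spi_master_write_asynch() pre-fills
 *                               the TX FIFO and the RXDATAV interrupt drives the rest
 *----------------------------------------------------------------------------*/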
<> 144:ef7eb2e8f9f7 1072
<> 144:ef7eb2e8f9f7 1073 /** Begin the SPI transfer. Buffer pointers and lengths are specified in tx_buff and rx_buff
<> 144:ef7eb2e8f9f7 1074 *
<> 144:ef7eb2e8f9f7 1075 * @param[in] obj The SPI object which holds the transfer information
<> 144:ef7eb2e8f9f7 1076 * @param[in] tx The buffer to send
<> 144:ef7eb2e8f9f7 1077 * @param[in] tx_length The number of words to transmit
<> 144:ef7eb2e8f9f7 1078 * @param[in] rx The buffer to receive
<> 144:ef7eb2e8f9f7 1079 * @param[in] rx_length The number of words to receive
<> 144:ef7eb2e8f9f7 1080 * @param[in] bit_width The bit width of buffer words
<> 144:ef7eb2e8f9f7 1081 * @param[in] handler SPI interrupt handler
<> 144:ef7eb2e8f9f7 1082 * @param[in] event The logical OR of events to be registered
<> 144:ef7eb2e8f9f7 1083 * @param[in] hint A suggestion for how to use DMA with this transfer
<> 144:ef7eb2e8f9f7 1084 */
<> 144:ef7eb2e8f9f7 1085 void spi_master_transfer(spi_t *obj, const void *tx, size_t tx_length, void *rx, size_t rx_length, uint8_t bit_width, uint32_t handler, uint32_t event, DMAUsage hint)
<> 144:ef7eb2e8f9f7 1086 {
<> 144:ef7eb2e8f9f7 1087 if( spi_active(obj) ) return;
<> 144:ef7eb2e8f9f7 1088
<> 144:ef7eb2e8f9f7 1089 /* update fill word if on 9-bit frame size */
<> 144:ef7eb2e8f9f7 1090 if(obj->spi.bits == 9) fill_word = SPI_FILL_WORD & 0x1FF;
<> 144:ef7eb2e8f9f7 1091 else fill_word = SPI_FILL_WORD;
<> 144:ef7eb2e8f9f7 1092
<> 144:ef7eb2e8f9f7 1093 /* Corner case: nothing to transmit, so clock out fill words for the whole RX length */
<> 144:ef7eb2e8f9f7 1094 if(tx_length == 0) {
<> 144:ef7eb2e8f9f7 1095 tx_length = rx_length;
<> 144:ef7eb2e8f9f7 1096 tx = (void*) 0;
<> 144:ef7eb2e8f9f7 1097 }
<> 144:ef7eb2e8f9f7 1098
<> 144:ef7eb2e8f9f7 1099 /* First, set the buffer */
<> 144:ef7eb2e8f9f7 1100 spi_buffer_set(obj, tx, tx_length, rx, rx_length, bit_width);
<> 144:ef7eb2e8f9f7 1101
<> 144:ef7eb2e8f9f7 1102 /* Then, enable the events */
<> 144:ef7eb2e8f9f7 1103 spi_enable_event(obj, SPI_EVENT_ALL, false);
<> 144:ef7eb2e8f9f7 1104 spi_enable_event(obj, event, true);
<> 144:ef7eb2e8f9f7 1105
<> 144:ef7eb2e8f9f7 1106 // Set the sleep mode
<> 144:ef7eb2e8f9f7 1107 blockSleepMode(SPI_LEAST_ACTIVE_SLEEPMODE);
<> 144:ef7eb2e8f9f7 1108
<> 144:ef7eb2e8f9f7 1109 /* And kick off the transfer */
<> 144:ef7eb2e8f9f7 1110 spi_master_transfer_dma(obj, tx, rx, tx_length, rx_length, (void*)handler, hint);
<> 144:ef7eb2e8f9f7 1111 }
<> 144:ef7eb2e8f9f7 1112
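/* Usage sketch (editorial addition): starting an asynchronous transfer through
 * the HAL entry point above. `my_irq_entry` stands in for the interrupt entry
 * point normally supplied by CPP-land, and the buffers are placeholders; the
 * SPI_EVENT_* flags, the 8-bit width and DMA_USAGE_OPPORTUNISTIC come from the
 * mbed HAL headers.
 *
 *     extern void my_irq_entry(void);   // hypothetical IRQ entry point
 *
 *     static uint8_t out[4] = { 0x01, 0x02, 0x03, 0x04 };
 *     static uint8_t in[4];
 *
 *     void example_async_transfer(spi_t *obj)
 *     {
 *         spi_master_transfer(obj,
 *                             out, sizeof(out),        // TX buffer and length (words)
 *                             in,  sizeof(in),         // RX buffer and length (words)
 *                             8,                       // bit width of each buffer word
 *                             (uint32_t)my_irq_entry,  // handler registered for the transfer
 *                             SPI_EVENT_COMPLETE | SPI_EVENT_ERROR,
 *                             DMA_USAGE_OPPORTUNISTIC);
 *     }
 */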
<> 144:ef7eb2e8f9f7 1113
<> 144:ef7eb2e8f9f7 1114 /********************************************************************
<> 144:ef7eb2e8f9f7 1115 * uint32_t spi_irq_handler_asynch(spi_t* obj)
<> 144:ef7eb2e8f9f7 1116 *
<> 144:ef7eb2e8f9f7 1117 * handler which should get called by CPP-land when either a DMA or SPI IRQ gets fired for an SPI transaction.
<> 144:ef7eb2e8f9f7 1118 *
<> 144:ef7eb2e8f9f7 1119 * * obj: pointer to the specific SPI instance
<> 144:ef7eb2e8f9f7 1120 *
<> 144:ef7eb2e8f9f7 1121 * return: event mask. Currently only 0 or SPI_EVENT_COMPLETE upon transfer completion.
<> 144:ef7eb2e8f9f7 1122 *
<> 144:ef7eb2e8f9f7 1123 ********************************************************************/
<> 144:ef7eb2e8f9f7 1124 #ifdef LDMA_PRESENT
<> 144:ef7eb2e8f9f7 1125 uint32_t spi_irq_handler_asynch(spi_t* obj)
<> 144:ef7eb2e8f9f7 1126 {
<> 144:ef7eb2e8f9f7 1127 if (obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_ALLOCATED || obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_TEMPORARY_ALLOCATED) {
<> 144:ef7eb2e8f9f7 1128 /* DMA implementation */
<> 144:ef7eb2e8f9f7 1129 /* If there is still data in the TX buffer, setup a new transfer. */
<> 144:ef7eb2e8f9f7 1130 if (obj->tx_buff.pos < obj->tx_buff.length) {
<> 144:ef7eb2e8f9f7 1131 /* Find position and remaining length without modifying tx_buff. */
<> 144:ef7eb2e8f9f7 1132 void* tx_pointer = (char*)obj->tx_buff.buffer + obj->tx_buff.pos;
<> 144:ef7eb2e8f9f7 1133 uint32_t tx_length = obj->tx_buff.length - obj->tx_buff.pos;
<> 144:ef7eb2e8f9f7 1134
<> 144:ef7eb2e8f9f7 1135 /* Begin transfer. Rely on spi_activate_dma to split up the transfer further. */
<> 144:ef7eb2e8f9f7 1136 spi_activate_dma(obj, obj->rx_buff.buffer, tx_pointer, tx_length, obj->rx_buff.length);
<> 144:ef7eb2e8f9f7 1137
<> 144:ef7eb2e8f9f7 1138 return 0;
<> 144:ef7eb2e8f9f7 1139 }
<> 144:ef7eb2e8f9f7 1140 /* If there is an RX transfer ongoing, wait for it to finish */
<> 144:ef7eb2e8f9f7 1141 if (LDMAx_ChannelEnabled(obj->spi.dmaOptionsRX.dmaChannel)) {
<> 144:ef7eb2e8f9f7 1142 /* Check if we need to kick off TX transfer again to force more incoming data. */
<> 144:ef7eb2e8f9f7 1143 if (LDMA_TransferDone(obj->spi.dmaOptionsTX.dmaChannel) && (obj->tx_buff.pos < obj->rx_buff.length)) {
<> 144:ef7eb2e8f9f7 1144 void* tx_pointer = (char*)obj->tx_buff.buffer + obj->tx_buff.pos;
<> 144:ef7eb2e8f9f7 1145 uint32_t tx_length = obj->tx_buff.length - obj->tx_buff.pos;
<> 144:ef7eb2e8f9f7 1146 /* Begin transfer. Rely on spi_activate_dma to split up the transfer further. */
<> 144:ef7eb2e8f9f7 1147 spi_activate_dma(obj, obj->rx_buff.buffer, tx_pointer, tx_length, obj->rx_buff.length);
<> 144:ef7eb2e8f9f7 1148 } else return 0;
<> 144:ef7eb2e8f9f7 1149 }
<> 144:ef7eb2e8f9f7 1150 /* If there is still a TX transfer ongoing (tx_length > rx_length), wait for it to finish */
<> 144:ef7eb2e8f9f7 1151 if (!LDMA_TransferDone(obj->spi.dmaOptionsTX.dmaChannel)) {
<> 144:ef7eb2e8f9f7 1152 return 0;
<> 144:ef7eb2e8f9f7 1153 }
<> 144:ef7eb2e8f9f7 1154 /* Release the dma channels if they were opportunistically allocated */
<> 144:ef7eb2e8f9f7 1155 if (obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_TEMPORARY_ALLOCATED) {
<> 144:ef7eb2e8f9f7 1156 dma_channel_free(obj->spi.dmaOptionsTX.dmaChannel);
<> 144:ef7eb2e8f9f7 1157 dma_channel_free(obj->spi.dmaOptionsRX.dmaChannel);
<> 144:ef7eb2e8f9f7 1158 obj->spi.dmaOptionsTX.dmaUsageState = DMA_USAGE_OPPORTUNISTIC;
<> 144:ef7eb2e8f9f7 1159 }
<> 144:ef7eb2e8f9f7 1160
<> 144:ef7eb2e8f9f7 1161 /* Wait for transmission to complete before signalling completion to user code */
<> 144:ef7eb2e8f9f7 1162 while(!(obj->spi.spi->STATUS & USART_STATUS_TXC));
<> 144:ef7eb2e8f9f7 1163 unblockSleepMode(SPI_LEAST_ACTIVE_SLEEPMODE);
<> 144:ef7eb2e8f9f7 1164 /* return to CPP land to say we're finished */
<> 144:ef7eb2e8f9f7 1165 return SPI_EVENT_COMPLETE;
<> 144:ef7eb2e8f9f7 1166 } else {
<> 144:ef7eb2e8f9f7 1167 /* IRQ implementation */
<> 144:ef7eb2e8f9f7 1168 if (spi_master_rx_int_flag(obj)) {
<> 144:ef7eb2e8f9f7 1169 spi_master_read_asynch(obj);
<> 144:ef7eb2e8f9f7 1170 }
<> 144:ef7eb2e8f9f7 1171
<> 144:ef7eb2e8f9f7 1172 if (spi_master_tx_int_flag(obj)) {
<> 144:ef7eb2e8f9f7 1173 spi_master_write_asynch(obj);
<> 144:ef7eb2e8f9f7 1174 }
<> 144:ef7eb2e8f9f7 1175
<> 144:ef7eb2e8f9f7 1176 uint32_t event = spi_event_check(obj);
<> 144:ef7eb2e8f9f7 1177 if (event & SPI_EVENT_INTERNAL_TRANSFER_COMPLETE) {
<> 144:ef7eb2e8f9f7 1178 /* disable interrupts */
<> 144:ef7eb2e8f9f7 1179 spi_enable_interrupt(obj, (uint32_t)NULL, false);
<> 144:ef7eb2e8f9f7 1180
<> 144:ef7eb2e8f9f7 1181 unblockSleepMode(SPI_LEAST_ACTIVE_SLEEPMODE);
<> 144:ef7eb2e8f9f7 1182 /* Return the event back to userland */
<> 144:ef7eb2e8f9f7 1183 return event;
<> 144:ef7eb2e8f9f7 1184 }
<> 144:ef7eb2e8f9f7 1185
<> 144:ef7eb2e8f9f7 1186 return 0;
<> 144:ef7eb2e8f9f7 1187 }
<> 144:ef7eb2e8f9f7 1188 }
<> 144:ef7eb2e8f9f7 1189 #else
<> 144:ef7eb2e8f9f7 1190 uint32_t spi_irq_handler_asynch(spi_t* obj)
<> 144:ef7eb2e8f9f7 1191 {
<> 144:ef7eb2e8f9f7 1192
<> 144:ef7eb2e8f9f7 1193 /* Determine whether the current scenario is DMA or IRQ, and act accordingly */
<> 144:ef7eb2e8f9f7 1194
<> 144:ef7eb2e8f9f7 1195 if (obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_ALLOCATED || obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_TEMPORARY_ALLOCATED) {
<> 144:ef7eb2e8f9f7 1196 /* DMA implementation */
<> 144:ef7eb2e8f9f7 1197
<> 144:ef7eb2e8f9f7 1198 /* If there is still data in the TX buffer, setup a new transfer. */
<> 144:ef7eb2e8f9f7 1199 if (obj->tx_buff.pos < obj->tx_buff.length) {
<> 144:ef7eb2e8f9f7 1200 /* If there is still a TX transfer ongoing, let it finish
<> 144:ef7eb2e8f9f7 1201 * before (if necessary) kicking off a new transfer */
<> 144:ef7eb2e8f9f7 1202 if (DMA_ChannelEnabled(obj->spi.dmaOptionsTX.dmaChannel)) {
<> 144:ef7eb2e8f9f7 1203 return 0;
<> 144:ef7eb2e8f9f7 1204 }
<> 144:ef7eb2e8f9f7 1205 /* Find position and remaining length without modifying tx_buff. */
<> 144:ef7eb2e8f9f7 1206 void * tx_pointer;
<> 144:ef7eb2e8f9f7 1207 if (obj->tx_buff.width == 32) {
<> 144:ef7eb2e8f9f7 1208 tx_pointer = ((uint32_t *)obj->tx_buff.buffer) + obj->tx_buff.pos;
<> 144:ef7eb2e8f9f7 1209 } else if (obj->tx_buff.width == 16) {
<> 144:ef7eb2e8f9f7 1210 tx_pointer = ((uint16_t *)obj->tx_buff.buffer) + obj->tx_buff.pos;
<> 144:ef7eb2e8f9f7 1211 } else {
<> 144:ef7eb2e8f9f7 1212 tx_pointer = ((uint8_t *)obj->tx_buff.buffer) + obj->tx_buff.pos;
<> 144:ef7eb2e8f9f7 1213 }
<> 144:ef7eb2e8f9f7 1214 uint32_t tx_length = obj->tx_buff.length - obj->tx_buff.pos;
<> 144:ef7eb2e8f9f7 1215
<> 144:ef7eb2e8f9f7 1216 /* Refresh RX transfer too if it exists */
<> 144:ef7eb2e8f9f7 1217 void * rx_pointer = NULL;
<> 144:ef7eb2e8f9f7 1218 if (obj->rx_buff.pos < obj->rx_buff.length) {
<> 144:ef7eb2e8f9f7 1219 if (obj->rx_buff.width == 32) {
<> 144:ef7eb2e8f9f7 1220 rx_pointer = ((uint32_t *)obj->rx_buff.buffer) + obj->rx_buff.pos;
<> 144:ef7eb2e8f9f7 1221 } else if (obj->rx_buff.width == 16) {
<> 144:ef7eb2e8f9f7 1222 rx_pointer = ((uint16_t *)obj->rx_buff.buffer) + obj->rx_buff.pos;
<> 144:ef7eb2e8f9f7 1223 } else {
<> 144:ef7eb2e8f9f7 1224 rx_pointer = ((uint8_t *)obj->rx_buff.buffer) + obj->rx_buff.pos;
<> 144:ef7eb2e8f9f7 1225 }
<> 144:ef7eb2e8f9f7 1226 }
<> 144:ef7eb2e8f9f7 1227 uint32_t rx_length = obj->rx_buff.length - obj->rx_buff.pos;
<> 144:ef7eb2e8f9f7 1228
<> 144:ef7eb2e8f9f7 1229 /* Wait for the previous transfer to complete. */
<> 144:ef7eb2e8f9f7 1230 while(!(obj->spi.spi->STATUS & USART_STATUS_TXC));
<> 144:ef7eb2e8f9f7 1231
<> 144:ef7eb2e8f9f7 1232 /* Begin transfer. Rely on spi_activate_dma to split up the transfer further. */
<> 144:ef7eb2e8f9f7 1233 spi_activate_dma(obj, rx_pointer, tx_pointer, tx_length, rx_length);
<> 144:ef7eb2e8f9f7 1234
<> 144:ef7eb2e8f9f7 1235 return 0;
<> 144:ef7eb2e8f9f7 1236 }
<> 144:ef7eb2e8f9f7 1237
<> 144:ef7eb2e8f9f7 1238 /* If an RX transfer is ongoing, continue processing RX data */
<> 144:ef7eb2e8f9f7 1239 if (DMA_ChannelEnabled(obj->spi.dmaOptionsRX.dmaChannel)) {
<> 144:ef7eb2e8f9f7 1240 /* Check if we need to kick off TX transfer again to force more incoming data. */
<> 144:ef7eb2e8f9f7 1241 if (!DMA_ChannelEnabled(obj->spi.dmaOptionsTX.dmaChannel) && (obj->rx_buff.pos < obj->rx_buff.length)) {
<> 144:ef7eb2e8f9f7 1242 // Record how many fill words are still needed to complete RX, and mark TX as fully consumed
<> 144:ef7eb2e8f9f7 1243 int length_diff = obj->rx_buff.length - obj->rx_buff.pos;
<> 144:ef7eb2e8f9f7 1244 obj->tx_buff.pos = obj->rx_buff.length;
<> 144:ef7eb2e8f9f7 1245
<> 144:ef7eb2e8f9f7 1246 //Kick off a new DMA transfer
<> 144:ef7eb2e8f9f7 1247 DMA_CfgDescr_TypeDef txDescrCfg;
<> 144:ef7eb2e8f9f7 1248
<> 144:ef7eb2e8f9f7 1249 fill_word = SPI_FILL_WORD;
<> 144:ef7eb2e8f9f7 1250 /* Setting up channel descriptor */
<> 144:ef7eb2e8f9f7 1251 txDescrCfg.dstInc = dmaDataIncNone;
<> 144:ef7eb2e8f9f7 1252 txDescrCfg.srcInc = dmaDataIncNone; //Do not increment source pointer when there is no transmit buffer
<> 144:ef7eb2e8f9f7 1253 txDescrCfg.size = (obj->spi.bits <= 8 ? dmaDataSize1 : dmaDataSize2); // Use 16-bit DMA writes for frame sizes above 8 bits (TXDOUBLE/TXDATAX)
<> 144:ef7eb2e8f9f7 1254 txDescrCfg.arbRate = dmaArbitrate1;
<> 144:ef7eb2e8f9f7 1255 txDescrCfg.hprot = 0;
<> 144:ef7eb2e8f9f7 1256 DMA_CfgDescr(obj->spi.dmaOptionsTX.dmaChannel, true, &txDescrCfg);
<> 144:ef7eb2e8f9f7 1257
<> 144:ef7eb2e8f9f7 1258 void * tx_reg;
<> 144:ef7eb2e8f9f7 1259 if (obj->spi.bits > 9) {
<> 144:ef7eb2e8f9f7 1260 tx_reg = (void *)&obj->spi.spi->TXDOUBLE;
<> 144:ef7eb2e8f9f7 1261 } else if (obj->spi.bits == 9) {
<> 144:ef7eb2e8f9f7 1262 tx_reg = (void *)&obj->spi.spi->TXDATAX;
<> 144:ef7eb2e8f9f7 1263 } else {
<> 144:ef7eb2e8f9f7 1264 tx_reg = (void *)&obj->spi.spi->TXDATA;
<> 144:ef7eb2e8f9f7 1265 }
<> 144:ef7eb2e8f9f7 1266
<> 144:ef7eb2e8f9f7 1267 /* Activate TX channel */
<> 144:ef7eb2e8f9f7 1268 DMA_ActivateBasic(obj->spi.dmaOptionsTX.dmaChannel,
<> 144:ef7eb2e8f9f7 1269 true,
<> 144:ef7eb2e8f9f7 1270 false,
<> 144:ef7eb2e8f9f7 1271 tx_reg, //When frame size > 9, point to TXDOUBLE
<> 144:ef7eb2e8f9f7 1272 &fill_word, // When there is nothing to transmit, point to static fill word
<> 144:ef7eb2e8f9f7 1273 length_diff - 1);
<> 144:ef7eb2e8f9f7 1274 } else {
<> 144:ef7eb2e8f9f7 1275 /* Nothing to do */
<> 144:ef7eb2e8f9f7 1276 return 0;
<> 144:ef7eb2e8f9f7 1277 }
<> 144:ef7eb2e8f9f7 1278 }
<> 144:ef7eb2e8f9f7 1279
<> 144:ef7eb2e8f9f7 1280 /* Release the dma channels if they were opportunistically allocated */
<> 144:ef7eb2e8f9f7 1281 if (obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_TEMPORARY_ALLOCATED) {
<> 144:ef7eb2e8f9f7 1282 dma_channel_free(obj->spi.dmaOptionsTX.dmaChannel);
<> 144:ef7eb2e8f9f7 1283 dma_channel_free(obj->spi.dmaOptionsRX.dmaChannel);
<> 144:ef7eb2e8f9f7 1284 obj->spi.dmaOptionsTX.dmaUsageState = DMA_USAGE_OPPORTUNISTIC;
<> 144:ef7eb2e8f9f7 1285 }
<> 144:ef7eb2e8f9f7 1286
<> 144:ef7eb2e8f9f7 1287 /* Wait for transmission to complete before signalling completion to user code */
<> 144:ef7eb2e8f9f7 1288 while(!(obj->spi.spi->STATUS & USART_STATUS_TXC));
<> 144:ef7eb2e8f9f7 1289 unblockSleepMode(SPI_LEAST_ACTIVE_SLEEPMODE);
<> 144:ef7eb2e8f9f7 1290
<> 144:ef7eb2e8f9f7 1291 /* return to CPP land to say we're finished */
<> 144:ef7eb2e8f9f7 1292 return SPI_EVENT_COMPLETE;
<> 144:ef7eb2e8f9f7 1293 } else {
<> 144:ef7eb2e8f9f7 1294 /* IRQ implementation */
<> 144:ef7eb2e8f9f7 1295 if (spi_master_rx_int_flag(obj)) {
<> 144:ef7eb2e8f9f7 1296 spi_master_read_asynch(obj);
<> 144:ef7eb2e8f9f7 1297 }
<> 144:ef7eb2e8f9f7 1298
<> 144:ef7eb2e8f9f7 1299 if (spi_master_tx_int_flag(obj)) {
<> 144:ef7eb2e8f9f7 1300 spi_master_write_asynch(obj);
<> 144:ef7eb2e8f9f7 1301 }
<> 144:ef7eb2e8f9f7 1302
<> 144:ef7eb2e8f9f7 1303 uint32_t event = spi_event_check(obj);
<> 144:ef7eb2e8f9f7 1304 if (event & SPI_EVENT_INTERNAL_TRANSFER_COMPLETE) {
<> 144:ef7eb2e8f9f7 1305 /* disable interrupts */
<> 144:ef7eb2e8f9f7 1306 spi_enable_interrupt(obj, (uint32_t)NULL, false);
<> 144:ef7eb2e8f9f7 1307
<> 144:ef7eb2e8f9f7 1308 /* Wait for transmission to complete before signalling completion to user code */
<> 144:ef7eb2e8f9f7 1309 while(!(obj->spi.spi->STATUS & USART_STATUS_TXC));
<> 144:ef7eb2e8f9f7 1310 unblockSleepMode(SPI_LEAST_ACTIVE_SLEEPMODE);
<> 144:ef7eb2e8f9f7 1311
<> 144:ef7eb2e8f9f7 1312 /* Return the event back to userland */
<> 144:ef7eb2e8f9f7 1313 return event;
<> 144:ef7eb2e8f9f7 1314 }
<> 144:ef7eb2e8f9f7 1315
<> 144:ef7eb2e8f9f7 1316 return 0;
<> 144:ef7eb2e8f9f7 1317 }
<> 144:ef7eb2e8f9f7 1318 }
<> 144:ef7eb2e8f9f7 1319 #endif // LDMA_PRESENT
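/* Dispatch sketch (editorial addition): how an upper layer might consume the
 * event mask returned by spi_irq_handler_asynch(). `user_callback` is a
 * hypothetical placeholder; in mbed this logic normally lives in the C++ SPI
 * driver's interrupt handler.
 *
 *     extern void user_callback(uint32_t event);   // hypothetical application hook
 *
 *     void example_spi_irq(spi_t *obj)
 *     {
 *         uint32_t event = spi_irq_handler_asynch(obj);
 *         if (event & SPI_EVENT_COMPLETE) {
 *             // Transfer finished on either the DMA or the IRQ path
 *             user_callback(event);
 *         }
 *         // A return value of 0 means the transfer is still in progress
 *     }
 */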
<> 144:ef7eb2e8f9f7 1320 /** Abort an SPI transfer
<> 144:ef7eb2e8f9f7 1321 *
<> 144:ef7eb2e8f9f7 1322 * @param obj The SPI peripheral to stop
<> 144:ef7eb2e8f9f7 1323 */
<> 144:ef7eb2e8f9f7 1324 void spi_abort_asynch(spi_t *obj)
<> 144:ef7eb2e8f9f7 1325 {
<> 144:ef7eb2e8f9f7 1326 // If we're not currently transferring, then there's nothing to do here
<> 144:ef7eb2e8f9f7 1327 if(spi_active(obj) == 0) return;
<> 144:ef7eb2e8f9f7 1328
<> 144:ef7eb2e8f9f7 1329 // Determine whether we're running DMA or interrupt
<> 144:ef7eb2e8f9f7 1330 if (obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_ALLOCATED || obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_TEMPORARY_ALLOCATED) {
<> 144:ef7eb2e8f9f7 1331 // Cancel the DMA transfers
<> 144:ef7eb2e8f9f7 1332 #ifdef LDMA_PRESENT
<> 144:ef7eb2e8f9f7 1333 LDMA_StopTransfer(obj->spi.dmaOptionsTX.dmaChannel);
<> 144:ef7eb2e8f9f7 1334 LDMA_StopTransfer(obj->spi.dmaOptionsRX.dmaChannel);
<> 144:ef7eb2e8f9f7 1335 #else
<> 144:ef7eb2e8f9f7 1336 DMA_ChannelEnable(obj->spi.dmaOptionsTX.dmaChannel, false);
<> 144:ef7eb2e8f9f7 1337 DMA_ChannelEnable(obj->spi.dmaOptionsRX.dmaChannel, false);
<> 144:ef7eb2e8f9f7 1338 #endif
<> 144:ef7eb2e8f9f7 1339 /* Release the dma channels if they were opportunistically allocated */
<> 144:ef7eb2e8f9f7 1340 if (obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_TEMPORARY_ALLOCATED) {
<> 144:ef7eb2e8f9f7 1341 dma_channel_free(obj->spi.dmaOptionsTX.dmaChannel);
<> 144:ef7eb2e8f9f7 1342 dma_channel_free(obj->spi.dmaOptionsRX.dmaChannel);
<> 144:ef7eb2e8f9f7 1343 obj->spi.dmaOptionsTX.dmaUsageState = DMA_USAGE_OPPORTUNISTIC;
<> 144:ef7eb2e8f9f7 1344 }
<> 144:ef7eb2e8f9f7 1345
<> 144:ef7eb2e8f9f7 1346 } else {
<> 144:ef7eb2e8f9f7 1347 // Interrupt implementation: switch off interrupts
<> 144:ef7eb2e8f9f7 1348 spi_enable_interrupt(obj, (uint32_t)NULL, false);
<> 144:ef7eb2e8f9f7 1349 }
<> 144:ef7eb2e8f9f7 1350
<> 144:ef7eb2e8f9f7 1351 // Release sleep mode block
<> 144:ef7eb2e8f9f7 1352 unblockSleepMode(SPI_LEAST_ACTIVE_SLEEPMODE);
<> 144:ef7eb2e8f9f7 1353 }
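/* Abort sketch (editorial addition): a hypothetical timeout path. After
 * spi_abort_asynch() returns, any DMA channels have been stopped (and released
 * if they were only temporarily allocated), interrupts are disabled on the IRQ
 * path, and the sleep block taken when the transfer started is released, so the
 * peripheral can be reused for a new transfer.
 *
 *     void example_on_timeout(spi_t *obj)
 *     {
 *         if (spi_active(obj)) {
 *             spi_abort_asynch(obj);
 *         }
 *     }
 */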
<> 144:ef7eb2e8f9f7 1354
<> 144:ef7eb2e8f9f7 1355 #endif