mbed library sources. Supersedes mbed-src.

Dependents:   Nucleo_Hello_Encoder BLE_iBeaconScan AM1805_DEMO DISCO-F429ZI_ExportTemplate1 ...

Committer: AnnaBridge
Date: Thu Dec 07 14:01:42 2017 +0000
Revision: 179:b0033dcd6934
Parent: 170:19eb464bc2be
Child: 180:96ed750bd169
mbed-dev library. Release version 157

Who changed what in which revision?

User | Revision | Line number | New contents of line
<> 144:ef7eb2e8f9f7 1 /***************************************************************************//**
<> 144:ef7eb2e8f9f7 2 * @file spi_api.c
<> 144:ef7eb2e8f9f7 3 *******************************************************************************
<> 144:ef7eb2e8f9f7 4 * @section License
<> 144:ef7eb2e8f9f7 5 * <b>(C) Copyright 2015 Silicon Labs, http://www.silabs.com</b>
<> 144:ef7eb2e8f9f7 6 *******************************************************************************
<> 144:ef7eb2e8f9f7 7 *
<> 144:ef7eb2e8f9f7 8 * SPDX-License-Identifier: Apache-2.0
<> 144:ef7eb2e8f9f7 9 *
<> 144:ef7eb2e8f9f7 10 * Licensed under the Apache License, Version 2.0 (the "License"); you may
<> 144:ef7eb2e8f9f7 11 * not use this file except in compliance with the License.
<> 144:ef7eb2e8f9f7 12 * You may obtain a copy of the License at
<> 144:ef7eb2e8f9f7 13 *
<> 144:ef7eb2e8f9f7 14 * http://www.apache.org/licenses/LICENSE-2.0
<> 144:ef7eb2e8f9f7 15 *
<> 144:ef7eb2e8f9f7 16 * Unless required by applicable law or agreed to in writing, software
<> 144:ef7eb2e8f9f7 17 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
<> 144:ef7eb2e8f9f7 18 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
<> 144:ef7eb2e8f9f7 19 * See the License for the specific language governing permissions and
<> 144:ef7eb2e8f9f7 20 * limitations under the License.
<> 144:ef7eb2e8f9f7 21 *
<> 144:ef7eb2e8f9f7 22 ******************************************************************************/
<> 144:ef7eb2e8f9f7 23
<> 144:ef7eb2e8f9f7 24 #include "device.h"
<> 144:ef7eb2e8f9f7 25 #include "clocking.h"
<> 144:ef7eb2e8f9f7 26 #if DEVICE_SPI
<> 144:ef7eb2e8f9f7 27
<> 144:ef7eb2e8f9f7 28 #include "mbed_assert.h"
<> 144:ef7eb2e8f9f7 29 #include "PeripheralPins.h"
<> 144:ef7eb2e8f9f7 30 #include "pinmap.h"
<> 144:ef7eb2e8f9f7 31 #include "pinmap_function.h"
<> 150:02e0a0aed4ec 32 #include "mbed_error.h"
<> 144:ef7eb2e8f9f7 33
<> 144:ef7eb2e8f9f7 34 #include "dma_api.h"
<> 144:ef7eb2e8f9f7 35 #include "dma_api_HAL.h"
<> 144:ef7eb2e8f9f7 36 #include "serial_api_HAL.h"
<> 144:ef7eb2e8f9f7 37 #include "spi_api.h"
<> 144:ef7eb2e8f9f7 38 #include "em_usart.h"
<> 144:ef7eb2e8f9f7 39 #include "em_cmu.h"
<> 144:ef7eb2e8f9f7 40 #include "em_dma.h"
<> 144:ef7eb2e8f9f7 41 #include "sleep_api.h"
<> 144:ef7eb2e8f9f7 42 #include "sleepmodes.h"
<> 144:ef7eb2e8f9f7 43
<> 144:ef7eb2e8f9f7 44 static uint16_t fill_word = SPI_FILL_WORD;
<> 144:ef7eb2e8f9f7 45
<> 144:ef7eb2e8f9f7 46 #define SPI_LEAST_ACTIVE_SLEEPMODE EM1
<> 144:ef7eb2e8f9f7 47
<> 144:ef7eb2e8f9f7 48 static inline CMU_Clock_TypeDef spi_get_clock_tree(spi_t *obj)
<> 144:ef7eb2e8f9f7 49 {
<> 144:ef7eb2e8f9f7 50 switch ((int)obj->spi.spi) {
<> 144:ef7eb2e8f9f7 51 #ifdef USART0
<> 144:ef7eb2e8f9f7 52 case SPI_0:
<> 144:ef7eb2e8f9f7 53 return cmuClock_USART0;
<> 144:ef7eb2e8f9f7 54 #endif
<> 144:ef7eb2e8f9f7 55 #ifdef USART1
<> 144:ef7eb2e8f9f7 56 case SPI_1:
<> 144:ef7eb2e8f9f7 57 return cmuClock_USART1;
<> 144:ef7eb2e8f9f7 58 #endif
<> 144:ef7eb2e8f9f7 59 #ifdef USART2
<> 144:ef7eb2e8f9f7 60 case SPI_2:
<> 144:ef7eb2e8f9f7 61 return cmuClock_USART2;
<> 144:ef7eb2e8f9f7 62 #endif
AnnaBridge 179:b0033dcd6934 63 #ifdef USART3
AnnaBridge 179:b0033dcd6934 64 case SPI_3:
AnnaBridge 179:b0033dcd6934 65 return cmuClock_USART3;
AnnaBridge 179:b0033dcd6934 66 #endif
AnnaBridge 179:b0033dcd6934 67 #ifdef USART4
AnnaBridge 179:b0033dcd6934 68 case SPI_4:
AnnaBridge 179:b0033dcd6934 69 return cmuClock_USART4;
AnnaBridge 179:b0033dcd6934 70 #endif
AnnaBridge 179:b0033dcd6934 71 #ifdef USART5
AnnaBridge 179:b0033dcd6934 72 case SPI_5:
AnnaBridge 179:b0033dcd6934 73 return cmuClock_USART5;
AnnaBridge 179:b0033dcd6934 74 #endif
<> 144:ef7eb2e8f9f7 75 default:
<> 144:ef7eb2e8f9f7 76 error("Spi module not available.. Out of bound access.");
<> 144:ef7eb2e8f9f7 77 return cmuClock_HFPER;
<> 144:ef7eb2e8f9f7 78 }
<> 144:ef7eb2e8f9f7 79 }
<> 144:ef7eb2e8f9f7 80
<> 144:ef7eb2e8f9f7 81 static inline uint8_t spi_get_index(spi_t *obj)
<> 144:ef7eb2e8f9f7 82 {
<> 144:ef7eb2e8f9f7 83 uint8_t index = 0;
<> 144:ef7eb2e8f9f7 84 switch ((int)obj->spi.spi) {
<> 144:ef7eb2e8f9f7 85 #ifdef USART0
<> 144:ef7eb2e8f9f7 86 case SPI_0:
<> 144:ef7eb2e8f9f7 87 index = 0;
<> 144:ef7eb2e8f9f7 88 break;
<> 144:ef7eb2e8f9f7 89 #endif
<> 144:ef7eb2e8f9f7 90 #ifdef USART1
<> 144:ef7eb2e8f9f7 91 case SPI_1:
<> 144:ef7eb2e8f9f7 92 index = 1;
<> 144:ef7eb2e8f9f7 93 break;
<> 144:ef7eb2e8f9f7 94 #endif
<> 144:ef7eb2e8f9f7 95 #ifdef USART2
<> 144:ef7eb2e8f9f7 96 case SPI_2:
<> 144:ef7eb2e8f9f7 97 index = 2;
<> 144:ef7eb2e8f9f7 98 break;
<> 144:ef7eb2e8f9f7 99 #endif
AnnaBridge 179:b0033dcd6934 100 #ifdef USART3
AnnaBridge 179:b0033dcd6934 101 case SPI_3:
AnnaBridge 179:b0033dcd6934 102 index = 3;
AnnaBridge 179:b0033dcd6934 103 break;
AnnaBridge 179:b0033dcd6934 104 #endif
AnnaBridge 179:b0033dcd6934 105 #ifdef USART4
AnnaBridge 179:b0033dcd6934 106 case SPI_4:
AnnaBridge 179:b0033dcd6934 107 index = 4;
AnnaBridge 179:b0033dcd6934 108 break;
AnnaBridge 179:b0033dcd6934 109 #endif
AnnaBridge 179:b0033dcd6934 110 #ifdef USART5
AnnaBridge 179:b0033dcd6934 111 case SPI_5:
AnnaBridge 179:b0033dcd6934 112 index = 5;
AnnaBridge 179:b0033dcd6934 113 break;
AnnaBridge 179:b0033dcd6934 114 #endif
<> 144:ef7eb2e8f9f7 115 default:
<> 144:ef7eb2e8f9f7 116 error("Spi module not available.. Out of bound access.");
<> 144:ef7eb2e8f9f7 117 break;
<> 144:ef7eb2e8f9f7 118 }
<> 144:ef7eb2e8f9f7 119 return index;
<> 144:ef7eb2e8f9f7 120 }
<> 144:ef7eb2e8f9f7 121
<> 144:ef7eb2e8f9f7 122 uint8_t spi_get_module(spi_t *obj)
<> 144:ef7eb2e8f9f7 123 {
<> 144:ef7eb2e8f9f7 124 return spi_get_index(obj);
<> 144:ef7eb2e8f9f7 125 }
<> 144:ef7eb2e8f9f7 126
<> 144:ef7eb2e8f9f7 127 static void usart_init(spi_t *obj, uint32_t baudrate, USART_Databits_TypeDef databits, bool master, USART_ClockMode_TypeDef clockMode )
<> 144:ef7eb2e8f9f7 128 {
<> 144:ef7eb2e8f9f7 129 USART_InitSync_TypeDef init = USART_INITSYNC_DEFAULT;
<> 144:ef7eb2e8f9f7 130 init.enable = usartDisable;
<> 144:ef7eb2e8f9f7 131 init.baudrate = baudrate;
<> 144:ef7eb2e8f9f7 132 init.databits = databits;
<> 144:ef7eb2e8f9f7 133 init.master = master;
<> 144:ef7eb2e8f9f7 134 init.msbf = 1;
<> 144:ef7eb2e8f9f7 135 init.clockMode = clockMode;
<> 144:ef7eb2e8f9f7 136
<> 144:ef7eb2e8f9f7 137 /* Determine the reference clock, because the correct clock may not be set up at init time (e.g. before main()) */
<> 144:ef7eb2e8f9f7 138 init.refFreq = REFERENCE_FREQUENCY;
<> 144:ef7eb2e8f9f7 139
<> 144:ef7eb2e8f9f7 140 USART_InitSync(obj->spi.spi, &init);
<> 144:ef7eb2e8f9f7 141 }
<> 144:ef7eb2e8f9f7 142
<> 144:ef7eb2e8f9f7 143 void spi_preinit(spi_t *obj, PinName mosi, PinName miso, PinName clk, PinName cs)
<> 144:ef7eb2e8f9f7 144 {
<> 144:ef7eb2e8f9f7 145 SPIName spi_mosi = (SPIName) pinmap_peripheral(mosi, PinMap_SPI_MOSI);
<> 144:ef7eb2e8f9f7 146 SPIName spi_miso = (SPIName) pinmap_peripheral(miso, PinMap_SPI_MISO);
<> 144:ef7eb2e8f9f7 147 SPIName spi_clk = (SPIName) pinmap_peripheral(clk, PinMap_SPI_CLK);
<> 144:ef7eb2e8f9f7 148 SPIName spi_cs = (SPIName) pinmap_peripheral(cs, PinMap_SPI_CS);
<> 144:ef7eb2e8f9f7 149 SPIName spi_data = (SPIName) pinmap_merge(spi_mosi, spi_miso);
<> 144:ef7eb2e8f9f7 150 SPIName spi_ctrl = (SPIName) pinmap_merge(spi_clk, spi_cs);
<> 144:ef7eb2e8f9f7 151
<> 144:ef7eb2e8f9f7 152 obj->spi.spi = (USART_TypeDef *) pinmap_merge(spi_data, spi_ctrl);
AnnaBridge 179:b0033dcd6934 153 MBED_ASSERT((unsigned int) obj->spi.spi != NC);
<> 144:ef7eb2e8f9f7 154
<> 144:ef7eb2e8f9f7 155 if (cs != NC) { /* Slave mode */
<> 144:ef7eb2e8f9f7 156 obj->spi.master = false;
<> 144:ef7eb2e8f9f7 157 } else {
<> 144:ef7eb2e8f9f7 158 obj->spi.master = true;
<> 144:ef7eb2e8f9f7 159 }
<> 144:ef7eb2e8f9f7 160
<> 144:ef7eb2e8f9f7 161 #if defined(_SILICON_LABS_32B_PLATFORM_1)
<> 144:ef7eb2e8f9f7 162 // On P1, we need to ensure all pins are on same location
<> 144:ef7eb2e8f9f7 163 uint32_t loc_mosi = pin_location(mosi, PinMap_SPI_MOSI);
<> 144:ef7eb2e8f9f7 164 uint32_t loc_miso = pin_location(miso, PinMap_SPI_MISO);
<> 144:ef7eb2e8f9f7 165 uint32_t loc_clk = pin_location(clk, PinMap_SPI_CLK);
<> 144:ef7eb2e8f9f7 166 uint32_t loc_cs = pin_location(cs, PinMap_SPI_CS);
<> 144:ef7eb2e8f9f7 167 uint32_t loc_data = pinmap_merge(loc_mosi, loc_miso);
<> 144:ef7eb2e8f9f7 168 uint32_t loc_ctrl = pinmap_merge(loc_clk, loc_cs);
<> 144:ef7eb2e8f9f7 169 obj->spi.location = pinmap_merge(loc_data, loc_ctrl);
<> 144:ef7eb2e8f9f7 170 MBED_ASSERT(obj->spi.location != NC);
<> 144:ef7eb2e8f9f7 171 #endif
<> 144:ef7eb2e8f9f7 172
<> 144:ef7eb2e8f9f7 173 obj->spi.dmaOptionsTX.dmaUsageState = DMA_USAGE_OPPORTUNISTIC;
<> 144:ef7eb2e8f9f7 174 }
<> 144:ef7eb2e8f9f7 175
<> 144:ef7eb2e8f9f7 176 void spi_enable_pins(spi_t *obj, uint8_t enable, PinName mosi, PinName miso, PinName clk, PinName cs)
<> 144:ef7eb2e8f9f7 177 {
<> 144:ef7eb2e8f9f7 178 if (enable) {
<> 144:ef7eb2e8f9f7 179 if (obj->spi.master) { /* Master mode */
<> 144:ef7eb2e8f9f7 180 /* Either mosi or miso can be NC */
<> 144:ef7eb2e8f9f7 181 if (mosi != NC) {
<> 144:ef7eb2e8f9f7 182 pin_mode(mosi, PushPull);
<> 144:ef7eb2e8f9f7 183 }
<> 144:ef7eb2e8f9f7 184 if (miso != NC) {
<> 144:ef7eb2e8f9f7 185 pin_mode(miso, Input);
<> 144:ef7eb2e8f9f7 186 }
<> 144:ef7eb2e8f9f7 187 pin_mode(clk, PushPull);
<> 144:ef7eb2e8f9f7 188 /* Don't set cs pin, since we toggle it manually */
<> 144:ef7eb2e8f9f7 189 } else { /* Slave mode */
<> 144:ef7eb2e8f9f7 190 if (mosi != NC) {
<> 144:ef7eb2e8f9f7 191 pin_mode(mosi, Input);
<> 144:ef7eb2e8f9f7 192 }
<> 144:ef7eb2e8f9f7 193 if (miso != NC) {
<> 144:ef7eb2e8f9f7 194 pin_mode(miso, PushPull);
<> 144:ef7eb2e8f9f7 195 }
<> 144:ef7eb2e8f9f7 196 pin_mode(clk, Input);
<> 144:ef7eb2e8f9f7 197 pin_mode(cs, Input);
<> 144:ef7eb2e8f9f7 198 }
<> 144:ef7eb2e8f9f7 199 } else {
<> 144:ef7eb2e8f9f7 200 // TODO_LP return PinMode to the previous state
<> 144:ef7eb2e8f9f7 201 if (obj->spi.master) { /* Master mode */
<> 144:ef7eb2e8f9f7 202 /* Either mosi or miso can be NC */
<> 144:ef7eb2e8f9f7 203 if (mosi != NC) {
<> 144:ef7eb2e8f9f7 204 pin_mode(mosi, Disabled);
<> 144:ef7eb2e8f9f7 205 }
<> 144:ef7eb2e8f9f7 206 if (miso != NC) {
<> 144:ef7eb2e8f9f7 207 pin_mode(miso, Disabled);
<> 144:ef7eb2e8f9f7 208 }
<> 144:ef7eb2e8f9f7 209 pin_mode(clk, Disabled);
<> 144:ef7eb2e8f9f7 210 /* Don't set cs pin, since we toggle it manually */
<> 144:ef7eb2e8f9f7 211 } else { /* Slave mode */
<> 144:ef7eb2e8f9f7 212 if (mosi != NC) {
<> 144:ef7eb2e8f9f7 213 pin_mode(mosi, Disabled);
<> 144:ef7eb2e8f9f7 214 }
<> 144:ef7eb2e8f9f7 215 if (miso != NC) {
<> 144:ef7eb2e8f9f7 216 pin_mode(miso, Disabled);
<> 144:ef7eb2e8f9f7 217 }
<> 144:ef7eb2e8f9f7 218 pin_mode(clk, Disabled);
<> 144:ef7eb2e8f9f7 219 pin_mode(cs, Disabled);
<> 144:ef7eb2e8f9f7 220 }
<> 144:ef7eb2e8f9f7 221 }
<> 144:ef7eb2e8f9f7 222
<> 144:ef7eb2e8f9f7 223 /* Enabling pins and setting location */
<> 144:ef7eb2e8f9f7 224 #ifdef _USART_ROUTEPEN_RESETVALUE
<> 144:ef7eb2e8f9f7 225 uint32_t route = USART_ROUTEPEN_CLKPEN;
AnnaBridge 167:e84263d55307 226
<> 144:ef7eb2e8f9f7 227 obj->spi.spi->ROUTELOC0 &= ~_USART_ROUTELOC0_CLKLOC_MASK;
<> 144:ef7eb2e8f9f7 228 obj->spi.spi->ROUTELOC0 |= pin_location(clk, PinMap_SPI_CLK)<<_USART_ROUTELOC0_CLKLOC_SHIFT;
<> 144:ef7eb2e8f9f7 229 if (mosi != NC) {
<> 144:ef7eb2e8f9f7 230 route |= USART_ROUTEPEN_TXPEN;
<> 144:ef7eb2e8f9f7 231 obj->spi.spi->ROUTELOC0 &= ~_USART_ROUTELOC0_TXLOC_MASK;
<> 144:ef7eb2e8f9f7 232 obj->spi.spi->ROUTELOC0 |= pin_location(mosi, PinMap_SPI_MOSI)<<_USART_ROUTELOC0_TXLOC_SHIFT;
<> 144:ef7eb2e8f9f7 233 }
<> 144:ef7eb2e8f9f7 234 if (miso != NC) {
<> 144:ef7eb2e8f9f7 235 route |= USART_ROUTEPEN_RXPEN;
<> 144:ef7eb2e8f9f7 236 obj->spi.spi->ROUTELOC0 &= ~_USART_ROUTELOC0_RXLOC_MASK;
AnnaBridge 167:e84263d55307 237 obj->spi.spi->ROUTELOC0 |= pin_location(miso, PinMap_SPI_MISO)<<_USART_ROUTELOC0_RXLOC_SHIFT;
<> 144:ef7eb2e8f9f7 238 }
<> 144:ef7eb2e8f9f7 239 if (!obj->spi.master) {
<> 144:ef7eb2e8f9f7 240 route |= USART_ROUTEPEN_CSPEN;
<> 144:ef7eb2e8f9f7 241 obj->spi.spi->ROUTELOC0 &= ~_USART_ROUTELOC0_CSLOC_MASK;
AnnaBridge 167:e84263d55307 242 obj->spi.spi->ROUTELOC0 |= pin_location(cs, PinMap_SPI_CS)<<_USART_ROUTELOC0_CSLOC_SHIFT;
<> 144:ef7eb2e8f9f7 243 }
Anna Bridge 163:74e0ce7f98e8 244 obj->spi.location = obj->spi.spi->ROUTELOC0;
Anna Bridge 163:74e0ce7f98e8 245 obj->spi.route = route;
<> 144:ef7eb2e8f9f7 246 obj->spi.spi->ROUTEPEN = route;
<> 144:ef7eb2e8f9f7 247 }
<> 144:ef7eb2e8f9f7 248 #else
Anna Bridge 163:74e0ce7f98e8 249 uint32_t route = USART_ROUTE_CLKPEN;
<> 144:ef7eb2e8f9f7 250
<> 144:ef7eb2e8f9f7 251 if (mosi != NC) {
<> 144:ef7eb2e8f9f7 252 route |= USART_ROUTE_TXPEN;
<> 144:ef7eb2e8f9f7 253 }
<> 144:ef7eb2e8f9f7 254 if (miso != NC) {
<> 144:ef7eb2e8f9f7 255 route |= USART_ROUTE_RXPEN;
<> 144:ef7eb2e8f9f7 256 }
<> 144:ef7eb2e8f9f7 257 if (!obj->spi.master) {
<> 144:ef7eb2e8f9f7 258 route |= USART_ROUTE_CSPEN;
<> 144:ef7eb2e8f9f7 259 }
Anna Bridge 163:74e0ce7f98e8 260 route |= obj->spi.location << _USART_ROUTE_LOCATION_SHIFT;
<> 144:ef7eb2e8f9f7 261 obj->spi.spi->ROUTE = route;
Anna Bridge 163:74e0ce7f98e8 262 obj->spi.route = route;
<> 144:ef7eb2e8f9f7 263 }
<> 144:ef7eb2e8f9f7 264 #endif
<> 144:ef7eb2e8f9f7 265 void spi_enable(spi_t *obj, uint8_t enable)
<> 144:ef7eb2e8f9f7 266 {
<> 144:ef7eb2e8f9f7 267 USART_Enable(obj->spi.spi, (enable ? usartEnable : usartDisable));
<> 144:ef7eb2e8f9f7 268 }
<> 144:ef7eb2e8f9f7 269
<> 144:ef7eb2e8f9f7 270 void spi_init(spi_t *obj, PinName mosi, PinName miso, PinName clk, PinName cs)
<> 144:ef7eb2e8f9f7 271 {
<> 144:ef7eb2e8f9f7 272 CMU_ClockEnable(cmuClock_HFPER, true);
<> 144:ef7eb2e8f9f7 273 spi_preinit(obj, mosi, miso, clk, cs);
<> 144:ef7eb2e8f9f7 274 CMU_ClockEnable(spi_get_clock_tree(obj), true);
<> 144:ef7eb2e8f9f7 275 usart_init(obj, 100000, usartDatabits8, true, usartClockMode0);
<> 144:ef7eb2e8f9f7 276
<> 144:ef7eb2e8f9f7 277 spi_enable_pins(obj, true, mosi, miso, clk, cs);
<> 144:ef7eb2e8f9f7 278 spi_enable(obj, true);
<> 144:ef7eb2e8f9f7 279 }
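
A minimal master-mode usage sketch of the HAL calls above (the pin names PD0/PD1/PD2 and the 1 MHz rate are placeholders, assuming an EFM32 target whose pinout maps them to one USART):

    spi_t spi;
    spi_init(&spi, PD0, PD1, PD2, NC);   /* MOSI, MISO, CLK; cs == NC selects master mode */
    spi_format(&spi, 8, 0, 0);           /* 8-bit frames, clock mode 0, master */
    spi_frequency(&spi, 1000000);        /* 1 MHz SCLK */
    int response = spi_master_write(&spi, 0xFF);
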
<> 144:ef7eb2e8f9f7 280
<> 144:ef7eb2e8f9f7 281 void spi_enable_event(spi_t *obj, uint32_t event, uint8_t enable)
<> 144:ef7eb2e8f9f7 282 {
<> 144:ef7eb2e8f9f7 283 if(enable) obj->spi.event |= event;
<> 144:ef7eb2e8f9f7 284 else obj->spi.event &= ~event;
<> 144:ef7eb2e8f9f7 285 }
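
spi_enable_event() only records which events the asynchronous layer wants reported; spi_event_check() further down masks its result against this. A short sketch (SPI_EVENT_COMPLETE comes from the common mbed SPI event flags):

    spi_enable_event(&spi, SPI_EVENT_COMPLETE, true);   /* request the transfer-complete event */
    /* ... run a transfer ... */
    uint32_t ev = spi_event_check(&spi);                /* non-zero once both buffers are drained */
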
<> 144:ef7eb2e8f9f7 286
<> 144:ef7eb2e8f9f7 287 /****************************************************************************
<> 144:ef7eb2e8f9f7 288 * void spi_enable_interrupt(spi_t *obj, uint32_t handler, uint8_t enable)
<> 144:ef7eb2e8f9f7 289 *
<> 144:ef7eb2e8f9f7 290 * This will enable the interrupt in NVIC for the associated USART RX channel
<> 144:ef7eb2e8f9f7 291 *
<> 144:ef7eb2e8f9f7 292 * * obj: pointer to spi object
<> 144:ef7eb2e8f9f7 293 * * handler: pointer to interrupt handler for this channel
<> 144:ef7eb2e8f9f7 294 * * enable: Whether to enable (true) or disable (false) the interrupt
<> 144:ef7eb2e8f9f7 295 *
<> 144:ef7eb2e8f9f7 296 ****************************************************************************/
<> 144:ef7eb2e8f9f7 297 void spi_enable_interrupt(spi_t *obj, uint32_t handler, uint8_t enable)
<> 144:ef7eb2e8f9f7 298 {
<> 144:ef7eb2e8f9f7 299 IRQn_Type IRQvector;
<> 144:ef7eb2e8f9f7 300
<> 144:ef7eb2e8f9f7 301 switch ((uint32_t)obj->spi.spi) {
<> 144:ef7eb2e8f9f7 302 #ifdef USART0
<> 144:ef7eb2e8f9f7 303 case USART_0:
<> 144:ef7eb2e8f9f7 304 IRQvector = USART0_RX_IRQn;
<> 144:ef7eb2e8f9f7 305 break;
<> 144:ef7eb2e8f9f7 306 #endif
<> 144:ef7eb2e8f9f7 307 #ifdef USART1
<> 144:ef7eb2e8f9f7 308 case USART_1:
<> 144:ef7eb2e8f9f7 309 IRQvector = USART1_RX_IRQn;
<> 144:ef7eb2e8f9f7 310 break;
<> 144:ef7eb2e8f9f7 311 #endif
<> 144:ef7eb2e8f9f7 312 #ifdef USART2
<> 144:ef7eb2e8f9f7 313 case USART_2:
<> 144:ef7eb2e8f9f7 314 IRQvector = USART2_RX_IRQn;
<> 144:ef7eb2e8f9f7 315 break;
<> 144:ef7eb2e8f9f7 316 #endif
AnnaBridge 179:b0033dcd6934 317 #ifdef USART3
AnnaBridge 179:b0033dcd6934 318 case USART_3:
AnnaBridge 179:b0033dcd6934 319 IRQvector = USART3_RX_IRQn;
AnnaBridge 179:b0033dcd6934 320 break;
AnnaBridge 179:b0033dcd6934 321 #endif
AnnaBridge 179:b0033dcd6934 322 #ifdef USART4
AnnaBridge 179:b0033dcd6934 323 case USART_4:
AnnaBridge 179:b0033dcd6934 324 IRQvector = USART4_RX_IRQn;
AnnaBridge 179:b0033dcd6934 325 break;
AnnaBridge 179:b0033dcd6934 326 #endif
AnnaBridge 179:b0033dcd6934 327 #ifdef USART5
AnnaBridge 179:b0033dcd6934 328 case USART_5:
AnnaBridge 179:b0033dcd6934 329 IRQvector = USART5_RX_IRQn;
AnnaBridge 179:b0033dcd6934 330 break;
AnnaBridge 179:b0033dcd6934 331 #endif
<> 144:ef7eb2e8f9f7 332 default:
<> 144:ef7eb2e8f9f7 333 error("Undefined SPI peripheral");
<> 144:ef7eb2e8f9f7 334 return;
<> 144:ef7eb2e8f9f7 335 }
<> 144:ef7eb2e8f9f7 336
<> 144:ef7eb2e8f9f7 337 if (enable == true) {
<> 144:ef7eb2e8f9f7 338 NVIC_SetVector(IRQvector, handler);
<> 144:ef7eb2e8f9f7 339 USART_IntEnable(obj->spi.spi, USART_IEN_RXDATAV);
<> 144:ef7eb2e8f9f7 340 NVIC_EnableIRQ(IRQvector);
<> 144:ef7eb2e8f9f7 341 } else {
<> 144:ef7eb2e8f9f7 342 NVIC_SetVector(IRQvector, handler);
<> 144:ef7eb2e8f9f7 343 USART_IntDisable(obj->spi.spi, USART_IEN_RXDATAV);
<> 144:ef7eb2e8f9f7 344 NVIC_DisableIRQ(IRQvector);
<> 144:ef7eb2e8f9f7 345 }
<> 144:ef7eb2e8f9f7 346 }
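
The handler argument is installed directly as the NVIC vector, so it must be a plain void(void) function; a sketch assuming a file-scope spi_t named spi (placeholder):

    static void my_spi_rx_irq(void)
    {
        spi_irq_handler(&spi);    /* the default handler below simply reads the received frame */
    }

    spi_enable_interrupt(&spi, (uint32_t)my_spi_rx_irq, true);
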
<> 144:ef7eb2e8f9f7 347
<> 144:ef7eb2e8f9f7 348 void spi_format(spi_t *obj, int bits, int mode, int slave)
<> 144:ef7eb2e8f9f7 349 {
<> 144:ef7eb2e8f9f7 350 /* Bits: values between 4 and 16 are valid */
<> 144:ef7eb2e8f9f7 351 MBED_ASSERT(bits >= 4 && bits <= 16);
<> 144:ef7eb2e8f9f7 352 obj->spi.bits = bits;
<> 144:ef7eb2e8f9f7 353 /* 0x01 = usartDatabits4, etc, up to 0x0D = usartDatabits16 */
<> 144:ef7eb2e8f9f7 354 USART_Databits_TypeDef databits = (USART_Databits_TypeDef) (bits - 3);
<> 144:ef7eb2e8f9f7 355
<> 144:ef7eb2e8f9f7 356 USART_ClockMode_TypeDef clockMode;
<> 144:ef7eb2e8f9f7 357 MBED_ASSERT(mode >= 0 && mode <= 3);
<> 144:ef7eb2e8f9f7 358 switch (mode) {
<> 144:ef7eb2e8f9f7 359 case 0:
<> 144:ef7eb2e8f9f7 360 clockMode = usartClockMode0;
<> 144:ef7eb2e8f9f7 361 break;
<> 144:ef7eb2e8f9f7 362 case 1:
<> 144:ef7eb2e8f9f7 363 clockMode = usartClockMode1;
<> 144:ef7eb2e8f9f7 364 break;
<> 144:ef7eb2e8f9f7 365 case 2:
<> 144:ef7eb2e8f9f7 366 clockMode = usartClockMode2;
<> 144:ef7eb2e8f9f7 367 break;
<> 144:ef7eb2e8f9f7 368 case 3:
<> 144:ef7eb2e8f9f7 369 clockMode = usartClockMode3;
<> 144:ef7eb2e8f9f7 370 break;
<> 144:ef7eb2e8f9f7 371 default:
<> 144:ef7eb2e8f9f7 372 clockMode = usartClockMode0;
<> 144:ef7eb2e8f9f7 373 }
<> 144:ef7eb2e8f9f7 374 uint32_t iflags = obj->spi.spi->IEN;
<> 144:ef7eb2e8f9f7 375 bool enabled = (obj->spi.spi->STATUS & (USART_STATUS_RXENS | USART_STATUS_TXENS)) != 0;
<> 144:ef7eb2e8f9f7 376
<> 144:ef7eb2e8f9f7 377 usart_init(obj, 100000, databits, (slave ? false : true), clockMode);
<> 144:ef7eb2e8f9f7 378
<> 144:ef7eb2e8f9f7 379 //restore state
<> 144:ef7eb2e8f9f7 380 #ifdef _USART_ROUTEPEN_RESETVALUE
Anna Bridge 163:74e0ce7f98e8 381 obj->spi.spi->ROUTEPEN = obj->spi.route;
Anna Bridge 163:74e0ce7f98e8 382 obj->spi.spi->ROUTELOC0 = obj->spi.location;
<> 144:ef7eb2e8f9f7 383 #else
Anna Bridge 163:74e0ce7f98e8 384 obj->spi.spi->ROUTE = obj->spi.route;
<> 144:ef7eb2e8f9f7 385 #endif
<> 144:ef7eb2e8f9f7 386 obj->spi.spi->IEN = iflags;
<> 144:ef7eb2e8f9f7 387
<> 144:ef7eb2e8f9f7 388 if(enabled) spi_enable(obj, enabled);
<> 144:ef7eb2e8f9f7 389 }
<> 144:ef7eb2e8f9f7 390
<> 144:ef7eb2e8f9f7 391 void spi_frequency(spi_t *obj, int hz)
<> 144:ef7eb2e8f9f7 392 {
<> 144:ef7eb2e8f9f7 393 USART_BaudrateSyncSet(obj->spi.spi, REFERENCE_FREQUENCY, hz);
<> 144:ef7eb2e8f9f7 394 }
<> 144:ef7eb2e8f9f7 395
<> 144:ef7eb2e8f9f7 396 /* Read/Write */
<> 144:ef7eb2e8f9f7 397
<> 144:ef7eb2e8f9f7 398 void spi_write(spi_t *obj, int value)
<> 144:ef7eb2e8f9f7 399 {
<> 144:ef7eb2e8f9f7 400 if (obj->spi.bits <= 8) {
<> 144:ef7eb2e8f9f7 401 USART_Tx(obj->spi.spi, (uint8_t) value);
<> 144:ef7eb2e8f9f7 402 } else if (obj->spi.bits == 9) {
<> 144:ef7eb2e8f9f7 403 USART_TxExt(obj->spi.spi, (uint16_t) value & 0x1FF);
<> 144:ef7eb2e8f9f7 404 } else {
<> 144:ef7eb2e8f9f7 405 USART_TxDouble(obj->spi.spi, (uint16_t) value);
<> 144:ef7eb2e8f9f7 406 }
<> 144:ef7eb2e8f9f7 407 }
<> 144:ef7eb2e8f9f7 408
<> 144:ef7eb2e8f9f7 409 int spi_read(spi_t *obj)
<> 144:ef7eb2e8f9f7 410 {
<> 144:ef7eb2e8f9f7 411 if (obj->spi.bits <= 8) {
<> 144:ef7eb2e8f9f7 412 return (int) obj->spi.spi->RXDATA;
<> 144:ef7eb2e8f9f7 413 } else if (obj->spi.bits == 9) {
<> 144:ef7eb2e8f9f7 414 return (int) obj->spi.spi->RXDATAX & 0x1FF;
<> 144:ef7eb2e8f9f7 415 } else {
<> 144:ef7eb2e8f9f7 416 return (int) obj->spi.spi->RXDOUBLE;
<> 144:ef7eb2e8f9f7 417 }
<> 144:ef7eb2e8f9f7 418 }
<> 144:ef7eb2e8f9f7 419
<> 144:ef7eb2e8f9f7 420 int spi_read_asynch(spi_t *obj)
<> 144:ef7eb2e8f9f7 421 {
<> 144:ef7eb2e8f9f7 422 return spi_read(obj);
<> 144:ef7eb2e8f9f7 423 }
<> 144:ef7eb2e8f9f7 424
<> 144:ef7eb2e8f9f7 425 int spi_master_write(spi_t *obj, int value)
<> 144:ef7eb2e8f9f7 426 {
<> 144:ef7eb2e8f9f7 427 spi_write(obj, value);
<> 144:ef7eb2e8f9f7 428
<> 144:ef7eb2e8f9f7 429 /* Wait for transmission of last byte */
<> 144:ef7eb2e8f9f7 430 while (!(obj->spi.spi->STATUS & USART_STATUS_TXC)) {
<> 144:ef7eb2e8f9f7 431 }
<> 144:ef7eb2e8f9f7 432
<> 144:ef7eb2e8f9f7 433 return spi_read(obj);
<> 144:ef7eb2e8f9f7 434 }
<> 144:ef7eb2e8f9f7 435
Kojto 170:19eb464bc2be 436 int spi_master_block_write(spi_t *obj, const char *tx_buffer, int tx_length,
Kojto 170:19eb464bc2be 437 char *rx_buffer, int rx_length, char write_fill) {
AnnaBridge 167:e84263d55307 438 int total = (tx_length > rx_length) ? tx_length : rx_length;
AnnaBridge 167:e84263d55307 439
AnnaBridge 167:e84263d55307 440 for (int i = 0; i < total; i++) {
Kojto 170:19eb464bc2be 441 char out = (i < tx_length) ? tx_buffer[i] : write_fill;
AnnaBridge 167:e84263d55307 442 char in = spi_master_write(obj, out);
AnnaBridge 167:e84263d55307 443 if (i < rx_length) {
AnnaBridge 167:e84263d55307 444 rx_buffer[i] = in;
AnnaBridge 167:e84263d55307 445 }
AnnaBridge 167:e84263d55307 446 }
AnnaBridge 167:e84263d55307 447
AnnaBridge 167:e84263d55307 448 return total;
AnnaBridge 167:e84263d55307 449 }
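
A blocking full-duplex transfer using spi_master_block_write(); the 0x9F command byte is only an illustrative value:

    const char tx[3] = { 0x9F, 0x00, 0x00 };
    char rx[3];
    spi_master_block_write(&spi, tx, 3, rx, 3, (char)0xFF);
    /* write_fill (0xFF here) would only be clocked out if rx_length exceeded tx_length */
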
AnnaBridge 167:e84263d55307 450
<> 144:ef7eb2e8f9f7 451 inline uint8_t spi_master_tx_ready(spi_t *obj)
<> 144:ef7eb2e8f9f7 452 {
<> 144:ef7eb2e8f9f7 453 return (obj->spi.spi->STATUS & USART_STATUS_TXBL) ? true : false;
<> 144:ef7eb2e8f9f7 454 }
<> 144:ef7eb2e8f9f7 455
<> 144:ef7eb2e8f9f7 456 uint8_t spi_master_rx_ready(spi_t *obj)
<> 144:ef7eb2e8f9f7 457 {
<> 144:ef7eb2e8f9f7 458 return (obj->spi.spi->STATUS & USART_STATUS_RXDATAV) ? true : false;
<> 144:ef7eb2e8f9f7 459 }
<> 144:ef7eb2e8f9f7 460
<> 144:ef7eb2e8f9f7 461 uint8_t spi_master_tx_int_flag(spi_t *obj)
<> 144:ef7eb2e8f9f7 462 {
<> 144:ef7eb2e8f9f7 463 return (obj->spi.spi->IF & USART_IF_TXBL) ? true : false;
<> 144:ef7eb2e8f9f7 464 }
<> 144:ef7eb2e8f9f7 465
<> 144:ef7eb2e8f9f7 466 uint8_t spi_master_rx_int_flag(spi_t *obj)
<> 144:ef7eb2e8f9f7 467 {
<> 144:ef7eb2e8f9f7 468 return (obj->spi.spi->IF & (USART_IF_RXDATAV | USART_IF_RXFULL)) ? true : false;
<> 144:ef7eb2e8f9f7 469 }
<> 144:ef7eb2e8f9f7 470
<> 144:ef7eb2e8f9f7 471 void spi_master_read_asynch_complete(spi_t *obj)
<> 144:ef7eb2e8f9f7 472 {
<> 144:ef7eb2e8f9f7 473 obj->spi.spi->IFC = USART_IFC_RXFULL; // in case it got full
<> 144:ef7eb2e8f9f7 474 }
<> 144:ef7eb2e8f9f7 475
<> 144:ef7eb2e8f9f7 476 void spi_master_write_asynch_complete(spi_t *obj)
<> 144:ef7eb2e8f9f7 477 {
<> 144:ef7eb2e8f9f7 478 obj->spi.spi->IFC = USART_IFC_TXC;
<> 144:ef7eb2e8f9f7 479 }
<> 144:ef7eb2e8f9f7 480
<> 144:ef7eb2e8f9f7 481 void spi_irq_handler(spi_t *obj)
<> 144:ef7eb2e8f9f7 482 {
<> 144:ef7eb2e8f9f7 483 spi_read(obj); //TODO_LP store data to the object?
<> 144:ef7eb2e8f9f7 484 }
<> 144:ef7eb2e8f9f7 485
<> 144:ef7eb2e8f9f7 486 uint8_t spi_active(spi_t *obj)
<> 144:ef7eb2e8f9f7 487 {
<> 144:ef7eb2e8f9f7 488 switch(obj->spi.dmaOptionsTX.dmaUsageState) {
<> 144:ef7eb2e8f9f7 489 case DMA_USAGE_TEMPORARY_ALLOCATED:
<> 144:ef7eb2e8f9f7 490 return true;
<> 144:ef7eb2e8f9f7 491 case DMA_USAGE_ALLOCATED:
<> 144:ef7eb2e8f9f7 492 /* Check whether the allocated DMA channel is active */
<> 144:ef7eb2e8f9f7 493 #ifdef LDMA_PRESENT
<> 144:ef7eb2e8f9f7 494 return(LDMAx_ChannelEnabled(obj->spi.dmaOptionsTX.dmaChannel) || LDMAx_ChannelEnabled(obj->spi.dmaOptionsRX.dmaChannel));
<> 144:ef7eb2e8f9f7 495 #else
<> 144:ef7eb2e8f9f7 496 return(DMA_ChannelEnabled(obj->spi.dmaOptionsTX.dmaChannel) || DMA_ChannelEnabled(obj->spi.dmaOptionsRX.dmaChannel));
<> 144:ef7eb2e8f9f7 497 #endif
<> 144:ef7eb2e8f9f7 498 default:
<> 144:ef7eb2e8f9f7 499 /* Check whether interrupt for spi is enabled */
<> 144:ef7eb2e8f9f7 500 return (obj->spi.spi->IEN & (USART_IEN_RXDATAV | USART_IEN_TXBL)) ? true : false;
<> 144:ef7eb2e8f9f7 501 }
<> 144:ef7eb2e8f9f7 502 }
<> 144:ef7eb2e8f9f7 503
<> 144:ef7eb2e8f9f7 504 void spi_buffer_set(spi_t *obj, const void *tx, uint32_t tx_length, void *rx, uint32_t rx_length, uint8_t bit_width)
<> 144:ef7eb2e8f9f7 505 {
<> 144:ef7eb2e8f9f7 506 uint32_t i;
<> 144:ef7eb2e8f9f7 507 uint16_t *tx_ptr = (uint16_t *) tx;
<> 144:ef7eb2e8f9f7 508
<> 144:ef7eb2e8f9f7 509 obj->tx_buff.buffer = (void *)tx;
<> 144:ef7eb2e8f9f7 510 obj->rx_buff.buffer = rx;
<> 144:ef7eb2e8f9f7 511 obj->tx_buff.length = tx_length;
<> 144:ef7eb2e8f9f7 512 obj->rx_buff.length = rx_length;
<> 144:ef7eb2e8f9f7 513 obj->tx_buff.pos = 0;
<> 144:ef7eb2e8f9f7 514 obj->rx_buff.pos = 0;
<> 144:ef7eb2e8f9f7 515 obj->tx_buff.width = bit_width;
<> 144:ef7eb2e8f9f7 516 obj->rx_buff.width = bit_width;
<> 144:ef7eb2e8f9f7 517
<> 144:ef7eb2e8f9f7 518 if((obj->spi.bits == 9) && (tx != 0)) {
<> 144:ef7eb2e8f9f7 519 // Make sure we don't have inadvertent non-zero bits outside 9-bit frames which could trigger unwanted operation
<> 144:ef7eb2e8f9f7 520 for(i = 0; i < (tx_length / 2); i++) {
<> 144:ef7eb2e8f9f7 521 tx_ptr[i] &= 0x1FF;
<> 144:ef7eb2e8f9f7 522 }
<> 144:ef7eb2e8f9f7 523 }
<> 144:ef7eb2e8f9f7 524 }
<> 144:ef7eb2e8f9f7 525
<> 144:ef7eb2e8f9f7 526 static void spi_buffer_tx_write(spi_t *obj)
<> 144:ef7eb2e8f9f7 527 {
<> 144:ef7eb2e8f9f7 528 uint32_t data = 0;
<> 144:ef7eb2e8f9f7 529
<> 144:ef7eb2e8f9f7 530 // Interpret buffer according to declared width
<> 144:ef7eb2e8f9f7 531 if (!obj->tx_buff.buffer) {
<> 144:ef7eb2e8f9f7 532 data = SPI_FILL_WORD;
<> 144:ef7eb2e8f9f7 533 } else if (obj->tx_buff.width == 32) {
<> 144:ef7eb2e8f9f7 534 uint32_t * tx = (uint32_t *)obj->tx_buff.buffer;
<> 144:ef7eb2e8f9f7 535 data = tx[obj->tx_buff.pos];
<> 144:ef7eb2e8f9f7 536 } else if (obj->tx_buff.width == 16) {
<> 144:ef7eb2e8f9f7 537 uint16_t * tx = (uint16_t *)obj->tx_buff.buffer;
<> 144:ef7eb2e8f9f7 538 data = tx[obj->tx_buff.pos];
<> 144:ef7eb2e8f9f7 539 } else {
<> 144:ef7eb2e8f9f7 540 uint8_t * tx = (uint8_t *)obj->tx_buff.buffer;
<> 144:ef7eb2e8f9f7 541 data = tx[obj->tx_buff.pos];
<> 144:ef7eb2e8f9f7 542 }
<> 144:ef7eb2e8f9f7 543 obj->tx_buff.pos++;
<> 144:ef7eb2e8f9f7 544
<> 144:ef7eb2e8f9f7 545 // Send buffer
<> 144:ef7eb2e8f9f7 546 if (obj->spi.bits > 9) {
<> 144:ef7eb2e8f9f7 547 obj->spi.spi->TXDOUBLE = data;
<> 144:ef7eb2e8f9f7 548 } else if (obj->spi.bits == 9) {
<> 144:ef7eb2e8f9f7 549 obj->spi.spi->TXDATAX = data;
<> 144:ef7eb2e8f9f7 550 } else {
<> 144:ef7eb2e8f9f7 551 obj->spi.spi->TXDATA = data;
<> 144:ef7eb2e8f9f7 552 }
<> 144:ef7eb2e8f9f7 553 }
<> 144:ef7eb2e8f9f7 554
<> 144:ef7eb2e8f9f7 555 static void spi_buffer_rx_read(spi_t *obj)
<> 144:ef7eb2e8f9f7 556 {
<> 144:ef7eb2e8f9f7 557 uint32_t data;
<> 144:ef7eb2e8f9f7 558
<> 144:ef7eb2e8f9f7 559 if (obj->spi.spi->STATUS & USART_STATUS_RXDATAV) {
<> 144:ef7eb2e8f9f7 560 // Read from the FIFO
<> 144:ef7eb2e8f9f7 561 if (obj->spi.bits > 9) {
<> 144:ef7eb2e8f9f7 562 data = obj->spi.spi->RXDOUBLE;
<> 144:ef7eb2e8f9f7 563 } else if (obj->spi.bits == 9) {
<> 144:ef7eb2e8f9f7 564 data = obj->spi.spi->RXDATAX;
<> 144:ef7eb2e8f9f7 565 } else {
<> 144:ef7eb2e8f9f7 566 data = obj->spi.spi->RXDATA;
<> 144:ef7eb2e8f9f7 567 }
<> 144:ef7eb2e8f9f7 568
<> 144:ef7eb2e8f9f7 569 // If there is room in the buffer, store the data
<> 144:ef7eb2e8f9f7 570 if (obj->rx_buff.buffer && obj->rx_buff.pos < obj->rx_buff.length) {
<> 144:ef7eb2e8f9f7 571 if (obj->rx_buff.width == 32) {
<> 144:ef7eb2e8f9f7 572 uint32_t * rx = (uint32_t *)(obj->rx_buff.buffer);
<> 144:ef7eb2e8f9f7 573 rx[obj->rx_buff.pos] = data;
<> 144:ef7eb2e8f9f7 574 } else if (obj->rx_buff.width == 16) {
<> 144:ef7eb2e8f9f7 575 uint16_t * rx = (uint16_t *)(obj->rx_buff.buffer);
<> 144:ef7eb2e8f9f7 576 rx[obj->rx_buff.pos] = data;
<> 144:ef7eb2e8f9f7 577 } else {
<> 144:ef7eb2e8f9f7 578 uint8_t * rx = (uint8_t *)(obj->rx_buff.buffer);
<> 144:ef7eb2e8f9f7 579 rx[obj->rx_buff.pos] = data;
<> 144:ef7eb2e8f9f7 580 }
<> 144:ef7eb2e8f9f7 581 obj->rx_buff.pos++;
<> 144:ef7eb2e8f9f7 582 }
<> 144:ef7eb2e8f9f7 583 }
<> 144:ef7eb2e8f9f7 584 }
<> 144:ef7eb2e8f9f7 585
<> 144:ef7eb2e8f9f7 586 int spi_master_write_asynch(spi_t *obj)
<> 144:ef7eb2e8f9f7 587 {
<> 144:ef7eb2e8f9f7 588 int ndata = 0;
<> 144:ef7eb2e8f9f7 589 while ((obj->tx_buff.pos < obj->tx_buff.length) && (obj->spi.spi->STATUS & USART_STATUS_TXBL)) {
<> 144:ef7eb2e8f9f7 590 spi_buffer_tx_write(obj);
<> 144:ef7eb2e8f9f7 591 ndata++;
<> 144:ef7eb2e8f9f7 592 }
<> 144:ef7eb2e8f9f7 593 return ndata;
<> 144:ef7eb2e8f9f7 594 }
<> 144:ef7eb2e8f9f7 595
<> 144:ef7eb2e8f9f7 596 int spi_master_read_asynch(spi_t *obj)
<> 144:ef7eb2e8f9f7 597 {
<> 144:ef7eb2e8f9f7 598 int ndata = 0;
<> 144:ef7eb2e8f9f7 599 while ((obj->rx_buff.pos < obj->rx_buff.length) && (obj->spi.spi->STATUS & (USART_STATUS_RXDATAV | USART_STATUS_RXFULL))) {
<> 144:ef7eb2e8f9f7 600 spi_buffer_rx_read(obj);
<> 144:ef7eb2e8f9f7 601 ndata++;
<> 144:ef7eb2e8f9f7 602 }
<> 144:ef7eb2e8f9f7 603 // all sent but still more to receive? need to align tx buffer
<> 144:ef7eb2e8f9f7 604 if ((obj->tx_buff.pos >= obj->tx_buff.length) && (obj->rx_buff.pos < obj->rx_buff.length)) {
<> 144:ef7eb2e8f9f7 605 obj->tx_buff.buffer = (void *)0;
<> 144:ef7eb2e8f9f7 606 obj->tx_buff.length = obj->rx_buff.length;
<> 144:ef7eb2e8f9f7 607 }
<> 144:ef7eb2e8f9f7 608
<> 144:ef7eb2e8f9f7 609 return ndata;
<> 144:ef7eb2e8f9f7 610 }
<> 144:ef7eb2e8f9f7 611
<> 144:ef7eb2e8f9f7 612 uint8_t spi_buffer_rx_empty(spi_t *obj)
<> 144:ef7eb2e8f9f7 613 {
<> 144:ef7eb2e8f9f7 614 return (obj->rx_buff.pos >= obj->rx_buff.length ? true : false );
<> 144:ef7eb2e8f9f7 615 }
<> 144:ef7eb2e8f9f7 616
<> 144:ef7eb2e8f9f7 617 uint8_t spi_buffer_tx_empty(spi_t *obj)
<> 144:ef7eb2e8f9f7 618 {
<> 144:ef7eb2e8f9f7 619 return (obj->tx_buff.pos >= obj->tx_buff.length ? true : false );
<> 144:ef7eb2e8f9f7 620 }
<> 144:ef7eb2e8f9f7 621
<> 144:ef7eb2e8f9f7 622 //TODO_LP implement slave
<> 144:ef7eb2e8f9f7 623
<> 144:ef7eb2e8f9f7 624 int spi_slave_receive(spi_t *obj)
<> 144:ef7eb2e8f9f7 625 {
<> 144:ef7eb2e8f9f7 626 if (obj->spi.bits <= 9) {
<> 144:ef7eb2e8f9f7 627 return (obj->spi.spi->STATUS & USART_STATUS_RXDATAV) ? 1 : 0;
<> 144:ef7eb2e8f9f7 628 } else {
<> 144:ef7eb2e8f9f7 629 return (obj->spi.spi->STATUS & USART_STATUS_RXFULL) ? 1 : 0;
<> 144:ef7eb2e8f9f7 630 }
<> 144:ef7eb2e8f9f7 631 }
<> 144:ef7eb2e8f9f7 632
<> 144:ef7eb2e8f9f7 633 int spi_slave_read(spi_t *obj)
<> 144:ef7eb2e8f9f7 634 {
<> 144:ef7eb2e8f9f7 635 return spi_read(obj);
<> 144:ef7eb2e8f9f7 636 }
<> 144:ef7eb2e8f9f7 637
<> 144:ef7eb2e8f9f7 638 void spi_slave_write(spi_t *obj, int value)
<> 144:ef7eb2e8f9f7 639 {
<> 144:ef7eb2e8f9f7 640 spi_write(obj, value);
<> 144:ef7eb2e8f9f7 641 }
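
A polled slave-side sketch built on the three calls above, echoing each received frame back to the master:

    if (spi_slave_receive(&spi)) {
        int frame = spi_slave_read(&spi);
        spi_slave_write(&spi, frame);
    }
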
<> 144:ef7eb2e8f9f7 642
<> 144:ef7eb2e8f9f7 643 uint32_t spi_event_check(spi_t *obj)
<> 144:ef7eb2e8f9f7 644 {
<> 144:ef7eb2e8f9f7 645 uint32_t requestedEvent = obj->spi.event;
<> 144:ef7eb2e8f9f7 646 uint32_t event = 0;
<> 144:ef7eb2e8f9f7 647 uint8_t quit = spi_buffer_rx_empty(obj) & spi_buffer_tx_empty(obj);
<> 144:ef7eb2e8f9f7 648 if (((requestedEvent & SPI_EVENT_COMPLETE) != 0) && (quit == true)) {
<> 144:ef7eb2e8f9f7 649 event |= SPI_EVENT_COMPLETE;
<> 144:ef7eb2e8f9f7 650 }
<> 144:ef7eb2e8f9f7 651
<> 144:ef7eb2e8f9f7 652 if(quit == true) {
<> 144:ef7eb2e8f9f7 653 event |= SPI_EVENT_INTERNAL_TRANSFER_COMPLETE;
<> 144:ef7eb2e8f9f7 654 }
<> 144:ef7eb2e8f9f7 655
<> 144:ef7eb2e8f9f7 656 return event;
<> 144:ef7eb2e8f9f7 657 }
<> 144:ef7eb2e8f9f7 658 /******************************************
<> 144:ef7eb2e8f9f7 659 * void transferComplete(uint channel, bool primary, void* user)
<> 144:ef7eb2e8f9f7 660 *
<> 144:ef7eb2e8f9f7 661 * Callback function which gets called upon DMA transfer completion
<> 144:ef7eb2e8f9f7 662 * the user-defined pointer is pointing to the CPP-land thunk
<> 144:ef7eb2e8f9f7 663 ******************************************/
<> 144:ef7eb2e8f9f7 664 void transferComplete(unsigned int channel, bool primary, void *user)
<> 144:ef7eb2e8f9f7 665 {
<> 144:ef7eb2e8f9f7 666 (void) channel;
<> 144:ef7eb2e8f9f7 667 (void) primary;
<> 144:ef7eb2e8f9f7 668
<> 144:ef7eb2e8f9f7 669 /* User pointer should be a thunk to CPP land */
<> 144:ef7eb2e8f9f7 670 if (user != NULL) {
<> 144:ef7eb2e8f9f7 671 ((DMACallback)user)();
<> 144:ef7eb2e8f9f7 672 }
<> 144:ef7eb2e8f9f7 673 }
<> 144:ef7eb2e8f9f7 674
<> 144:ef7eb2e8f9f7 675 /******************************************
<> 144:ef7eb2e8f9f7 676 * bool spi_allocate_dma(spi_t *obj);
<> 144:ef7eb2e8f9f7 677 * (helper function for spi_enable_dma)
<> 144:ef7eb2e8f9f7 678 *
<> 144:ef7eb2e8f9f7 679 * This function will request two DMA channels from the DMA API if needed
<> 144:ef7eb2e8f9f7 680 * by the hint provided. They will be allocated to the SPI object pointed to.
<> 144:ef7eb2e8f9f7 681 *
<> 144:ef7eb2e8f9f7 682 * return value: whether the channels were acquired successfully (true) or not.
<> 144:ef7eb2e8f9f7 683 ******************************************/
<> 144:ef7eb2e8f9f7 684 bool spi_allocate_dma(spi_t *obj)
<> 144:ef7eb2e8f9f7 685 {
<> 144:ef7eb2e8f9f7 686 int dmaChannelIn, dmaChannelOut;
<> 144:ef7eb2e8f9f7 687 dmaChannelIn = dma_channel_allocate(DMA_CAP_NONE);
<> 144:ef7eb2e8f9f7 688 if (dmaChannelIn == DMA_ERROR_OUT_OF_CHANNELS) {
<> 144:ef7eb2e8f9f7 689 return false;
<> 144:ef7eb2e8f9f7 690 }
<> 144:ef7eb2e8f9f7 691 dmaChannelOut = dma_channel_allocate(DMA_CAP_NONE);
<> 144:ef7eb2e8f9f7 692 if (dmaChannelOut == DMA_ERROR_OUT_OF_CHANNELS) {
<> 144:ef7eb2e8f9f7 693 dma_channel_free(dmaChannelIn);
<> 144:ef7eb2e8f9f7 694 return false;
<> 144:ef7eb2e8f9f7 695 }
<> 144:ef7eb2e8f9f7 696
<> 144:ef7eb2e8f9f7 697 obj->spi.dmaOptionsTX.dmaChannel = dmaChannelOut;
<> 144:ef7eb2e8f9f7 698 obj->spi.dmaOptionsRX.dmaChannel = dmaChannelIn;
<> 144:ef7eb2e8f9f7 699 return true;
<> 144:ef7eb2e8f9f7 700 }
<> 144:ef7eb2e8f9f7 701
<> 144:ef7eb2e8f9f7 702 /******************************************
<> 144:ef7eb2e8f9f7 703 * void spi_enable_dma(spi_t *obj, DMAUsage state)
<> 144:ef7eb2e8f9f7 704 *
<> 144:ef7eb2e8f9f7 705 * This function tries to allocate DMA as indicated by the hint (state).
<> 144:ef7eb2e8f9f7 706 * There are three possibilities:
<> 144:ef7eb2e8f9f7 707 * * state = NEVER:
<> 144:ef7eb2e8f9f7 708 * if there were channels allocated by state = ALWAYS, they will be released
<> 144:ef7eb2e8f9f7 709 * * state = OPPORTUNISTIC:
<> 144:ef7eb2e8f9f7 710 * if there are channels available, they will get used, but freed upon transfer completion
<> 144:ef7eb2e8f9f7 711 * * state = ALWAYS
<> 144:ef7eb2e8f9f7 712 * if there are channels available, they will get allocated and not be freed until state changes
<> 144:ef7eb2e8f9f7 713 ******************************************/
<> 144:ef7eb2e8f9f7 714 void spi_enable_dma(spi_t *obj, DMAUsage state)
<> 144:ef7eb2e8f9f7 715 {
<> 144:ef7eb2e8f9f7 716 if (state == DMA_USAGE_ALWAYS && obj->spi.dmaOptionsTX.dmaUsageState != DMA_USAGE_ALLOCATED) {
<> 144:ef7eb2e8f9f7 717 /* Try to allocate channels */
<> 144:ef7eb2e8f9f7 718 if (spi_allocate_dma(obj)) {
<> 144:ef7eb2e8f9f7 719 obj->spi.dmaOptionsTX.dmaUsageState = DMA_USAGE_ALLOCATED;
<> 144:ef7eb2e8f9f7 720 } else {
<> 144:ef7eb2e8f9f7 721 obj->spi.dmaOptionsTX.dmaUsageState = state;
<> 144:ef7eb2e8f9f7 722 }
<> 144:ef7eb2e8f9f7 723 } else if (state == DMA_USAGE_OPPORTUNISTIC) {
<> 144:ef7eb2e8f9f7 724 if (obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_ALLOCATED) {
<> 144:ef7eb2e8f9f7 725 /* Channels have already been allocated previously by an ALWAYS state, so after this transfer, we will release them */
<> 144:ef7eb2e8f9f7 726 obj->spi.dmaOptionsTX.dmaUsageState = DMA_USAGE_TEMPORARY_ALLOCATED;
<> 144:ef7eb2e8f9f7 727 } else {
<> 144:ef7eb2e8f9f7 728 /* Try to allocate channels */
<> 144:ef7eb2e8f9f7 729 if (spi_allocate_dma(obj)) {
<> 144:ef7eb2e8f9f7 730 obj->spi.dmaOptionsTX.dmaUsageState = DMA_USAGE_TEMPORARY_ALLOCATED;
<> 144:ef7eb2e8f9f7 731 } else {
<> 144:ef7eb2e8f9f7 732 obj->spi.dmaOptionsTX.dmaUsageState = state;
<> 144:ef7eb2e8f9f7 733 }
<> 144:ef7eb2e8f9f7 734 }
<> 144:ef7eb2e8f9f7 735 } else if (state == DMA_USAGE_NEVER) {
<> 144:ef7eb2e8f9f7 736 /* If channels are allocated, get rid of them */
<> 144:ef7eb2e8f9f7 737 if (obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_ALLOCATED) {
<> 144:ef7eb2e8f9f7 738 dma_channel_free(obj->spi.dmaOptionsTX.dmaChannel);
<> 144:ef7eb2e8f9f7 739 dma_channel_free(obj->spi.dmaOptionsRX.dmaChannel);
<> 144:ef7eb2e8f9f7 740 }
<> 144:ef7eb2e8f9f7 741 obj->spi.dmaOptionsTX.dmaUsageState = DMA_USAGE_NEVER;
<> 144:ef7eb2e8f9f7 742 }
<> 144:ef7eb2e8f9f7 743 }
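
The hint values map directly onto the three states documented above; a short sketch (the DMA_USAGE_* values come from the mbed dma_api types):

    spi_enable_dma(&spi, DMA_USAGE_ALWAYS);    /* allocate TX/RX channels and keep them */
    /* ... transfers ... */
    spi_enable_dma(&spi, DMA_USAGE_NEVER);     /* release the channels again */
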
<> 144:ef7eb2e8f9f7 744
<> 144:ef7eb2e8f9f7 745 #ifdef LDMA_PRESENT
<> 144:ef7eb2e8f9f7 746 /************************************************************************************
<> 144:ef7eb2e8f9f7 747 * DMA helper functions *
<> 144:ef7eb2e8f9f7 748 ************************************************************************************/
<> 144:ef7eb2e8f9f7 749 /******************************************
<> 144:ef7eb2e8f9f7 750 * static void serial_dmaTransferComplete(uint channel, bool primary, void* user)
<> 144:ef7eb2e8f9f7 751 *
<> 144:ef7eb2e8f9f7 752 * Callback function which gets called upon DMA transfer completion
<> 144:ef7eb2e8f9f7 753 * the user-defined pointer is pointing to the CPP-land thunk
<> 144:ef7eb2e8f9f7 754 ******************************************/
<> 144:ef7eb2e8f9f7 755 static void serial_dmaTransferComplete(unsigned int channel, bool primary, void *user)
<> 144:ef7eb2e8f9f7 756 {
<> 144:ef7eb2e8f9f7 757
<> 144:ef7eb2e8f9f7 758 /* User pointer should be a thunk to CPP land */
<> 144:ef7eb2e8f9f7 759 if (user != NULL) {
<> 144:ef7eb2e8f9f7 760 ((DMACallback)user)();
<> 144:ef7eb2e8f9f7 761 }
<> 144:ef7eb2e8f9f7 762 }
<> 144:ef7eb2e8f9f7 763 static void spi_master_dma_channel_setup(spi_t *obj, void* callback)
<> 144:ef7eb2e8f9f7 764 {
<> 144:ef7eb2e8f9f7 765 obj->spi.dmaOptionsRX.dmaCallback.userPtr = callback;
<> 144:ef7eb2e8f9f7 766 }
<> 144:ef7eb2e8f9f7 767 #else
<> 144:ef7eb2e8f9f7 768 /******************************************
<> 144:ef7eb2e8f9f7 769 * void spi_master_dma_channel_setup(spi_t *obj)
<> 144:ef7eb2e8f9f7 770 *
<> 144:ef7eb2e8f9f7 771 * This function will setup the DMA configuration for SPI transfers
<> 144:ef7eb2e8f9f7 772 *
<> 144:ef7eb2e8f9f7 773 * The channel numbers are fetched from the SPI instance, so this function
<> 144:ef7eb2e8f9f7 774 * should only be called when those channels have actually been allocated.
<> 144:ef7eb2e8f9f7 775 ******************************************/
<> 144:ef7eb2e8f9f7 776 static void spi_master_dma_channel_setup(spi_t *obj, void* callback)
<> 144:ef7eb2e8f9f7 777 {
<> 144:ef7eb2e8f9f7 778 DMA_CfgChannel_TypeDef rxChnlCfg;
<> 144:ef7eb2e8f9f7 779 DMA_CfgChannel_TypeDef txChnlCfg;
<> 144:ef7eb2e8f9f7 780
<> 144:ef7eb2e8f9f7 781 /* Setting up channel for rx. */
<> 144:ef7eb2e8f9f7 782 obj->spi.dmaOptionsRX.dmaCallback.cbFunc = transferComplete;
<> 144:ef7eb2e8f9f7 783 obj->spi.dmaOptionsRX.dmaCallback.userPtr = callback;
<> 144:ef7eb2e8f9f7 784
<> 144:ef7eb2e8f9f7 785 rxChnlCfg.highPri = false;
<> 144:ef7eb2e8f9f7 786 rxChnlCfg.enableInt = true;
<> 144:ef7eb2e8f9f7 787 rxChnlCfg.cb = &(obj->spi.dmaOptionsRX.dmaCallback);
<> 144:ef7eb2e8f9f7 788
<> 144:ef7eb2e8f9f7 789 /* Setting up channel for tx. */
<> 144:ef7eb2e8f9f7 790 obj->spi.dmaOptionsTX.dmaCallback.cbFunc = transferComplete;
<> 144:ef7eb2e8f9f7 791 obj->spi.dmaOptionsTX.dmaCallback.userPtr = callback;
<> 144:ef7eb2e8f9f7 792
<> 144:ef7eb2e8f9f7 793 txChnlCfg.highPri = false;
<> 144:ef7eb2e8f9f7 794 txChnlCfg.enableInt = true;
<> 144:ef7eb2e8f9f7 795 txChnlCfg.cb = &(obj->spi.dmaOptionsTX.dmaCallback);
<> 144:ef7eb2e8f9f7 796
<> 144:ef7eb2e8f9f7 797 switch ((int)obj->spi.spi) {
<> 144:ef7eb2e8f9f7 798 #ifdef USART0
<> 144:ef7eb2e8f9f7 799 case SPI_0:
<> 144:ef7eb2e8f9f7 800 rxChnlCfg.select = DMAREQ_USART0_RXDATAV;
<> 144:ef7eb2e8f9f7 801 txChnlCfg.select = DMAREQ_USART0_TXEMPTY;
<> 144:ef7eb2e8f9f7 802 break;
<> 144:ef7eb2e8f9f7 803 #endif
<> 144:ef7eb2e8f9f7 804 #ifdef USART1
<> 144:ef7eb2e8f9f7 805 case SPI_1:
<> 144:ef7eb2e8f9f7 806 rxChnlCfg.select = DMAREQ_USART1_RXDATAV;
<> 144:ef7eb2e8f9f7 807 txChnlCfg.select = DMAREQ_USART1_TXEMPTY;
<> 144:ef7eb2e8f9f7 808 break;
<> 144:ef7eb2e8f9f7 809 #endif
<> 144:ef7eb2e8f9f7 810 #ifdef USART2
<> 144:ef7eb2e8f9f7 811 case SPI_2:
<> 144:ef7eb2e8f9f7 812 rxChnlCfg.select = DMAREQ_USART2_RXDATAV;
<> 144:ef7eb2e8f9f7 813 txChnlCfg.select = DMAREQ_USART2_TXEMPTY;
<> 144:ef7eb2e8f9f7 814 break;
<> 144:ef7eb2e8f9f7 815 #endif
AnnaBridge 179:b0033dcd6934 816 #ifdef USART3
AnnaBridge 179:b0033dcd6934 817 case SPI_3:
AnnaBridge 179:b0033dcd6934 818 rxChnlCfg.select = DMAREQ_USART3_RXDATAV;
AnnaBridge 179:b0033dcd6934 819 txChnlCfg.select = DMAREQ_USART3_TXEMPTY;
AnnaBridge 179:b0033dcd6934 820 break;
AnnaBridge 179:b0033dcd6934 821 #endif
AnnaBridge 179:b0033dcd6934 822 #ifdef USART4
AnnaBridge 179:b0033dcd6934 823 case SPI_4:
AnnaBridge 179:b0033dcd6934 824 rxChnlCfg.select = DMAREQ_USART4_RXDATAV;
AnnaBridge 179:b0033dcd6934 825 txChnlCfg.select = DMAREQ_USART4_TXEMPTY;
AnnaBridge 179:b0033dcd6934 826 break;
AnnaBridge 179:b0033dcd6934 827 #endif
AnnaBridge 179:b0033dcd6934 828 #ifdef USART5
AnnaBridge 179:b0033dcd6934 829 case SPI_5:
AnnaBridge 179:b0033dcd6934 830 rxChnlCfg.select = DMAREQ_USART5_RXDATAV;
AnnaBridge 179:b0033dcd6934 831 txChnlCfg.select = DMAREQ_USART5_TXEMPTY;
AnnaBridge 179:b0033dcd6934 832 break;
AnnaBridge 179:b0033dcd6934 833 #endif
<> 144:ef7eb2e8f9f7 834 default:
<> 144:ef7eb2e8f9f7 835 error("Spi module not available.. Out of bound access.");
<> 144:ef7eb2e8f9f7 836 break;
<> 144:ef7eb2e8f9f7 837 }
<> 144:ef7eb2e8f9f7 838 DMA_CfgChannel(obj->spi.dmaOptionsRX.dmaChannel, &rxChnlCfg);
<> 144:ef7eb2e8f9f7 839 DMA_CfgChannel(obj->spi.dmaOptionsTX.dmaChannel, &txChnlCfg);
<> 144:ef7eb2e8f9f7 840 }
<> 144:ef7eb2e8f9f7 841 #endif // LDMA_PRESENT
<> 144:ef7eb2e8f9f7 842 /******************************************
<> 144:ef7eb2e8f9f7 843 * void spi_activate_dma(spi_t *obj, void* rxdata, void* txdata, int length)
<> 144:ef7eb2e8f9f7 844 *
<> 144:ef7eb2e8f9f7 845 * This function will start the DMA engine for SPI transfers
<> 144:ef7eb2e8f9f7 846 *
<> 144:ef7eb2e8f9f7 847 * * rxdata: pointer to RX buffer, if needed.
<> 144:ef7eb2e8f9f7 848 * * txdata: pointer to TX buffer, if needed. Else FF's.
<> 144:ef7eb2e8f9f7 849 * * tx_length: how many bytes will get sent.
<> 144:ef7eb2e8f9f7 850 * * rx_length: how many bytes will get received. If > tx_length, TX will get padded with n lower bits of SPI_FILL_WORD.
<> 144:ef7eb2e8f9f7 851 ******************************************/
<> 144:ef7eb2e8f9f7 852 #ifdef LDMA_PRESENT
<> 144:ef7eb2e8f9f7 853 static void spi_activate_dma(spi_t *obj, void* rxdata, const void* txdata, int tx_length, int rx_length)
<> 144:ef7eb2e8f9f7 854 {
<> 144:ef7eb2e8f9f7 855 LDMA_PeripheralSignal_t dma_periph;
<> 144:ef7eb2e8f9f7 856
<> 144:ef7eb2e8f9f7 857 if(rxdata) {
<> 144:ef7eb2e8f9f7 858 volatile const void *source_addr;
<> 144:ef7eb2e8f9f7 859 /* Select RX source address. A 9-bit frame length requires the extended RXDATAX register;
<> 144:ef7eb2e8f9f7 860 10-bit and larger frames require the RXDOUBLE register. */
<> 144:ef7eb2e8f9f7 861 switch((int)obj->spi.spi) {
AnnaBridge 179:b0033dcd6934 862 #ifdef USART0
<> 144:ef7eb2e8f9f7 863 case USART_0:
<> 144:ef7eb2e8f9f7 864 dma_periph = ldmaPeripheralSignal_USART0_RXDATAV;
<> 144:ef7eb2e8f9f7 865 break;
AnnaBridge 179:b0033dcd6934 866 #endif
AnnaBridge 179:b0033dcd6934 867 #ifdef USART1
<> 144:ef7eb2e8f9f7 868 case USART_1:
<> 144:ef7eb2e8f9f7 869 dma_periph = ldmaPeripheralSignal_USART1_RXDATAV;
<> 144:ef7eb2e8f9f7 870 break;
AnnaBridge 179:b0033dcd6934 871 #endif
AnnaBridge 179:b0033dcd6934 872 #ifdef USART2
AnnaBridge 179:b0033dcd6934 873 case USART_2:
AnnaBridge 179:b0033dcd6934 874 dma_periph = ldmaPeripheralSignal_USART2_RXDATAV;
AnnaBridge 179:b0033dcd6934 875 break;
AnnaBridge 179:b0033dcd6934 876 #endif
AnnaBridge 179:b0033dcd6934 877 #ifdef USART3
AnnaBridge 179:b0033dcd6934 878 case USART_3:
AnnaBridge 179:b0033dcd6934 879 dma_periph = ldmaPeripheralSignal_USART3_RXDATAV;
AnnaBridge 179:b0033dcd6934 880 break;
AnnaBridge 179:b0033dcd6934 881 #endif
AnnaBridge 179:b0033dcd6934 882 #ifdef USART4
AnnaBridge 179:b0033dcd6934 883 case USART_4:
AnnaBridge 179:b0033dcd6934 884 dma_periph = ldmaPeripheralSignal_USART4_RXDATAV;
AnnaBridge 179:b0033dcd6934 885 break;
AnnaBridge 179:b0033dcd6934 886 #endif
AnnaBridge 179:b0033dcd6934 887 #ifdef USART5
AnnaBridge 179:b0033dcd6934 888 case USART_5:
AnnaBridge 179:b0033dcd6934 889 dma_periph = ldmaPeripheralSignal_USART5_RXDATAV;
AnnaBridge 179:b0033dcd6934 890 break;
AnnaBridge 179:b0033dcd6934 891 #endif
<> 144:ef7eb2e8f9f7 892 default:
<> 144:ef7eb2e8f9f7 893 EFM_ASSERT(0);
<> 144:ef7eb2e8f9f7 894 while(1);
<> 144:ef7eb2e8f9f7 895 break;
<> 144:ef7eb2e8f9f7 896 }
<> 144:ef7eb2e8f9f7 897
<> 144:ef7eb2e8f9f7 898 if (obj->spi.bits <= 8) {
<> 144:ef7eb2e8f9f7 899 source_addr = &obj->spi.spi->RXDATA;
<> 144:ef7eb2e8f9f7 900 } else if (obj->spi.bits == 9) {
<> 144:ef7eb2e8f9f7 901 source_addr = &obj->spi.spi->RXDATAX;
<> 144:ef7eb2e8f9f7 902 } else {
<> 144:ef7eb2e8f9f7 903 source_addr = &obj->spi.spi->RXDOUBLE;
<> 144:ef7eb2e8f9f7 904 }
<> 144:ef7eb2e8f9f7 905
<> 144:ef7eb2e8f9f7 906 LDMA_TransferCfg_t xferConf = LDMA_TRANSFER_CFG_PERIPHERAL(dma_periph);
<> 144:ef7eb2e8f9f7 907 LDMA_Descriptor_t desc = LDMA_DESCRIPTOR_SINGLE_P2M_BYTE(source_addr, rxdata, rx_length);
<> 144:ef7eb2e8f9f7 908
<> 144:ef7eb2e8f9f7 909 if(obj->spi.bits >= 9){
<> 144:ef7eb2e8f9f7 910 desc.xfer.size = ldmaCtrlSizeHalf;
<> 144:ef7eb2e8f9f7 911 }
<> 144:ef7eb2e8f9f7 912
<> 144:ef7eb2e8f9f7 913 if (obj->tx_buff.width == 32) {
<> 144:ef7eb2e8f9f7 914 if (obj->spi.bits >= 9) {
<> 144:ef7eb2e8f9f7 915 desc.xfer.dstInc = ldmaCtrlDstIncTwo;
<> 144:ef7eb2e8f9f7 916 } else {
<> 144:ef7eb2e8f9f7 917 desc.xfer.dstInc = ldmaCtrlDstIncFour;
<> 144:ef7eb2e8f9f7 918 }
<> 144:ef7eb2e8f9f7 919 } else if (obj->tx_buff.width == 16) {
<> 144:ef7eb2e8f9f7 920 if (obj->spi.bits >= 9) {
<> 144:ef7eb2e8f9f7 921 desc.xfer.dstInc = ldmaCtrlDstIncOne;
<> 144:ef7eb2e8f9f7 922 } else {
<> 144:ef7eb2e8f9f7 923 desc.xfer.dstInc = ldmaCtrlDstIncTwo;
<> 144:ef7eb2e8f9f7 924 }
<> 144:ef7eb2e8f9f7 925 } else {
<> 144:ef7eb2e8f9f7 926 desc.xfer.dstInc = ldmaCtrlDstIncOne;
<> 144:ef7eb2e8f9f7 927 }
<> 144:ef7eb2e8f9f7 928
<> 144:ef7eb2e8f9f7 929 LDMAx_StartTransfer(obj->spi.dmaOptionsRX.dmaChannel, &xferConf, &desc, serial_dmaTransferComplete,obj->spi.dmaOptionsRX.dmaCallback.userPtr);
<> 144:ef7eb2e8f9f7 930 }
<> 144:ef7eb2e8f9f7 931
<> 144:ef7eb2e8f9f7 932 volatile void *target_addr;
<> 144:ef7eb2e8f9f7 933
<> 144:ef7eb2e8f9f7 934 /* Select TX target address. A 9-bit frame length requires the extended TXDATAX register;
<> 144:ef7eb2e8f9f7 935 10-bit and larger frames require the TXDOUBLE register. */
<> 144:ef7eb2e8f9f7 936 switch ((int)obj->spi.spi) {
<> 144:ef7eb2e8f9f7 937 case USART_0:
<> 144:ef7eb2e8f9f7 938 dma_periph = ldmaPeripheralSignal_USART0_TXBL;
<> 144:ef7eb2e8f9f7 939 break;
<> 144:ef7eb2e8f9f7 940 case USART_1:
<> 144:ef7eb2e8f9f7 941 dma_periph = ldmaPeripheralSignal_USART1_TXBL;
<> 144:ef7eb2e8f9f7 942 break;
<> 144:ef7eb2e8f9f7 943 default:
<> 144:ef7eb2e8f9f7 944 EFM_ASSERT(0);
<> 144:ef7eb2e8f9f7 945 while(1);
<> 144:ef7eb2e8f9f7 946 break;
<> 144:ef7eb2e8f9f7 947 }
<> 144:ef7eb2e8f9f7 948
<> 144:ef7eb2e8f9f7 949 if (obj->spi.bits <= 8) {
<> 144:ef7eb2e8f9f7 950 target_addr = &obj->spi.spi->TXDATA;
<> 144:ef7eb2e8f9f7 951 } else if (obj->spi.bits == 9) {
<> 144:ef7eb2e8f9f7 952 target_addr = &obj->spi.spi->TXDATAX;
<> 144:ef7eb2e8f9f7 953 } else {
<> 144:ef7eb2e8f9f7 954 target_addr = &obj->spi.spi->TXDOUBLE;
<> 144:ef7eb2e8f9f7 955 }
<> 144:ef7eb2e8f9f7 956
<> 144:ef7eb2e8f9f7 957 /* Check the transmit length, and split long transfers to smaller ones */
<> 144:ef7eb2e8f9f7 958 int max_length = 1024;
<> 144:ef7eb2e8f9f7 959 #ifdef _LDMA_CH_CTRL_XFERCNT_MASK
<> 144:ef7eb2e8f9f7 960 max_length = (_LDMA_CH_CTRL_XFERCNT_MASK>>_LDMA_CH_CTRL_XFERCNT_SHIFT)+1;
<> 144:ef7eb2e8f9f7 961 #endif
<> 144:ef7eb2e8f9f7 962 if (tx_length > max_length) {
<> 144:ef7eb2e8f9f7 963 tx_length = max_length;
<> 144:ef7eb2e8f9f7 964 }
<> 144:ef7eb2e8f9f7 965
<> 144:ef7eb2e8f9f7 966 /* Save amount of TX done by DMA */
<> 144:ef7eb2e8f9f7 967 obj->tx_buff.pos += tx_length;
<> 144:ef7eb2e8f9f7 968
<> 144:ef7eb2e8f9f7 969 LDMA_TransferCfg_t xferConf = LDMA_TRANSFER_CFG_PERIPHERAL(dma_periph);
<> 144:ef7eb2e8f9f7 970 LDMA_Descriptor_t desc = LDMA_DESCRIPTOR_SINGLE_M2P_BYTE((txdata ? txdata : &fill_word), target_addr, tx_length);
<> 144:ef7eb2e8f9f7 971
<> 144:ef7eb2e8f9f7 972 if (obj->spi.bits >= 9) {
<> 144:ef7eb2e8f9f7 973 desc.xfer.size = ldmaCtrlSizeHalf;
<> 144:ef7eb2e8f9f7 974 }
<> 144:ef7eb2e8f9f7 975
<> 144:ef7eb2e8f9f7 976 if (!txdata) {
<> 144:ef7eb2e8f9f7 977 desc.xfer.srcInc = ldmaCtrlSrcIncNone;
<> 144:ef7eb2e8f9f7 978 } else if (obj->tx_buff.width == 32) {
<> 144:ef7eb2e8f9f7 979 if (obj->spi.bits >= 9) {
<> 144:ef7eb2e8f9f7 980 desc.xfer.srcInc = ldmaCtrlSrcIncTwo;
<> 144:ef7eb2e8f9f7 981 } else {
<> 144:ef7eb2e8f9f7 982 desc.xfer.srcInc = ldmaCtrlSrcIncFour;
<> 144:ef7eb2e8f9f7 983 }
<> 144:ef7eb2e8f9f7 984 } else if (obj->tx_buff.width == 16) {
<> 144:ef7eb2e8f9f7 985 if (obj->spi.bits >= 9) {
<> 144:ef7eb2e8f9f7 986 desc.xfer.srcInc = ldmaCtrlSrcIncOne;
<> 144:ef7eb2e8f9f7 987 } else {
<> 144:ef7eb2e8f9f7 988 desc.xfer.srcInc = ldmaCtrlSrcIncTwo;
<> 144:ef7eb2e8f9f7 989 }
<> 144:ef7eb2e8f9f7 990 } else {
<> 144:ef7eb2e8f9f7 991 desc.xfer.srcInc = ldmaCtrlSrcIncOne;
<> 144:ef7eb2e8f9f7 992 }
<> 144:ef7eb2e8f9f7 993
<> 144:ef7eb2e8f9f7 994 // Kick off DMA TX
<> 144:ef7eb2e8f9f7 995 LDMAx_StartTransfer(obj->spi.dmaOptionsTX.dmaChannel, &xferConf, &desc, serial_dmaTransferComplete,obj->spi.dmaOptionsTX.dmaCallback.userPtr);
<> 144:ef7eb2e8f9f7 996 }
<> 144:ef7eb2e8f9f7 997
<> 144:ef7eb2e8f9f7 998 #else
<> 144:ef7eb2e8f9f7 999 /******************************************
<> 144:ef7eb2e8f9f7 1000 * void spi_activate_dma(spi_t *obj, void* rxdata, void* txdata, int length)
<> 144:ef7eb2e8f9f7 1001 *
<> 144:ef7eb2e8f9f7 1002 * This function will start the DMA engine for SPI transfers
<> 144:ef7eb2e8f9f7 1003 *
<> 144:ef7eb2e8f9f7 1004 * * rxdata: pointer to RX buffer, if needed.
<> 144:ef7eb2e8f9f7 1005 * * txdata: pointer to TX buffer, if needed. Else FF's.
<> 144:ef7eb2e8f9f7 1006 * * tx_length: how many bytes will get sent.
<> 144:ef7eb2e8f9f7 1007 * * rx_length: how many bytes will get received. If > tx_length, TX will get padded with n lower bits of SPI_FILL_WORD.
<> 144:ef7eb2e8f9f7 1008 ******************************************/
<> 144:ef7eb2e8f9f7 1009 static void spi_activate_dma(spi_t *obj, void* rxdata, const void* txdata, int tx_length, int rx_length)
<> 144:ef7eb2e8f9f7 1010 {
<> 144:ef7eb2e8f9f7 1011 /* DMA descriptors */
<> 144:ef7eb2e8f9f7 1012 DMA_CfgDescr_TypeDef rxDescrCfg;
<> 144:ef7eb2e8f9f7 1013 DMA_CfgDescr_TypeDef txDescrCfg;
<> 144:ef7eb2e8f9f7 1014
<> 144:ef7eb2e8f9f7 1015 /* Split up transfers if the length is larger than what the DMA supports. */
<> 144:ef7eb2e8f9f7 1016 const int DMA_MAX_TRANSFER = (_DMA_CTRL_N_MINUS_1_MASK >> _DMA_CTRL_N_MINUS_1_SHIFT);
<> 144:ef7eb2e8f9f7 1017
<> 144:ef7eb2e8f9f7 1018 if (tx_length > DMA_MAX_TRANSFER) {
<> 144:ef7eb2e8f9f7 1019 tx_length = DMA_MAX_TRANSFER;
<> 144:ef7eb2e8f9f7 1020 }
<> 144:ef7eb2e8f9f7 1021 if (rx_length > DMA_MAX_TRANSFER) {
<> 144:ef7eb2e8f9f7 1022 rx_length = DMA_MAX_TRANSFER;
<> 144:ef7eb2e8f9f7 1023 }
<> 144:ef7eb2e8f9f7 1024
<> 144:ef7eb2e8f9f7 1025 /* Save amount of TX done by DMA */
<> 144:ef7eb2e8f9f7 1026 obj->tx_buff.pos += tx_length;
<> 144:ef7eb2e8f9f7 1027 obj->rx_buff.pos += rx_length;
<> 144:ef7eb2e8f9f7 1028
<> 144:ef7eb2e8f9f7 1029 /* Only activate RX DMA if a receive buffer is specified */
<> 144:ef7eb2e8f9f7 1030 if (rxdata != NULL) {
<> 144:ef7eb2e8f9f7 1031 // Setting up channel descriptor
<> 144:ef7eb2e8f9f7 1032 if (obj->rx_buff.width == 32) {
<> 144:ef7eb2e8f9f7 1033 rxDescrCfg.dstInc = dmaDataInc4;
<> 144:ef7eb2e8f9f7 1034 } else if (obj->rx_buff.width == 16) {
<> 144:ef7eb2e8f9f7 1035 rxDescrCfg.dstInc = dmaDataInc2;
<> 144:ef7eb2e8f9f7 1036 } else {
<> 144:ef7eb2e8f9f7 1037 rxDescrCfg.dstInc = dmaDataInc1;
<> 144:ef7eb2e8f9f7 1038 }
<> 144:ef7eb2e8f9f7 1039 rxDescrCfg.srcInc = dmaDataIncNone;
<> 144:ef7eb2e8f9f7 1040 rxDescrCfg.size = (obj->spi.bits <= 8 ? dmaDataSize1 : dmaDataSize2); //When frame size >= 9, use RXDOUBLE
<> 144:ef7eb2e8f9f7 1041 rxDescrCfg.arbRate = dmaArbitrate1;
<> 144:ef7eb2e8f9f7 1042 rxDescrCfg.hprot = 0;
<> 144:ef7eb2e8f9f7 1043 DMA_CfgDescr(obj->spi.dmaOptionsRX.dmaChannel, true, &rxDescrCfg);
<> 144:ef7eb2e8f9f7 1044
<> 144:ef7eb2e8f9f7 1045 void * rx_reg;
<> 144:ef7eb2e8f9f7 1046 if (obj->spi.bits > 9) {
<> 144:ef7eb2e8f9f7 1047 rx_reg = (void *)&obj->spi.spi->RXDOUBLE;
<> 144:ef7eb2e8f9f7 1048 } else if (obj->spi.bits == 9) {
<> 144:ef7eb2e8f9f7 1049 rx_reg = (void *)&obj->spi.spi->RXDATAX;
<> 144:ef7eb2e8f9f7 1050 } else {
<> 144:ef7eb2e8f9f7 1051 rx_reg = (void *)&obj->spi.spi->RXDATA;
<> 144:ef7eb2e8f9f7 1052 }
<> 144:ef7eb2e8f9f7 1053
<> 144:ef7eb2e8f9f7 1054 /* Activate RX channel */
<> 144:ef7eb2e8f9f7 1055 DMA_ActivateBasic(obj->spi.dmaOptionsRX.dmaChannel,
<> 144:ef7eb2e8f9f7 1056 true,
<> 144:ef7eb2e8f9f7 1057 false,
<> 144:ef7eb2e8f9f7 1058 rxdata,
<> 144:ef7eb2e8f9f7 1059 rx_reg,
<> 144:ef7eb2e8f9f7 1060 rx_length - 1);
<> 144:ef7eb2e8f9f7 1061 }
<> 144:ef7eb2e8f9f7 1062
<> 144:ef7eb2e8f9f7 1063 // When no TX buffer is given, transmit from the static fill_word buffer (all FFs).
<> 144:ef7eb2e8f9f7 1064 /* Setting up channel descriptor */
<> 144:ef7eb2e8f9f7 1065 txDescrCfg.dstInc = dmaDataIncNone;
<> 144:ef7eb2e8f9f7 1066 if (txdata == 0) {
<> 144:ef7eb2e8f9f7 1067 // Don't increment source when there is no transmit buffer
<> 144:ef7eb2e8f9f7 1068 txDescrCfg.srcInc = dmaDataIncNone;
<> 144:ef7eb2e8f9f7 1069 } else {
<> 144:ef7eb2e8f9f7 1070 if (obj->tx_buff.width == 32) {
<> 144:ef7eb2e8f9f7 1071 txDescrCfg.srcInc = dmaDataInc4;
<> 144:ef7eb2e8f9f7 1072 } else if (obj->tx_buff.width == 16) {
<> 144:ef7eb2e8f9f7 1073 txDescrCfg.srcInc = dmaDataInc2;
<> 144:ef7eb2e8f9f7 1074 } else {
<> 144:ef7eb2e8f9f7 1075 txDescrCfg.srcInc = dmaDataInc1;
<> 144:ef7eb2e8f9f7 1076 }
<> 144:ef7eb2e8f9f7 1077 }
<> 144:ef7eb2e8f9f7 1078 txDescrCfg.size = (obj->spi.bits <= 8 ? dmaDataSize1 : dmaDataSize2); //When frame size >= 9, use TXDOUBLE
<> 144:ef7eb2e8f9f7 1079 txDescrCfg.arbRate = dmaArbitrate1;
<> 144:ef7eb2e8f9f7 1080 txDescrCfg.hprot = 0;
<> 144:ef7eb2e8f9f7 1081 DMA_CfgDescr(obj->spi.dmaOptionsTX.dmaChannel, true, &txDescrCfg);
<> 144:ef7eb2e8f9f7 1082
<> 144:ef7eb2e8f9f7 1083 void * tx_reg;
<> 144:ef7eb2e8f9f7 1084 if (obj->spi.bits > 9) {
<> 144:ef7eb2e8f9f7 1085 tx_reg = (void *)&obj->spi.spi->TXDOUBLE;
<> 144:ef7eb2e8f9f7 1086 } else if (obj->spi.bits == 9) {
<> 144:ef7eb2e8f9f7 1087 tx_reg = (void *)&obj->spi.spi->TXDATAX;
<> 144:ef7eb2e8f9f7 1088 } else {
<> 144:ef7eb2e8f9f7 1089 tx_reg = (void *)&obj->spi.spi->TXDATA;
<> 144:ef7eb2e8f9f7 1090 }
<> 144:ef7eb2e8f9f7 1091
<> 144:ef7eb2e8f9f7 1092 /* Activate TX channel */
<> 144:ef7eb2e8f9f7 1093 DMA_ActivateBasic(obj->spi.dmaOptionsTX.dmaChannel,
<> 144:ef7eb2e8f9f7 1094 true,
<> 144:ef7eb2e8f9f7 1095 false,
<> 144:ef7eb2e8f9f7 1096 tx_reg,
<> 144:ef7eb2e8f9f7 1097 (txdata == 0 ? &fill_word : (void *)txdata), // When there is nothing to transmit, point to static fill word
<> 144:ef7eb2e8f9f7 1098 (tx_length - 1));
<> 144:ef7eb2e8f9f7 1099 }
<> 144:ef7eb2e8f9f7 1100 #endif //LDMA_PRESENT
<> 144:ef7eb2e8f9f7 1101 /********************************************************************
<> 144:ef7eb2e8f9f7 1102 * spi_master_transfer_dma(spi_t *obj, void *rxdata, void *txdata, int length, DMACallback cb, DMAUsage hint)
<> 144:ef7eb2e8f9f7 1103 *
<> 144:ef7eb2e8f9f7 1104 * Start an SPI transfer by using DMA and the supplied hint for DMA usage
<> 144:ef7eb2e8f9f7 1105 *
<> 144:ef7eb2e8f9f7 1106 * * obj: pointer to specific SPI instance
<> 144:ef7eb2e8f9f7 1107 * * rxdata: pointer to rx buffer. If null, we will assume only TX is relevant, and RX will be ignored.
<> 144:ef7eb2e8f9f7 1108 * * txdata: pointer to TX buffer. If null, we will assume only the read is relevant, and will send FF's for reading back.
<> 144:ef7eb2e8f9f7 1109 * * length: How many bytes should be written/read.
<> 144:ef7eb2e8f9f7 1110 * * cb: thunk pointer into CPP-land to get the spi object
<> 144:ef7eb2e8f9f7 1111 * * hint: hint for the requested DMA useage.
<> 144:ef7eb2e8f9f7 1112 * * NEVER: do not use DMA, but use IRQ instead
<> 144:ef7eb2e8f9f7 1113 * * OPPORTUNISTIC: use DMA if there are channels available, but return them after the transfer.
<> 144:ef7eb2e8f9f7 1114 * * ALWAYS: use DMA if channels are available, and hold on to the channels after the transfer.
<> 144:ef7eb2e8f9f7 1115 * If the previous transfer has kept the channel, that channel will continue to get used.
<> 144:ef7eb2e8f9f7 1116 *
<> 144:ef7eb2e8f9f7 1117 ********************************************************************/
<> 144:ef7eb2e8f9f7 1118 void spi_master_transfer_dma(spi_t *obj, const void *txdata, void *rxdata, int tx_length, int rx_length, void* cb, DMAUsage hint)
<> 144:ef7eb2e8f9f7 1119 {
<> 144:ef7eb2e8f9f7 1120 /* Init the DMA early so that its power consumption is included in the transfer's power figure */
<> 144:ef7eb2e8f9f7 1121 dma_init();
<> 144:ef7eb2e8f9f7 1122 /* Clear TX and RX registers */
<> 144:ef7eb2e8f9f7 1123 obj->spi.spi->CMD = USART_CMD_CLEARTX;
<> 144:ef7eb2e8f9f7 1124 obj->spi.spi->CMD = USART_CMD_CLEARRX;
<> 144:ef7eb2e8f9f7 1125 /* If the DMA channels are already allocated, we can assume they have already been set up */
<> 144:ef7eb2e8f9f7 1126 if (hint != DMA_USAGE_NEVER && obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_ALLOCATED) {
<> 144:ef7eb2e8f9f7 1127 /* setup has already been done, so just activate the transfer */
<> 144:ef7eb2e8f9f7 1128 spi_activate_dma(obj, rxdata, txdata, tx_length, rx_length);
<> 144:ef7eb2e8f9f7 1129 } else if (hint == DMA_USAGE_NEVER) {
<> 144:ef7eb2e8f9f7 1130 /* use IRQ */
<> 144:ef7eb2e8f9f7 1131 obj->spi.spi->IFC = 0xFFFFFFFF;
<> 144:ef7eb2e8f9f7 1132 spi_master_write_asynch(obj);
<> 144:ef7eb2e8f9f7 1133 spi_enable_interrupt(obj, (uint32_t)cb, true);
<> 144:ef7eb2e8f9f7 1134 } else {
<> 144:ef7eb2e8f9f7 1135 /* try to acquire channels */
<> 144:ef7eb2e8f9f7 1136 dma_init();
<> 144:ef7eb2e8f9f7 1137 spi_enable_dma(obj, hint);
<> 144:ef7eb2e8f9f7 1138
<> 144:ef7eb2e8f9f7 1139 /* decide between DMA and IRQ */
<> 144:ef7eb2e8f9f7 1140 if (obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_ALLOCATED || obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_TEMPORARY_ALLOCATED) {
<> 144:ef7eb2e8f9f7 1141 /* disable the interrupts that may have been left open previously */
<> 144:ef7eb2e8f9f7 1142 spi_enable_interrupt(obj, (uint32_t)cb, false);
<> 144:ef7eb2e8f9f7 1143
<> 144:ef7eb2e8f9f7 1144 /* DMA channels are allocated, so do their setup */
<> 144:ef7eb2e8f9f7 1145 spi_master_dma_channel_setup(obj, cb);
<> 144:ef7eb2e8f9f7 1146 /* and activate the transfer */
<> 144:ef7eb2e8f9f7 1147 spi_activate_dma(obj, rxdata, txdata, tx_length, rx_length);
<> 144:ef7eb2e8f9f7 1148 } else {
<> 144:ef7eb2e8f9f7 1149 /* DMA is unavailable, so fall back to IRQ */
<> 144:ef7eb2e8f9f7 1150 obj->spi.spi->IFC = 0xFFFFFFFF;
<> 144:ef7eb2e8f9f7 1151 spi_master_write_asynch(obj);
<> 144:ef7eb2e8f9f7 1152 spi_enable_interrupt(obj, (uint32_t)cb, true);
<> 144:ef7eb2e8f9f7 1153 }
<> 144:ef7eb2e8f9f7 1154 }
<> 144:ef7eb2e8f9f7 1155 }
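/* Usage sketch (illustration only): this helper is normally reached through
 * spi_master_transfer(), which records the buffers via spi_buffer_set() and blocks
 * deep sleep first. A direct TX-only call could look like the following; `my_spi`,
 * `tx_buf` and `my_thunk` are hypothetical names, and event registration via
 * spi_enable_event() is omitted for brevity.
 *
 *     static uint8_t tx_buf[16];
 *
 *     spi_buffer_set(&my_spi, tx_buf, sizeof(tx_buf), NULL, 0, 8);
 *     blockSleepMode(SPI_LEAST_ACTIVE_SLEEPMODE);
 *     // OPPORTUNISTIC: take DMA channels if any are free, release them when done
 *     spi_master_transfer_dma(&my_spi, tx_buf, NULL, sizeof(tx_buf), 0,
 *                             (void *)my_thunk, DMA_USAGE_OPPORTUNISTIC);
 */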
<> 144:ef7eb2e8f9f7 1156
<> 144:ef7eb2e8f9f7 1157 /** Begin the asynchronous SPI transfer. Buffer pointers and lengths are recorded in tx_buff and rx_buff. (A usage sketch follows this function.)
<> 144:ef7eb2e8f9f7 1158 *
<> 144:ef7eb2e8f9f7 1159 * @param[in] obj The SPI object which holds the transfer information
<> 144:ef7eb2e8f9f7 1160 * @param[in] tx The buffer to send
<> 144:ef7eb2e8f9f7 1161 * @param[in] tx_length The number of words to transmit
<> 144:ef7eb2e8f9f7 1162 * @param[in] rx The buffer to receive
<> 144:ef7eb2e8f9f7 1163 * @param[in] rx_length The number of words to receive
<> 144:ef7eb2e8f9f7 1164 * @param[in] bit_width The bit width of buffer words
<> 144:ef7eb2e8f9f7 1165 * @param[in] handler SPI interrupt handler
<> 144:ef7eb2e8f9f7 1166 * @param[in] event The logical OR of events to be registered
<> 144:ef7eb2e8f9f7 1167 * @param[in] hint A suggestion for how to use DMA with this transfer
<> 144:ef7eb2e8f9f7 1168 */
<> 144:ef7eb2e8f9f7 1169 void spi_master_transfer(spi_t *obj, const void *tx, size_t tx_length, void *rx, size_t rx_length, uint8_t bit_width, uint32_t handler, uint32_t event, DMAUsage hint)
<> 144:ef7eb2e8f9f7 1170 {
<> 144:ef7eb2e8f9f7 1171 if( spi_active(obj) ) return;
<> 144:ef7eb2e8f9f7 1172
<> 144:ef7eb2e8f9f7 1173 /* update fill word if on 9-bit frame size */
<> 144:ef7eb2e8f9f7 1174 if(obj->spi.bits == 9) fill_word = SPI_FILL_WORD & 0x1FF;
<> 144:ef7eb2e8f9f7 1175 else fill_word = SPI_FILL_WORD;
<> 144:ef7eb2e8f9f7 1176
<> 144:ef7eb2e8f9f7 1177 /* Corner case: nothing to transmit, so clock out fill words for the length of the read */
<> 144:ef7eb2e8f9f7 1178 if(tx_length == 0) {
<> 144:ef7eb2e8f9f7 1179 tx_length = rx_length;
<> 144:ef7eb2e8f9f7 1180 tx = (void*) 0;
<> 144:ef7eb2e8f9f7 1181 }
<> 144:ef7eb2e8f9f7 1182
<> 144:ef7eb2e8f9f7 1183 /* First, set the buffer */
<> 144:ef7eb2e8f9f7 1184 spi_buffer_set(obj, tx, tx_length, rx, rx_length, bit_width);
<> 144:ef7eb2e8f9f7 1185
<> 144:ef7eb2e8f9f7 1186 /* Then, enable the events */
<> 144:ef7eb2e8f9f7 1187 spi_enable_event(obj, SPI_EVENT_ALL, false);
<> 144:ef7eb2e8f9f7 1188 spi_enable_event(obj, event, true);
<> 144:ef7eb2e8f9f7 1189
<> 144:ef7eb2e8f9f7 1190 // Block sleep modes deeper than SPI_LEAST_ACTIVE_SLEEPMODE while the transfer is in progress
<> 144:ef7eb2e8f9f7 1191 blockSleepMode(SPI_LEAST_ACTIVE_SLEEPMODE);
<> 144:ef7eb2e8f9f7 1192
<> 144:ef7eb2e8f9f7 1193 /* And kick off the transfer */
<> 144:ef7eb2e8f9f7 1194 spi_master_transfer_dma(obj, tx, rx, tx_length, rx_length, (void*)handler, hint);
<> 144:ef7eb2e8f9f7 1195 }
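/* Usage sketch (illustration only): this HAL entry point is normally driven by the
 * C++ SPI::transfer() layer, which supplies the IRQ handler address and event mask.
 * `my_spi`, `handler_addr` and `example_duplex_transfer` below are hypothetical
 * names used only for this example, not part of this file.
 *
 *     extern spi_t my_spi;            // initialised elsewhere via spi_init()
 *     extern uint32_t handler_addr;   // address of the C++ IRQ thunk
 *
 *     void example_duplex_transfer(void)
 *     {
 *         static uint8_t tx[4] = { 0x9F };  // e.g. a "read ID" command
 *         static uint8_t rx[4];             // static: buffers must outlive the call
 *         spi_master_transfer(&my_spi, tx, sizeof(tx), rx, sizeof(rx), 8,
 *                             handler_addr, SPI_EVENT_COMPLETE, DMA_USAGE_NEVER);
 *     }
 */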
<> 144:ef7eb2e8f9f7 1196
<> 144:ef7eb2e8f9f7 1197
<> 144:ef7eb2e8f9f7 1198 /********************************************************************
<> 144:ef7eb2e8f9f7 1199 * uint32_t spi_irq_handler_asynch(spi_t* obj)
<> 144:ef7eb2e8f9f7 1200 *
<> 144:ef7eb2e8f9f7 1201 * Handler that CPP-land should call when either a DMA or an SPI IRQ fires for an SPI transaction.
<> 144:ef7eb2e8f9f7 1202 *
<> 144:ef7eb2e8f9f7 1203 * * obj: pointer to the specific SPI instance
<> 144:ef7eb2e8f9f7 1204 *
<> 144:ef7eb2e8f9f7 1205 * return: event mask. Currently either 0 (transfer still in progress) or SPI_EVENT_COMPLETE upon completion.
<> 144:ef7eb2e8f9f7 1206 * (A dispatch sketch follows both implementations below.)
<> 144:ef7eb2e8f9f7 1207 ********************************************************************/
<> 144:ef7eb2e8f9f7 1208 #ifdef LDMA_PRESENT
<> 144:ef7eb2e8f9f7 1209 uint32_t spi_irq_handler_asynch(spi_t* obj)
<> 144:ef7eb2e8f9f7 1210 {
<> 144:ef7eb2e8f9f7 1211 if (obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_ALLOCATED || obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_TEMPORARY_ALLOCATED) {
<> 144:ef7eb2e8f9f7 1212 /* DMA implementation */
<> 144:ef7eb2e8f9f7 1213 /* If there is still data in the TX buffer, setup a new transfer. */
<> 144:ef7eb2e8f9f7 1214 if (obj->tx_buff.pos < obj->tx_buff.length) {
<> 144:ef7eb2e8f9f7 1215 /* Find position and remaining length without modifying tx_buff. */
<> 144:ef7eb2e8f9f7 1216 void* tx_pointer = (char*)obj->tx_buff.buffer + obj->tx_buff.pos;
<> 144:ef7eb2e8f9f7 1217 uint32_t tx_length = obj->tx_buff.length - obj->tx_buff.pos;
<> 144:ef7eb2e8f9f7 1218
<> 144:ef7eb2e8f9f7 1219 /* Begin transfer. Rely on spi_activate_dma to split up the transfer further. */
<> 144:ef7eb2e8f9f7 1220 spi_activate_dma(obj, obj->rx_buff.buffer, tx_pointer, tx_length, obj->rx_buff.length);
<> 144:ef7eb2e8f9f7 1221
<> 144:ef7eb2e8f9f7 1222 return 0;
<> 144:ef7eb2e8f9f7 1223 }
<> 144:ef7eb2e8f9f7 1224 /* If there is an RX transfer ongoing, wait for it to finish */
<> 144:ef7eb2e8f9f7 1225 if (LDMAx_ChannelEnabled(obj->spi.dmaOptionsRX.dmaChannel)) {
<> 144:ef7eb2e8f9f7 1226 /* Check if we need to kick off TX transfer again to force more incoming data. */
<> 144:ef7eb2e8f9f7 1227 if (LDMA_TransferDone(obj->spi.dmaOptionsTX.dmaChannel) && (obj->tx_buff.pos < obj->rx_buff.length)) {
<> 144:ef7eb2e8f9f7 1228 void* tx_pointer = (char*)obj->tx_buff.buffer + obj->tx_buff.pos;
<> 144:ef7eb2e8f9f7 1229 uint32_t tx_length = obj->tx_buff.length - obj->tx_buff.pos;
<> 144:ef7eb2e8f9f7 1230 /* Begin transfer. Rely on spi_activate_dma to split up the transfer further. */
<> 144:ef7eb2e8f9f7 1231 spi_activate_dma(obj, obj->rx_buff.buffer, tx_pointer, tx_length, obj->rx_buff.length);
<> 144:ef7eb2e8f9f7 1232 } else return 0;
<> 144:ef7eb2e8f9f7 1233 }
<> 144:ef7eb2e8f9f7 1234 /* If there is still a TX transfer ongoing (tx_length > rx_length), wait for it to finish */
<> 144:ef7eb2e8f9f7 1235 if (!LDMA_TransferDone(obj->spi.dmaOptionsTX.dmaChannel)) {
<> 144:ef7eb2e8f9f7 1236 return 0;
<> 144:ef7eb2e8f9f7 1237 }
<> 144:ef7eb2e8f9f7 1238 /* Release the dma channels if they were opportunistically allocated */
<> 144:ef7eb2e8f9f7 1239 if (obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_TEMPORARY_ALLOCATED) {
<> 144:ef7eb2e8f9f7 1240 dma_channel_free(obj->spi.dmaOptionsTX.dmaChannel);
<> 144:ef7eb2e8f9f7 1241 dma_channel_free(obj->spi.dmaOptionsRX.dmaChannel);
<> 144:ef7eb2e8f9f7 1242 obj->spi.dmaOptionsTX.dmaUsageState = DMA_USAGE_OPPORTUNISTIC;
<> 144:ef7eb2e8f9f7 1243 }
<> 144:ef7eb2e8f9f7 1244
<> 144:ef7eb2e8f9f7 1245 /* Wait for the transmit to complete before signalling completion to user code */
<> 144:ef7eb2e8f9f7 1246 while(!(obj->spi.spi->STATUS & USART_STATUS_TXC));
<> 144:ef7eb2e8f9f7 1247 unblockSleepMode(SPI_LEAST_ACTIVE_SLEEPMODE);
<> 144:ef7eb2e8f9f7 1248 /* return to CPP land to say we're finished */
<> 144:ef7eb2e8f9f7 1249 return SPI_EVENT_COMPLETE;
<> 144:ef7eb2e8f9f7 1250 } else {
<> 144:ef7eb2e8f9f7 1251 /* IRQ implementation */
<> 144:ef7eb2e8f9f7 1252 if (spi_master_rx_int_flag(obj)) {
<> 144:ef7eb2e8f9f7 1253 spi_master_read_asynch(obj);
<> 144:ef7eb2e8f9f7 1254 }
<> 144:ef7eb2e8f9f7 1255
<> 144:ef7eb2e8f9f7 1256 if (spi_master_tx_int_flag(obj)) {
<> 144:ef7eb2e8f9f7 1257 spi_master_write_asynch(obj);
<> 144:ef7eb2e8f9f7 1258 }
<> 144:ef7eb2e8f9f7 1259
<> 144:ef7eb2e8f9f7 1260 uint32_t event = spi_event_check(obj);
<> 144:ef7eb2e8f9f7 1261 if (event & SPI_EVENT_INTERNAL_TRANSFER_COMPLETE) {
<> 144:ef7eb2e8f9f7 1262 /* disable interrupts */
<> 144:ef7eb2e8f9f7 1263 spi_enable_interrupt(obj, (uint32_t)NULL, false);
<> 144:ef7eb2e8f9f7 1264
<> 144:ef7eb2e8f9f7 1265 unblockSleepMode(SPI_LEAST_ACTIVE_SLEEPMODE);
<> 144:ef7eb2e8f9f7 1266 /* Return the event back to userland */
<> 144:ef7eb2e8f9f7 1267 return event;
<> 144:ef7eb2e8f9f7 1268 }
<> 144:ef7eb2e8f9f7 1269
<> 144:ef7eb2e8f9f7 1270 return 0;
<> 144:ef7eb2e8f9f7 1271 }
<> 144:ef7eb2e8f9f7 1272 }
<> 144:ef7eb2e8f9f7 1273 #else
<> 144:ef7eb2e8f9f7 1274 uint32_t spi_irq_handler_asynch(spi_t* obj)
<> 144:ef7eb2e8f9f7 1275 {
<> 144:ef7eb2e8f9f7 1276
<> 144:ef7eb2e8f9f7 1277 /* Determine whether the current scenario is DMA or IRQ, and act accordingly */
<> 144:ef7eb2e8f9f7 1278
<> 144:ef7eb2e8f9f7 1279 if (obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_ALLOCATED || obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_TEMPORARY_ALLOCATED) {
<> 144:ef7eb2e8f9f7 1280 /* DMA implementation */
<> 144:ef7eb2e8f9f7 1281
<> 144:ef7eb2e8f9f7 1282 /* If there is still data in the TX buffer, setup a new transfer. */
<> 144:ef7eb2e8f9f7 1283 if (obj->tx_buff.pos < obj->tx_buff.length) {
<> 144:ef7eb2e8f9f7 1284 /* If there is still a TX transfer ongoing, let it finish
<> 144:ef7eb2e8f9f7 1285 * before (if necessary) kicking off a new transfer */
<> 144:ef7eb2e8f9f7 1286 if (DMA_ChannelEnabled(obj->spi.dmaOptionsTX.dmaChannel)) {
<> 144:ef7eb2e8f9f7 1287 return 0;
<> 144:ef7eb2e8f9f7 1288 }
<> 144:ef7eb2e8f9f7 1289 /* Find position and remaining length without modifying tx_buff. */
<> 144:ef7eb2e8f9f7 1290 void * tx_pointer;
<> 144:ef7eb2e8f9f7 1291 if (obj->tx_buff.width == 32) {
<> 144:ef7eb2e8f9f7 1292 tx_pointer = ((uint32_t *)obj->tx_buff.buffer) + obj->tx_buff.pos;
<> 144:ef7eb2e8f9f7 1293 } else if (obj->tx_buff.width == 16) {
<> 144:ef7eb2e8f9f7 1294 tx_pointer = ((uint16_t *)obj->tx_buff.buffer) + obj->tx_buff.pos;
<> 144:ef7eb2e8f9f7 1295 } else {
<> 144:ef7eb2e8f9f7 1296 tx_pointer = ((uint8_t *)obj->tx_buff.buffer) + obj->tx_buff.pos;
<> 144:ef7eb2e8f9f7 1297 }
<> 144:ef7eb2e8f9f7 1298 uint32_t tx_length = obj->tx_buff.length - obj->tx_buff.pos;
<> 144:ef7eb2e8f9f7 1299
<> 144:ef7eb2e8f9f7 1300 /* Refresh RX transfer too if it exists */
<> 144:ef7eb2e8f9f7 1301 void * rx_pointer = NULL;
<> 144:ef7eb2e8f9f7 1302 if (obj->rx_buff.pos < obj->rx_buff.length) {
<> 144:ef7eb2e8f9f7 1303 if (obj->rx_buff.width == 32) {
<> 144:ef7eb2e8f9f7 1304 rx_pointer = ((uint32_t *)obj->rx_buff.buffer) + obj->rx_buff.pos;
<> 144:ef7eb2e8f9f7 1305 } else if (obj->rx_buff.width == 16) {
<> 144:ef7eb2e8f9f7 1306 rx_pointer = ((uint16_t *)obj->rx_buff.buffer) + obj->rx_buff.pos;
<> 144:ef7eb2e8f9f7 1307 } else {
<> 144:ef7eb2e8f9f7 1308 rx_pointer = ((uint8_t *)obj->rx_buff.buffer) + obj->rx_buff.pos;
AnnaBridge 179:b0033dcd6934 1309 }
<> 144:ef7eb2e8f9f7 1310 }
<> 144:ef7eb2e8f9f7 1311 uint32_t rx_length = obj->rx_buff.length - obj->rx_buff.pos;
<> 144:ef7eb2e8f9f7 1312
<> 144:ef7eb2e8f9f7 1313 /* Wait for the previous transfer to complete. */
<> 144:ef7eb2e8f9f7 1314 while(!(obj->spi.spi->STATUS & USART_STATUS_TXC));
<> 144:ef7eb2e8f9f7 1315
<> 144:ef7eb2e8f9f7 1316 /* Begin transfer. Rely on spi_activate_dma to split up the transfer further. */
<> 144:ef7eb2e8f9f7 1317 spi_activate_dma(obj, rx_pointer, tx_pointer, tx_length, rx_length);
<> 144:ef7eb2e8f9f7 1318
<> 144:ef7eb2e8f9f7 1319 return 0;
<> 144:ef7eb2e8f9f7 1320 }
<> 144:ef7eb2e8f9f7 1321
<> 144:ef7eb2e8f9f7 1322 /* If an RX transfer is ongoing, continue processing RX data */
<> 144:ef7eb2e8f9f7 1323 if (DMA_ChannelEnabled(obj->spi.dmaOptionsRX.dmaChannel)) {
<> 144:ef7eb2e8f9f7 1324 /* Check if we need to kick off TX transfer again to force more incoming data. */
<> 144:ef7eb2e8f9f7 1325 if (!DMA_ChannelEnabled(obj->spi.dmaOptionsTX.dmaChannel) && (obj->rx_buff.pos < obj->rx_buff.length)) {
<> 144:ef7eb2e8f9f7 1326 // Remaining RX frames that still need fill words clocked out; mark them as handled on the TX side
<> 144:ef7eb2e8f9f7 1327 int length_diff = obj->rx_buff.length - obj->rx_buff.pos;
<> 144:ef7eb2e8f9f7 1328 obj->tx_buff.pos = obj->rx_buff.length;
<> 144:ef7eb2e8f9f7 1329
<> 144:ef7eb2e8f9f7 1330 //Kick off a new DMA transfer
<> 144:ef7eb2e8f9f7 1331 DMA_CfgDescr_TypeDef txDescrCfg;
<> 144:ef7eb2e8f9f7 1332
<> 144:ef7eb2e8f9f7 1333 fill_word = SPI_FILL_WORD;
<> 144:ef7eb2e8f9f7 1334 /* Setting up channel descriptor */
<> 144:ef7eb2e8f9f7 1335 txDescrCfg.dstInc = dmaDataIncNone;
<> 144:ef7eb2e8f9f7 1336 txDescrCfg.srcInc = dmaDataIncNone; //Do not increment source pointer when there is no transmit buffer
<> 144:ef7eb2e8f9f7 1337 txDescrCfg.size = (obj->spi.bits <= 8 ? dmaDataSize1 : dmaDataSize2); // Frames wider than 8 bits need 16-bit DMA writes (TXDATAX/TXDOUBLE)
<> 144:ef7eb2e8f9f7 1338 txDescrCfg.arbRate = dmaArbitrate1;
<> 144:ef7eb2e8f9f7 1339 txDescrCfg.hprot = 0;
<> 144:ef7eb2e8f9f7 1340 DMA_CfgDescr(obj->spi.dmaOptionsTX.dmaChannel, true, &txDescrCfg);
<> 144:ef7eb2e8f9f7 1341
<> 144:ef7eb2e8f9f7 1342 void * tx_reg;
<> 144:ef7eb2e8f9f7 1343 if (obj->spi.bits > 9) {
<> 144:ef7eb2e8f9f7 1344 tx_reg = (void *)&obj->spi.spi->TXDOUBLE;
<> 144:ef7eb2e8f9f7 1345 } else if (obj->spi.bits == 9) {
<> 144:ef7eb2e8f9f7 1346 tx_reg = (void *)&obj->spi.spi->TXDATAX;
<> 144:ef7eb2e8f9f7 1347 } else {
<> 144:ef7eb2e8f9f7 1348 tx_reg = (void *)&obj->spi.spi->TXDATA;
<> 144:ef7eb2e8f9f7 1349 }
<> 144:ef7eb2e8f9f7 1350
<> 144:ef7eb2e8f9f7 1351 /* Activate TX channel */
<> 144:ef7eb2e8f9f7 1352 DMA_ActivateBasic(obj->spi.dmaOptionsTX.dmaChannel,
<> 144:ef7eb2e8f9f7 1353 true,
<> 144:ef7eb2e8f9f7 1354 false,
<> 144:ef7eb2e8f9f7 1355 tx_reg, //When frame size > 9, point to TXDOUBLE
<> 144:ef7eb2e8f9f7 1356 &fill_word, // When there is nothing to transmit, point to static fill word
<> 144:ef7eb2e8f9f7 1357 length_diff - 1);
<> 144:ef7eb2e8f9f7 1358 } else {
<> 144:ef7eb2e8f9f7 1359 /* Nothing to do */
<> 144:ef7eb2e8f9f7 1360 return 0;
<> 144:ef7eb2e8f9f7 1361 }
<> 144:ef7eb2e8f9f7 1362 }
<> 144:ef7eb2e8f9f7 1363
<> 144:ef7eb2e8f9f7 1364 /* Release the dma channels if they were opportunistically allocated */
<> 144:ef7eb2e8f9f7 1365 if (obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_TEMPORARY_ALLOCATED) {
<> 144:ef7eb2e8f9f7 1366 dma_channel_free(obj->spi.dmaOptionsTX.dmaChannel);
<> 144:ef7eb2e8f9f7 1367 dma_channel_free(obj->spi.dmaOptionsRX.dmaChannel);
<> 144:ef7eb2e8f9f7 1368 obj->spi.dmaOptionsTX.dmaUsageState = DMA_USAGE_OPPORTUNISTIC;
<> 144:ef7eb2e8f9f7 1369 }
<> 144:ef7eb2e8f9f7 1370
<> 144:ef7eb2e8f9f7 1371 /* Wait for the transmit to complete before signalling completion to user code */
<> 144:ef7eb2e8f9f7 1372 while(!(obj->spi.spi->STATUS & USART_STATUS_TXC));
<> 144:ef7eb2e8f9f7 1373 unblockSleepMode(SPI_LEAST_ACTIVE_SLEEPMODE);
<> 144:ef7eb2e8f9f7 1374
<> 144:ef7eb2e8f9f7 1375 /* return to CPP land to say we're finished */
<> 144:ef7eb2e8f9f7 1376 return SPI_EVENT_COMPLETE;
<> 144:ef7eb2e8f9f7 1377 } else {
<> 144:ef7eb2e8f9f7 1378 /* IRQ implementation */
<> 144:ef7eb2e8f9f7 1379 if (spi_master_rx_int_flag(obj)) {
<> 144:ef7eb2e8f9f7 1380 spi_master_read_asynch(obj);
<> 144:ef7eb2e8f9f7 1381 }
<> 144:ef7eb2e8f9f7 1382
<> 144:ef7eb2e8f9f7 1383 if (spi_master_tx_int_flag(obj)) {
<> 144:ef7eb2e8f9f7 1384 spi_master_write_asynch(obj);
<> 144:ef7eb2e8f9f7 1385 }
<> 144:ef7eb2e8f9f7 1386
<> 144:ef7eb2e8f9f7 1387 uint32_t event = spi_event_check(obj);
<> 144:ef7eb2e8f9f7 1388 if (event & SPI_EVENT_INTERNAL_TRANSFER_COMPLETE) {
<> 144:ef7eb2e8f9f7 1389 /* disable interrupts */
<> 144:ef7eb2e8f9f7 1390 spi_enable_interrupt(obj, (uint32_t)NULL, false);
<> 144:ef7eb2e8f9f7 1391
<> 144:ef7eb2e8f9f7 1392 /* Wait for the transmit to complete before signalling completion to user code */
<> 144:ef7eb2e8f9f7 1393 while(!(obj->spi.spi->STATUS & USART_STATUS_TXC));
<> 144:ef7eb2e8f9f7 1394 unblockSleepMode(SPI_LEAST_ACTIVE_SLEEPMODE);
<> 144:ef7eb2e8f9f7 1395
<> 144:ef7eb2e8f9f7 1396 /* Return the event back to userland */
<> 144:ef7eb2e8f9f7 1397 return event;
<> 144:ef7eb2e8f9f7 1398 }
<> 144:ef7eb2e8f9f7 1399
<> 144:ef7eb2e8f9f7 1400 return 0;
<> 144:ef7eb2e8f9f7 1401 }
<> 144:ef7eb2e8f9f7 1402 }
<> 144:ef7eb2e8f9f7 1403 #endif // LDMA_PRESENT
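/* Dispatch sketch (illustration only): how a CPP-land IRQ thunk might consume the
 * event mask returned by spi_irq_handler_asynch(). `my_spi` and `on_spi_done` are
 * hypothetical names used only for this example.
 *
 *     void spi_irq_thunk(void)
 *     {
 *         uint32_t event = spi_irq_handler_asynch(&my_spi);
 *         if (event & SPI_EVENT_COMPLETE) {
 *             on_spi_done();   // transfer finished; RX buffer contents are valid
 *         }
 *         // event == 0 means the transfer is still in progress: nothing to do yet
 *     }
 */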
<> 144:ef7eb2e8f9f7 1404 /** Abort an SPI transfer
<> 144:ef7eb2e8f9f7 1405 *
<> 144:ef7eb2e8f9f7 1406 * @param obj The SPI peripheral to stop
<> 144:ef7eb2e8f9f7 1407 */
<> 144:ef7eb2e8f9f7 1408 void spi_abort_asynch(spi_t *obj)
<> 144:ef7eb2e8f9f7 1409 {
<> 144:ef7eb2e8f9f7 1410 // If we're not currently transferring, then there's nothing to do here
<> 144:ef7eb2e8f9f7 1411 if(spi_active(obj) == 0) return;
<> 144:ef7eb2e8f9f7 1412
<> 144:ef7eb2e8f9f7 1413 // Determine whether we're running DMA or interrupt
<> 144:ef7eb2e8f9f7 1414 if (obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_ALLOCATED || obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_TEMPORARY_ALLOCATED) {
<> 144:ef7eb2e8f9f7 1415 // Cancel the DMA transfers
<> 144:ef7eb2e8f9f7 1416 #ifdef LDMA_PRESENT
<> 144:ef7eb2e8f9f7 1417 LDMA_StopTransfer(obj->spi.dmaOptionsTX.dmaChannel);
<> 144:ef7eb2e8f9f7 1418 LDMA_StopTransfer(obj->spi.dmaOptionsRX.dmaChannel);
<> 144:ef7eb2e8f9f7 1419 #else
<> 144:ef7eb2e8f9f7 1420 DMA_ChannelEnable(obj->spi.dmaOptionsTX.dmaChannel, false);
<> 144:ef7eb2e8f9f7 1421 DMA_ChannelEnable(obj->spi.dmaOptionsRX.dmaChannel, false);
<> 144:ef7eb2e8f9f7 1422 #endif
<> 144:ef7eb2e8f9f7 1423 /* Release the dma channels if they were opportunistically allocated */
<> 144:ef7eb2e8f9f7 1424 if (obj->spi.dmaOptionsTX.dmaUsageState == DMA_USAGE_TEMPORARY_ALLOCATED) {
<> 144:ef7eb2e8f9f7 1425 dma_channel_free(obj->spi.dmaOptionsTX.dmaChannel);
<> 144:ef7eb2e8f9f7 1426 dma_channel_free(obj->spi.dmaOptionsRX.dmaChannel);
<> 144:ef7eb2e8f9f7 1427 obj->spi.dmaOptionsTX.dmaUsageState = DMA_USAGE_OPPORTUNISTIC;
<> 144:ef7eb2e8f9f7 1428 }
<> 144:ef7eb2e8f9f7 1429
<> 144:ef7eb2e8f9f7 1430 } else {
<> 144:ef7eb2e8f9f7 1431 // Interrupt implementation: switch off interrupts
<> 144:ef7eb2e8f9f7 1432 spi_enable_interrupt(obj, (uint32_t)NULL, false);
<> 144:ef7eb2e8f9f7 1433 }
<> 144:ef7eb2e8f9f7 1434
<> 144:ef7eb2e8f9f7 1435 // Release sleep mode block
<> 144:ef7eb2e8f9f7 1436 unblockSleepMode(SPI_LEAST_ACTIVE_SLEEPMODE);
<> 144:ef7eb2e8f9f7 1437 }
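/* Usage sketch (illustration only): aborting a transfer that has not completed within
 * an application-defined deadline. `my_spi` and `deadline_expired()` are hypothetical
 * names used only for this example.
 *
 *     if (spi_active(&my_spi) && deadline_expired()) {
 *         spi_abort_asynch(&my_spi);   // stops DMA/IRQ activity and unblocks sleep
 *     }
 */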
<> 144:ef7eb2e8f9f7 1438
<> 144:ef7eb2e8f9f7 1439 #endif