Dependencies: mbed FastIO FastPWM USBDevice
Plunger/tsl14xxSensor.h@82:4f6209cb5c33, 2017-04-13 (annotated)
Committer: mjr
Date: Thu Apr 13 23:20:28 2017 +0000
Revision: 82:4f6209cb5c33
Child: 86:e30a1f60f783
Plunger refactoring; AEDR-8300 added; TSL1401CL in progress; VL6180X added
// Base class for TSL14xx-based plunger sensors.
//
// This provides a common base class for plunger sensors based on
// AMS/TAOS TSL14xx sensors (TSL1410R, TSL1412S, TSL1401CL). The sensors
// in this series all work the same way, differing mostly in the number
// of pixels. However, we have two fundamentally different ways of using
// these image sensors to detect position: sensing the position of the
// shadow cast by the plunger on the sensor, and optically reading a bar
// code telling us the location of the sensor along a scale. This class
// provides the low-level pixel-sensor interface; subclasses provide the
// image analysis that figures the position from the captured image.


#ifndef _TSL14XXSENSOR_H_
#define _TSL14XXSENSOR_H_

#include "plunger.h"
#include "TSL14xx.h"

class PlungerSensorTSL14xx: public PlungerSensor
{
public:
    PlungerSensorTSL14xx(int nativePix, PinName si, PinName clock, PinName ao)
        : sensor(nativePix, si, clock, ao)
    {
        // Figure the scaling factor for converting native pixel readings
        // to our normalized 0..65535 range. The effective calculation we
        // need to perform is (reading*65535)/(npix-1). Division is slow
        // on the M0+, and floating point is dreadfully slow, so recast the
        // per-reading calculation as a multiply (which, unlike DIV, is fast
        // on KL25Z - the device has a single-cycle 32-bit hardware multiply).
        // How do we turn a divide into a multiply? By calculating the
        // inverse! How do we calculate a meaningful inverse of a large
        // integer using integers? By doing our calculations in fixed-point
        // integers, which is to say, using hardware integers but treating
        // all values as multiplied by a scaling factor. We'll use 64K as
        // the scaling factor, since we can divide the scaling factor back
        // out by using an arithmetic shift (also fast on M0+).
        native_npix = nativePix;
        scaling_factor = (65535U*65536U) / (nativePix - 1);
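        // (Worked example, with illustrative numbers assuming the
        // 1280-pixel TSL1410R: scaling_factor = 4294901760/1279
        // = 3358015, so a native reading of 640 maps to
        // (640*3358015 + 32768) >> 16 = 32793, which matches
        // 640*65535/1279 = 32793.1 to within rounding.)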

        // start with no additional integration time for automatic
        // exposure control
        axcTime = 0;
    }

    // is the sensor ready?
    virtual bool ready() { return sensor.ready(); }

    // read the plunger position
    virtual bool read(PlungerReading &r)
    {
        // start reading the next pixel array - this also waits for any
        // previous read to finish, ensuring that we have stable pixel
        // data in the capture buffer
        sensor.startCapture(axcTime);

        // get the image array from the last capture
        uint8_t *pix;
        uint32_t tpix;
        sensor.getPix(pix, tpix);

        // process the pixels
        int pixpos;
        if (process(pix, native_npix, pixpos))
        {
            // Normalize to the 16-bit range by applying the scaling
            // factor. The scaling factor is 65535/(npix-1) expressed as
            // a fixed-point number with 64K scale, so multiplying the
            // pixel reading by this will give us the result with 64K
            // scale: so shift right 16 bits to get the final answer.
            // (The +32768 is added for rounding: it's equal to 0.5
            // at our 64K scale.)
            r.pos = uint16_t((scaling_factor*uint32_t(pixpos) + 32768) >> 16);
            r.t = tpix;

            // success
            return true;
        }
        else
        {
            // no position found
            return false;
        }
    }

    virtual void init()
    {
        sensor.clear();
    }

    // Send a status report to the joystick interface.
    // See plunger.h for details on the arguments.
    virtual void sendStatusReport(USBJoystick &js, uint8_t flags, uint8_t extraTime)
    {
        // To get the requested timing for the cycle we report, we need to run
        // an extra cycle. Right now, the sensor is integrating from whenever
        // the last start() call was made.
        //
        // 1. Call startCapture() to end that previous cycle. This will collect
        // its pixels into one DMA buffer (call it EVEN), and start a new
        // integration cycle.
        //
        // 2. We know a new integration has just started, so we can control its
        // time. Wait for the cycle we just started to finish, since that sets
        // the minimum time.
        //
        // 3. The integration cycle we started in step 1 has now been running the
        // minimum time - namely, one read cycle. Pause for our extraTime delay
        // to add the requested added time.
        //
        // 4. Start the next cycle. This will make the pixels we started reading
        // in step 1 available via getPix(), and will end the integration cycle
        // we started in step 1 and start reading its pixels into the internal
        // DMA buffer.
        //
        // 5. This is where it gets tricky! The pixels we want are the ones that
        // started integrating in step 1, which are the ones we're reading via DMA
        // now. The pixels available via getPix() are the ones from the cycle we
        // *ended* in step 1 - we don't want these. So we need to start a *third*
        // cycle in order to get the pixels from the second cycle.

        sensor.startCapture(axcTime);   // transfer pixels from period A, begin integration period B
        sensor.wait();                  // wait for scan of A to complete, as minimum integration B time
        wait_us(long(extraTime) * 100); // add extraTime (0.1ms == 100us increments) to integration B time
        sensor.startCapture(axcTime);   // transfer pixels from integration period B, begin period C; period A pixels now available
        sensor.startCapture(axcTime);   // transfer pixels from integration period C, begin period D; period B pixels now available

        // get the pixel array
        uint8_t *pix;
        uint32_t t;
        sensor.getPix(pix, t);

        // start a timer to measure the processing time
        Timer pt;
        pt.start();

        // process the pixels and read the position
        int pos;
        int n = native_npix;
        if (!process(pix, n, pos))
            pos = 0xFFFF;

        // note the processing time
        uint32_t processTime = pt.read_us();

        // if a low-res scan is desired, reduce to a subset of pixels
        if (flags & 0x01)
        {
            // figure how many sensor pixels we combine into each low-res pixel
            const int group = 8;
            int lowResPix = n / group;

            // combine the pixels
            int src, dst;
            for (src = dst = 0 ; dst < lowResPix ; ++dst)
            {
                // average this block of pixels
                int a = 0;
                for (int j = 0 ; j < group ; ++j)
                    a += pix[src++];

                // we have the sum, so get the average
                a /= group;

                // store the down-res'd pixel in the array
                pix[dst] = uint8_t(a);
            }

            // rescale the position for the reduced resolution
            if (pos != 0xFFFF)
                pos = pos * (lowResPix-1) / (n-1);
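            // (for example, with 1280 native pixels, lowResPix is 160,
            // so a native pos of 640 rescales to 640*159/1279 = 79)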

            // update the pixel count to the reduced array size
            n = lowResPix;
        }

        // send the sensor status report
        js.sendPlungerStatus(n, pos, getOrientation(), sensor.getAvgScanTime(), processTime);

        // If we're not in calibration mode, send the pixels
        extern bool plungerCalMode;
        if (!plungerCalMode)
        {
            // send the pixels in report-sized chunks until we get them all
            int idx = 0;
            while (idx < n)
                js.sendPlungerPix(idx, n, pix);
        }

        // It takes us a while to send all of the pixels, since we have
        // to break them up into many USB reports. This delay means that
        // the sensor has been sitting there integrating for much longer
        // than usual, so the next frame read will be overexposed. To
        // mitigate this, make sure we don't have a capture running,
        // then clear the sensor and start a new capture.
        sensor.wait();
        sensor.clear();
        sensor.startCapture(axcTime);
    }

    // get the average sensor scan time
    virtual uint32_t getAvgScanTime() { return sensor.getAvgScanTime(); }

protected:
    // Analyze the image and find the plunger position. If successful,
    // fills in 'pixpos' with the plunger position, as an index on the
    // sensor's native pixel scale (0..npix-1), and returns true. (The
    // caller applies scaling_factor to convert this to the normalized
    // 0..65535 range.) If no position can be detected from the image
    // data, returns false.
    virtual bool process(const uint8_t *pix, int npix, int &pixpos) = 0;

    // Get the currently detected sensor orientation, if applicable.
    // Returns 1 for standard orientation, -1 for reversed orientation,
    // or 0 for orientation unknown or not applicable. Edge sensors can
    // automatically detect orientation by observing which side of the
    // image is in shadow. Bar code sensors generally can't detect
    // orientation.
    virtual int getOrientation() const { return 0; }

    // the low-level interface to the TSL14xx sensor
    TSL14xx sensor;

    // number of pixels
    int native_npix;

    // Scaling factor for converting a native pixel reading to the normalized
    // 0..65535 plunger reading scale. This value contains 65535*65536/(npix-1),
    // which is equivalent to 65535/(npix-1) as a fixed-point number with a 64K
    // scale. To apply this, multiply a pixel reading by this value and
    // shift right by 16 bits.
    uint32_t scaling_factor;

    // Automatic exposure control time, in microseconds. This is an amount
    // of time we add to each integration cycle to compensate for low light
    // levels. By default, this is always zero; the base class doesn't have
    // any logic for determining proper exposure, because that's a function
    // of the type of image we're looking for. Subclasses can add logic in
    // the process() function to check exposure level and adjust this value
    // if the image looks over- or under-exposed.
    uint32_t axcTime;
};

#endif
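
Subclasses supply the image analysis by overriding process(). The following sketch (not part of the original file) shows the minimal shape of such a subclass; the half-brightness edge scan is a deliberately naive, hypothetical strategy chosen only to illustrate the contract: process() receives the raw pixel array and reports a position as a native pixel index, which the base class then normalizes to 0..65535.

// Illustrative sketch only - a hypothetical minimal subclass showing how
// process() plugs into PlungerSensorTSL14xx. The edge scan below is a
// placeholder, not the analysis used by the real sensor subclasses.
class PlungerSensorEdgeSketch: public PlungerSensorTSL14xx
{
public:
    PlungerSensorEdgeSketch(int nativePix, PinName si, PinName clock, PinName ao)
        : PlungerSensorTSL14xx(nativePix, si, clock, ao) { }

protected:
    // Report the first pixel in shadow (below half of full-scale
    // brightness) as the plunger position, on the native pixel scale.
    virtual bool process(const uint8_t *pix, int npix, int &pixpos)
    {
        for (int i = 0 ; i < npix ; ++i)
        {
            if (pix[i] < 128)
            {
                pixpos = i;
                return true;
            }
        }

        // no shadow edge found - report failure
        return false;
    }
};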