Dependencies: mbed FastIO FastPWM USBDevice
Fork of Pinscape_Controller by
ccdSensor.h@53:9b2611964afc, 2016-04-22 (annotated)
- Committer: mjr
- Date: Fri Apr 22 17:58:35 2016 +0000
- Revision: 53:9b2611964afc
- Parent: 52:8298b2a73eb2
- Child: 55:4db125cd11a0
Save some debugging instrumentation to be removed for release
Who changed what in which revision?
| User | Revision | Line number | New contents of line |
|---|---|---|---|
| mjr | 17:ab3cec0c8bf4 | 1 | // CCD plunger sensor |
| mjr | 17:ab3cec0c8bf4 | 2 | // |
| mjr | 35:e959ffba78fd | 3 | // This class implements our generic plunger sensor interface for the |
| mjr | 35:e959ffba78fd | 4 | // TAOS TSL1410R and TSL1412R linear sensor arrays. Physically, these |
| mjr | 35:e959ffba78fd | 5 | // sensors are installed with their image window running parallel to |
| mjr | 35:e959ffba78fd | 6 | // the plunger rod, spanning the travel range of the plunger tip. |
| mjr | 35:e959ffba78fd | 7 | // A light source is positioned on the opposite side of the rod, so |
| mjr | 35:e959ffba78fd | 8 | // that the rod casts a shadow on the sensor. We sense the position |
| mjr | 35:e959ffba78fd | 9 | // by looking for the edge of the shadow. |
| mjr | 17:ab3cec0c8bf4 | 10 | |
| mjr | 35:e959ffba78fd | 11 | #include "plunger.h" |
| mjr | 17:ab3cec0c8bf4 | 12 | |
| mjr | 17:ab3cec0c8bf4 | 13 | |
| mjr | 25:e22b88bd783a | 14 | // PlungerSensor interface implementation for the CCD |
| mjr | 35:e959ffba78fd | 15 | class PlungerSensorCCD: public PlungerSensor |
| mjr | 17:ab3cec0c8bf4 | 16 | { |
| mjr | 17:ab3cec0c8bf4 | 17 | public: |
| mjr | 47:df7a88cd249c | 18 | PlungerSensorCCD(int nativePix, PinName si, PinName clock, PinName ao1, PinName ao2) |
| mjr | 43:7a6364d82a41 | 19 | : ccd(nativePix, si, clock, ao1, ao2) |
| mjr | 17:ab3cec0c8bf4 | 20 | { |
| mjr | 47:df7a88cd249c | 21 | // we don't know the direction yet |
| mjr | 47:df7a88cd249c | 22 | dir = 0; |
| mjr | 47:df7a88cd249c | 23 | |
| mjr | 48:058ace2aed1d | 24 | // set the midpoint history arbitrarily to the absolute halfway point |
| mjr | 48:058ace2aed1d | 25 | memset(midpt, 127, sizeof(midpt)); |
| mjr | 48:058ace2aed1d | 26 | midptIdx = 0; |
| mjr | 48:058ace2aed1d | 27 | |
| mjr | 51:57eb311faafa | 28 | // no history readings yet |
| mjr | 51:57eb311faafa | 29 | histIdx = 0; |
| mjr | 17:ab3cec0c8bf4 | 30 | } |
| mjr | 17:ab3cec0c8bf4 | 31 | |
| mjr | 17:ab3cec0c8bf4 | 32 | // initialize |
| mjr | 35:e959ffba78fd | 33 | virtual void init() |
| mjr | 17:ab3cec0c8bf4 | 34 | { |
| mjr | 17:ab3cec0c8bf4 | 35 | // flush any random power-on values from the CCD's integration |
| mjr | 17:ab3cec0c8bf4 | 36 | // capacitors, and start the first integration cycle |
| mjr | 17:ab3cec0c8bf4 | 37 | ccd.clear(); |
| mjr | 17:ab3cec0c8bf4 | 38 | } |
| mjr | 17:ab3cec0c8bf4 | 39 | |
| mjr | 48:058ace2aed1d | 40 | // Read the plunger position |
| mjr | 48:058ace2aed1d | 41 | virtual bool read(PlungerReading &r) |
| mjr | 17:ab3cec0c8bf4 | 42 | { |
| mjr | 48:058ace2aed1d | 43 | // start reading the next pixel array - this also waits for any |
| mjr | 48:058ace2aed1d | 44 | // previous read to finish, ensuring that we have stable pixel |
| mjr | 48:058ace2aed1d | 45 | // data in the capture buffer |
| mjr | 47:df7a88cd249c | 46 | ccd.startCapture(); |
| mjr | 44:b5ac89b9cd5d | 47 | |
| mjr | 48:058ace2aed1d | 48 | // get the image array from the last capture |
| mjr | 47:df7a88cd249c | 49 | uint8_t *pix; |
| mjr | 47:df7a88cd249c | 50 | int n; |
| mjr | 48:058ace2aed1d | 51 | uint32_t tpix; |
| mjr | 48:058ace2aed1d | 52 | ccd.getPix(pix, n, tpix); |
| mjr | 17:ab3cec0c8bf4 | 53 | |
| mjr | 48:058ace2aed1d | 54 | // process the pixels and look for the edge position |
| mjr | 48:058ace2aed1d | 55 | int pixpos; |
| mjr | 53:9b2611964afc | 56 | if (process(pix, n, pixpos)) |
| mjr | 51:57eb311faafa | 57 | { |
| mjr | 52:8298b2a73eb2 | 58 | // run the position through the anti-jitter filter |
| mjr | 52:8298b2a73eb2 | 59 | filter(pixpos); |
| mjr | 52:8298b2a73eb2 | 60 | |
| mjr | 48:058ace2aed1d | 61 | // Normalize to the 16-bit range. Our reading from the |
| mjr | 48:058ace2aed1d | 62 | // sensor is a pixel position, 0..n-1. To rescale to the |
| mjr | 48:058ace2aed1d | 63 | // normalized range, figure pixpos*65535/(n-1). |
| mjr | 48:058ace2aed1d | 64 | r.pos = uint16_t(((pixpos << 16) - pixpos) / (n-1)); |
| mjr | 48:058ace2aed1d | 65 | r.t = tpix; |
| mjr | 44:b5ac89b9cd5d | 66 | |
| mjr | 47:df7a88cd249c | 67 | // success |
| mjr | 47:df7a88cd249c | 68 | return true; |
| mjr | 47:df7a88cd249c | 69 | } |
| mjr | 47:df7a88cd249c | 70 | else |
| mjr | 47:df7a88cd249c | 71 | { |
| mjr | 47:df7a88cd249c | 72 | // no position found |
| mjr | 47:df7a88cd249c | 73 | return false; |
| mjr | 47:df7a88cd249c | 74 | } |
| mjr | 47:df7a88cd249c | 75 | } |
| mjr | 17:ab3cec0c8bf4 | 76 | |
| mjr | 53:9b2611964afc | 77 | // Process an image - scan for the shadow edge to determine the plunger |
| mjr | 53:9b2611964afc | 78 | // position. |
| mjr | 53:9b2611964afc | 79 | // |
| mjr | 47:df7a88cd249c | 80 | // If we detect the plunger position, we set 'pos' to the pixel location |
| mjr | 48:058ace2aed1d | 81 | // of the edge and return true; otherwise we return false. The 'pos' |
| mjr | 48:058ace2aed1d | 82 | // value returned, if any, is adjusted for sensor orientation so that |
| mjr | 53:9b2611964afc | 83 | // it reflects the logical plunger position (i.e., distance retracted, |
| mjr | 53:9b2611964afc | 84 | // where 0 is always the fully forward position and 'n' is fully |
| mjr | 53:9b2611964afc | 85 | // retracted). |
| mjr | 53:9b2611964afc | 86 | bool process(uint8_t *pix, int n, int &pos) |
| mjr | 47:df7a88cd249c | 87 | { |
| mjr | 48:058ace2aed1d | 88 | // Get the levels at each end |
| mjr | 48:058ace2aed1d | 89 | int a = (int(pix[0]) + pix[1] + pix[2] + pix[3] + pix[4])/5; |
| mjr | 48:058ace2aed1d | 90 | int b = (int(pix[n-1]) + pix[n-2] + pix[n-3] + pix[n-4] + pix[n-5])/5; |
| mjr | 47:df7a88cd249c | 91 | |
| mjr | 53:9b2611964afc | 92 | // Figure the sensor orientation based on the relative brightness |
| mjr | 53:9b2611964afc | 93 | // levels at the opposite ends of the image. We're going to scan |
| mjr | 53:9b2611964afc | 94 | // across the image from each side - 'bi' is the starting index |
| mjr | 53:9b2611964afc | 95 | // scanning from the bright side, 'di' is the starting index on |
| mjr | 53:9b2611964afc | 96 | // the dark side. 'binc' and 'dinc' are the pixel increments |
| mjr | 53:9b2611964afc | 97 | // for the respective indices. |
| mjr | 53:9b2611964afc | 98 | int bi, di; |
| mjr | 53:9b2611964afc | 99 | int binc, dinc; |
| mjr | 48:058ace2aed1d | 100 | if (a > b+10) |
| mjr | 48:058ace2aed1d | 101 | { |
| mjr | 48:058ace2aed1d | 102 | // left end is brighter - standard orientation |
| mjr | 48:058ace2aed1d | 103 | dir = 1; |
| mjr | 53:9b2611964afc | 104 | bi = 4, di = n - 5; |
| mjr | 53:9b2611964afc | 105 | binc = 1, dinc = -1; |
| mjr | 48:058ace2aed1d | 106 | } |
| mjr | 48:058ace2aed1d | 107 | else if (b > a+10) |
| mjr | 48:058ace2aed1d | 108 | { |
| mjr | 48:058ace2aed1d | 109 | // right end is brighter - reverse orientation |
| mjr | 48:058ace2aed1d | 110 | dir = -1; |
| mjr | 53:9b2611964afc | 111 | bi = n - 5, di = 4; |
| mjr | 53:9b2611964afc | 112 | binc = -1, dinc = 1; |
| mjr | 48:058ace2aed1d | 113 | } |
| mjr | 48:058ace2aed1d | 114 | else if (dir != 0) |
| mjr | 17:ab3cec0c8bf4 | 115 | { |
| mjr | 48:058ace2aed1d | 116 | // We don't have enough contrast to detect the orientation |
| mjr | 48:058ace2aed1d | 117 | // from this image, so either the image is too overexposed |
| mjr | 48:058ace2aed1d | 118 | // or underexposed to be useful, or the entire sensor is in |
| mjr | 48:058ace2aed1d | 119 | // light or darkness. We'll assume the latter: the plunger |
| mjr | 48:058ace2aed1d | 120 | // is blocking the whole window or isn't in the frame at |
| mjr | 48:058ace2aed1d | 121 | // all. We'll also assume that the exposure level is |
| mjr | 48:058ace2aed1d | 122 | // similar to that in recent frames where we *did* detect |
| mjr | 48:058ace2aed1d | 123 | // the direction. This means that if the new exposure level |
| mjr | 48:058ace2aed1d | 124 | // (which is about the same over the whole array) is less |
| mjr | 48:058ace2aed1d | 125 | // than the recent midpoint, we must be entirely blocked |
| mjr | 48:058ace2aed1d | 126 | // by the plunger, so it's all the way forward; if the |
| mjr | 48:058ace2aed1d | 127 | // brightness is above the recent midpoint, we must be |
| mjr | 48:058ace2aed1d | 128 | // entirely exposed, so the plunger is all the way back. |
| mjr | 48:058ace2aed1d | 129 | |
| mjr | 48:058ace2aed1d | 130 | // figure the average of the recent midpoint brightnesses |
| mjr | 48:058ace2aed1d | 131 | int sum = 0; |
| mjr | 48:058ace2aed1d | 132 | for (int i = 0 ; i < countof(midpt) ; sum += midpt[i++]) ; |
| mjr | 48:058ace2aed1d | 133 | sum /= 10; |
| mjr | 48:058ace2aed1d | 134 | |
| mjr | 48:058ace2aed1d | 135 | // Figure the average of our two ends. We have very |
| mjr | 48:058ace2aed1d | 136 | // little contrast overall, so we already know that the |
| mjr | 48:058ace2aed1d | 137 | // two ends are about the same, but we can't expect the |
| mjr | 48:058ace2aed1d | 138 | // lighting to be perfectly uniform. Averaging the ends |
| mjr | 48:058ace2aed1d | 139 | // will smooth out variations due to light source placement, |
| mjr | 48:058ace2aed1d | 140 | // sensor noise, etc. |
| mjr | 48:058ace2aed1d | 141 | a = (a+b)/2; |
| mjr | 48:058ace2aed1d | 142 | |
| mjr | 48:058ace2aed1d | 143 | // Check if we seem to be fully exposed or fully covered |
| mjr | 48:058ace2aed1d | 144 | pos = a < sum ? 0 : n; |
| mjr | 48:058ace2aed1d | 145 | return true; |
| mjr | 48:058ace2aed1d | 146 | } |
| mjr | 48:058ace2aed1d | 147 | else |
| mjr | 48:058ace2aed1d | 148 | { |
| mjr | 48:058ace2aed1d | 149 | // We can't detect the orientation from this image, and |
| mjr | 48:058ace2aed1d | 150 | // we don't know it from previous images, so we have nothing |
| mjr | 48:058ace2aed1d | 151 | // to go on. Give up and return failure. |
| mjr | 48:058ace2aed1d | 152 | return false; |
| mjr | 48:058ace2aed1d | 153 | } |
| mjr | 48:058ace2aed1d | 154 | |
| mjr | 53:9b2611964afc | 155 | // Figure the crossover brightness levels for detecting the edge. |
| mjr | 53:9b2611964afc | 156 | // The midpoint is the brightness level halfway between the bright |
| mjr | 53:9b2611964afc | 157 | // and dark regions we detected at the opposite ends of the sensor. |
| mjr | 53:9b2611964afc | 158 | // To find the edge, we'll look for a brightness level slightly |
| mjr | 53:9b2611964afc | 159 | // *past* the midpoint, to help reject noise - the bright region |
| mjr | 53:9b2611964afc | 160 | // pixels should all cluster close to the higher level, and the |
| mjr | 53:9b2611964afc | 161 | // shadow region should all cluster close to the lower level. |
| mjr | 53:9b2611964afc | 162 | // We'll define "close" as within 1/3 of the gap between the |
| mjr | 53:9b2611964afc | 163 | // extremes. |
| mjr | 48:058ace2aed1d | 164 | int mid = (a+b)/2; |
| mjr | 53:9b2611964afc | 165 | int delta6 = abs(a-b)/6; |
| mjr | 53:9b2611964afc | 166 | int crossoverHi = mid + delta6; |
| mjr | 53:9b2611964afc | 167 | int crossoverLo = mid - delta6; |
| mjr | 48:058ace2aed1d | 168 | |
| mjr | 53:9b2611964afc | 169 | #if 1 // $$$ |
| mjr | 53:9b2611964afc | 170 | // Scan inward from each end, looking for edges. Each time we |
| mjr | 53:9b2611964afc | 171 | // find an edge from one direction, we'll see if the scan from the |
| mjr | 53:9b2611964afc | 172 | // other direction agrees. If it does, we have a winner. If they |
| mjr | 53:9b2611964afc | 173 | // don't agree, we must have found some noise in one direction or the |
| mjr | 53:9b2611964afc | 174 | // other, so switch sides and continue the scan. On each continued |
| mjr | 53:9b2611964afc | 175 | // scan, if the stopping point from the last scan *was* noise, we'll |
| mjr | 53:9b2611964afc | 176 | // start seeing the expected non-edge pixels again as we move on, |
| mjr | 53:9b2611964afc | 177 | // so we'll effectively factor out the noise. If what stopped us |
| mjr | 53:9b2611964afc | 178 | // *wasn't* noise but was a legitimate edge, we'll see that we're |
| mjr | 53:9b2611964afc | 179 | // still in the region that stopped us in the first place and just |
| mjr | 53:9b2611964afc | 180 | // stop again immediately. |
| mjr | 53:9b2611964afc | 181 | // |
| mjr | 53:9b2611964afc | 182 | // The two sides have to converge, because they march relentlessly |
| mjr | 53:9b2611964afc | 183 | // towards each other until they cross. Even if we have a totally |
| mjr | 53:9b2611964afc | 184 | // random bunch of pixels, the two indices will eventually meet and |
| mjr | 53:9b2611964afc | 185 | // we'll declare that to be the edge position. The processing time |
| mjr | 53:9b2611964afc | 186 | // is linear in the pixel count - it's equivalent to one pass over |
| mjr | 53:9b2611964afc | 187 | // the pixels. The measured time for 1280 pixels is about 1.3ms, |
| mjr | 53:9b2611964afc | 188 | // which is about half the DMA transfer time. Our goal is always |
| mjr | 53:9b2611964afc | 189 | // to complete the processing in less than the DMA transfer time, |
| mjr | 53:9b2611964afc | 190 | // since that's as fast as we can possibly go with the physical |
| mjr | 53:9b2611964afc | 191 | // sensor. Since our processing time is overlapped with the DMA |
| mjr | 53:9b2611964afc | 192 | // transfer, the overall frame rate is limited by the *longer* of |
| mjr | 53:9b2611964afc | 193 | // the two times, not the sum of the two times. So as long as the |
| mjr | 53:9b2611964afc | 194 | // processing takes less time than the DMA transfer, we're not |
| mjr | 53:9b2611964afc | 195 | // contributing at all to the overall frame rate limit - it's like |
| mjr | 53:9b2611964afc | 196 | // we're not even here. |
| mjr | 53:9b2611964afc | 197 | for (;;) |
| mjr | 53:9b2611964afc | 198 | { |
| mjr | 53:9b2611964afc | 199 | // scan from the bright side |
| mjr | 53:9b2611964afc | 200 | for (bi += binc ; bi >= 5 && bi <= n-6 ; bi += binc) |
| mjr | 53:9b2611964afc | 201 | { |
| mjr | 53:9b2611964afc | 202 | // if we found a dark pixel, consider it to be an edge |
| mjr | 53:9b2611964afc | 203 | if (pix[bi] < crossoverLo) |
| mjr | 53:9b2611964afc | 204 | break; |
| mjr | 53:9b2611964afc | 205 | } |
| mjr | 53:9b2611964afc | 206 | |
| mjr | 53:9b2611964afc | 207 | // if we reached an extreme, return failure |
| mjr | 53:9b2611964afc | 208 | if (bi < 5 || bi > n-6) |
| mjr | 53:9b2611964afc | 209 | return false; |
| mjr | 53:9b2611964afc | 210 | |
| mjr | 53:9b2611964afc | 211 | // if the two directions crossed, we have a winner |
| mjr | 53:9b2611964afc | 212 | if (binc > 0 ? bi >= di : bi <= di) |
| mjr | 53:9b2611964afc | 213 | { |
| mjr | 53:9b2611964afc | 214 | pos = (dir == 1 ? bi : n - bi); |
| mjr | 53:9b2611964afc | 215 | return true; |
| mjr | 53:9b2611964afc | 216 | } |
| mjr | 53:9b2611964afc | 217 | |
| mjr | 53:9b2611964afc | 218 | // they haven't converged yet, so scan from the dark side |
| mjr | 53:9b2611964afc | 219 | for (di += dinc ; di >= 5 && di <= n-6 ; di += dinc) |
| mjr | 53:9b2611964afc | 220 | { |
| mjr | 53:9b2611964afc | 221 | // if we found a bright pixel, consider it to be an edge |
| mjr | 53:9b2611964afc | 222 | if (pix[di] > crossoverHi) |
| mjr | 53:9b2611964afc | 223 | break; |
| mjr | 53:9b2611964afc | 224 | } |
| mjr | 53:9b2611964afc | 225 | |
| mjr | 53:9b2611964afc | 226 | // if we reached an extreme, return failure |
| mjr | 53:9b2611964afc | 227 | if (di < 5 || di > n-6) |
| mjr | 53:9b2611964afc | 228 | return false; |
| mjr | 53:9b2611964afc | 229 | |
| mjr | 53:9b2611964afc | 230 | // if they crossed now, we have a winner |
| mjr | 53:9b2611964afc | 231 | if (binc > 0 ? bi >= di : bi <= di) |
| mjr | 53:9b2611964afc | 232 | { |
| mjr | 53:9b2611964afc | 233 | pos = (dir == 1 ? di : n - di); |
| mjr | 53:9b2611964afc | 234 | return true; |
| mjr | 53:9b2611964afc | 235 | } |
| mjr | 53:9b2611964afc | 236 | } |
| mjr | 53:9b2611964afc | 237 | |
| mjr | 53:9b2611964afc | 238 | #else // $$$ |
| mjr | 53:9b2611964afc | 239 | // Old method - single-sided scan with a little local noise suppression. |
| mjr | 53:9b2611964afc | 240 | // Scan from the bright side, looking for a pixel that drops below the |
| mjr | 53:9b2611964afc | 241 | // midpoint brightness. To reduce false positives from noise, check to |
| mjr | 53:9b2611964afc | 242 | // see if the majority of the next few pixels stay in shadow - if not, |
| mjr | 53:9b2611964afc | 243 | // consider the dark pixel to be some kind of transient noise, and |
| mjr | 53:9b2611964afc | 244 | // continue looking for a more solid edge. |
| mjr | 53:9b2611964afc | 245 | for (int i = 5 ; i < n-5 ; ++i, bi += dir) |
| mjr | 48:058ace2aed1d | 246 | { |
| mjr | 48:058ace2aed1d | 247 | // check to see if we found a dark pixel |
| mjr | 53:9b2611964afc | 248 | if (pix[bi] < mid) |
| mjr | 48:058ace2aed1d | 249 | { |
| mjr | 48:058ace2aed1d | 250 | // make sure we have a sustained edge |
| mjr | 48:058ace2aed1d | 251 | int ok = 0; |
| mjr | 53:9b2611964afc | 252 | int bi2 = bi + dir; |
| mjr | 53:9b2611964afc | 253 | for (int j = 0 ; j < 5 ; ++j, bi2 += dir) |
| mjr | 48:058ace2aed1d | 254 | { |
| mjr | 48:058ace2aed1d | 255 | // count this pixel if it's darker than the midpoint |
| mjr | 53:9b2611964afc | 256 | if (pix[bi2] < mid) |
| mjr | 48:058ace2aed1d | 257 | ++ok; |
| mjr | 48:058ace2aed1d | 258 | } |
| mjr | 48:058ace2aed1d | 259 | |
| mjr | 48:058ace2aed1d | 260 | // if we're clearly in the dark section, we have our edge |
| mjr | 48:058ace2aed1d | 261 | if (ok > 3) |
| mjr | 48:058ace2aed1d | 262 | { |
| mjr | 48:058ace2aed1d | 263 | // Success. Since we found an edge in this scan, save the |
| mjr | 48:058ace2aed1d | 264 | // midpoint brightness level in our history list, to help |
| mjr | 48:058ace2aed1d | 265 | // with any future frames with insufficient contrast. |
| mjr | 48:058ace2aed1d | 266 | midpt[midptIdx++] = mid; |
| mjr | 48:058ace2aed1d | 267 | midptIdx %= countof(midpt); |
| mjr | 48:058ace2aed1d | 268 | |
| mjr | 48:058ace2aed1d | 269 | // return the detected position |
| mjr | 48:058ace2aed1d | 270 | pos = i; |
| mjr | 48:058ace2aed1d | 271 | return true; |
| mjr | 48:058ace2aed1d | 272 | } |
| mjr | 48:058ace2aed1d | 273 | } |
| mjr | 17:ab3cec0c8bf4 | 274 | } |
| mjr | 17:ab3cec0c8bf4 | 275 | |
| mjr | 48:058ace2aed1d | 276 | // no edge found |
| mjr | 48:058ace2aed1d | 277 | return false; |
| mjr | 53:9b2611964afc | 278 | #endif |
| mjr | 48:058ace2aed1d | 279 | } |
| mjr | 52:8298b2a73eb2 | 280 | |
| mjr | 52:8298b2a73eb2 | 281 | // Filter a result through the jitter reducer. We tend to have some |
| mjr | 52:8298b2a73eb2 | 282 | // very slight jitter - by a pixel or two - even when the plunger is |
| mjr | 52:8298b2a73eb2 | 283 | // stationary. This happens due to analog noise. In the theoretical |
| mjr | 52:8298b2a73eb2 | 284 | // ideal, analog noise wouldn't be a factor for this sensor design, |
| mjr | 52:8298b2a73eb2 | 285 | // in that we'd have enough contrast between the bright and dark |
| mjr | 52:8298b2a73eb2 | 286 | // regions that there'd be no ambiguity as to where the shadow edge |
| mjr | 52:8298b2a73eb2 | 287 | // falls. But in the real system, the shadow edge isn't perfectly |
| mjr | 52:8298b2a73eb2 | 288 | // sharp on the scale of our pixels, so the edge isn't an ideal |
| mjr | 52:8298b2a73eb2 | 289 | // digital 0-1 discontinuity but rather a ramp of gray levels over |
| mjr | 52:8298b2a73eb2 | 290 | // a few pixels. Our edge detector picks the pixel where we cross |
| mjr | 52:8298b2a73eb2 | 291 | // the midpoint brightness threshold. The exact midpoint can vary |
| mjr | 52:8298b2a73eb2 | 292 | // a little from frame to frame due to exposure length variations, |
| mjr | 52:8298b2a73eb2 | 293 | // light source variations, other stray light sources in the cabinet, |
| mjr | 52:8298b2a73eb2 | 294 | // ADC error, sensor pixel noise, and electrical noise. As the |
| mjr | 52:8298b2a73eb2 | 295 | // midpoint varies, the pixel that qualifies as the edge position |
| mjr | 52:8298b2a73eb2 | 296 | // can move by a pixel or two from one frame to the next, even |
| mjr | 52:8298b2a73eb2 | 297 | // though the physical shadow isn't moving. This all adds up to |
| mjr | 52:8298b2a73eb2 | 298 | // some slight jitter in the final position reading. |
| mjr | 52:8298b2a73eb2 | 299 | // |
| mjr | 52:8298b2a73eb2 | 300 | // To reduce the jitter, we keep a short history of recent readings. |
| mjr | 52:8298b2a73eb2 | 301 | // When we see a new reading that's close to the whole string of |
| mjr | 52:8298b2a73eb2 | 302 | // recent readings, we peg the new reading to the consensus of the |
| mjr | 52:8298b2a73eb2 | 303 | // recent history. This smooths out these small variations without |
| mjr | 52:8298b2a73eb2 | 304 | // affecting response time or resolution. |
| mjr | 52:8298b2a73eb2 | 305 | void filter(int &pos) |
| mjr | 52:8298b2a73eb2 | 306 | { |
| mjr | 52:8298b2a73eb2 | 307 | // check to see if it's close to all of the history elements |
| mjr | 53:9b2611964afc | 308 | const int dpos = 2; |
| mjr | 52:8298b2a73eb2 | 309 | long sum = 0; |
| mjr | 52:8298b2a73eb2 | 310 | for (int i = 0 ; i < countof(hist) ; ++i) |
| mjr | 47:df7a88cd249c | 311 | { |
| mjr | 52:8298b2a73eb2 | 312 | int ipos = hist[i]; |
| mjr | 52:8298b2a73eb2 | 313 | sum += ipos; |
| mjr | 52:8298b2a73eb2 | 314 | if (pos > ipos + dpos || pos < ipos - dpos) |
| mjr | 48:058ace2aed1d | 315 | { |
| mjr | 53:9b2611964afc | 316 | // not close enough - add the new position to the |
| mjr | 53:9b2611964afc | 317 | // history and use it as-is |
| mjr | 53:9b2611964afc | 318 | hist[histIdx++] = pos; |
| mjr | 53:9b2611964afc | 319 | histIdx %= countof(hist); |
| mjr | 53:9b2611964afc | 320 | return; |
| mjr | 17:ab3cec0c8bf4 | 321 | } |
| mjr | 17:ab3cec0c8bf4 | 322 | } |
| mjr | 17:ab3cec0c8bf4 | 323 | |
| mjr | 53:9b2611964afc | 324 | // We're close to all recent readings, so use the average |
| mjr | 53:9b2611964afc | 325 | // of the recent readings. Don't add the new reading to the |
| mjr | 53:9b2611964afc | 326 | // history in this case. If the edge is about halfway |
| mjr | 53:9b2611964afc | 327 | // between two pixels, the history will be about 50/50 on |
| mjr | 53:9b2611964afc | 328 | // an ongoing basis, so if we just kept adding samples we'd |
| mjr | 53:9b2611964afc | 329 | // still jitter (just at a slightly reduced rate). By |
| mjr | 53:9b2611964afc | 330 | // stalling the history when it looks like we're stationary, |
| mjr | 53:9b2611964afc | 331 | // we'll just pick one of the pixels and stay there as long |
| mjr | 53:9b2611964afc | 332 | // as the plunger stays where it is. |
| mjr | 53:9b2611964afc | 333 | pos = sum/countof(hist); |
| mjr | 44:b5ac89b9cd5d | 334 | } |
| mjr | 44:b5ac89b9cd5d | 335 | |
| mjr | 52:8298b2a73eb2 | 336 | // Send a status report to the joystick interface. |
| mjr | 53:9b2611964afc | 337 | // See plunger.h for details on the arguments. |
| mjr | 53:9b2611964afc | 338 | virtual void sendStatusReport(USBJoystick &js, uint8_t flags, uint8_t extraTime) |
| mjr | 17:ab3cec0c8bf4 | 339 | { |
| mjr | 53:9b2611964afc | 340 | // To get the requested timing for the cycle we report, we need to run |
| mjr | 53:9b2611964afc | 341 | // an extra cycle. Right now, the sensor is integrating from whenever |
| mjr | 53:9b2611964afc | 342 | // the last start() call was made. |
| mjr | 53:9b2611964afc | 343 | // |
| mjr | 53:9b2611964afc | 344 | // 1. Call startCapture() to end that previous cycle. This will collect |
| mjr | 53:9b2611964afc | 345 | // its pixels into one DMA buffer (call it EVEN), and start a new |
| mjr | 53:9b2611964afc | 346 | // integration cycle. |
| mjr | 53:9b2611964afc | 347 | // |
| mjr | 53:9b2611964afc | 348 | // 2. We know a new integration has just started, so we can control its |
| mjr | 53:9b2611964afc | 349 | // time. Wait for the cycle we just started to finish, since that sets |
| mjr | 53:9b2611964afc | 350 | // the minimum time. |
| mjr | 53:9b2611964afc | 351 | // |
| mjr | 53:9b2611964afc | 352 | // 3. The integration cycle we started in step 1 has now been running the |
| mjr | 53:9b2611964afc | 353 | // minimum time - namely, one read cycle. Pause for our extraTime delay |
| mjr | 53:9b2611964afc | 354 | // to add the requested added time. |
| mjr | 53:9b2611964afc | 355 | // |
| mjr | 53:9b2611964afc | 356 | // 4. Start the next cycle. This will make the pixels we started reading |
| mjr | 53:9b2611964afc | 357 | // in step 1 available via getPix(), and will end the integration cycle |
| mjr | 53:9b2611964afc | 358 | // we started in step 1 and start reading its pixels into the internal |
| mjr | 53:9b2611964afc | 359 | // DMA buffer. |
| mjr | 53:9b2611964afc | 360 | // |
| mjr | 53:9b2611964afc | 361 | // 5. This is where it gets tricky! The pixels we want are the ones that |
| mjr | 53:9b2611964afc | 362 | // started integrating in step 1, which are the ones we're reading via DMA |
| mjr | 53:9b2611964afc | 363 | // now. The pixels available via getPix() are the ones from the cycle we |
| mjr | 53:9b2611964afc | 364 | // *ended* in step 1 - we don't want these. So we need to start a *third* |
| mjr | 53:9b2611964afc | 365 | // cycle in order to get the pixels from the second cycle. |
| mjr | 47:df7a88cd249c | 366 | |
| mjr | 53:9b2611964afc | 367 | ccd.startCapture(); // read pixels from period A, begin integration period B |
| mjr | 53:9b2611964afc | 368 | ccd.wait(); // wait for scan of A to complete, as minimum integration B time |
| mjr | 53:9b2611964afc | 369 | wait_us(long(extraTime) * 100); // add extraTime (0.1ms == 100us increments) to integration B time |
| mjr | 53:9b2611964afc | 370 | ccd.startCapture(); // read pixels from integration period B, begin period C; period A pixels now available |
| mjr | 53:9b2611964afc | 371 | ccd.startCapture(); // read pixels from integration period C, begin period D; period B pixels now available |
| mjr | 53:9b2611964afc | 372 | |
| mjr | 53:9b2611964afc | 373 | // get the pixel array |
| mjr | 47:df7a88cd249c | 374 | uint8_t *pix; |
| mjr | 47:df7a88cd249c | 375 | int n; |
| mjr | 48:058ace2aed1d | 376 | uint32_t t; |
| mjr | 48:058ace2aed1d | 377 | ccd.getPix(pix, n, t); |
| mjr | 52:8298b2a73eb2 | 378 | |
| mjr | 52:8298b2a73eb2 | 379 | // start a timer to measure the processing time |
| mjr | 52:8298b2a73eb2 | 380 | Timer pt; |
| mjr | 52:8298b2a73eb2 | 381 | pt.start(); |
| mjr | 52:8298b2a73eb2 | 382 | |
| mjr | 52:8298b2a73eb2 | 383 | // process the pixels and read the position |
| mjr | 52:8298b2a73eb2 | 384 | int pos; |
| mjr | 53:9b2611964afc | 385 | if (process(pix, n, pos)) |
| mjr | 52:8298b2a73eb2 | 386 | filter(pos); |
| mjr | 52:8298b2a73eb2 | 387 | else |
| mjr | 52:8298b2a73eb2 | 388 | pos = 0xFFFF; |
| mjr | 47:df7a88cd249c | 389 | |
| mjr | 52:8298b2a73eb2 | 390 | // note the processing time |
| mjr | 52:8298b2a73eb2 | 391 | uint32_t processTime = pt.read_us(); |
| mjr | 47:df7a88cd249c | 392 | |
| mjr | 47:df7a88cd249c | 393 | // if a low-res scan is desired, reduce to a subset of pixels |
| mjr | 48:058ace2aed1d | 394 | if (flags & 0x01) |
| mjr | 47:df7a88cd249c | 395 | { |
| mjr | 48:058ace2aed1d | 396 | // figure how many sensor pixels we combine into each low-res pixel |
| mjr | 48:058ace2aed1d | 397 | const int group = 8; |
| mjr | 48:058ace2aed1d | 398 | int lowResPix = n / group; |
| mjr | 48:058ace2aed1d | 399 | |
| mjr | 48:058ace2aed1d | 400 | // combine the pixels |
| mjr | 47:df7a88cd249c | 401 | int src, dst; |
| mjr | 48:058ace2aed1d | 402 | for (src = dst = 0 ; dst < lowResPix ; ++dst) |
| mjr | 48:058ace2aed1d | 403 | { |
| mjr | 52:8298b2a73eb2 | 404 | // average this block of pixels |
| mjr | 48:058ace2aed1d | 405 | int a = 0; |
| mjr | 52:8298b2a73eb2 | 406 | for (int j = 0 ; j < group ; ++j) |
| mjr | 52:8298b2a73eb2 | 407 | a += pix[src++]; |
| mjr | 48:058ace2aed1d | 408 | |
| mjr | 52:8298b2a73eb2 | 409 | // we have the sum, so get the average |
| mjr | 52:8298b2a73eb2 | 410 | a /= group; |
| mjr | 52:8298b2a73eb2 | 411 | |
| mjr | 48:058ace2aed1d | 412 | // store the down-res'd pixel in the array |
| mjr | 48:058ace2aed1d | 413 | pix[dst] = uint8_t(a); |
| mjr | 48:058ace2aed1d | 414 | } |
| mjr | 48:058ace2aed1d | 415 | |
| mjr | 52:8298b2a73eb2 | 416 | // rescale the position for the reduced resolution |
| mjr | 52:8298b2a73eb2 | 417 | if (pos != 0xFFFF) |
| mjr | 52:8298b2a73eb2 | 418 | pos = pos * (lowResPix-1) / (n-1); |
| mjr | 52:8298b2a73eb2 | 419 | |
| mjr | 52:8298b2a73eb2 | 420 | // update the pixel count to the reduced array size |
| mjr | 52:8298b2a73eb2 | 421 | n = lowResPix; |
| mjr | 47:df7a88cd249c | 422 | } |
| mjr | 43:7a6364d82a41 | 423 | |
| mjr | 52:8298b2a73eb2 | 424 | // send the sensor status report |
| mjr | 52:8298b2a73eb2 | 425 | js.sendPlungerStatus(n, pos, dir, ccd.getAvgScanTime(), processTime); |
| mjr | 52:8298b2a73eb2 | 426 | |
| mjr | 52:8298b2a73eb2 | 427 | // If we're not in calibration mode, send the pixels |
| mjr | 52:8298b2a73eb2 | 428 | extern bool plungerCalMode; |
| mjr | 52:8298b2a73eb2 | 429 | if (!plungerCalMode) |
| mjr | 52:8298b2a73eb2 | 430 | { |
| mjr | 52:8298b2a73eb2 | 431 | // send the pixels in report-sized chunks until we get them all |
| mjr | 52:8298b2a73eb2 | 432 | int idx = 0; |
| mjr | 52:8298b2a73eb2 | 433 | while (idx < n) |
| mjr | 52:8298b2a73eb2 | 434 | js.sendPlungerPix(idx, n, pix); |
| mjr | 52:8298b2a73eb2 | 435 | } |
| mjr | 17:ab3cec0c8bf4 | 436 | |
| mjr | 48:058ace2aed1d | 437 | // It takes us a while to send all of the pixels, since we have |
| mjr | 48:058ace2aed1d | 438 | // to break them up into many USB reports. This delay means that |
| mjr | 48:058ace2aed1d | 439 | // the sensor has been sitting there integrating for much longer |
| mjr | 48:058ace2aed1d | 440 | // than usual, so the next frame read will be overexposed. To |
| mjr | 48:058ace2aed1d | 441 | // mitigate this, make sure we don't have a capture running, |
| mjr | 48:058ace2aed1d | 442 | // then clear the sensor and start a new capture. |
| mjr | 48:058ace2aed1d | 443 | ccd.wait(); |
| mjr | 48:058ace2aed1d | 444 | ccd.clear(); |
| mjr | 47:df7a88cd249c | 445 | ccd.startCapture(); |
| mjr | 17:ab3cec0c8bf4 | 446 | } |
| mjr | 17:ab3cec0c8bf4 | 447 | |
| mjr | 52:8298b2a73eb2 | 448 | // get the average sensor scan time |
| mjr | 52:8298b2a73eb2 | 449 | virtual uint32_t getAvgScanTime() { return ccd.getAvgScanTime(); } |
| mjr | 52:8298b2a73eb2 | 450 | |
| mjr | 35:e959ffba78fd | 451 | protected: |
| mjr | 44:b5ac89b9cd5d | 452 | // Sensor orientation. +1 means that the "tip" end - which is always |
| mjr | 44:b5ac89b9cd5d | 453 | // the brighter end in our images - is at the 0th pixel in the array. |
| mjr | 44:b5ac89b9cd5d | 454 | // -1 means that the tip is at the nth pixel in the array. 0 means |
| mjr | 48:058ace2aed1d | 455 | // that we haven't figured it out yet. We automatically infer this |
| mjr | 48:058ace2aed1d | 456 | // from the relative light levels at each end of the array when we |
| mjr | 48:058ace2aed1d | 457 | // successfully find a shadow edge. The reason we save the information |
| mjr | 48:058ace2aed1d | 458 | // is that we might occasionally get frames that are fully in shadow |
| mjr | 48:058ace2aed1d | 459 | // or fully in light, and we can't infer the direction from such |
| mjr | 48:058ace2aed1d | 460 | // frames. Saving the information from past frames gives us a fallback |
| mjr | 48:058ace2aed1d | 461 | // when we can't infer it from the current frame. Note that we update |
| mjr | 48:058ace2aed1d | 462 | // this each time we can infer the direction, so the device will adapt |
| mjr | 48:058ace2aed1d | 463 | // on the fly even if the user repositions the sensor while the software |
| mjr | 48:058ace2aed1d | 464 | // is running. |
| mjr | 44:b5ac89b9cd5d | 465 | int dir; |
| mjr | 51:57eb311faafa | 466 | |
| mjr | 51:57eb311faafa | 467 | // History of recent position readings. We keep a short history of |
| mjr | 51:57eb311faafa | 468 | // readings so that we can apply some filtering to the data. |
| mjr | 52:8298b2a73eb2 | 469 | uint16_t hist[8]; |
| mjr | 51:57eb311faafa | 470 | int histIdx; |
| mjr | 48:058ace2aed1d | 471 | |
| mjr | 48:058ace2aed1d | 472 | // History of midpoint brightness levels for the last few successful |
| mjr | 48:058ace2aed1d | 473 | // scans. This is a circular buffer that we write on each scan where |
| mjr | 48:058ace2aed1d | 474 | // we successfully detect a shadow edge. (It's circular, so we |
| mjr | 48:058ace2aed1d | 475 | // effectively discard the oldest element whenever we write a new one.) |
| mjr | 48:058ace2aed1d | 476 | // |
| mjr | 48:058ace2aed1d | 477 | // The history is useful in cases where we have too little contrast |
| mjr | 48:058ace2aed1d | 478 | // to detect an edge. In these cases, we assume that the entire sensor |
| mjr | 48:058ace2aed1d | 479 | // is either in shadow or light, which can happen if the plunger is at |
| mjr | 48:058ace2aed1d | 480 | // one extreme or the other such that the edge of its shadow is out of |
| mjr | 48:058ace2aed1d | 481 | // the frame. (Ideally, the sensor should be positioned so that the |
| mjr | 48:058ace2aed1d | 482 | // shadow edge is always in the frame, but it's not always possible |
| mjr | 48:058ace2aed1d | 483 | // to do this given the constrained space within a cabinet.) The |
| mjr | 48:058ace2aed1d | 484 | // history helps us decide which case we have - all shadow or all |
| mjr | 48:058ace2aed1d | 485 | // light - by letting us compare our average pixel level in this |
| mjr | 48:058ace2aed1d | 486 | // frame to the range in recent frames. This assumes that the |
| mjr | 48:058ace2aed1d | 487 | // exposure varies minimally from frame to frame, which is usually |
| mjr | 48:058ace2aed1d | 488 | // true because the physical installation (the light source and |
| mjr | 48:058ace2aed1d | 489 | // sensor positions) is usually static. |
| mjr | 48:058ace2aed1d | 490 | // |
| mjr | 48:058ace2aed1d | 491 | // We always try first to infer the bright and dark levels from the |
| mjr | 48:058ace2aed1d | 492 | // image, since this lets us adapt automatically to different exposure |
| mjr | 48:058ace2aed1d | 493 | // levels. The exposure level can vary by integration time and the |
| mjr | 48:058ace2aed1d | 494 | // intensity and positioning of the light source, and we want |
| mjr | 48:058ace2aed1d | 495 | // to be as flexible as we can about both. |
| mjr | 48:058ace2aed1d | 496 | uint8_t midpt[10]; |
| mjr | 48:058ace2aed1d | 497 | uint8_t midptIdx; |
| mjr | 47:df7a88cd249c | 498 | |
| mjr | 44:b5ac89b9cd5d | 499 | public: |
| mjr | 17:ab3cec0c8bf4 | 500 | // the low-level interface to the CCD hardware |
| mjr | 35:e959ffba78fd | 501 | TSL1410R ccd; |
| mjr | 17:ab3cec0c8bf4 | 502 | }; |
| mjr | 35:e959ffba78fd | 503 | |
| mjr | 35:e959ffba78fd | 504 | |
| mjr | 35:e959ffba78fd | 505 | // TSL1410R sensor |
| mjr | 35:e959ffba78fd | 506 | class PlungerSensorTSL1410R: public PlungerSensorCCD |
| mjr | 35:e959ffba78fd | 507 | { |
| mjr | 35:e959ffba78fd | 508 | public: |
| mjr | 35:e959ffba78fd | 509 | PlungerSensorTSL1410R(PinName si, PinName clock, PinName ao1, PinName ao2) |
| mjr | 47:df7a88cd249c | 510 | : PlungerSensorCCD(1280, si, clock, ao1, ao2) |
| mjr | 35:e959ffba78fd | 511 | { |
| mjr | 35:e959ffba78fd | 512 | } |
| mjr | 35:e959ffba78fd | 513 | }; |
| mjr | 35:e959ffba78fd | 514 | |
| mjr | 35:e959ffba78fd | 515 | // TSL1412R |
| mjr | 35:e959ffba78fd | 516 | class PlungerSensorTSL1412R: public PlungerSensorCCD |
| mjr | 35:e959ffba78fd | 517 | { |
| mjr | 35:e959ffba78fd | 518 | public: |
| mjr | 35:e959ffba78fd | 519 | PlungerSensorTSL1412R(PinName si, PinName clock, PinName ao1, PinName ao2) |
| mjr | 47:df7a88cd249c | 520 | : PlungerSensorCCD(1536, si, clock, ao1, ao2) |
| mjr | 35:e959ffba78fd | 521 | { |
| mjr | 35:e959ffba78fd | 522 | } |
| mjr | 35:e959ffba78fd | 523 | }; |
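
The two-sided edge scan described in the comments above (revision 53, listing lines 170-196) is easy to exercise off-device. The following is a minimal host-side sketch, not part of the firmware: it assumes the standard orientation (bright end at pixel 0) and omits the reversed-orientation and low-contrast fallback paths, but it uses the same end averaging, the same mid ± range/6 crossover thresholds, and the same convergence test as the listing.

```cpp
// Host-side sketch of the two-sided edge scan (simplified: standard
// orientation only).  This file is an illustration, not firmware code.
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <vector>

// Scan a frame that is bright on the left and dark on the right.
// On success, set 'pos' to the shadow-edge pixel index and return true;
// return false if either scan runs off the end of the usable range.
static bool findEdge(const uint8_t *pix, int n, int &pos)
{
    // average the outermost 5 pixels at each end
    int a = (int(pix[0]) + pix[1] + pix[2] + pix[3] + pix[4]) / 5;
    int b = (int(pix[n-1]) + pix[n-2] + pix[n-3] + pix[n-4] + pix[n-5]) / 5;

    // crossover thresholds 1/6 of the bright-dark range past the midpoint
    int mid = (a + b) / 2;
    int delta6 = std::abs(a - b) / 6;
    int crossoverHi = mid + delta6;
    int crossoverLo = mid - delta6;

    // 'bi' marches in from the bright (left) end, 'di' from the dark (right) end
    int bi = 4, di = n - 5;
    for (;;)
    {
        // bright-side scan: stop at the first pixel that looks dark
        for (++bi ; bi <= n-6 ; ++bi)
            if (pix[bi] < crossoverLo)
                break;
        if (bi > n-6)
            return false;                            // ran off the end - no edge
        if (bi >= di) { pos = bi; return true; }     // scans converged

        // dark-side scan: stop at the first pixel that looks bright
        for (--di ; di >= 5 ; --di)
            if (pix[di] > crossoverHi)
                break;
        if (di < 5)
            return false;                            // ran off the end - no edge
        if (bi >= di) { pos = di; return true; }     // scans converged
    }
}

int main()
{
    // synthetic 1280-pixel frame: bright (200) up to pixel 699, shadow (20)
    // from pixel 700 on, plus one transient dark pixel at index 300
    std::vector<uint8_t> pix(1280, 200);
    for (int i = 700 ; i < 1280 ; ++i) pix[i] = 20;
    pix[300] = 20;

    int pos;
    if (findEdge(pix.data(), int(pix.size()), pos))
        printf("edge at pixel %d\n", pos);   // prints: edge at pixel 700
    else
        printf("no edge found\n");
    return 0;
}
```

On this synthetic frame, the bright-side scan first stops at the noisy pixel 300, but the dark-side scan stops at pixel 699, so the indices haven't crossed; the next bright-side pass moves past the noise and stops at 700, where the scans converge and the edge is reported at pixel 700.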
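
The anti-jitter filter in filter() (listing lines 305-334) is likewise self-contained enough to test on a PC. The sketch below is a hypothetical standalone harness, not firmware code; it mirrors the same logic: an 8-entry history, a ±2-pixel "close" window, pass-through plus history update when the reading moves, and a history average with no history update when the reading is hovering.

```cpp
// Host-side sketch of the anti-jitter filter (hypothetical test harness,
// not firmware code).  Same parameters as the listing: an 8-entry history
// and a +/-2-pixel "close" window.
#include <cstdint>
#include <cstdio>
#include <cstring>

class JitterFilter
{
public:
    JitterFilter() : histIdx(0) { memset(hist, 0, sizeof(hist)); }

    void filter(int &pos)
    {
        const int dpos = 2;                             // "close" = within 2 pixels
        const int n = sizeof(hist) / sizeof(hist[0]);   // history length (8)
        long sum = 0;
        for (int i = 0 ; i < n ; ++i)
        {
            int ipos = hist[i];
            sum += ipos;
            if (pos > ipos + dpos || pos < ipos - dpos)
            {
                // not close to the whole history - record the reading
                // in the circular buffer and pass it through unchanged
                hist[histIdx++] = uint16_t(pos);
                histIdx %= n;
                return;
            }
        }

        // close to every recent reading - peg to the history average
        // and leave the history as it is
        pos = int(sum / n);
    }

private:
    uint16_t hist[8];   // recent readings (circular buffer)
    int histIdx;        // next write position
};

int main()
{
    JitterFilter f;
    int readings[] = { 400, 401, 400, 402, 401, 400, 401, 400, 401, 399 };
    for (int r : readings)
    {
        int pos = r;
        f.filter(pos);
        printf("raw=%d  filtered=%d\n", r, pos);
    }
    return 0;
}
```

Once the history fills with readings around 400, a reading of 401 is reported as the history average (400) without touching the history, while the final reading of 399 falls outside the ±2 window of one history entry (402), so it passes through unchanged and updates the history.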
