magnetic.cpp@0:f68c99fc1bc2, 2016-06-18 (annotated)
- Committer: Cotzo
- Date: Sat Jun 18 21:16:12 2016 +0000
- Revision: 0:f68c99fc1bc2
- Commit message: Initial commit
Who changed what in which revision?
User | Revision | Line number | New contents of line |
---|---|---|---|
Cotzo | 0:f68c99fc1bc2 | 1 | // Source: eCompass v3 |
Cotzo | 0:f68c99fc1bc2 | 2 | |
Cotzo | 0:f68c99fc1bc2 | 3 | #include "magnetic.h" |
Cotzo | 0:f68c99fc1bc2 | 4 | #include "mbed.h" |
Cotzo | 0:f68c99fc1bc2 | 5 | |
Cotzo | 0:f68c99fc1bc2 | 6 | |
Cotzo | 0:f68c99fc1bc2 | 7 | float xftmpA4x4[4][4], *ftmpA4x4[4]; // scratch 4x4 matrix |
Cotzo | 0:f68c99fc1bc2 | 8 | float xftmpB4x4[4][4], *ftmpB4x4[4]; // scratch 4x4 matrix |
Cotzo | 0:f68c99fc1bc2 | 9 | float xftmpA4x1[4][1], *ftmpA4x1[4]; // scratch 4x1 matrix |
Cotzo | 0:f68c99fc1bc2 | 10 | float xftmpB4x1[4][1], *ftmpB4x1[4]; // scratch 4x1 matrix |
Cotzo | 0:f68c99fc1bc2 | 11 | |
Cotzo | 0:f68c99fc1bc2 | 12 | int32 xicolind[MAXMATINV][1], *icolind[MAXMATINV]; |
Cotzo | 0:f68c99fc1bc2 | 13 | int32 xirowind[MAXMATINV][1], *irowind[MAXMATINV]; |
Cotzo | 0:f68c99fc1bc2 | 14 | int32 xipivot[MAXMATINV][1], *ipivot[MAXMATINV]; |
Cotzo | 0:f68c99fc1bc2 | 15 | |
Cotzo | 0:f68c99fc1bc2 | 16 | |
Cotzo | 0:f68c99fc1bc2 | 17 | void magUpdateCalibration(struct MagCalibration *pthisMagCal, |
Cotzo | 0:f68c99fc1bc2 | 18 | struct MagneticBuffer *pthisMagneticBuffer) { |
Cotzo | 0:f68c99fc1bc2 | 19 | |
Cotzo | 0:f68c99fc1bc2 | 20 | int i; |
Cotzo | 0:f68c99fc1bc2 | 21 | |
Cotzo | 0:f68c99fc1bc2 | 22 | //while(1); |
Cotzo | 0:f68c99fc1bc2 | 23 | // 4 row arrays |
Cotzo | 0:f68c99fc1bc2 | 24 | for (i = 0; i < 4; i++) |
Cotzo | 0:f68c99fc1bc2 | 25 | { |
Cotzo | 0:f68c99fc1bc2 | 26 | ftmpA4x4[i] = xftmpA4x4[i]; |
Cotzo | 0:f68c99fc1bc2 | 27 | ftmpB4x4[i] = xftmpB4x4[i]; |
Cotzo | 0:f68c99fc1bc2 | 28 | ftmpA4x1[i] = xftmpA4x1[i]; |
Cotzo | 0:f68c99fc1bc2 | 29 | ftmpB4x1[i] = xftmpB4x1[i]; |
Cotzo | 0:f68c99fc1bc2 | 30 | }; |
Cotzo | 0:f68c99fc1bc2 | 31 | |
Cotzo | 0:f68c99fc1bc2 | 32 | // MAXMATINV row arrays |
Cotzo | 0:f68c99fc1bc2 | 33 | for (i = 0; i < MAXMATINV; i++) { |
Cotzo | 0:f68c99fc1bc2 | 34 | icolind[i] = xicolind[i]; |
Cotzo | 0:f68c99fc1bc2 | 35 | irowind[i] = xirowind[i]; |
Cotzo | 0:f68c99fc1bc2 | 36 | ipivot[i] = xipivot[i]; |
Cotzo | 0:f68c99fc1bc2 | 37 | }; |
Cotzo | 0:f68c99fc1bc2 | 38 | |
Cotzo | 0:f68c99fc1bc2 | 39 | |
Cotzo | 0:f68c99fc1bc2 | 40 | fUpdateCalibration4INV(pthisMagCal, pthisMagneticBuffer, ftmpA4x4, ftmpB4x4, ftmpA4x1, ftmpB4x1, icolind, irowind, ipivot); |
Cotzo | 0:f68c99fc1bc2 | 41 | |
Cotzo | 0:f68c99fc1bc2 | 42 | } |
Cotzo | 0:f68c99fc1bc2 | 43 | |
Cotzo | 0:f68c99fc1bc2 | 44 | |
Cotzo | 0:f68c99fc1bc2 | 45 | |
Cotzo | 0:f68c99fc1bc2 | 46 | |
Cotzo | 0:f68c99fc1bc2 | 47 | // function calculates the matrix product A = B x C |
Cotzo | 0:f68c99fc1bc2 | 48 | void fmatrixAeqBxC(float **A, float **B, float **C, int32 rB, int32 cBrC, int32 cC) |
Cotzo | 0:f68c99fc1bc2 | 49 | { |
Cotzo | 0:f68c99fc1bc2 | 50 | // rB = rows in B |
Cotzo | 0:f68c99fc1bc2 | 51 | // cBrC = columns in B = rows in C |
Cotzo | 0:f68c99fc1bc2 | 52 | // cC = columns in C |
Cotzo | 0:f68c99fc1bc2 | 53 | // A has dimension rB rows x cC columns |
Cotzo | 0:f68c99fc1bc2 | 54 | |
Cotzo | 0:f68c99fc1bc2 | 55 | int32 i, j, k; // counters |
Cotzo | 0:f68c99fc1bc2 | 56 | |
Cotzo | 0:f68c99fc1bc2 | 57 | for (i = 0; i < rB; i++) |
Cotzo | 0:f68c99fc1bc2 | 58 | { |
Cotzo | 0:f68c99fc1bc2 | 59 | for (j = 0; j < cC; j++) |
Cotzo | 0:f68c99fc1bc2 | 60 | { |
Cotzo | 0:f68c99fc1bc2 | 61 | A[i][j] = 0.0F; |
Cotzo | 0:f68c99fc1bc2 | 62 | for (k = 0; k < cBrC; k++) |
Cotzo | 0:f68c99fc1bc2 | 63 | A[i][j] += B[i][k] * C[k][j]; |
Cotzo | 0:f68c99fc1bc2 | 64 | } |
Cotzo | 0:f68c99fc1bc2 | 65 | } |
Cotzo | 0:f68c99fc1bc2 | 66 | return; |
Cotzo | 0:f68c99fc1bc2 | 67 | } |
Cotzo | 0:f68c99fc1bc2 | 68 | |
Cotzo | 0:f68c99fc1bc2 | 69 | |
Cotzo | 0:f68c99fc1bc2 | 70 | // function sets the matrix A to the identity matrix |
Cotzo | 0:f68c99fc1bc2 | 71 | void fmatrixAeqI(float **A, int32 rc) |
Cotzo | 0:f68c99fc1bc2 | 72 | { |
Cotzo | 0:f68c99fc1bc2 | 73 | // rc = rows and columns in A |
Cotzo | 0:f68c99fc1bc2 | 74 | |
Cotzo | 0:f68c99fc1bc2 | 75 | int32 i, j; // loop counters |
Cotzo | 0:f68c99fc1bc2 | 76 | |
Cotzo | 0:f68c99fc1bc2 | 77 | for (i = 0; i < rc; i++) |
Cotzo | 0:f68c99fc1bc2 | 78 | { |
Cotzo | 0:f68c99fc1bc2 | 79 | for (j = 0; j < rc; j++) |
Cotzo | 0:f68c99fc1bc2 | 80 | { |
Cotzo | 0:f68c99fc1bc2 | 81 | A[i][j] = 0.0F; |
Cotzo | 0:f68c99fc1bc2 | 82 | } |
Cotzo | 0:f68c99fc1bc2 | 83 | A[i][i] = 1.0F; |
Cotzo | 0:f68c99fc1bc2 | 84 | } |
Cotzo | 0:f68c99fc1bc2 | 85 | return; |
Cotzo | 0:f68c99fc1bc2 | 86 | } |
Cotzo | 0:f68c99fc1bc2 | 87 | |
Cotzo | 0:f68c99fc1bc2 | 88 | |
Cotzo | 0:f68c99fc1bc2 | 89 | /* function uses Gauss-Jordan elimination to compute the inverse of matrix A in situ */ |
Cotzo | 0:f68c99fc1bc2 | 90 | /* on exit, A is replaced with its inverse */ |
Cotzo | 0:f68c99fc1bc2 | 91 | void fmatrixAeqInvA(float **A, int32 isize, int32 **icolind, int32 **irowind, int32 **ipivot) |
Cotzo | 0:f68c99fc1bc2 | 92 | { |
Cotzo | 0:f68c99fc1bc2 | 93 | int32 i, j, k, l, m; // index counters |
Cotzo | 0:f68c99fc1bc2 | 94 | int32 ipivotrow, ipivotcol; // row and column of pivot element |
Cotzo | 0:f68c99fc1bc2 | 95 | float largest; // largest element used for pivoting |
Cotzo | 0:f68c99fc1bc2 | 96 | float scaling; // scaling factor in pivoting |
Cotzo | 0:f68c99fc1bc2 | 97 | float recippiv; // reciprocal of pivot element |
Cotzo | 0:f68c99fc1bc2 | 98 | float ftmp; // temporary variable used in swaps |
Cotzo | 0:f68c99fc1bc2 | 99 | |
Cotzo | 0:f68c99fc1bc2 | 100 | // initialize the pivot array to 0 |
Cotzo | 0:f68c99fc1bc2 | 101 | for (j = 0; j < isize; j++) |
Cotzo | 0:f68c99fc1bc2 | 102 | { |
Cotzo | 0:f68c99fc1bc2 | 103 | ipivot[j][0] = 0; |
Cotzo | 0:f68c99fc1bc2 | 104 | } |
Cotzo | 0:f68c99fc1bc2 | 105 | |
Cotzo | 0:f68c99fc1bc2 | 106 | // main loop i over the dimensions of the square matrix A |
Cotzo | 0:f68c99fc1bc2 | 107 | for (i = 0; i < isize; i++) |
Cotzo | 0:f68c99fc1bc2 | 108 | { |
Cotzo | 0:f68c99fc1bc2 | 109 | // zero the largest element found for pivoting |
Cotzo | 0:f68c99fc1bc2 | 110 | largest = 0.0F; |
Cotzo | 0:f68c99fc1bc2 | 111 | // loop over candidate rows j |
Cotzo | 0:f68c99fc1bc2 | 112 | for (j = 0; j < isize; j++) |
Cotzo | 0:f68c99fc1bc2 | 113 | { |
Cotzo | 0:f68c99fc1bc2 | 114 | // check if row j has been previously pivoted |
Cotzo | 0:f68c99fc1bc2 | 115 | if (ipivot[j][0] != 1) |
Cotzo | 0:f68c99fc1bc2 | 116 | { |
Cotzo | 0:f68c99fc1bc2 | 117 | // loop over candidate columns k |
Cotzo | 0:f68c99fc1bc2 | 118 | for (k = 0; k < isize; k++) |
Cotzo | 0:f68c99fc1bc2 | 119 | { |
Cotzo | 0:f68c99fc1bc2 | 120 | // check if column k has previously been pivoted |
Cotzo | 0:f68c99fc1bc2 | 121 | if (ipivot[k][0] == 0) |
Cotzo | 0:f68c99fc1bc2 | 122 | { |
Cotzo | 0:f68c99fc1bc2 | 123 | // check if the pivot element is the largest found so far |
Cotzo | 0:f68c99fc1bc2 | 124 | if (fabs(A[j][k]) >= largest) |
Cotzo | 0:f68c99fc1bc2 | 125 | { |
Cotzo | 0:f68c99fc1bc2 | 126 | // and store this location as the current best candidate for pivoting |
Cotzo | 0:f68c99fc1bc2 | 127 | ipivotrow = j; |
Cotzo | 0:f68c99fc1bc2 | 128 | ipivotcol = k; |
Cotzo | 0:f68c99fc1bc2 | 129 | largest = (float) fabs(A[ipivotrow][ipivotcol]); |
Cotzo | 0:f68c99fc1bc2 | 130 | } |
Cotzo | 0:f68c99fc1bc2 | 131 | } |
Cotzo | 0:f68c99fc1bc2 | 132 | else if (ipivot[k][0] > 1) |
Cotzo | 0:f68c99fc1bc2 | 133 | { |
Cotzo | 0:f68c99fc1bc2 | 134 | // zero determinant situation: exit with identity matrix |
Cotzo | 0:f68c99fc1bc2 | 135 | fmatrixAeqI(A, isize); |
Cotzo | 0:f68c99fc1bc2 | 136 | return; |
Cotzo | 0:f68c99fc1bc2 | 137 | } |
Cotzo | 0:f68c99fc1bc2 | 138 | } |
Cotzo | 0:f68c99fc1bc2 | 139 | } |
Cotzo | 0:f68c99fc1bc2 | 140 | } |
Cotzo | 0:f68c99fc1bc2 | 141 | // increment the entry in ipivot to denote it has been selected for pivoting |
Cotzo | 0:f68c99fc1bc2 | 142 | ipivot[ipivotcol][0]++; |
Cotzo | 0:f68c99fc1bc2 | 143 | |
Cotzo | 0:f68c99fc1bc2 | 144 | // check the pivot rows ipivotrow and ipivotcol are not the same before swapping |
Cotzo | 0:f68c99fc1bc2 | 145 | if (ipivotrow != ipivotcol) |
Cotzo | 0:f68c99fc1bc2 | 146 | { |
Cotzo | 0:f68c99fc1bc2 | 147 | // loop over columns l |
Cotzo | 0:f68c99fc1bc2 | 148 | for (l = 0; l < isize; l++) |
Cotzo | 0:f68c99fc1bc2 | 149 | { |
Cotzo | 0:f68c99fc1bc2 | 150 | // and swap all elements of rows ipivotrow and ipivotcol |
Cotzo | 0:f68c99fc1bc2 | 151 | ftmp = A[ipivotrow][l]; |
Cotzo | 0:f68c99fc1bc2 | 152 | A[ipivotrow][l] = A[ipivotcol][l]; |
Cotzo | 0:f68c99fc1bc2 | 153 | A[ipivotcol][l] = ftmp; |
Cotzo | 0:f68c99fc1bc2 | 154 | } |
Cotzo | 0:f68c99fc1bc2 | 155 | } |
Cotzo | 0:f68c99fc1bc2 | 156 | |
Cotzo | 0:f68c99fc1bc2 | 157 | // record that on the i-th iteration rows ipivotrow and ipivotcol were swapped |
Cotzo | 0:f68c99fc1bc2 | 158 | irowind[i][0] = ipivotrow; |
Cotzo | 0:f68c99fc1bc2 | 159 | icolind[i][0] = ipivotcol; |
Cotzo | 0:f68c99fc1bc2 | 160 | |
Cotzo | 0:f68c99fc1bc2 | 161 | // check for zero on-diagonal element (singular matrix) and return with identity matrix if detected |
Cotzo | 0:f68c99fc1bc2 | 162 | if (A[ipivotcol][ipivotcol] == 0.0F) |
Cotzo | 0:f68c99fc1bc2 | 163 | { |
Cotzo | 0:f68c99fc1bc2 | 164 | // zero determinant situation: exit with identity matrix |
Cotzo | 0:f68c99fc1bc2 | 165 | fmatrixAeqI(A, isize); |
Cotzo | 0:f68c99fc1bc2 | 166 | return; |
Cotzo | 0:f68c99fc1bc2 | 167 | } |
Cotzo | 0:f68c99fc1bc2 | 168 | |
Cotzo | 0:f68c99fc1bc2 | 169 | // calculate the reciprocal of the pivot element knowing it's non-zero |
Cotzo | 0:f68c99fc1bc2 | 170 | recippiv = 1.0F / A[ipivotcol][ipivotcol]; |
Cotzo | 0:f68c99fc1bc2 | 171 | // by definition, the diagonal element normalizes to 1 |
Cotzo | 0:f68c99fc1bc2 | 172 | A[ipivotcol][ipivotcol] = 1.0F; |
Cotzo | 0:f68c99fc1bc2 | 173 | // multiply all of row ipivotcol by the reciprocal of the pivot element including the diagonal element |
Cotzo | 0:f68c99fc1bc2 | 174 | // the diagonal element A[ipivotcol][ipivotcol] now has value equal to the reciprocal of its previous value |
Cotzo | 0:f68c99fc1bc2 | 175 | for (l = 0; l < isize; l++) |
Cotzo | 0:f68c99fc1bc2 | 176 | { |
Cotzo | 0:f68c99fc1bc2 | 177 | A[ipivotcol][l] *= recippiv; |
Cotzo | 0:f68c99fc1bc2 | 178 | } |
Cotzo | 0:f68c99fc1bc2 | 179 | // loop over all rows m of A |
Cotzo | 0:f68c99fc1bc2 | 180 | for (m = 0; m < isize; m++) |
Cotzo | 0:f68c99fc1bc2 | 181 | { |
Cotzo | 0:f68c99fc1bc2 | 182 | if (m != ipivotcol) |
Cotzo | 0:f68c99fc1bc2 | 183 | { |
Cotzo | 0:f68c99fc1bc2 | 184 | // scaling factor for this row m is in column ipivotcol |
Cotzo | 0:f68c99fc1bc2 | 185 | scaling = A[m][ipivotcol]; |
Cotzo | 0:f68c99fc1bc2 | 186 | // zero this element |
Cotzo | 0:f68c99fc1bc2 | 187 | A[m][ipivotcol] = 0.0F; |
Cotzo | 0:f68c99fc1bc2 | 188 | // loop over all columns l of A and perform elimination |
Cotzo | 0:f68c99fc1bc2 | 189 | for (l = 0; l < isize; l++) |
Cotzo | 0:f68c99fc1bc2 | 190 | { |
Cotzo | 0:f68c99fc1bc2 | 191 | A[m][l] -= A[ipivotcol][l] * scaling; |
Cotzo | 0:f68c99fc1bc2 | 192 | } |
Cotzo | 0:f68c99fc1bc2 | 193 | } |
Cotzo | 0:f68c99fc1bc2 | 194 | } |
Cotzo | 0:f68c99fc1bc2 | 195 | } // end of loop i over the matrix dimensions |
Cotzo | 0:f68c99fc1bc2 | 196 | |
Cotzo | 0:f68c99fc1bc2 | 197 | // finally, loop in reverse order to apply the deferred column swaps |
Cotzo | 0:f68c99fc1bc2 | 198 | for (l = isize - 1; l >= 0; l--) |
Cotzo | 0:f68c99fc1bc2 | 199 | { |
Cotzo | 0:f68c99fc1bc2 | 200 | // set i and j to the two columns to be swapped |
Cotzo | 0:f68c99fc1bc2 | 201 | i = irowind[l][0]; |
Cotzo | 0:f68c99fc1bc2 | 202 | j = icolind[l][0]; |
Cotzo | 0:f68c99fc1bc2 | 203 | |
Cotzo | 0:f68c99fc1bc2 | 204 | // check that the two columns i and j to be swapped are not the same |
Cotzo | 0:f68c99fc1bc2 | 205 | if (i != j) |
Cotzo | 0:f68c99fc1bc2 | 206 | { |
Cotzo | 0:f68c99fc1bc2 | 207 | // loop over all rows k to swap columns i and j of A |
Cotzo | 0:f68c99fc1bc2 | 208 | for (k = 0; k < isize; k++) |
Cotzo | 0:f68c99fc1bc2 | 209 | { |
Cotzo | 0:f68c99fc1bc2 | 210 | ftmp = A[k][i]; |
Cotzo | 0:f68c99fc1bc2 | 211 | A[k][i] = A[k][j]; |
Cotzo | 0:f68c99fc1bc2 | 212 | A[k][j] = ftmp; |
Cotzo | 0:f68c99fc1bc2 | 213 | } |
Cotzo | 0:f68c99fc1bc2 | 214 | } |
Cotzo | 0:f68c99fc1bc2 | 215 | } |
Cotzo | 0:f68c99fc1bc2 | 216 | |
Cotzo | 0:f68c99fc1bc2 | 217 | return; |
Cotzo | 0:f68c99fc1bc2 | 218 | } |
Cotzo | 0:f68c99fc1bc2 | 219 | |
Cotzo | 0:f68c99fc1bc2 | 220 | |
Cotzo | 0:f68c99fc1bc2 | 221 | void ResetMagCalibration(struct MagCalibration *pthisMagCal/*, struct MagneticBuffer *pthisMagneticBuffer*/) |
Cotzo | 0:f68c99fc1bc2 | 222 | { |
Cotzo | 0:f68c99fc1bc2 | 223 | int32 i, j, k, l; // loop counters |
Cotzo | 0:f68c99fc1bc2 | 224 | |
Cotzo | 0:f68c99fc1bc2 | 225 | for (i = 0; i < 3; i++) { |
Cotzo | 0:f68c99fc1bc2 | 226 | pthisMagCal->finvW[i] = pthisMagCal->xfinvW[i]; |
Cotzo | 0:f68c99fc1bc2 | 227 | pthisMagCal->ftrinvW[i] = pthisMagCal->xftrinvW[i]; |
Cotzo | 0:f68c99fc1bc2 | 228 | pthisMagCal->fA[i] = pthisMagCal->xfA[i]; |
Cotzo | 0:f68c99fc1bc2 | 229 | pthisMagCal->finvA[i] = pthisMagCal->xinvA[i]; |
Cotzo | 0:f68c99fc1bc2 | 230 | }; |
Cotzo | 0:f68c99fc1bc2 | 231 | |
Cotzo | 0:f68c99fc1bc2 | 232 | // initialize the calibration hard and soft iron estimate to null |
Cotzo | 0:f68c99fc1bc2 | 233 | fmatrixAeqI(pthisMagCal->finvW, 3); |
Cotzo | 0:f68c99fc1bc2 | 234 | pthisMagCal->fVx = pthisMagCal->fVy = pthisMagCal->fVz = 0.0F; |
Cotzo | 0:f68c99fc1bc2 | 235 | pthisMagCal->fB = 0.0F; |
Cotzo | 0:f68c99fc1bc2 | 236 | pthisMagCal->fFitErrorpc = 1000.0F; |
Cotzo | 0:f68c99fc1bc2 | 237 | pthisMagCal->iValidMagCal = 0; |
Cotzo | 0:f68c99fc1bc2 | 238 | |
Cotzo | 0:f68c99fc1bc2 | 239 | // set magnetic buffer index to invalid value -1 to denote invalid |
Cotzo | 0:f68c99fc1bc2 | 240 | /*pthisMagneticBuffer->iMagBufferCount = 0; |
Cotzo | 0:f68c99fc1bc2 | 241 | for (j = 0; j < MAGBUFFSIZE; j++) |
Cotzo | 0:f68c99fc1bc2 | 242 | { |
Cotzo | 0:f68c99fc1bc2 | 243 | for (k = 0; k < MAGBUFFSIZE; k++) |
Cotzo | 0:f68c99fc1bc2 | 244 | { |
Cotzo | 0:f68c99fc1bc2 | 245 | for (l = 0; l < MAGBUFFSIZE; l++) |
Cotzo | 0:f68c99fc1bc2 | 246 | { |
Cotzo | 0:f68c99fc1bc2 | 247 | pthisMagneticBuffer->index[j][k][l] = -1; |
Cotzo | 0:f68c99fc1bc2 | 248 | } |
Cotzo | 0:f68c99fc1bc2 | 249 | } |
Cotzo | 0:f68c99fc1bc2 | 250 | }*/ |
Cotzo | 0:f68c99fc1bc2 | 251 | |
Cotzo | 0:f68c99fc1bc2 | 252 | return; |
Cotzo | 0:f68c99fc1bc2 | 253 | } |
Cotzo | 0:f68c99fc1bc2 | 254 | |
Cotzo | 0:f68c99fc1bc2 | 255 | // 4 element calibration using 4x4 matrix inverse |
Cotzo | 0:f68c99fc1bc2 | 256 | void fUpdateCalibration4INV(struct MagCalibration *pthisMagCal, |
Cotzo | 0:f68c99fc1bc2 | 257 | struct MagneticBuffer *pthisMagneticBuffer, |
Cotzo | 0:f68c99fc1bc2 | 258 | float **ftmpA4x4, float **ftmpB4x4, float **ftmpA4x1, |
Cotzo | 0:f68c99fc1bc2 | 259 | float **ftmpB4x1, int32 **icolind, int32 **irowind, int32 **ipivot) |
Cotzo | 0:f68c99fc1bc2 | 260 | { |
Cotzo | 0:f68c99fc1bc2 | 261 | int32 i, j, /*k, l,*/ m, n; // loop counters |
Cotzo | 0:f68c99fc1bc2 | 262 | int32 ilocalMagBufferCount; // local count of measurements for this process |
Cotzo | 0:f68c99fc1bc2 | 263 | float fOffsetx, fOffsety, fOffsetz; // offset to remove large DC hard iron bias in matrix |
Cotzo | 0:f68c99fc1bc2 | 264 | float ftmpBpx, ftmpBpy, ftmpBpz; // x, y, z magnetometer readings (uT) |
Cotzo | 0:f68c99fc1bc2 | 265 | float ftmpBpxsq, ftmpBpysq, ftmpBpzsq; // squares of x, y, z magnetometer readings (uT) |
Cotzo | 0:f68c99fc1bc2 | 266 | float fy; // dependent variable |
Cotzo | 0:f68c99fc1bc2 | 267 | float fYTY; // scalar equal to Y^T.Y |
Cotzo | 0:f68c99fc1bc2 | 268 | float fscaling; // set to FUTPERCOUNT * FMATRIXSCALING |
Cotzo | 0:f68c99fc1bc2 | 269 | float fP; // performance function = r^T.r |
Cotzo | 0:f68c99fc1bc2 | 270 | |
Cotzo | 0:f68c99fc1bc2 | 271 | // compute fscaling to reduce multiplications later |
Cotzo | 0:f68c99fc1bc2 | 272 | fscaling = FUTPERCOUNT * FMATRIXSCALING; |
Cotzo | 0:f68c99fc1bc2 | 273 | |
Cotzo | 0:f68c99fc1bc2 | 274 | // set trial inverse soft iron matrix invW to the identity matrix for 4 element calibration |
Cotzo | 0:f68c99fc1bc2 | 275 | pthisMagCal->ftrinvW[0][0] = pthisMagCal->ftrinvW[1][1] = pthisMagCal->ftrinvW[2][2] = 1.0F; |
Cotzo | 0:f68c99fc1bc2 | 276 | pthisMagCal->ftrinvW[0][1] = pthisMagCal->ftrinvW[0][2] = pthisMagCal->ftrinvW[1][0] = 0.0F; |
Cotzo | 0:f68c99fc1bc2 | 277 | pthisMagCal->ftrinvW[1][2] = pthisMagCal->ftrinvW[2][0] = pthisMagCal->ftrinvW[2][1] = 0.0F; |
Cotzo | 0:f68c99fc1bc2 | 278 | |
Cotzo | 0:f68c99fc1bc2 | 279 | // zero fYTY=Y^T.Y, ftmpA4x1=X^T.Y and on and above diagonal elements of ftmpA4x4=X^T*X |
Cotzo | 0:f68c99fc1bc2 | 280 | fYTY = 0.0F; |
Cotzo | 0:f68c99fc1bc2 | 281 | for (m = 0; m < 4; m++) |
Cotzo | 0:f68c99fc1bc2 | 282 | { |
Cotzo | 0:f68c99fc1bc2 | 283 | ftmpA4x1[m][0] = 0.0F; |
Cotzo | 0:f68c99fc1bc2 | 284 | for (n = m; n < 4; n++) |
Cotzo | 0:f68c99fc1bc2 | 285 | { |
Cotzo | 0:f68c99fc1bc2 | 286 | ftmpA4x4[m][n] = 0.0F; |
Cotzo | 0:f68c99fc1bc2 | 287 | } |
Cotzo | 0:f68c99fc1bc2 | 288 | } |
Cotzo | 0:f68c99fc1bc2 | 289 | |
Cotzo | 0:f68c99fc1bc2 | 290 | // the offsets are always set from the first buffer entry; initialize them here only to avoid a compiler warning |
Cotzo | 0:f68c99fc1bc2 | 291 | fOffsetx = fOffsety = fOffsetz = 0.0F; |
Cotzo | 0:f68c99fc1bc2 | 292 | |
Cotzo | 0:f68c99fc1bc2 | 293 | // loop over the magnetic buffer entries to accumulate the least-squares matrices (the index validity checks are commented out in this revision) |
Cotzo | 0:f68c99fc1bc2 | 294 | i = 0; |
Cotzo | 0:f68c99fc1bc2 | 295 | for (j = 0; j < MAGBUFFSIZE; j++) |
Cotzo | 0:f68c99fc1bc2 | 296 | { |
Cotzo | 0:f68c99fc1bc2 | 297 | //for (k = 0; k < MAGBUFFSIZE; k++) |
Cotzo | 0:f68c99fc1bc2 | 298 | //{ |
Cotzo | 0:f68c99fc1bc2 | 299 | //for (l = 0; l < MAGBUFFSIZE; l++) |
Cotzo | 0:f68c99fc1bc2 | 300 | //{ |
Cotzo | 0:f68c99fc1bc2 | 301 | //if (pthisMagneticBuffer->index[j][k][l] != -1) |
Cotzo | 0:f68c99fc1bc2 | 302 | //{ |
Cotzo | 0:f68c99fc1bc2 | 303 | // use first valid magnetic buffer entry as estimate (in counts) for offset |
Cotzo | 0:f68c99fc1bc2 | 304 | printf("."); |
Cotzo | 0:f68c99fc1bc2 | 305 | if (i == 0) |
Cotzo | 0:f68c99fc1bc2 | 306 | { |
Cotzo | 0:f68c99fc1bc2 | 307 | fOffsetx = (float)pthisMagneticBuffer->iBx[j]/*[k][l]*/; |
Cotzo | 0:f68c99fc1bc2 | 308 | fOffsety = (float)pthisMagneticBuffer->iBy[j]/*[k][l]*/; |
Cotzo | 0:f68c99fc1bc2 | 309 | fOffsetz = (float)pthisMagneticBuffer->iBz[j]/*[k][l]*/; |
Cotzo | 0:f68c99fc1bc2 | 310 | } |
Cotzo | 0:f68c99fc1bc2 | 311 | |
Cotzo | 0:f68c99fc1bc2 | 312 | // calculate offset and scaled magnetic buffer vector data Bx, By, Bz (scaled uT) |
Cotzo | 0:f68c99fc1bc2 | 313 | ftmpBpx = ((float)pthisMagneticBuffer->iBx[j]/*[k][l]*/ - fOffsetx) * fscaling; |
Cotzo | 0:f68c99fc1bc2 | 314 | ftmpBpy = ((float)pthisMagneticBuffer->iBy[j]/*[k][l]*/ - fOffsety) * fscaling; |
Cotzo | 0:f68c99fc1bc2 | 315 | ftmpBpz = ((float)pthisMagneticBuffer->iBz[j]/*[k][l]*/ - fOffsetz) * fscaling; |
Cotzo | 0:f68c99fc1bc2 | 316 | |
Cotzo | 0:f68c99fc1bc2 | 317 | // calculate y = Bx^2 + By^2 + Bz^2 (scaled uT^2) and accumulate Y^T.Y |
Cotzo | 0:f68c99fc1bc2 | 318 | ftmpBpxsq = ftmpBpx * ftmpBpx; |
Cotzo | 0:f68c99fc1bc2 | 319 | ftmpBpysq = ftmpBpy * ftmpBpy; |
Cotzo | 0:f68c99fc1bc2 | 320 | ftmpBpzsq = ftmpBpz * ftmpBpz; |
Cotzo | 0:f68c99fc1bc2 | 321 | fy = ftmpBpxsq + ftmpBpysq + ftmpBpzsq; |
Cotzo | 0:f68c99fc1bc2 | 322 | fYTY += fy * fy; |
Cotzo | 0:f68c99fc1bc2 | 323 | |
Cotzo | 0:f68c99fc1bc2 | 324 | // accumulate ftmpA4x1 = X^T.Y |
Cotzo | 0:f68c99fc1bc2 | 325 | ftmpA4x1[0][0] += ftmpBpx * fy; |
Cotzo | 0:f68c99fc1bc2 | 326 | ftmpA4x1[1][0] += ftmpBpy * fy; |
Cotzo | 0:f68c99fc1bc2 | 327 | ftmpA4x1[2][0] += ftmpBpz * fy; |
Cotzo | 0:f68c99fc1bc2 | 328 | ftmpA4x1[3][0] += fy; |
Cotzo | 0:f68c99fc1bc2 | 329 | |
Cotzo | 0:f68c99fc1bc2 | 330 | // accumulate on and above-diagonal terms of ftmpA4x4 = X^T.X |
Cotzo | 0:f68c99fc1bc2 | 331 | ftmpA4x4[0][0] += ftmpBpxsq; |
Cotzo | 0:f68c99fc1bc2 | 332 | ftmpA4x4[0][1] += ftmpBpx * ftmpBpy; |
Cotzo | 0:f68c99fc1bc2 | 333 | ftmpA4x4[0][2] += ftmpBpx * ftmpBpz; |
Cotzo | 0:f68c99fc1bc2 | 334 | ftmpA4x4[0][3] += ftmpBpx; |
Cotzo | 0:f68c99fc1bc2 | 335 | ftmpA4x4[1][1] += ftmpBpysq; |
Cotzo | 0:f68c99fc1bc2 | 336 | ftmpA4x4[1][2] += ftmpBpy * ftmpBpz; |
Cotzo | 0:f68c99fc1bc2 | 337 | ftmpA4x4[1][3] += ftmpBpy; |
Cotzo | 0:f68c99fc1bc2 | 338 | ftmpA4x4[2][2] += ftmpBpzsq; |
Cotzo | 0:f68c99fc1bc2 | 339 | ftmpA4x4[2][3] += ftmpBpz; |
Cotzo | 0:f68c99fc1bc2 | 340 | |
Cotzo | 0:f68c99fc1bc2 | 341 | // increment the counter for next iteration |
Cotzo | 0:f68c99fc1bc2 | 342 | i++; |
Cotzo | 0:f68c99fc1bc2 | 343 | //} |
Cotzo | 0:f68c99fc1bc2 | 344 | //} |
Cotzo | 0:f68c99fc1bc2 | 345 | //} |
Cotzo | 0:f68c99fc1bc2 | 346 | } |
Cotzo | 0:f68c99fc1bc2 | 347 | //printf("[dbg1]"); |
Cotzo | 0:f68c99fc1bc2 | 348 | |
Cotzo | 0:f68c99fc1bc2 | 349 | |
Cotzo | 0:f68c99fc1bc2 | 350 | // store the number of measurements accumulated |
Cotzo | 0:f68c99fc1bc2 | 351 | ilocalMagBufferCount = i; |
Cotzo | 0:f68c99fc1bc2 | 352 | |
Cotzo | 0:f68c99fc1bc2 | 353 | // set the last element of the measurement matrix to the number of buffer elements used |
Cotzo | 0:f68c99fc1bc2 | 354 | ftmpA4x4[3][3] = (float) i; |
Cotzo | 0:f68c99fc1bc2 | 355 | |
Cotzo | 0:f68c99fc1bc2 | 356 | // use above diagonal elements of symmetric matrix ftmpA4x4 to create ftmpB4x4 = ftmpA4x4 = X^T.X |
Cotzo | 0:f68c99fc1bc2 | 357 | for (m = 0; m < 4; m++) |
Cotzo | 0:f68c99fc1bc2 | 358 | { |
Cotzo | 0:f68c99fc1bc2 | 359 | for (n = m; n < 4; n++) |
Cotzo | 0:f68c99fc1bc2 | 360 | { |
Cotzo | 0:f68c99fc1bc2 | 361 | ftmpB4x4[m][n] = ftmpB4x4[n][m] = ftmpA4x4[n][m] = ftmpA4x4[m][n]; |
Cotzo | 0:f68c99fc1bc2 | 362 | } |
Cotzo | 0:f68c99fc1bc2 | 363 | } |
Cotzo | 0:f68c99fc1bc2 | 364 | |
Cotzo | 0:f68c99fc1bc2 | 365 | //printf("[dbg2]"); |
Cotzo | 0:f68c99fc1bc2 | 366 | // calculate in situ inverse of ftmpB4x4 = inv(X^T.X) (4x4) |
Cotzo | 0:f68c99fc1bc2 | 367 | fmatrixAeqInvA(ftmpB4x4, 4, icolind, irowind, ipivot); |
Cotzo | 0:f68c99fc1bc2 | 368 | |
Cotzo | 0:f68c99fc1bc2 | 369 | // calculate ftmpB4x1 = solution vector beta (4x1) = inv(X^T.X).X^T.Y = ftmpB4x4 * ftmpA4x1 |
Cotzo | 0:f68c99fc1bc2 | 370 | fmatrixAeqBxC(ftmpB4x1, ftmpB4x4, ftmpA4x1, 4, 4, 1); |
Cotzo | 0:f68c99fc1bc2 | 371 | |
Cotzo | 0:f68c99fc1bc2 | 372 | // calculate P = r^T.r = Y^T.Y - 2 * beta^T.(X^T.Y) + beta^T.(X^T.X).beta |
Cotzo | 0:f68c99fc1bc2 | 373 | // = fYTY - 2 * ftmpB4x1^T.ftmpA4x1 + ftmpB4x1^T.ftmpA4x4.ftmpB4x1 |
Cotzo | 0:f68c99fc1bc2 | 374 | // first set P = Y^T.Y - 2 * beta^T.(X^T.Y) = fYTY - 2 * ftmpB4x1^T.ftmpA4x1 |
Cotzo | 0:f68c99fc1bc2 | 375 | fP = fYTY - 2.0F * (ftmpA4x1[0][0] * ftmpB4x1[0][0] + ftmpA4x1[1][0] * ftmpB4x1[1][0] + |
Cotzo | 0:f68c99fc1bc2 | 376 | ftmpA4x1[2][0] * ftmpB4x1[2][0] + ftmpA4x1[3][0] * ftmpB4x1[3][0]); |
Cotzo | 0:f68c99fc1bc2 | 377 | // set ftmpA4x1 = (X^T.X).beta = ftmpA4x4.ftmpB4x1 |
Cotzo | 0:f68c99fc1bc2 | 378 | fmatrixAeqBxC(ftmpA4x1, ftmpA4x4, ftmpB4x1, 4, 4, 1); |
Cotzo | 0:f68c99fc1bc2 | 379 | // add beta^T.(X^T.X).beta = ftmpB4x1^T * ftmpA4x1 to P |
Cotzo | 0:f68c99fc1bc2 | 380 | fP += ftmpA4x1[0][0] * ftmpB4x1[0][0] + ftmpA4x1[1][0] * ftmpB4x1[1][0] + |
Cotzo | 0:f68c99fc1bc2 | 381 | ftmpA4x1[2][0] * ftmpB4x1[2][0] + ftmpA4x1[3][0] * ftmpB4x1[3][0]; |
Cotzo | 0:f68c99fc1bc2 | 382 | |
Cotzo | 0:f68c99fc1bc2 | 383 | // compute the hard iron vector (in uT but offset and scaled by FMATRIXSCALING) |
Cotzo | 0:f68c99fc1bc2 | 384 | pthisMagCal->ftrVx = 0.5F * ftmpB4x1[0][0]; |
Cotzo | 0:f68c99fc1bc2 | 385 | pthisMagCal->ftrVy = 0.5F * ftmpB4x1[1][0]; |
Cotzo | 0:f68c99fc1bc2 | 386 | pthisMagCal->ftrVz = 0.5F * ftmpB4x1[2][0]; |
Cotzo | 0:f68c99fc1bc2 | 387 | |
Cotzo | 0:f68c99fc1bc2 | 388 | // compute the scaled geomagnetic field strength B (in uT but scaled by FMATRIXSCALING) |
Cotzo | 0:f68c99fc1bc2 | 389 | pthisMagCal->ftrB = (float)sqrt(ftmpB4x1[3][0] + pthisMagCal->ftrVx * pthisMagCal->ftrVx + |
Cotzo | 0:f68c99fc1bc2 | 390 | pthisMagCal->ftrVy * pthisMagCal->ftrVy + pthisMagCal->ftrVz * pthisMagCal->ftrVz); |
Cotzo | 0:f68c99fc1bc2 | 391 | |
Cotzo | 0:f68c99fc1bc2 | 392 | // calculate the trial fit error (percent) normalized to number of measurements and scaled geomagnetic field strength |
Cotzo | 0:f68c99fc1bc2 | 393 | pthisMagCal->ftrFitErrorpc = (float)sqrt(fP / (float) ilocalMagBufferCount) * 100.0F / |
Cotzo | 0:f68c99fc1bc2 | 394 | (2.0F * pthisMagCal->ftrB * pthisMagCal->ftrB); |
Cotzo | 0:f68c99fc1bc2 | 395 | //printf("\n\nTrial new calibration fit error=%9.4f%% versus previous %9.4f%%", pthisMagCal->ftrFitErrorpc, pthisMagCal->fFitErrorpc); |
Cotzo | 0:f68c99fc1bc2 | 396 | |
Cotzo | 0:f68c99fc1bc2 | 397 | // correct the hard iron estimate for FMATRIXSCALING and the offsets applied (result in uT) |
Cotzo | 0:f68c99fc1bc2 | 398 | pthisMagCal->ftrVx = pthisMagCal->ftrVx * FINVMATRIXSCALING + fOffsetx * FUTPERCOUNT; |
Cotzo | 0:f68c99fc1bc2 | 399 | pthisMagCal->ftrVy = pthisMagCal->ftrVy * FINVMATRIXSCALING + fOffsety * FUTPERCOUNT; |
Cotzo | 0:f68c99fc1bc2 | 400 | pthisMagCal->ftrVz = pthisMagCal->ftrVz * FINVMATRIXSCALING + fOffsetz * FUTPERCOUNT; |
Cotzo | 0:f68c99fc1bc2 | 401 | //printf("\n\nTrial new calibration hard iron (uT) Vx=%9.3f Vy=%9.3f Vz=%9.3f", pthisMagCal->ftrVx, pthisMagCal->ftrVy, pthisMagCal->ftrVz); |
Cotzo | 0:f68c99fc1bc2 | 402 | |
Cotzo | 0:f68c99fc1bc2 | 403 | // correct the geomagnetic field strength B to correct scaling (result in uT) |
Cotzo | 0:f68c99fc1bc2 | 404 | pthisMagCal->ftrB *= FINVMATRIXSCALING; |
Cotzo | 0:f68c99fc1bc2 | 405 | //printf("\n\nTrial new calibration geomagnetic field (uT) B=%9.3f", pthisMagCal->ftrB); |
Cotzo | 0:f68c99fc1bc2 | 406 | |
Cotzo | 0:f68c99fc1bc2 | 407 | // set the valid calibration flag to true |
Cotzo | 0:f68c99fc1bc2 | 408 | pthisMagCal->iValidMagCal = 1; |
Cotzo | 0:f68c99fc1bc2 | 409 | |
Cotzo | 0:f68c99fc1bc2 | 410 | return; |
Cotzo | 0:f68c99fc1bc2 | 411 | } |
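
For reference, the 4-element fit in fUpdateCalibration4INV is a linearised sphere fit: each scaled measurement Bp = (Bpx, Bpy, Bpz) is assumed to lie on a sphere of radius B centred at the hard-iron offset V, which makes the problem linear in a 4-element vector beta. The algebra below is reconstructed from the code's own comments and assignments (ftmpA4x4 accumulates X^T.X, ftmpA4x1 accumulates X^T.Y, ftmpB4x1 holds beta); the notation is mine.

```latex
% Linearised sphere fit: |Bp - V|^2 = B^2  <=>  |Bp|^2 = 2 V.Bp + (B^2 - |V|^2)
\[
\begin{aligned}
y &= B_{px}^2 + B_{py}^2 + B_{pz}^2, \qquad
\mathbf{x} = \begin{pmatrix} B_{px} & B_{py} & B_{pz} & 1 \end{pmatrix},\\
\boldsymbol{\beta} &= \begin{pmatrix} 2V_x & 2V_y & 2V_z & B^2 - V_x^2 - V_y^2 - V_z^2 \end{pmatrix}^{\mathsf T},\\
\boldsymbol{\beta} &= (X^{\mathsf T} X)^{-1} X^{\mathsf T} Y
  \qquad\text{(in the code: ftmpB4x1 = ftmpB4x4 . ftmpA4x1 after inversion)},\\
V_x &= \tfrac12 \beta_0,\quad V_y = \tfrac12 \beta_1,\quad V_z = \tfrac12 \beta_2,\qquad
  B = \sqrt{\beta_3 + V_x^2 + V_y^2 + V_z^2},\\
P &= Y^{\mathsf T} Y - 2\,\boldsymbol{\beta}^{\mathsf T} (X^{\mathsf T} Y)
  + \boldsymbol{\beta}^{\mathsf T} (X^{\mathsf T} X)\,\boldsymbol{\beta},\qquad
  \text{fit error (\%)} = \frac{100\sqrt{P/N}}{2 B^{2}}.
\end{aligned}
\]
```

The FUTPERCOUNT, FMATRIXSCALING and FINVMATRIXSCALING factors appear to be unit and conditioning conversions only (counts to scaled uT and back); they do not change the structure of the fit above.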
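The matrix helpers take arrays of row pointers (float **, int32 **) rather than fixed-size 2-D arrays, which is why magUpdateCalibration first aliases each row of the static xftmp... storage into the corresponding ftmp... pointer array. Below is a minimal, hypothetical standalone sketch of the same wiring, used to sanity-check fmatrixAeqInvA by verifying A * inv(A) ≈ I with fmatrixAeqBxC; it assumes magnetic.h declares these functions together with the int32 typedef and MAXMATINV (>= 4).

```cpp
#include <math.h>
#include <stdio.h>
#include "magnetic.h"   // assumed to declare fmatrixAeqBxC, fmatrixAeqInvA, int32, MAXMATINV

int main(void)
{
    // backing storage plus row-pointer views, mirroring the pattern at the top of magUpdateCalibration()
    float xA[4][4]    = {{4.0F, 1.0F, 0.0F, 0.0F},
                         {1.0F, 3.0F, 1.0F, 0.0F},
                         {0.0F, 1.0F, 2.0F, 1.0F},
                         {0.0F, 0.0F, 1.0F, 1.0F}};
    float xAinv[4][4], xProd[4][4];
    float *A[4], *Ainv[4], *Prod[4];
    int32 xcol[MAXMATINV][1], xrow[MAXMATINV][1], xpiv[MAXMATINV][1];
    int32 *col[MAXMATINV], *row[MAXMATINV], *piv[MAXMATINV];
    int32 i, j;

    for (i = 0; i < 4; i++)
    {
        A[i] = xA[i];
        Ainv[i] = xAinv[i];
        Prod[i] = xProd[i];
        for (j = 0; j < 4; j++)
            xAinv[i][j] = xA[i][j];          // fmatrixAeqInvA inverts in situ, so work on a copy
    }
    for (i = 0; i < MAXMATINV; i++)
    {
        col[i] = xcol[i];
        row[i] = xrow[i];
        piv[i] = xpiv[i];
    }

    fmatrixAeqInvA(Ainv, 4, col, row, piv);  // Ainv <- inv(A) by Gauss-Jordan elimination
    fmatrixAeqBxC(Prod, A, Ainv, 4, 4, 4);   // Prod <- A * inv(A), should be close to identity

    for (i = 0; i < 4; i++)
        for (j = 0; j < 4; j++)
            if (fabs(Prod[i][j] - ((i == j) ? 1.0F : 0.0F)) > 1E-4F)
                printf("mismatch at [%d][%d]: %f\n", (int) i, (int) j, Prod[i][j]);

    printf("inverse check done\n");
    return 0;
}
```

The MAXMATINV-sized icolind, irowind and ipivot arrays exist purely as caller-provided scratch for this bookkeeping; fmatrixAeqInvA never allocates memory itself.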
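Finally, a hedged sketch of a plausible call sequence for this module. The struct and field names (MagCalibration, MagneticBuffer, iBx/iBy/iBz, ftrVx/ftrVy/ftrVz, ftrB, ftrFitErrorpc, iValidMagCal) and the constant MAGBUFFSIZE are taken from this file; the readMagCounts() helper, the raw-count types, and the decision to populate every buffer slot are illustrative assumptions, the last one following from the buffer-index checks being commented out in this revision.

```cpp
#include <stdio.h>
#include "magnetic.h"   // assumed to declare the structs, MAGBUFFSIZE and the functions in this file

// hypothetical helper: fetch one raw magnetometer sample in sensor counts
extern void readMagCounts(int *pBx, int *pBy, int *pBz);

void runMagCalibrationOnce(void)
{
    static struct MagCalibration thisMagCal;
    static struct MagneticBuffer thisMagneticBuffer;
    int iBx, iBy, iBz;
    int j;

    // start from a null calibration: identity soft iron, zero hard iron, row pointers wired up
    ResetMagCalibration(&thisMagCal);

    // this revision of fUpdateCalibration4INV walks every buffer slot (the index
    // validity checks are commented out), so fill the whole buffer before fitting
    for (j = 0; j < MAGBUFFSIZE; j++)
    {
        readMagCounts(&iBx, &iBy, &iBz);
        thisMagneticBuffer.iBx[j] = iBx;
        thisMagneticBuffer.iBy[j] = iBy;
        thisMagneticBuffer.iBz[j] = iBz;
    }

    // run the 4-element least-squares fit
    magUpdateCalibration(&thisMagCal, &thisMagneticBuffer);

    // the trial results land in the ftr* fields and iValidMagCal is set to 1
    if (thisMagCal.iValidMagCal)
    {
        printf("hard iron (uT): %f %f %f\n",
               thisMagCal.ftrVx, thisMagCal.ftrVy, thisMagCal.ftrVz);
        printf("field strength (uT): %f  fit error: %f%%\n",
               thisMagCal.ftrB, thisMagCal.ftrFitErrorpc);
    }
}
```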