rs.c

/*#define PROFILE*/
/*
 * fec.c -- forward error correction based on Vandermonde matrices
 * 980624
 * (C) 1997-98 Luigi Rizzo (luigi@iet.unipi.it)
 * (C) 2001 Alain Knaff (alain@knaff.lu)
 *
 * Portions derived from code by Phil Karn (karn@ka9q.ampr.org),
 * Robert Morelos-Zaragoza (robert@spectra.eng.hawaii.edu) and Hari
 * Thirumoorthy (harit@spectra.eng.hawaii.edu), Aug 1995
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
 * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 * Reimplemented by Jannson (2016-10-18): compatible with the Go version at
 * https://github.com/klauspost/reedsolomon
 */

/*
 * The following parameter defines how many bits are used for
 * field elements. The code supports any value from 2 to 16,
 * but the fastest operation is achieved with 8-bit elements.
 * This is the only parameter you may want to change.
 */
#define GF_BITS 8   /* code over GF(2**GF_BITS) - change to suit */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>

#include "rs.h"

/*
 * stuff used for testing purposes only
 */
#ifdef TEST
#define DEB(x)
#define DDB(x) x
#define DEBUG 0 /* minimal debugging */

#include <sys/time.h>
#define DIFF_T(a,b) \
    (1 + 1000000*(a.tv_sec - b.tv_sec) + (a.tv_usec - b.tv_usec))

#define TICK(t) \
    { struct timeval x; \
      gettimeofday(&x, NULL); \
      t = x.tv_usec + 1000000*(x.tv_sec & 0xff); \
    }
#define TOCK(t) \
    { u_long t1; TICK(t1); \
      if (t1 < t) t = 256000000 + t1 - t; \
      else t = t1 - t; \
      if (t == 0) t = 1; }

u_long ticks[10];   /* vars for timekeeping */
#else
#define DEB(x)
#define DDB(x)
#define TICK(x)
#define TOCK(x)
#endif /* TEST */

/*
 * You should not need to change anything beyond this point.
 * The first part of the file implements linear algebra in GF.
 *
 * gf is the type used to store an element of the Galois Field.
 * It must contain at least GF_BITS bits.
 *
 * Note: unsigned char will work up to GF(256) but int seems to run
 * faster on the Pentium. We use int whenever we have to deal with an
 * index, since they are generally faster.
 */

/*
 * AK: Udpcast only uses GF_BITS=8. Remove other possibilities.
 */
#if (GF_BITS != 8)
#error "GF_BITS must be 8"
#endif

typedef unsigned char gf;

#define GF_SIZE ((1 << GF_BITS) - 1)    /* powers of \alpha */

/*
 * Primitive polynomials - see Lin & Costello, Appendix A,
 * and Lee & Messerschmitt, p. 453.
 */
static char *allPp[] = {        /* GF_BITS  polynomial */
    NULL,                       /*  0  no code */
    NULL,                       /*  1  no code */
    "111",                      /*  2  1+x+x^2 */
    "1101",                     /*  3  1+x+x^3 */
    "11001",                    /*  4  1+x+x^4 */
    "101001",                   /*  5  1+x^2+x^5 */
    "1100001",                  /*  6  1+x+x^6 */
    "10010001",                 /*  7  1+x^3+x^7 */
    "101110001",                /*  8  1+x^2+x^3+x^4+x^8 */
    "1000100001",               /*  9  1+x^4+x^9 */
    "10010000001",              /* 10  1+x^3+x^10 */
    "101000000001",             /* 11  1+x^2+x^11 */
    "1100101000001",            /* 12  1+x+x^4+x^6+x^12 */
    "11011000000001",           /* 13  1+x+x^3+x^4+x^13 */
    "110000100010001",          /* 14  1+x+x^6+x^10+x^14 */
    "1100000000000001",         /* 15  1+x+x^15 */
    "11010000000010001"         /* 16  1+x+x^3+x^12+x^16 */
};

/*
 * To speed up computations, we have tables for logarithm, exponent
 * and inverse of a number. If GF_BITS <= 8, we use a table for
 * multiplication as well (it takes 64K, no big deal even on a PDA,
 * especially because it can be pre-initialized and put into a ROM!),
 * otherwise we use a table of logarithms.
 * In any case the macro gf_mul(x,y) takes care of multiplications.
 */
static gf gf_exp[2*GF_SIZE];        /* index->poly form conversion table */
static int gf_log[GF_SIZE + 1];     /* poly->index form conversion table */
static gf inverse[GF_SIZE + 1];     /* inverse of field elem. */
                                    /* inv[\alpha**i] = \alpha**(GF_SIZE-i) */

/*
 * modnn(x) computes x % GF_SIZE, where GF_SIZE is 2**GF_BITS - 1,
 * without a slow divide.
 */
static inline gf
modnn(int x)
{
    while (x >= GF_SIZE) {
        x -= GF_SIZE;
        x = (x >> GF_BITS) + (x & GF_SIZE);
    }
    return x;
}
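
/*
 * Worked example (illustration only, not from the original sources): with
 * GF_BITS = 8, GF_SIZE = 255, modnn(300) first subtracts 255 to get 45,
 * then folds (45 >> 8) + (45 & 255) = 45, which is indeed 300 % 255.
 * modnn(510) takes two passes and returns 0 (= 510 % 255).
 */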
#define SWAP(a,b,t) {t tmp; tmp=a; a=b; b=tmp;}

/*
 * gf_mul(x,y) multiplies two numbers. If GF_BITS<=8, it is much
 * faster to use a multiplication table.
 *
 * USE_GF_MULC, GF_MULC0(c) and GF_ADDMULC(x) can be used when multiplying
 * many numbers by the same constant. In this case the first
 * call sets the constant, and others perform the multiplications.
 * A value related to the multiplication is held in a local variable
 * declared with USE_GF_MULC. See usage in addmul1().
 */
__declspec(align(16)) static gf gf_mul_table[(GF_SIZE + 1)*(GF_SIZE + 1)];

#define gf_mul(x,y) gf_mul_table[(x<<8)+y]

#define USE_GF_MULC register gf * __gf_mulc_
#define GF_MULC0(c) __gf_mulc_ = &gf_mul_table[(c)<<8]
#define GF_ADDMULC(dst, x) dst ^= __gf_mulc_[x]
#define GF_MULC(dst, x) dst = __gf_mulc_[x]

static void
init_mul_table(void)
{
    int i, j;
    for (i = 0; i < GF_SIZE + 1; i++)
        for (j = 0; j < GF_SIZE + 1; j++)
            gf_mul_table[(i << 8) + j] = gf_exp[modnn(gf_log[i] + gf_log[j])];

    for (j = 0; j < GF_SIZE + 1; j++)
        gf_mul_table[j] = gf_mul_table[j << 8] = 0;
}
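
#if 0
/*
 * Minimal sanity-check sketch (illustration only, never compiled): after
 * generate_gf() and init_mul_table(), every non-zero product in the table
 * should match the log/exp identity a*b = exp[(log a + log b) mod GF_SIZE].
 * The function name check_mul_table is hypothetical.
 */
static void check_mul_table(void)
{
    int a, b;
    for (a = 1; a <= GF_SIZE; a++)
        for (b = 1; b <= GF_SIZE; b++)
            assert(gf_mul(a, b) == gf_exp[modnn(gf_log[a] + gf_log[b])]);
}
#endif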
/*
 * Generate GF(2**m) from the irreducible polynomial p(X) in p[0]..p[m].
 * Lookup tables:
 *   index -> polynomial form:  gf_exp[] contains j = \alpha^i;
 *   polynomial form -> index:  gf_log[ j = \alpha^i ] = i
 * \alpha = x is the primitive element of GF(2^m)
 *
 * For efficiency, gf_exp[] has size 2*GF_SIZE, so that a simple
 * multiplication of two numbers can be resolved without calling modnn.
 */

/*
 * initialize the data structures used for computations in GF.
 */
static void
generate_gf(void)
{
    int i;
    gf mask;
    char *Pp = allPp[GF_BITS];

    mask = 1;               /* x ** 0 = 1 */
    gf_exp[GF_BITS] = 0;    /* will be updated at the end of the 1st loop */
    /*
     * first, generate the (polynomial representation of) powers of \alpha,
     * which are stored in gf_exp[i] = \alpha ** i .
     * At the same time build gf_log[gf_exp[i]] = i .
     * The first GF_BITS powers are simply bits shifted to the left.
     */
    for (i = 0; i < GF_BITS; i++, mask <<= 1) {
        gf_exp[i] = mask;
        gf_log[gf_exp[i]] = i;
        /*
         * If Pp[i] == 1 then \alpha ** i occurs in poly-repr
         * gf_exp[GF_BITS] = \alpha ** GF_BITS
         */
        if (Pp[i] == '1')
            gf_exp[GF_BITS] ^= mask;
    }
    /*
     * now gf_exp[GF_BITS] = \alpha ** GF_BITS is complete, so we can also
     * compute its inverse.
     */
    gf_log[gf_exp[GF_BITS]] = GF_BITS;
    /*
     * Poly-repr of \alpha ** (i+1) is given by poly-repr of
     * \alpha ** i shifted left one bit and accounting for any
     * \alpha ** GF_BITS term that may occur when poly-repr of
     * \alpha ** i is shifted.
     */
    mask = 1 << (GF_BITS - 1);
    for (i = GF_BITS + 1; i < GF_SIZE; i++) {
        if (gf_exp[i - 1] >= mask)
            gf_exp[i] = gf_exp[GF_BITS] ^ ((gf_exp[i - 1] ^ mask) << 1);
        else
            gf_exp[i] = gf_exp[i - 1] << 1;
        gf_log[gf_exp[i]] = i;
    }
    /*
     * log(0) is not defined, so use a special value
     */
    gf_log[0] = GF_SIZE;
    /* set the extended gf_exp values for fast multiply */
    for (i = 0; i < GF_SIZE; i++)
        gf_exp[i + GF_SIZE] = gf_exp[i];
    /*
     * again special cases. 0 has no inverse. This used to
     * be initialized to GF_SIZE, but it should make no difference
     * since no one is supposed to read from here.
     */
    inverse[0] = 0;
    inverse[1] = 1;
    for (i = 2; i <= GF_SIZE; i++)
        inverse[i] = gf_exp[GF_SIZE - gf_log[i]];
}
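
/*
 * Worked example (illustration only): with GF_BITS = 8 and the primitive
 * polynomial 1+x^2+x^3+x^4+x^8 above, generate_gf() yields
 *   gf_exp[0..8] = 1, 2, 4, 8, 16, 32, 64, 128, 0x1d
 * (the 8th power wraps through the polynomial), gf_log[2] = 1, and
 * inverse[2] = gf_exp[255 - 1] = 0x8e, since 2 * 0x8e = 1 in this field.
 */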
/*
 * Various linear algebra operations that I use often.
 */

/*
 * addmul() computes dst[] = dst[] + c * src[]
 * This is used often, so better optimize it! Currently the loop is
 * unrolled 16 times, a good value for 486 and pentium-class machines.
 * The case c=0 is also optimized, whereas c=1 is not. These
 * calls are infrequent in my typical apps so I did not bother.
 *
 * Note that gcc on
 */
#if 0
#define addmul(dst, src, c, sz) \
    if (c != 0) addmul1(dst, src, c, sz)
#endif

#define UNROLL 16   /* 1, 4, 8, 16 */
static void
slow_addmul1(gf *dst1, gf *src1, gf c, int sz)
{
    USE_GF_MULC;
    register gf *dst = dst1, *src = src1;
    gf *lim = &dst[sz - UNROLL + 1];

    GF_MULC0(c);

#if (UNROLL > 1)    /* unrolling by 8/16 is quite effective on the pentium */
    for (; dst < lim; dst += UNROLL, src += UNROLL) {
        GF_ADDMULC(dst[0], src[0]);
        GF_ADDMULC(dst[1], src[1]);
        GF_ADDMULC(dst[2], src[2]);
        GF_ADDMULC(dst[3], src[3]);
#if (UNROLL > 4)
        GF_ADDMULC(dst[4], src[4]);
        GF_ADDMULC(dst[5], src[5]);
        GF_ADDMULC(dst[6], src[6]);
        GF_ADDMULC(dst[7], src[7]);
#endif
#if (UNROLL > 8)
        GF_ADDMULC(dst[8], src[8]);
        GF_ADDMULC(dst[9], src[9]);
        GF_ADDMULC(dst[10], src[10]);
        GF_ADDMULC(dst[11], src[11]);
        GF_ADDMULC(dst[12], src[12]);
        GF_ADDMULC(dst[13], src[13]);
        GF_ADDMULC(dst[14], src[14]);
        GF_ADDMULC(dst[15], src[15]);
#endif
    }
#endif
    lim += UNROLL - 1;
    for (; dst < lim; dst++, src++)     /* final components */
        GF_ADDMULC(*dst, *src);
}

#define addmul1 slow_addmul1

static void addmul(gf *dst, gf *src, gf c, int sz) {
    // fprintf(stderr, "Dst=%p Src=%p, gf=%02x sz=%d\n", dst, src, c, sz);
    if (c != 0) addmul1(dst, src, c, sz);
}
/*
 * mul() computes dst[] = c * src[]
 * This is used often, so better optimize it! Currently the loop is
 * unrolled 16 times, a good value for 486 and pentium-class machines.
 * The case c=0 is also optimized, whereas c=1 is not. These
 * calls are infrequent in my typical apps so I did not bother.
 *
 * Note that gcc on
 */
#if 0
#define mul(dst, src, c, sz) \
    do { if (c != 0) mul1(dst, src, c, sz); else memset(dst, 0, sz); } while(0)
#endif

#define UNROLL 16   /* 1, 4, 8, 16 */
static void
slow_mul1(gf *dst1, gf *src1, gf c, int sz)
{
    USE_GF_MULC;
    register gf *dst = dst1, *src = src1;
    gf *lim = &dst[sz - UNROLL + 1];

    GF_MULC0(c);

#if (UNROLL > 1)    /* unrolling by 8/16 is quite effective on the pentium */
    for (; dst < lim; dst += UNROLL, src += UNROLL) {
        GF_MULC(dst[0], src[0]);
        GF_MULC(dst[1], src[1]);
        GF_MULC(dst[2], src[2]);
        GF_MULC(dst[3], src[3]);
#if (UNROLL > 4)
        GF_MULC(dst[4], src[4]);
        GF_MULC(dst[5], src[5]);
        GF_MULC(dst[6], src[6]);
        GF_MULC(dst[7], src[7]);
#endif
#if (UNROLL > 8)
        GF_MULC(dst[8], src[8]);
        GF_MULC(dst[9], src[9]);
        GF_MULC(dst[10], src[10]);
        GF_MULC(dst[11], src[11]);
        GF_MULC(dst[12], src[12]);
        GF_MULC(dst[13], src[13]);
        GF_MULC(dst[14], src[14]);
        GF_MULC(dst[15], src[15]);
#endif
    }
#endif
    lim += UNROLL - 1;
    for (; dst < lim; dst++, src++)     /* final components */
        GF_MULC(*dst, *src);
}

#define mul1 slow_mul1

static inline void mul(gf *dst, gf *src, gf c, int sz) {
    /*fprintf(stderr, "%p = %02x * %p\n", dst, c, src);*/
    /* c == 0: the whole product is zero, so clear all sz bytes
     * (the original cleared only c bytes here, i.e. nothing). */
    if (c != 0) mul1(dst, src, c, sz); else memset(dst, 0, sz);
}
/*
 * invert_mat() takes a matrix and produces its inverse
 * k is the size of the matrix.
 * (Gauss-Jordan, adapted from Numerical Recipes in C)
 * Return non-zero if singular.
 */
DEB( int pivloops=0; int pivswaps=0 ; /* diagnostic */)
static int
invert_mat(gf *src, int k)
{
    gf c, *p;
    int irow, icol, row, col, i, ix;
    int error = 1;
    int *indxc = malloc(k*sizeof(int));
    int *indxr = malloc(k*sizeof(int));
    int *ipiv = malloc(k*sizeof(int));
    gf *id_row = malloc(k*sizeof(gf));
    // int indxc[k];
    // int indxr[k];
    // int ipiv[k];
    // gf id_row[k];

    /* bail out if any allocation failed (free(NULL) at 'fail' is safe) */
    if (NULL == indxc || NULL == indxr || NULL == ipiv || NULL == id_row)
        goto fail;

    memset(id_row, 0, k*sizeof(gf));
    DEB( pivloops=0; pivswaps=0 ; /* diagnostic */ )
    /*
     * ipiv marks elements already used as pivots.
     */
    for (i = 0; i < k; i++)
        ipiv[i] = 0;

    for (col = 0; col < k; col++) {
        gf *pivot_row;
        /*
         * Zeroing column 'col', look for a non-zero element.
         * First try on the diagonal, if it fails, look elsewhere.
         */
        irow = icol = -1;
        if (ipiv[col] != 1 && src[col*k + col] != 0) {
            irow = col;
            icol = col;
            goto found_piv;
        }
        for (row = 0; row < k; row++) {
            if (ipiv[row] != 1) {
                for (ix = 0; ix < k; ix++) {
                    DEB( pivloops++ ; )
                    if (ipiv[ix] == 0) {
                        if (src[row*k + ix] != 0) {
                            irow = row;
                            icol = ix;
                            goto found_piv;
                        }
                    } else if (ipiv[ix] > 1) {
                        fprintf(stderr, "singular matrix\n");
                        goto fail;
                    }
                }
            }
        }
        if (icol == -1) {
            fprintf(stderr, "XXX pivot not found!\n");
            goto fail;
        }
found_piv:
        ++(ipiv[icol]);
        /*
         * swap rows irow and icol, so afterwards the diagonal
         * element will be correct. Rarely done, not worth
         * optimizing.
         */
        if (irow != icol) {
            for (ix = 0; ix < k; ix++) {
                SWAP( src[irow*k + ix], src[icol*k + ix], gf);
            }
        }
        indxr[col] = irow;
        indxc[col] = icol;
        pivot_row = &src[icol*k];
        c = pivot_row[icol];
        if (c == 0) {
            fprintf(stderr, "singular matrix 2\n");
            goto fail;
        }
        if (c != 1) {   /* otherwise this is a NOP */
            /*
             * this is done often, but optimizing is not so
             * fruitful, at least in the obvious ways (unrolling)
             */
            DEB( pivswaps++ ; )
            c = inverse[c];
            pivot_row[icol] = 1;
            for (ix = 0; ix < k; ix++)
                pivot_row[ix] = gf_mul(c, pivot_row[ix]);
        }
        /*
         * from all rows, remove multiples of the selected row
         * to zero the relevant entry (in fact, that entry is not
         * computed at all: we know it must become zero, so we set
         * it directly).
         * (Here, if we know that the pivot_row is the identity,
         * we can optimize the addmul.)
         */
        id_row[icol] = 1;
        if (memcmp(pivot_row, id_row, k*sizeof(gf)) != 0) {
            for (p = src, ix = 0; ix < k; ix++, p += k) {
                if (ix != icol) {
                    c = p[icol];
                    p[icol] = 0;
                    addmul(p, pivot_row, c, k);
                }
            }
        }
        id_row[icol] = 0;
    } /* done all columns */

    for (col = k-1; col >= 0; col--) {
        if (indxr[col] < 0 || indxr[col] >= k)
            fprintf(stderr, "AARGH, indxr[col] %d\n", indxr[col]);
        else if (indxc[col] < 0 || indxc[col] >= k)
            fprintf(stderr, "AARGH, indxc[col] %d\n", indxc[col]);
        else if (indxr[col] != indxc[col]) {
            for (row = 0; row < k; row++) {
                SWAP( src[row*k + indxr[col]], src[row*k + indxc[col]], gf);
            }
        }
    }
    error = 0;
fail:
    free(indxc);
    free(indxr);
    free(ipiv);
    free(id_row);
    return error;
}
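
#if 0
/*
 * Minimal usage sketch (illustration only, never compiled): invert a small
 * matrix in place and check that A * A_inv is the identity using the
 * gf_mul table. Assumes fec_init() has already filled the GF tables; the
 * function name example_invert is hypothetical.
 */
static void example_invert(void)
{
    gf a[4] = { 1, 1,       /* A = | 1 1 | */
                1, 2 };     /*     | 1 2 |, determinant 1*2 ^ 1*1 = 3 != 0 */
    gf ainv[4];
    int r, c, i;

    memcpy(ainv, a, sizeof(a));
    if (invert_mat(ainv, 2) != 0)
        return;     /* singular - not the case here */

    for (r = 0; r < 2; r++)
        for (c = 0; c < 2; c++) {
            gf acc = 0;
            for (i = 0; i < 2; i++)
                acc ^= gf_mul(a[r*2 + i], ainv[i*2 + c]);
            assert(acc == (r == c ? 1 : 0));
        }
}
#endif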
static int fec_initialized = 0;

void fec_init(void)
{
    TICK(ticks[0]);
    generate_gf();
    TOCK(ticks[0]);
    DDB(fprintf(stderr, "generate_gf took %ldus\n", ticks[0]);)

    TICK(ticks[0]);
    init_mul_table();
    TOCK(ticks[0]);
    DDB(fprintf(stderr, "init_mul_table took %ldus\n", ticks[0]);)

    fec_initialized = 1;
}
#ifdef PROFILE
#ifdef __x86_64__
static long long rdtsc(void)
{
    unsigned long low, hi;
    asm volatile ("rdtsc" : "=d" (hi), "=a" (low));
    return ((((long long)hi) << 32) | ((long long)low));
}
#elif defined(__aarch64__)
/* cntvct_el0 is an AArch64 system register, so guard on __aarch64__ */
static long long rdtsc(void)
{
    unsigned long long val;
    asm volatile("mrs %0, cntvct_el0" : "=r" (val));
    return val;
}
#endif

void print_matrix1(gf* matrix, int nrows, int ncols) {
    int i, j;
    printf("matrix (%d,%d):\n", nrows, ncols);
    for(i = 0; i < nrows; i++) {
        for(j = 0; j < ncols; j++) {
            printf("%6d ", matrix[i*ncols + j]);
        }
        printf("\n");
    }
}

void print_matrix2(gf** matrix, int nrows, int ncols) {
    int i, j;
    printf("matrix (%d,%d):\n", nrows, ncols);
    for(i = 0; i < nrows; i++) {
        for(j = 0; j < ncols; j++) {
            printf("%6d ", matrix[i][j]);
        }
        printf("\n");
    }
}
#endif
/* y = a**n */
static gf galExp(gf a, gf n) {
    int logA;
    int logResult;
    if(0 == n) {
        return 1;
    }
    if(0 == a) {
        return 0;
    }
    logA = gf_log[a];
    logResult = logA * n;
    while(logResult >= 255) {
        logResult -= 255;
    }
    return gf_exp[logResult];
}

static inline gf galMultiply(gf a, gf b) {
    return gf_mul_table[ ((int)a << 8) + (int)b ];
}
static gf* vandermonde(int nrows, int ncols) {
    int row, col, ptr;
    gf* matrix = (gf*)RS_MALLOC(nrows * ncols);
    if(NULL != matrix) {
        ptr = 0;
        for(row = 0; row < nrows; row++) {
            for(col = 0; col < ncols; col++) {
                matrix[ptr++] = galExp((gf)row, (gf)col);
            }
        }
    }
    return matrix;
}
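
/*
 * Worked example (illustration only): vandermonde(5, 3) builds the matrix
 * with entry [row][col] = row**col in GF(2^8):
 *   | 1  0  0 |
 *   | 1  1  1 |
 *   | 1  2  4 |
 *   | 1  3  5 |
 *   | 1  4 16 |
 * (3*3 = 5 and 4*4 = 16 under the field's carry-less multiplication).
 */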
/*
 * Input parameters are not validated here.
 */
static gf* sub_matrix(gf* matrix, int rmin, int cmin, int rmax, int cmax, int nrows, int ncols) {
    int i, j, ptr = 0;
    gf* new_m = (gf*)RS_MALLOC( (rmax-rmin) * (cmax-cmin) );
    if(NULL != new_m) {
        for(i = rmin; i < rmax; i++) {
            for(j = cmin; j < cmax; j++) {
                new_m[ptr++] = matrix[i*ncols + j];
            }
        }
    }
    return new_m;
}
/* y = a.dot(b) */
static gf* multiply1(gf *a, int ar, int ac, gf *b, int br, int bc) {
    gf *new_m, tg;
    int r, c, i, ptr = 0;

    assert(ac == br);
    new_m = (gf*)RS_CALLOC(1, ar*bc);
    if(NULL != new_m) {
        /* this multiply is slow */
        for(r = 0; r < ar; r++) {
            for(c = 0; c < bc; c++) {
                tg = 0;
                for(i = 0; i < ac; i++) {
                    /* tg ^= gf_mul_table[ ((int)a[r*ac+i] << 8) + (int)b[i*bc+c] ]; */
                    tg ^= galMultiply(a[r*ac+i], b[i*bc+c]);
                }
                new_m[ptr++] = tg;
            }
        }
    }
    return new_m;
}
/* ported from the Go version of reedsolomon */
static inline int code_some_shards(gf* matrixRows, gf** inputs, gf** outputs,
        int dataShards, int outputCount, int byteCount) {
    gf* in;
    int iRow, c;
    for(c = 0; c < dataShards; c++) {
        in = inputs[c];
        for(iRow = 0; iRow < outputCount; iRow++) {
            if(0 == c) {
                mul(outputs[iRow], in, matrixRows[iRow*dataShards+c], byteCount);
            } else {
                addmul(outputs[iRow], in, matrixRows[iRow*dataShards+c], byteCount);
            }
        }
    }
    return 0;
}
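
/*
 * Reading aid (not part of the original comments): for every output row r
 * and byte position b,
 *     outputs[r][b] = XOR over c of matrixRows[r][c] * inputs[c][b]
 * i.e. each output block is one row of the (outputCount x dataShards)
 * matrix applied to the column of input blocks, byte by byte.
 */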
reed_solomon* reed_solomon_new(int data_shards, int parity_shards) {
    gf* vm = NULL;
    gf* top = NULL;
    int err = 0;
    reed_solomon* rs = NULL;

    /* fec_init() MUST have been called once before this */
    assert(fec_initialized);

    do {
        rs = (reed_solomon*) RS_MALLOC(sizeof(reed_solomon));
        if(NULL == rs) {
            return NULL;
        }
        rs->data_shards = data_shards;
        rs->parity_shards = parity_shards;
        rs->shards = (data_shards + parity_shards);
        rs->m = NULL;
        rs->parity = NULL;

        if(rs->shards > DATA_SHARDS_MAX || data_shards <= 0 || parity_shards <= 0) {
            err = 1;
            break;
        }

        vm = vandermonde(rs->shards, rs->data_shards);
        if(NULL == vm) {
            err = 2;
            break;
        }

        top = sub_matrix(vm, 0, 0, data_shards, data_shards, rs->shards, data_shards);
        if(NULL == top) {
            err = 3;
            break;
        }

        err = invert_mat(top, data_shards);
        assert(0 == err);

        rs->m = multiply1(vm, rs->shards, data_shards, top, data_shards, data_shards);
        if(NULL == rs->m) {
            err = 4;
            break;
        }

        rs->parity = sub_matrix(rs->m, data_shards, 0, rs->shards, data_shards, rs->shards, data_shards);
        if(NULL == rs->parity) {
            err = 5;
            break;
        }

        RS_FREE(vm);
        RS_FREE(top);
        vm = NULL;
        top = NULL;
        return rs;

    } while(0);

    fprintf(stderr, "err=%d\n", err);
    if(NULL != vm) {
        RS_FREE(vm);
    }
    if(NULL != top) {
        RS_FREE(top);
    }
    if(NULL != rs) {
        if(NULL != rs->m) {
            RS_FREE(rs->m);
        }
        if(NULL != rs->parity) {
            RS_FREE(rs->parity);
        }
        RS_FREE(rs);
    }
    return NULL;
}
void reed_solomon_release(reed_solomon* rs) {
    if(NULL != rs) {
        if(NULL != rs->m) {
            RS_FREE(rs->m);
        }
        if(NULL != rs->parity) {
            RS_FREE(rs->parity);
        }
        RS_FREE(rs);
    }
}
/**
 * encode one group of shards
 * input:
 *   rs
 *   data_blocks[rs->data_shards][block_size]
 *   fec_blocks[rs->parity_shards][block_size]   (output: parity is written here)
 * */
int reed_solomon_encode(reed_solomon* rs,
        unsigned char** data_blocks,
        unsigned char** fec_blocks,
        int block_size) {
    assert(NULL != rs && NULL != rs->parity);
    return code_some_shards(rs->parity, data_blocks, fec_blocks,
            rs->data_shards, rs->parity_shards, block_size);
}
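
#if 0
/*
 * Minimal encode sketch (illustration only, never compiled): 4 data shards
 * plus 2 parity shards of 16 bytes each. Buffer names and sizes here are
 * made up for the example.
 */
static void example_encode(void)
{
    unsigned char data[4][16], fec[2][16];
    unsigned char *data_blocks[4] = { data[0], data[1], data[2], data[3] };
    unsigned char *fec_blocks[2] = { fec[0], fec[1] };
    reed_solomon *rs;

    fec_init();                     /* fill the GF tables once per process */
    rs = reed_solomon_new(4, 2);
    /* ... fill data[][] with the payload ... */
    reed_solomon_encode(rs, data_blocks, fec_blocks, 16);
    /* fec[0] and fec[1] now hold the parity blocks */
    reed_solomon_release(rs);
}
#endif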
/**
 * decode one group of shards
 * input:
 *   rs
 *   data_blocks[rs->data_shards][block_size]   (original data; erased rows are rebuilt in place)
 *   dec_fec_blocks[nr_fec_blocks][block_size]  (the available fec blocks)
 *   fec_block_nos: positions of those fec blocks in the original fec_blocks
 *   erased_blocks: positions of the erased blocks in the original data_blocks
 *   nr_fec_blocks: the number of erased data blocks (= number of fec blocks used)
 * */
int reed_solomon_decode(reed_solomon* rs,
        unsigned char **data_blocks,
        int block_size,
        unsigned char **dec_fec_blocks,
        unsigned int *fec_block_nos,
        unsigned int *erased_blocks,
        int nr_fec_blocks) {
    /* use the stack instead of malloc; keep DATA_SHARDS_MAX small to save memory */
    gf dataDecodeMatrix[DATA_SHARDS_MAX*DATA_SHARDS_MAX];
    unsigned char* subShards[DATA_SHARDS_MAX];
    unsigned char* outputs[DATA_SHARDS_MAX];
    gf* m = rs->m;
    int i, j, c, swap, subMatrixRow, dataShards, nos, nshards;

    /*
     * erased_blocks should already be sorted.
     * If it is, this loop only makes nr_fec_blocks passes to verify it;
     * if it is not, it sorts it here (simple bubble sort).
     */
    for(i = 0; i < nr_fec_blocks; i++) {
        swap = 0;
        for(j = i+1; j < nr_fec_blocks; j++) {
            if(erased_blocks[i] > erased_blocks[j]) {
                /* an earlier entry is bigger than a later one, swap them */
                c = erased_blocks[i];
                erased_blocks[i] = erased_blocks[j];
                erased_blocks[j] = c;
                swap = 1;
            }
        }
        //printf("swap:%d\n", swap);
        if(!swap) {
            //already sorted
            break;
        }
    }

    j = 0;
    subMatrixRow = 0;
    nos = 0;
    nshards = 0;
    dataShards = rs->data_shards;
    for(i = 0; i < dataShards; i++) {
        if(j < nr_fec_blocks && i == erased_blocks[j]) {
            //skip the erased block
            j++;
        } else {
            /* this row is ok */
            for(c = 0; c < dataShards; c++) {
                dataDecodeMatrix[subMatrixRow*dataShards + c] = m[i*dataShards + c];
            }
            subShards[subMatrixRow] = data_blocks[i];
            subMatrixRow++;
        }
    }

    for(i = 0; i < nr_fec_blocks && subMatrixRow < dataShards; i++) {
        subShards[subMatrixRow] = dec_fec_blocks[i];
        j = dataShards + fec_block_nos[i];
        for(c = 0; c < dataShards; c++) {
            dataDecodeMatrix[subMatrixRow*dataShards + c] = m[j*dataShards + c]; //use the specific row of the original fec_blocks
        }
        subMatrixRow++;
    }

    if(subMatrixRow < dataShards) {
        //not enough shards, cannot correct
        return -1;
    }

    invert_mat(dataDecodeMatrix, dataShards);
    //printf("invert:\n");
    //print_matrix1(dataDecodeMatrix, dataShards, dataShards);
    //printf("nShards:\n");
    //print_matrix2(subShards, dataShards, block_size);

    for(i = 0; i < nr_fec_blocks; i++) {
        j = erased_blocks[i];
        outputs[i] = data_blocks[j];
        //data_blocks[j][0] = 0;
        memmove(dataDecodeMatrix+i*dataShards, dataDecodeMatrix+j*dataShards, dataShards);
    }
    //printf("subMatrixRow:\n");
    //print_matrix1(dataDecodeMatrix, nr_fec_blocks, dataShards);
    //printf("outputs:\n");
    //print_matrix2(outputs, nr_fec_blocks, block_size);

    return code_some_shards(dataDecodeMatrix, subShards, outputs,
            dataShards, nr_fec_blocks, block_size);
}
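
#if 0
/*
 * Minimal decode sketch (illustration only, never compiled), continuing the
 * encode example above: data shards 1 and 3 were lost and are rebuilt in
 * place from the two surviving parity shards. All names are made up.
 */
static void example_decode(reed_solomon *rs,
        unsigned char **data_blocks,    /* 4 rows; rows 1 and 3 hold garbage */
        unsigned char **fec_blocks,     /* 2 parity rows from the encode step */
        int block_size)
{
    unsigned char *dec_fec_blocks[2] = { fec_blocks[0], fec_blocks[1] };
    unsigned int fec_block_nos[2] = { 0, 1 };   /* which parity rows we still have */
    unsigned int erased_blocks[2] = { 1, 3 };   /* which data rows were lost */

    reed_solomon_decode(rs, data_blocks, block_size,
            dec_fec_blocks, fec_block_nos, erased_blocks, 2);
}
#endif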
/**
 * encode a large buffer, one group of shards at a time
 * input:
 *   rs
 *   nr_shards: assert(0 == nr_shards % rs->shards)
 *   shards[nr_shards][block_size]
 * */
int reed_solomon_encode2(reed_solomon* rs, unsigned char** shards, int nr_shards, int block_size) {
    unsigned char** data_blocks;
    unsigned char** fec_blocks;
    int i, ds = rs->data_shards, ps = rs->parity_shards, ss = rs->shards;
    i = nr_shards / ss;
    data_blocks = shards;
    fec_blocks = &shards[(i*ds)];

    for(i = 0; i < nr_shards; i += ss) {
        reed_solomon_encode(rs, data_blocks, fec_blocks, block_size);
        data_blocks += ds;
        fec_blocks += ps;
    }
    return 0;
}
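
/*
 * Layout illustration (not part of the original comments): with
 * data_shards = 4, parity_shards = 2 and nr_shards = 12 (two groups),
 * shards[0..7] are the data blocks of group 0 then group 1, and
 * shards[8..11] are the fec blocks of group 0 then group 1;
 * reed_solomon_encode2() walks the two regions in parallel, one group
 * (ss = 6 shards) per iteration.
 */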
/**
 * reconstruct a large buffer, one group of shards at a time
 * input:
 *   rs
 *   nr_shards: assert(0 == nr_shards % rs->shards)
 *   shards[nr_shards][block_size]
 *   marks[nr_shards]: non-zero marks the shard as missing/corrupt
 * */
int reed_solomon_reconstruct(reed_solomon* rs,
        unsigned char** shards,
        unsigned char* marks,
        int nr_shards,
        int block_size) {
    unsigned char *dec_fec_blocks[DATA_SHARDS_MAX];
    unsigned int fec_block_nos[DATA_SHARDS_MAX];
    unsigned int erased_blocks[DATA_SHARDS_MAX];
    unsigned char* fec_marks;
    unsigned char **data_blocks, **fec_blocks;
    int i, j, dn, pn, n;
    int ds = rs->data_shards;
    int ps = rs->parity_shards;
    int err = 0;

    data_blocks = shards;
    n = nr_shards / rs->shards;
    fec_marks = marks + n*ds;   //the fec marks follow all the data marks
    fec_blocks = shards + n*ds;

    for(j = 0; j < n; j++) {
        dn = 0;
        for(i = 0; i < ds; i++) {
            if(marks[i]) {
                //erased data block
                erased_blocks[dn++] = i;
            }
        }
        if(dn > 0) {
            pn = 0;
            for(i = 0; i < ps && pn < dn; i++) {
                if(!fec_marks[i]) {
                    //got a valid fec row
                    fec_block_nos[pn] = i;
                    dec_fec_blocks[pn] = fec_blocks[i];
                    pn++;
                }
            }

            if(dn == pn) {
                reed_solomon_decode(rs,
                        data_blocks,
                        block_size,
                        dec_fec_blocks,
                        fec_block_nos,
                        erased_blocks,
                        dn);
            } else {
                //not enough fec blocks for this group; record the error and continue
                err = -1;
            }
        }
        data_blocks += ds;
        marks += ds;
        fec_blocks += ps;
        fec_marks += ps;
    }
    return err;
}
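
#if 0
/*
 * Minimal reconstruct sketch (illustration only, never compiled): one group
 * of 4 data + 2 parity shards with data shard 2 marked as lost. All names
 * are made up for the example.
 */
static void example_reconstruct(reed_solomon *rs,
        unsigned char **shards,     /* 6 rows: 4 data blocks then 2 fec blocks */
        int block_size)
{
    unsigned char marks[6] = { 0, 0, 1, 0, 0, 0 };  /* 1 = missing/corrupt */

    reed_solomon_reconstruct(rs, shards, marks, 6, block_size);
}
#endif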