rs.c

/*#define PROFILE*/
/*
 * fec.c -- forward error correction based on Vandermonde matrices
 * 980624
 * (C) 1997-98 Luigi Rizzo (luigi@iet.unipi.it)
 * (C) 2001 Alain Knaff (alain@knaff.lu)
 *
 * Portions derived from code by Phil Karn (karn@ka9q.ampr.org),
 * Robert Morelos-Zaragoza (robert@spectra.eng.hawaii.edu) and Hari
 * Thirumoorthy (harit@spectra.eng.hawaii.edu), Aug 1995
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
 * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 * Reimplemented by Jannson (20161018): compatible with the Go version at
 * https://github.com/klauspost/reedsolomon
 */

/*
 * The following parameter defines how many bits are used for
 * field elements. The code supports any value from 2 to 16,
 * but the fastest operation is achieved with 8-bit elements.
 * This is the only parameter you may want to change.
 */
#define GF_BITS 8 /* code over GF(2**GF_BITS) - change to suit */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>

#include "rs.h"

/*
 * stuff used for testing purposes only
 */
#ifdef TEST
#define DEB(x)
#define DDB(x) x
#define DEBUG 0 /* minimal debugging */
#include <sys/time.h>
#define DIFF_T(a,b) \
    (1 + 1000000*(a.tv_sec - b.tv_sec) + (a.tv_usec - b.tv_usec))
#define TICK(t) \
    { struct timeval x; \
      gettimeofday(&x, NULL); \
      t = x.tv_usec + 1000000*(x.tv_sec & 0xff); \
    }
#define TOCK(t) \
    { u_long t1; TICK(t1); \
      if (t1 < t) t = 256000000 + t1 - t; \
      else t = t1 - t; \
      if (t == 0) t = 1; }

u_long ticks[10]; /* vars for timekeeping */
#else
#define DEB(x)
#define DDB(x)
#define TICK(x)
#define TOCK(x)
#endif /* TEST */

/*
 * You should not need to change anything beyond this point.
 * The first part of the file implements linear algebra in GF.
 *
 * gf is the type used to store an element of the Galois Field.
 * It must contain at least GF_BITS bits.
 *
 * Note: unsigned char will work up to GF(256) but int seems to run
 * faster on the Pentium. We use int whenever we have to deal with an
 * index, since ints are generally faster.
 */

/*
 * AK: Udpcast only uses GF_BITS=8. Remove other possibilities
 */
#if (GF_BITS != 8)
#error "GF_BITS must be 8"
#endif

typedef unsigned char gf;

#define GF_SIZE ((1 << GF_BITS) - 1) /* powers of \alpha */

/*
 * Primitive polynomials - see Lin & Costello, Appendix A,
 * and Lee & Messerschmitt, p. 453.
 */
static char *allPp[] = {    /* GF_BITS  polynomial */
    NULL,                   /*  0  no code */
    NULL,                   /*  1  no code */
    "111",                  /*  2  1+x+x^2 */
    "1101",                 /*  3  1+x+x^3 */
    "11001",                /*  4  1+x+x^4 */
    "101001",               /*  5  1+x^2+x^5 */
    "1100001",              /*  6  1+x+x^6 */
    "10010001",             /*  7  1+x^3+x^7 */
    "101110001",            /*  8  1+x^2+x^3+x^4+x^8 */
    "1000100001",           /*  9  1+x^4+x^9 */
    "10010000001",          /* 10  1+x^3+x^10 */
    "101000000001",         /* 11  1+x^2+x^11 */
    "1100101000001",        /* 12  1+x+x^4+x^6+x^12 */
    "11011000000001",       /* 13  1+x+x^3+x^4+x^13 */
    "110000100010001",      /* 14  1+x+x^6+x^10+x^14 */
    "1100000000000001",     /* 15  1+x+x^15 */
    "11010000000010001"     /* 16  1+x+x^3+x^12+x^16 */
};

/*
 * To speed up computations, we have tables for logarithm, exponent
 * and inverse of a number. If GF_BITS <= 8, we use a table for
 * multiplication as well (it takes 64K, no big deal even on a PDA,
 * especially because it can be pre-initialized and put into a ROM!);
 * otherwise we use a table of logarithms.
 * In any case the macro gf_mul(x,y) takes care of multiplications.
 */
static gf gf_exp[2*GF_SIZE];    /* index->poly form conversion table */
static int gf_log[GF_SIZE + 1]; /* poly->index form conversion table */
static gf inverse[GF_SIZE+1];   /* inverse of field elem. */
                                /* inv[\alpha**i]=\alpha**(GF_SIZE-i-1) */

/*
 * modnn(x) computes x % GF_SIZE, where GF_SIZE is 2**GF_BITS - 1,
 * without a slow divide.
 */
static inline gf
modnn(int x)
{
    while (x >= GF_SIZE) {
        x -= GF_SIZE;
        x = (x >> GF_BITS) + (x & GF_SIZE);
    }
    return x;
}
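
/*
 * Worked example (added for clarity, not part of the original code): modnn()
 * folds the exponent sum used by the log/exp multiply back into [0, GF_SIZE).
 * For GF_BITS = 8, modnn(454) subtracts 255 giving 199, folds the high bits
 * back in (no change here since 199 < 256), and 199 < 255, so the result is
 * 199 == 454 % 255.
 */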

#define SWAP(a,b,t) {t tmp; tmp=a; a=b; b=tmp;}

/*
 * gf_mul(x,y) multiplies two numbers. If GF_BITS<=8, it is much
 * faster to use a multiplication table.
 *
 * USE_GF_MULC, GF_MULC0(c) and GF_ADDMULC(x) can be used when multiplying
 * many numbers by the same constant. In this case the first
 * call sets the constant, and others perform the multiplications.
 * A value related to the multiplication is held in a local variable
 * declared with USE_GF_MULC. See usage in addmul1().
 */
static gf gf_mul_table[(GF_SIZE + 1)*(GF_SIZE + 1)]
#ifdef WINDOWS
__attribute__((aligned (16)))
#else
__attribute__((aligned (256)))
#endif
;

#define gf_mul(x,y) gf_mul_table[(x<<8)+y]

#define USE_GF_MULC register gf * __gf_mulc_
#define GF_MULC0(c) __gf_mulc_ = &gf_mul_table[(c)<<8]
#define GF_ADDMULC(dst, x) dst ^= __gf_mulc_[x]
#define GF_MULC(dst, x) dst = __gf_mulc_[x]

static void
init_mul_table(void)
{
    int i, j;
    for (i = 0; i < GF_SIZE+1; i++)
        for (j = 0; j < GF_SIZE+1; j++)
            gf_mul_table[(i<<8)+j] = gf_exp[modnn(gf_log[i] + gf_log[j])];

    for (j = 0; j < GF_SIZE+1; j++)
        gf_mul_table[j] = gf_mul_table[j<<8] = 0;
}
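
/*
 * Illustrative example (added; assumes the tables have been built by
 * fec_init()): with \alpha = x = 2, gf_log[2] = 1 and gf_log[4] = 2, so
 * gf_mul(2,4) == gf_exp[modnn(1 + 2)] == gf_exp[3] == 8, i.e. x * x^2 = x^3.
 * Row 0 and column 0 of gf_mul_table are forced to 0 so that
 * gf_mul(x,0) == gf_mul(0,x) == 0 even though gf_log[0] is only a sentinel.
 */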

/*
 * Generate GF(2**m) from the irreducible polynomial p(X) in p[0]..p[m]
 * Lookup tables:
 *   index->polynomial form:  gf_exp[] contains j = \alpha^i;
 *   polynomial form -> index form:  gf_log[ j = \alpha^i ] = i
 * \alpha=x is the primitive element of GF(2^m)
 *
 * For efficiency, gf_exp[] has size 2*GF_SIZE, so that a simple
 * multiplication of two numbers can be resolved without calling modnn
 */

/*
 * initialize the data structures used for computations in GF.
 */
static void
generate_gf(void)
{
    int i;
    gf mask;
    char *Pp = allPp[GF_BITS];

    mask = 1;            /* x ** 0 = 1 */
    gf_exp[GF_BITS] = 0; /* will be updated at the end of the 1st loop */
    /*
     * first, generate the (polynomial representation of) powers of \alpha,
     * which are stored in gf_exp[i] = \alpha ** i .
     * At the same time build gf_log[gf_exp[i]] = i .
     * The first GF_BITS powers are simply bits shifted to the left.
     */
    for (i = 0; i < GF_BITS; i++, mask <<= 1) {
        gf_exp[i] = mask;
        gf_log[gf_exp[i]] = i;
        /*
         * If Pp[i] == 1 then \alpha ** i occurs in poly-repr
         * gf_exp[GF_BITS] = \alpha ** GF_BITS
         */
        if (Pp[i] == '1')
            gf_exp[GF_BITS] ^= mask;
    }
    /*
     * now gf_exp[GF_BITS] = \alpha ** GF_BITS is complete, so we can also
     * compute its inverse.
     */
    gf_log[gf_exp[GF_BITS]] = GF_BITS;
    /*
     * Poly-repr of \alpha ** (i+1) is given by poly-repr of
     * \alpha ** i shifted left one bit and accounting for any
     * \alpha ** GF_BITS term that may occur when poly-repr of
     * \alpha ** i is shifted.
     */
    mask = 1 << (GF_BITS - 1);
    for (i = GF_BITS + 1; i < GF_SIZE; i++) {
        if (gf_exp[i - 1] >= mask)
            gf_exp[i] = gf_exp[GF_BITS] ^ ((gf_exp[i - 1] ^ mask) << 1);
        else
            gf_exp[i] = gf_exp[i - 1] << 1;
        gf_log[gf_exp[i]] = i;
    }
    /*
     * log(0) is not defined, so use a special value
     */
    gf_log[0] = GF_SIZE;
    /* set the extended gf_exp values for fast multiply */
    for (i = 0; i < GF_SIZE; i++)
        gf_exp[i + GF_SIZE] = gf_exp[i];

    /*
     * again special cases. 0 has no inverse. This used to
     * be initialized to GF_SIZE, but it should make no difference
     * since no one is supposed to read from here.
     */
    inverse[0] = 0;
    inverse[1] = 1;
    for (i = 2; i <= GF_SIZE; i++)
        inverse[i] = gf_exp[GF_SIZE - gf_log[i]];
}
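
/*
 * Minimal sanity-check sketch (added for illustration, not compiled): after
 * fec_init() the tables must satisfy the invariants below. The helper name
 * check_gf_tables() is hypothetical.
 */
#if 0
static void check_gf_tables(void)
{
    int i;
    for (i = 0; i < GF_SIZE; i++) {
        /* log and exp are inverse maps for the non-zero elements */
        assert(gf_log[gf_exp[i]] == i);
        /* the extended half of gf_exp mirrors the first half */
        assert(gf_exp[i + GF_SIZE] == gf_exp[i]);
    }
    for (i = 1; i <= GF_SIZE; i++) {
        /* every non-zero element times its inverse is 1 */
        assert(gf_mul(i, inverse[i]) == 1);
    }
    /* row 0 and column 0 of the multiplication table are zero */
    assert(gf_mul(0, 123) == 0 && gf_mul(123, 0) == 0);
}
#endif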

/*
 * Various linear algebra operations that I use often.
 */

/*
 * addmul() computes dst[] = dst[] + c * src[]
 * This is used often, so better optimize it! Currently the loop is
 * unrolled 16 times, a good value for 486 and pentium-class machines.
 * The case c=0 is also optimized, whereas c=1 is not. These
 * calls are infrequent in my typical apps so I did not bother.
 *
 * Note that gcc on
 */
#if 0
#define addmul(dst, src, c, sz) \
    if (c != 0) addmul1(dst, src, c, sz)
#endif

#define UNROLL 16 /* 1, 4, 8, 16 */
static void
slow_addmul1(gf *dst1, gf *src1, gf c, int sz)
{
    USE_GF_MULC;
    register gf *dst = dst1, *src = src1;
    gf *lim = &dst[sz - UNROLL + 1];

    GF_MULC0(c);

#if (UNROLL > 1) /* unrolling by 8/16 is quite effective on the pentium */
    for (; dst < lim; dst += UNROLL, src += UNROLL) {
        GF_ADDMULC(dst[0], src[0]);
        GF_ADDMULC(dst[1], src[1]);
        GF_ADDMULC(dst[2], src[2]);
        GF_ADDMULC(dst[3], src[3]);
#if (UNROLL > 4)
        GF_ADDMULC(dst[4], src[4]);
        GF_ADDMULC(dst[5], src[5]);
        GF_ADDMULC(dst[6], src[6]);
        GF_ADDMULC(dst[7], src[7]);
#endif
#if (UNROLL > 8)
        GF_ADDMULC(dst[8], src[8]);
        GF_ADDMULC(dst[9], src[9]);
        GF_ADDMULC(dst[10], src[10]);
        GF_ADDMULC(dst[11], src[11]);
        GF_ADDMULC(dst[12], src[12]);
        GF_ADDMULC(dst[13], src[13]);
        GF_ADDMULC(dst[14], src[14]);
        GF_ADDMULC(dst[15], src[15]);
#endif
    }
#endif
    lim += UNROLL - 1;
    for (; dst < lim; dst++, src++) /* final components */
        GF_ADDMULC(*dst, *src);
}

# define addmul1 slow_addmul1

static void addmul(gf *dst, gf *src, gf c, int sz) {
    // fprintf(stderr, "Dst=%p Src=%p, gf=%02x sz=%d\n", dst, src, c, sz);
    if (c != 0) addmul1(dst, src, c, sz);
}
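
/*
 * Illustrative note (added): in GF(2^8) addition is XOR, so addmul() performs
 * dst[i] ^= gf_mul(c, src[i]) for every byte, i.e. the row operation
 * "dst += c * src" used by the matrix code below. For example, with c = 2,
 * src[i] = 4 and dst[i] = 1, the new dst[i] is 1 ^ gf_mul(2,4) = 1 ^ 8 = 9.
 */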

/*
 * mul() computes dst[] = c * src[]
 * This is used often, so better optimize it! Currently the loop is
 * unrolled 16 times, a good value for 486 and pentium-class machines.
 * The case c=0 is also optimized, whereas c=1 is not. These
 * calls are infrequent in my typical apps so I did not bother.
 *
 * Note that gcc on
 */
#if 0
#define mul(dst, src, c, sz) \
    do { if (c != 0) mul1(dst, src, c, sz); else memset(dst, 0, sz); } while(0)
#endif

#define UNROLL 16 /* 1, 4, 8, 16 */
static void
slow_mul1(gf *dst1, gf *src1, gf c, int sz)
{
    USE_GF_MULC;
    register gf *dst = dst1, *src = src1;
    gf *lim = &dst[sz - UNROLL + 1];

    GF_MULC0(c);

#if (UNROLL > 1) /* unrolling by 8/16 is quite effective on the pentium */
    for (; dst < lim; dst += UNROLL, src += UNROLL) {
        GF_MULC(dst[0], src[0]);
        GF_MULC(dst[1], src[1]);
        GF_MULC(dst[2], src[2]);
        GF_MULC(dst[3], src[3]);
#if (UNROLL > 4)
        GF_MULC(dst[4], src[4]);
        GF_MULC(dst[5], src[5]);
        GF_MULC(dst[6], src[6]);
        GF_MULC(dst[7], src[7]);
#endif
#if (UNROLL > 8)
        GF_MULC(dst[8], src[8]);
        GF_MULC(dst[9], src[9]);
        GF_MULC(dst[10], src[10]);
        GF_MULC(dst[11], src[11]);
        GF_MULC(dst[12], src[12]);
        GF_MULC(dst[13], src[13]);
        GF_MULC(dst[14], src[14]);
        GF_MULC(dst[15], src[15]);
#endif
    }
#endif
    lim += UNROLL - 1;
    for (; dst < lim; dst++, src++) /* final components */
        GF_MULC(*dst, *src);
}

# define mul1 slow_mul1

static inline void mul(gf *dst, gf *src, gf c, int sz) {
    /*fprintf(stderr, "%p = %02x * %p\n", dst, c, src);*/
    /* when c is 0 the product is identically zero, so clear all sz bytes */
    if (c != 0) mul1(dst, src, c, sz); else memset(dst, 0, sz);
}

/*
 * invert_mat() takes a matrix and produces its inverse.
 * k is the size of the matrix.
 * (Gauss-Jordan, adapted from Numerical Recipes in C)
 * Return non-zero if singular.
 */
DEB( int pivloops=0; int pivswaps=0 ; /* diagnostic */)

static int
invert_mat(gf *src, int k)
{
    gf c, *p;
    int irow, icol, row, col, i, ix;
    int error = 1;

    int indxc[k];
    int indxr[k];
    int ipiv[k];
    gf id_row[k];

    memset(id_row, 0, k*sizeof(gf));
    DEB( pivloops=0; pivswaps=0 ; /* diagnostic */ )
    /*
     * ipiv marks elements already used as pivots.
     */
    for (i = 0; i < k; i++)
        ipiv[i] = 0;

    for (col = 0; col < k; col++) {
        gf *pivot_row;
        /*
         * Zeroing column 'col', look for a non-zero element.
         * First try on the diagonal, if it fails, look elsewhere.
         */
        irow = icol = -1;
        if (ipiv[col] != 1 && src[col*k + col] != 0) {
            irow = col;
            icol = col;
            goto found_piv;
        }
        for (row = 0; row < k; row++) {
            if (ipiv[row] != 1) {
                for (ix = 0; ix < k; ix++) {
                    DEB( pivloops++; )
                    if (ipiv[ix] == 0) {
                        if (src[row*k + ix] != 0) {
                            irow = row;
                            icol = ix;
                            goto found_piv;
                        }
                    } else if (ipiv[ix] > 1) {
                        fprintf(stderr, "singular matrix\n");
                        goto fail;
                    }
                }
            }
        }
        if (icol == -1) {
            fprintf(stderr, "XXX pivot not found!\n");
            goto fail;
        }
found_piv:
        ++(ipiv[icol]);
        /*
         * swap rows irow and icol, so afterwards the diagonal
         * element will be correct. Rarely done, not worth
         * optimizing.
         */
        if (irow != icol) {
            for (ix = 0; ix < k; ix++) {
                SWAP(src[irow*k + ix], src[icol*k + ix], gf);
            }
        }
        indxr[col] = irow;
        indxc[col] = icol;
        pivot_row = &src[icol*k];
        c = pivot_row[icol];
        if (c == 0) {
            fprintf(stderr, "singular matrix 2\n");
            goto fail;
        }
        if (c != 1) { /* otherwise this is a NOP */
            /*
             * this is done often, but optimizing is not so
             * fruitful, at least in the obvious ways (unrolling)
             */
            DEB( pivswaps++; )
            c = inverse[c];
            pivot_row[icol] = 1;
            for (ix = 0; ix < k; ix++)
                pivot_row[ix] = gf_mul(c, pivot_row[ix]);
        }
        /*
         * from all rows, remove multiples of the selected row
         * to zero the relevant entry (in fact, the entry is not zero
         * because we know it must be zero).
         * (Here, if we know that the pivot_row is the identity,
         * we can optimize the addmul).
         */
        id_row[icol] = 1;
        if (memcmp(pivot_row, id_row, k*sizeof(gf)) != 0) {
            for (p = src, ix = 0; ix < k; ix++, p += k) {
                if (ix != icol) {
                    c = p[icol];
                    p[icol] = 0;
                    addmul(p, pivot_row, c, k);
                }
            }
        }
        id_row[icol] = 0;
    } /* done all columns */

    for (col = k-1; col >= 0; col--) {
        if (indxr[col] < 0 || indxr[col] >= k)
            fprintf(stderr, "AARGH, indxr[col] %d\n", indxr[col]);
        else if (indxc[col] < 0 || indxc[col] >= k)
            fprintf(stderr, "AARGH, indxc[col] %d\n", indxc[col]);
        else if (indxr[col] != indxc[col]) {
            for (row = 0; row < k; row++) {
                SWAP(src[row*k + indxr[col]], src[row*k + indxc[col]], gf);
            }
        }
    }
    error = 0;
fail:
    return error;
}
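
/*
 * Illustrative usage sketch (added, not compiled): invert a small k x k
 * matrix in place and verify the result by multiplying each row back against
 * a copy of the original. The helper name invert_mat_demo() and the 3x3
 * values are hypothetical; any non-singular matrix over GF(256) works.
 */
#if 0
static void invert_mat_demo(void)
{
    enum { K = 3 };
    /* Vandermonde rows for x = 1, 2, 3, hence non-singular */
    gf a[K*K] = { 1, 1, 1,
                  1, 2, 4,
                  1, 3, 5 };
    gf copy[K*K];
    int r, c, i;

    memcpy(copy, a, sizeof(a));
    assert(invert_mat(a, K) == 0);  /* 0 means the matrix was invertible */

    /* copy * a must now be the identity matrix */
    for (r = 0; r < K; r++) {
        for (c = 0; c < K; c++) {
            gf acc = 0;
            for (i = 0; i < K; i++)
                acc ^= gf_mul(copy[r*K + i], a[i*K + c]);
            assert(acc == (r == c ? 1 : 0));
        }
    }
}
#endif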

static int fec_initialized = 0;

void fec_init(void)
{
    TICK(ticks[0]);
    generate_gf();
    TOCK(ticks[0]);
    DDB(fprintf(stderr, "generate_gf took %ldus\n", ticks[0]);)

    TICK(ticks[0]);
    init_mul_table();
    TOCK(ticks[0]);
    DDB(fprintf(stderr, "init_mul_table took %ldus\n", ticks[0]);)

    fec_initialized = 1;
}

#ifdef PROFILE
#ifdef __x86_64__
static long long rdtsc(void)
{
    unsigned long low, hi;
    asm volatile ("rdtsc" : "=d" (hi), "=a" (low));
    return ((((long long)hi) << 32) | ((long long)low));
}
#elif defined(__aarch64__)
static long long rdtsc(void)
{
    unsigned long long val;
    asm volatile("mrs %0, cntvct_el0" : "=r" (val));
    return val;
}
#endif

void print_matrix1(gf* matrix, int nrows, int ncols) {
    int i, j;
    printf("matrix (%d,%d):\n", nrows, ncols);
    for(i = 0; i < nrows; i++) {
        for(j = 0; j < ncols; j++) {
            printf("%6d ", matrix[i*ncols + j]);
        }
        printf("\n");
    }
}

void print_matrix2(gf** matrix, int nrows, int ncols) {
    int i, j;
    printf("matrix (%d,%d):\n", nrows, ncols);
    for(i = 0; i < nrows; i++) {
        for(j = 0; j < ncols; j++) {
            printf("%6d ", matrix[i][j]);
        }
        printf("\n");
    }
}
#endif

/* y = a**n */
static gf galExp(gf a, gf n) {
    int logA;
    int logResult;
    if(0 == n) {
        return 1;
    }
    if(0 == a) {
        return 0;
    }
    logA = gf_log[a];
    logResult = logA * n;
    while(logResult >= 255) {
        logResult -= 255;
    }
    return gf_exp[logResult];
}

static inline gf galMultiply(gf a, gf b) {
    return gf_mul_table[ ((int)a << 8) + (int)b ];
}
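
/*
 * Worked example (added for clarity): with the generator \alpha = 2,
 * galExp(2, 4) == gf_exp[4] == 16, and galExp(2, 8) == gf_exp[8] == 0x1d,
 * i.e. x^8 reduced by the primitive polynomial 1+x^2+x^3+x^4+x^8.
 * galMultiply(a, b) is just the gf_mul() table lookup, so
 * galMultiply(2, 4) == 8.
 */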

static gf* vandermonde(int nrows, int ncols) {
    int row, col, ptr;
    gf* matrix = (gf*)RS_MALLOC(nrows * ncols);
    if(NULL != matrix) {
        ptr = 0;
        for(row = 0; row < nrows; row++) {
            for(col = 0; col < ncols; col++) {
                matrix[ptr++] = galExp((gf)row, (gf)col);
            }
        }
    }
    return matrix;
}

/*
 * Extract the sub-matrix of rows [rmin,rmax) and columns [cmin,cmax).
 * Input parameters are not validated.
 */
static gf* sub_matrix(gf* matrix, int rmin, int cmin, int rmax, int cmax, int nrows, int ncols) {
    int i, j, ptr = 0;
    gf* new_m = (gf*)RS_MALLOC( (rmax-rmin) * (cmax-cmin) );
    if(NULL != new_m) {
        for(i = rmin; i < rmax; i++) {
            for(j = cmin; j < cmax; j++) {
                new_m[ptr++] = matrix[i*ncols + j];
            }
        }
    }
    return new_m;
}

/* y = a.dot(b) */
static gf* multiply1(gf *a, int ar, int ac, gf *b, int br, int bc) {
    gf *new_m, tg;
    int r, c, i, ptr = 0;

    assert(ac == br);
    new_m = (gf*)RS_CALLOC(1, ar*bc);
    if(NULL != new_m) {
        /* this multiply is slow */
        for(r = 0; r < ar; r++) {
            for(c = 0; c < bc; c++) {
                tg = 0;
                for(i = 0; i < ac; i++) {
                    /* tg ^= gf_mul_table[ ((int)a[r*ac+i] << 8) + (int)b[i*bc+c] ]; */
                    tg ^= galMultiply(a[r*ac+i], b[i*bc+c]);
                }
                new_m[ptr++] = tg;
            }
        }
    }
    return new_m;
}

/* ported from the golang reedsolomon version */
static inline int code_some_shards(gf* matrixRows, gf** inputs, gf** outputs,
        int dataShards, int outputCount, int byteCount) {
    gf* in;
    int iRow, c;
    for(c = 0; c < dataShards; c++) {
        in = inputs[c];
        for(iRow = 0; iRow < outputCount; iRow++) {
            if(0 == c) {
                mul(outputs[iRow], in, matrixRows[iRow*dataShards+c], byteCount);
            } else {
                addmul(outputs[iRow], in, matrixRows[iRow*dataShards+c], byteCount);
            }
        }
    }
    return 0;
}
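
/*
 * Illustrative note (added): code_some_shards() computes, byte by byte,
 *   outputs[r][b] = XOR over c of matrixRows[r][c] * inputs[c][b]
 * i.e. it multiplies the (outputCount x dataShards) coefficient matrix by the
 * column of input shards. The c == 0 pass uses mul(), so the output buffers
 * do not need to be zeroed by the caller.
 */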

reed_solomon* reed_solomon_new(int data_shards, int parity_shards) {
    gf* vm = NULL;
    gf* top = NULL;
    int err = 0;
    reed_solomon* rs = NULL;

    /* fec_init() must have been called once beforehand */
    assert(fec_initialized);

    do {
        rs = (reed_solomon*) RS_MALLOC(sizeof(reed_solomon));
        if(NULL == rs) {
            return NULL;
        }
        rs->data_shards = data_shards;
        rs->parity_shards = parity_shards;
        rs->shards = (data_shards + parity_shards);
        rs->m = NULL;
        rs->parity = NULL;

        if(rs->shards > DATA_SHARDS_MAX || data_shards <= 0 || parity_shards <= 0) {
            err = 1;
            break;
        }

        vm = vandermonde(rs->shards, rs->data_shards);
        if(NULL == vm) {
            err = 2;
            break;
        }

        top = sub_matrix(vm, 0, 0, data_shards, data_shards, rs->shards, data_shards);
        if(NULL == top) {
            err = 3;
            break;
        }

        err = invert_mat(top, data_shards);
        assert(0 == err);

        rs->m = multiply1(vm, rs->shards, data_shards, top, data_shards, data_shards);
        if(NULL == rs->m) {
            err = 4;
            break;
        }

        rs->parity = sub_matrix(rs->m, data_shards, 0, rs->shards, data_shards, rs->shards, data_shards);
        if(NULL == rs->parity) {
            err = 5;
            break;
        }

        RS_FREE(vm);
        RS_FREE(top);
        vm = NULL;
        top = NULL;
        return rs;

    } while(0);

    fprintf(stderr, "err=%d\n", err);
    if(NULL != vm) {
        RS_FREE(vm);
    }
    if(NULL != top) {
        RS_FREE(top);
    }
    if(NULL != rs) {
        if(NULL != rs->m) {
            RS_FREE(rs->m);
        }
        if(NULL != rs->parity) {
            RS_FREE(rs->parity);
        }
        RS_FREE(rs);
    }
    return NULL;
}

void reed_solomon_release(reed_solomon* rs) {
    if(NULL != rs) {
        if(NULL != rs->m) {
            RS_FREE(rs->m);
        }
        if(NULL != rs->parity) {
            RS_FREE(rs->parity);
        }
        RS_FREE(rs);
    }
}

/**
 * encode one group of blocks
 * input:
 *   rs
 *   data_blocks[rs->data_shards][block_size]
 * output:
 *   fec_blocks[rs->parity_shards][block_size]
 */
int reed_solomon_encode(reed_solomon* rs,
        unsigned char** data_blocks,
        unsigned char** fec_blocks,
        int block_size) {
    assert(NULL != rs && NULL != rs->parity);
    return code_some_shards(rs->parity, data_blocks, fec_blocks,
            rs->data_shards, rs->parity_shards, block_size);
}

/**
 * decode one group of blocks
 * input:
 *   rs
 *   data_blocks[rs->data_shards][block_size]: original data; erased rows are rebuilt in place
 *   dec_fec_blocks[nr_fec_blocks][block_size]: surviving fec blocks used for recovery
 *   fec_block_nos: positions of those fec blocks within the original fec_blocks
 *   erased_blocks: positions of the erased blocks within data_blocks
 *   nr_fec_blocks: number of erased data blocks (and of fec blocks supplied)
 */
int reed_solomon_decode(reed_solomon* rs,
        unsigned char **data_blocks,
        int block_size,
        unsigned char **dec_fec_blocks,
        unsigned int *fec_block_nos,
        unsigned int *erased_blocks,
        int nr_fec_blocks) {
    /* use the stack instead of malloc; DATA_SHARDS_MAX is kept small to save memory */
    gf dataDecodeMatrix[DATA_SHARDS_MAX*DATA_SHARDS_MAX];
    unsigned char* subShards[DATA_SHARDS_MAX];
    unsigned char* outputs[DATA_SHARDS_MAX];
    gf* m = rs->m;
    int i, j, c, swap, subMatrixRow, dataShards, nos, nshards;

    /* erased_blocks should already be sorted; if it is, the check below costs
     * only one pass, otherwise the entries are sorted here (bubble sort) */
    for(i = 0; i < nr_fec_blocks; i++) {
        swap = 0;
        for(j = i+1; j < nr_fec_blocks; j++) {
            if(erased_blocks[i] > erased_blocks[j]) {
                /* the earlier entry is bigger than the later one, swap */
                c = erased_blocks[i];
                erased_blocks[i] = erased_blocks[j];
                erased_blocks[j] = c;
                swap = 1;
            }
        }
        //printf("swap:%d\n", swap);
        if(!swap) {
            //already sorted
            break;
        }
    }

    j = 0;
    subMatrixRow = 0;
    nos = 0;
    nshards = 0;
    dataShards = rs->data_shards;
    for(i = 0; i < dataShards; i++) {
        if(j < nr_fec_blocks && i == erased_blocks[j]) {
            //skip the erased block
            j++;
        } else {
            /* this row is ok */
            for(c = 0; c < dataShards; c++) {
                dataDecodeMatrix[subMatrixRow*dataShards + c] = m[i*dataShards + c];
            }
            subShards[subMatrixRow] = data_blocks[i];
            subMatrixRow++;
        }
    }

    for(i = 0; i < nr_fec_blocks && subMatrixRow < dataShards; i++) {
        subShards[subMatrixRow] = dec_fec_blocks[i];
        j = dataShards + fec_block_nos[i];
        for(c = 0; c < dataShards; c++) {
            dataDecodeMatrix[subMatrixRow*dataShards + c] = m[j*dataShards + c]; //use the specific row of the original fec matrix
        }
        subMatrixRow++;
    }

    if(subMatrixRow < dataShards) {
        //cannot correct
        return -1;
    }

    invert_mat(dataDecodeMatrix, dataShards);
    //printf("invert:\n");
    //print_matrix1(dataDecodeMatrix, dataShards, dataShards);
    //printf("nShards:\n");
    //print_matrix2(subShards, dataShards, block_size);

    for(i = 0; i < nr_fec_blocks; i++) {
        j = erased_blocks[i];
        outputs[i] = data_blocks[j];
        //data_blocks[j][0] = 0;
        memmove(dataDecodeMatrix+i*dataShards, dataDecodeMatrix+j*dataShards, dataShards);
    }
    //printf("subMatrixRow:\n");
    //print_matrix1(dataDecodeMatrix, nr_fec_blocks, dataShards);
    //printf("outputs:\n");
    //print_matrix2(outputs, nr_fec_blocks, block_size);

    return code_some_shards(dataDecodeMatrix, subShards, outputs,
            dataShards, nr_fec_blocks, block_size);
}
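
/*
 * Worked example (added; the numbers are hypothetical): with data_shards = 4
 * and data blocks 1 and 3 lost, the caller passes
 *   erased_blocks  = {1, 3}, nr_fec_blocks = 2,
 *   dec_fec_blocks = two surviving parity blocks,
 *   fec_block_nos  = their indexes in the original parity set, e.g. {0, 2}.
 * The decoder builds a 4x4 matrix from the surviving data rows (0, 2) and the
 * parity rows (4, 6) of rs->m, inverts it, and rewrites data_blocks[1] and
 * data_blocks[3] in place.
 */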

/**
 * encode a large buffer that has been split into shards
 * input:
 *   rs
 *   nr_shards: assert(0 == nr_shards % rs->shards)
 *   shards[nr_shards][block_size]: all data blocks first, then all fec blocks
 */
int reed_solomon_encode2(reed_solomon* rs, unsigned char** shards, int nr_shards, int block_size) {
    unsigned char** data_blocks;
    unsigned char** fec_blocks;
    int i, ds = rs->data_shards, ps = rs->parity_shards, ss = rs->shards;
    i = nr_shards / ss;
    data_blocks = shards;
    fec_blocks = &shards[(i*ds)];

    for(i = 0; i < nr_shards; i += ss) {
        reed_solomon_encode(rs, data_blocks, fec_blocks, block_size);
        data_blocks += ds;
        fec_blocks += ps;
    }
    return 0;
}

/**
 * reconstruct a large buffer that has been split into shards
 * input:
 *   rs
 *   nr_shards: assert(0 == nr_shards % rs->shards)
 *   shards[nr_shards][block_size]: all data blocks first, then all fec blocks
 *   marks[nr_shards]: non-zero entries mark missing/corrupt shards
 */
int reed_solomon_reconstruct(reed_solomon* rs,
        unsigned char** shards,
        unsigned char* marks,
        int nr_shards,
        int block_size) {
    unsigned char *dec_fec_blocks[DATA_SHARDS_MAX];
    unsigned int fec_block_nos[DATA_SHARDS_MAX];
    unsigned int erased_blocks[DATA_SHARDS_MAX];
    unsigned char* fec_marks;
    unsigned char **data_blocks, **fec_blocks;
    int i, j, dn, pn, n;
    int ds = rs->data_shards;
    int ps = rs->parity_shards;
    int err = 0;

    data_blocks = shards;
    n = nr_shards / rs->shards;
    fec_marks = marks + n*ds; //the fec marks follow all of the data marks
    fec_blocks = shards + n*ds;

    for(j = 0; j < n; j++) {
        dn = 0;
        for(i = 0; i < ds; i++) {
            if(marks[i]) {
                //erased data block
                erased_blocks[dn++] = i;
            }
        }
        if(dn > 0) {
            pn = 0;
            for(i = 0; i < ps && pn < dn; i++) {
                if(!fec_marks[i]) {
                    //got a valid fec row
                    fec_block_nos[pn] = i;
                    dec_fec_blocks[pn] = fec_blocks[i];
                    pn++;
                }
            }

            if(dn == pn) {
                reed_solomon_decode(rs
                        , data_blocks
                        , block_size
                        , dec_fec_blocks
                        , fec_block_nos
                        , erased_blocks
                        , dn);
            } else {
                //not enough valid fec blocks; record the error but continue
                err = -1;
            }
        }
        data_blocks += ds;
        marks += ds;
        fec_blocks += ps;
        fec_marks += ps;
    }
    return err;
}
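
/*
 * End-to-end usage sketch (added for illustration, not compiled). It relies
 * only on the API shown above (fec_init, reed_solomon_new,
 * reed_solomon_encode2, reed_solomon_reconstruct, reed_solomon_release); the
 * buffer sizes, fill pattern and loss pattern are made up for the example.
 */
#if 0
static int rs_demo(void)
{
    enum { DS = 4, PS = 2, NR = DS + PS, BLOCK = 1024 };
    unsigned char *blocks[NR];
    unsigned char marks[NR];
    reed_solomon *rs;
    int i, ret;

    fec_init();                         /* build the GF(256) tables once */
    rs = reed_solomon_new(DS, PS);
    if (rs == NULL)
        return -1;

    for (i = 0; i < NR; i++) {
        blocks[i] = malloc(BLOCK);
        memset(blocks[i], i < DS ? 'A' + i : 0, BLOCK); /* fill data, zero fec */
    }

    /* one stripe: 4 data blocks followed by 2 fec blocks */
    reed_solomon_encode2(rs, blocks, NR, BLOCK);

    /* pretend data block 2 was lost and recover it from the fec blocks */
    memset(marks, 0, sizeof(marks));
    memset(blocks[2], 0, BLOCK);
    marks[2] = 1;
    ret = reed_solomon_reconstruct(rs, blocks, marks, NR, BLOCK);
    /* on success blocks[2] holds its original contents again */

    for (i = 0; i < NR; i++)
        free(blocks[i]);
    reed_solomon_release(rs);
    return ret;                         /* 0 on success */
}
#endif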