8 | 8 |
9 | 9 | u64 crc64_nvme_arm64_c(u64 crc, const u8 *p, size_t len); |
10 | 10 |
11 | | -#define GET_P64_0(v) ((poly64_t)vgetq_lane_u64(vreinterpretq_u64_p64(v), 0)) |
12 | | -#define GET_P64_1(v) ((poly64_t)vgetq_lane_u64(vreinterpretq_u64_p64(v), 1)) |
13 | | - |
14 | 11 | /* x^191 mod G, x^127 mod G */ |
15 | 12 | static const u64 fold_consts_val[2] = { 0xeadc41fd2ba3d420ULL, |
16 | 13 | 0x21e9761e252621acULL }; |
17 | 14 | /* floor(x^127 / G), (G - x^64) / x */ |
18 | 15 | static const u64 bconsts_val[2] = { 0x27ecfa329aef9f77ULL, |
19 | 16 | 0x34d926535897936aULL }; |
20 | 17 |
21 | | -u64 crc64_nvme_arm64_c(u64 crc, const u8 *p, size_t len) |
| 18 | +static inline uint64x2_t pmull64(uint64x2_t a, uint64x2_t b) |
22 | 19 | { |
23 | | - uint64x2_t v0_u64 = { crc, 0 }; |
24 | | - poly64x2_t v0 = vreinterpretq_p64_u64(v0_u64); |
25 | | - poly64x2_t fold_consts = |
26 | | - vreinterpretq_p64_u64(vld1q_u64(fold_consts_val)); |
27 | | - poly64x2_t v1 = vreinterpretq_p64_u8(vld1q_u8(p)); |
| 20 | + return vreinterpretq_u64_p128(vmull_p64(vgetq_lane_u64(a, 0), |
| 21 | + vgetq_lane_u64(b, 0))); |
| 22 | +} |
28 | 23 |
29 | | - v0 = vreinterpretq_p64_u8(veorq_u8(vreinterpretq_u8_p64(v0), |
30 | | - vreinterpretq_u8_p64(v1))); |
31 | | - p += 16; |
32 | | - len -= 16; |
| 24 | +static inline uint64x2_t pmull64_high(uint64x2_t a, uint64x2_t b) |
| 25 | +{ |
| 26 | + poly64x2_t l = vreinterpretq_p64_u64(a); |
| 27 | + poly64x2_t m = vreinterpretq_p64_u64(b); |
33 | 28 |
34 | | - do { |
35 | | - v1 = vreinterpretq_p64_u8(vld1q_u8(p)); |
| 29 | + return vreinterpretq_u64_p128(vmull_high_p64(l, m)); |
| 30 | +} |
36 | 31 |
37 | | - poly128_t v2 = vmull_high_p64(fold_consts, v0); |
38 | | - poly128_t v0_128 = |
39 | | - vmull_p64(GET_P64_0(fold_consts), GET_P64_0(v0)); |
| 32 | +static inline uint64x2_t pmull64_hi_lo(uint64x2_t a, uint64x2_t b) |
| 33 | +{ |
| 34 | + return vreinterpretq_u64_p128(vmull_p64(vgetq_lane_u64(a, 1), |
| 35 | + vgetq_lane_u64(b, 0))); |
| 36 | +} |
40 | 37 |
41 | | - uint8x16_t x0 = veorq_u8(vreinterpretq_u8_p128(v0_128), |
42 | | - vreinterpretq_u8_p128(v2)); |
| 38 | +u64 crc64_nvme_arm64_c(u64 crc, const u8 *p, size_t len) |
| 39 | +{ |
| 40 | + uint64x2_t fold_consts = vld1q_u64(fold_consts_val); |
| 41 | + uint64x2_t v0 = { crc, 0 }; |
| 42 | + uint64x2_t zero = { }; |
43 | 43 |
44 | | - x0 = veorq_u8(x0, vreinterpretq_u8_p64(v1)); |
45 | | - v0 = vreinterpretq_p64_u8(x0); |
| 44 | + for (;;) { |
| 45 | + v0 ^= vreinterpretq_u64_u8(vld1q_u8(p)); |
46 | 46 |
47 | 47 | p += 16; |
48 | 48 | len -= 16; |
49 | | - } while (len >= 16); |
50 | | - |
51 | | - /* Multiply the 128-bit value by x^64 and reduce it back to 128 bits. */ |
52 | | - poly64x2_t v7 = vreinterpretq_p64_u64((uint64x2_t){ 0, 0 }); |
53 | | - poly128_t v1_128 = vmull_p64(GET_P64_1(fold_consts), GET_P64_0(v0)); |
| 49 | + if (len < 16) |
| 50 | + break; |
54 | 51 |
55 | | - uint8x16_t ext_v0 = |
56 | | - vextq_u8(vreinterpretq_u8_p64(v0), vreinterpretq_u8_p64(v7), 8); |
57 | | - uint8x16_t x0 = veorq_u8(ext_v0, vreinterpretq_u8_p128(v1_128)); |
| 52 | + v0 = pmull64(fold_consts, v0) ^ pmull64_high(fold_consts, v0); |
| 53 | + } |
58 | 54 |
59 | | - v0 = vreinterpretq_p64_u8(x0); |
| 55 | + /* Multiply the 128-bit value by x^64 and reduce it back to 128 bits. */ |
| 56 | + v0 = vextq_u64(v0, zero, 1) ^ pmull64_hi_lo(fold_consts, v0); |
60 | 57 |
61 | 58 | /* Final Barrett reduction */ |
62 | | - poly64x2_t bconsts = vreinterpretq_p64_u64(vld1q_u64(bconsts_val)); |
63 | | - |
64 | | - v1_128 = vmull_p64(GET_P64_0(bconsts), GET_P64_0(v0)); |
65 | | - |
66 | | - poly64x2_t v1_64 = vreinterpretq_p64_u8(vreinterpretq_u8_p128(v1_128)); |
67 | | - poly128_t v3_128 = vmull_p64(GET_P64_1(bconsts), GET_P64_0(v1_64)); |
68 | | - |
69 | | - x0 = veorq_u8(vreinterpretq_u8_p64(v0), vreinterpretq_u8_p128(v3_128)); |
70 | | - |
71 | | - uint8x16_t ext_v2 = vextq_u8(vreinterpretq_u8_p64(v7), |
72 | | - vreinterpretq_u8_p128(v1_128), 8); |
| 59 | + uint64x2_t bconsts = vld1q_u64(bconsts_val); |
| 60 | + uint64x2_t final = pmull64(bconsts, v0); |
73 | 61 |
74 | | - x0 = veorq_u8(x0, ext_v2); |
| 62 | + v0 ^= vextq_u64(zero, final, 1) ^ pmull64_hi_lo(bconsts, final); |
75 | 63 |
76 | | - v0 = vreinterpretq_p64_u8(x0); |
77 | | - return vgetq_lane_u64(vreinterpretq_u64_p64(v0), 1); |
| 64 | + return vgetq_lane_u64(v0, 1); |
78 | 65 | } |
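
The tail of the new function packs two carryless multiplies and a lane shuffle into a few dense lines; here is a minimal scalar mirror of what those lanes compute. It is a sketch, not part of the patch: clmul64 and barrett_reduce are hypothetical names, the constants are copied from bconsts_val above, and a naive shift-and-xor loop stands in for the PMULL instruction.

#include <stdint.h>

typedef uint64_t u64;

/* Naive 64x64 -> 128 carryless multiply (stand-in for PMULL). */
static void clmul64(u64 a, u64 b, u64 *hi, u64 *lo)
{
	u64 h = 0, l = 0;

	for (int i = 0; i < 64; i++)
		if ((b >> i) & 1) {
			l ^= a << i;
			if (i)
				h ^= a >> (64 - i);
		}
	*hi = h;
	*lo = l;
}

/* Copied from the patch: floor(x^127 / G), (G - x^64) / x */
static const u64 bconsts[2] = { 0x27ecfa329aef9f77ULL,
				0x34d926535897936aULL };

/*
 * Scalar mirror of the final Barrett reduction, where v_hi:v_lo is v0
 * after the x^64 multiply step.  final = pmull64(bconsts, v0) yields
 * q_hi:q_lo; vextq_u64(zero, final, 1) moves q_lo into the upper lane;
 * pmull64_hi_lo(bconsts, final) yields r_hi:r_lo.  The upper lane that
 * the function returns is therefore v_hi ^ q_lo ^ r_hi.
 */
static u64 barrett_reduce(u64 v_lo, u64 v_hi)
{
	u64 q_hi, q_lo, r_hi, r_lo;

	clmul64(bconsts[0], v_lo, &q_hi, &q_lo);
	clmul64(bconsts[1], q_lo, &r_hi, &r_lo);

	(void)q_hi;	/* upper half of the first product is unused */
	(void)r_lo;	/* lower half lands in the discarded lane 0 */
	return v_hi ^ q_lo ^ r_hi;
}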
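A quick end-to-end sanity check of the rewrite is to compare it against a bitwise reference in user space. The harness below is a hypothetical sketch, not part of the patch: it assumes the bit-reflected CRC-64/NVME generator 0x9a6c9329ac4bc9b5 and applies neither the initial nor the final inversion, matching the raw routine here; it maps the kernel's u64/u8 types onto stdint and must be linked with the patched file and built for AArch64 with PMULL available (e.g. -march=armv8-a+crypto). Since the routine only consumes whole 16-byte blocks, the test length is a multiple of 16.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef uint8_t u8;

u64 crc64_nvme_arm64_c(u64 crc, const u8 *p, size_t len);

/* Assumed bit-reflected CRC-64/NVME polynomial. */
#define CRC64_NVME_POLY 0x9a6c9329ac4bc9b5ULL

/* Bitwise reference: one byte in, eight LSB-first shift steps. */
static u64 crc64_nvme_ref(u64 crc, const u8 *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (-(crc & 1) & CRC64_NVME_POLY);
	}
	return crc;
}

int main(void)
{
	u8 buf[64];	/* non-zero multiple of 16 bytes */

	for (size_t i = 0; i < sizeof(buf); i++)
		buf[i] = (u8)(i * 7 + 3);

	u64 want = crc64_nvme_ref(0, buf, sizeof(buf));
	u64 got = crc64_nvme_arm64_c(0, buf, sizeof(buf));

	printf("ref  %016llx\nsimd %016llx\n",
	       (unsigned long long)want, (unsigned long long)got);
	assert(want == got);
	return 0;
}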