// SPDX-License-Identifier: GPL-2.0-only
/*
 * Accelerated CRC64 (NVMe) using ARM NEON C intrinsics
 */

#include <linux/types.h>
#include <asm/neon-intrinsics.h>

u64 crc64_nvme_arm64_c(u64 crc, const u8 *p, size_t len);
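
/*
 * Illustrative caller sketch (an assumption, not part of this file): a glue
 * wrapper would typically hand only whole 16-byte blocks of a sufficiently
 * large buffer to the NEON routine, bracketed by kernel_neon_begin()/
 * kernel_neon_end() (<asm/neon.h>, <asm/simd.h>), and let the generic
 * table-driven code finish the tail.  The wrapper and fallback names below
 * are hypothetical.
 *
 *	u64 crc64_nvme_update(u64 crc, const u8 *p, size_t len)
 *	{
 *		if (len >= 32 && may_use_simd()) {
 *			size_t n = len & ~(size_t)15;
 *
 *			kernel_neon_begin();
 *			crc = crc64_nvme_arm64_c(crc, p, n);
 *			kernel_neon_end();
 *			p += n;
 *			len -= n;
 *		}
 *		return crc64_nvme_generic(crc, p, len);
 *	}
 */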

/*
 * Extract lane 0 / lane 1 of a poly64x2_t as a scalar poly64_t, as needed
 * for the two-operand vmull_p64() intrinsic.
 */
#define GET_P64_0(v) ((poly64_t)vgetq_lane_u64(vreinterpretq_u64_p64(v), 0))
#define GET_P64_1(v) ((poly64_t)vgetq_lane_u64(vreinterpretq_u64_p64(v), 1))

/* Fold constants: x^191 mod G, x^127 mod G (G = CRC-64/NVME generator) */
static const u64 fold_consts_val[2] = { 0xeadc41fd2ba3d420ULL,
					0x21e9761e252621acULL };
/* Barrett reduction constants: floor(x^127 / G), (G - x^64) / x */
static const u64 bconsts_val[2] = { 0x27ecfa329aef9f77ULL,
				    0x34d926535897936aULL };

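/*
 * Process whole 16-byte blocks of the message with PMULL-based folding.
 *
 * This routine always runs the folding loop at least once and has no tail
 * handling, so it assumes len is a multiple of 16 and at least 32; any
 * remaining bytes must be handled by the caller (for example with the
 * generic table-driven implementation).  It uses kernel-mode NEON, so it
 * must be called between kernel_neon_begin() and kernel_neon_end().
 */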
u64 crc64_nvme_arm64_c(u64 crc, const u8 *p, size_t len)
{
	uint64x2_t v0_u64 = { crc, 0 };
	poly64x2_t v0 = vreinterpretq_p64_u64(v0_u64);
	poly64x2_t fold_consts =
		vreinterpretq_p64_u64(vld1q_u64(fold_consts_val));
	poly64x2_t v1 = vreinterpretq_p64_u8(vld1q_u8(p));

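	/* XOR the initial CRC value into the low 64 bits of the first block. */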
	v0 = vreinterpretq_p64_u8(veorq_u8(vreinterpretq_u8_p64(v0),
					   vreinterpretq_u8_p64(v1)));
	p += 16;
	len -= 16;

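	/*
	 * Fold the 128-bit accumulator over one 16-byte block per iteration:
	 * acc = (acc.lo * (x^191 mod G)) ^ (acc.hi * (x^127 mod G)) ^ block,
	 * where '*' is a 64x64 -> 128-bit carryless multiply (PMULL).
	 */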
	do {
		v1 = vreinterpretq_p64_u8(vld1q_u8(p));

		poly128_t v2 = vmull_high_p64(fold_consts, v0);
		poly128_t v0_128 =
			vmull_p64(GET_P64_0(fold_consts), GET_P64_0(v0));

		uint8x16_t x0 = veorq_u8(vreinterpretq_u8_p128(v0_128),
					 vreinterpretq_u8_p128(v2));

		x0 = veorq_u8(x0, vreinterpretq_u8_p64(v1));
		v0 = vreinterpretq_p64_u8(x0);

		p += 16;
		len -= 16;
	} while (len >= 16);

	/*
	 * Multiply the 128-bit value by x^64 and reduce it back to 128 bits:
	 * fold the low 64 bits up with x^127 mod G and XOR in the high 64
	 * bits shifted down.
	 */
	poly64x2_t v7 = vreinterpretq_p64_u64((uint64x2_t){ 0, 0 });
	poly128_t v1_128 = vmull_p64(GET_P64_1(fold_consts), GET_P64_0(v0));

	uint8x16_t ext_v0 =
		vextq_u8(vreinterpretq_u8_p64(v0), vreinterpretq_u8_p64(v7), 8);
	uint8x16_t x0 = veorq_u8(ext_v0, vreinterpretq_u8_p128(v1_128));

	v0 = vreinterpretq_p64_u8(x0);

	/* Final Barrett reduction */
	poly64x2_t bconsts = vreinterpretq_p64_u64(vld1q_u64(bconsts_val));

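	/*
	 * Use the precomputed reciprocal floor(x^127 / G) to estimate the
	 * quotient of the remaining 128-bit value by G, multiply that
	 * quotient by the generator and XOR it back in; the 64-bit
	 * remainder (the CRC) is left in the upper lane and extracted below.
	 */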
	v1_128 = vmull_p64(GET_P64_0(bconsts), GET_P64_0(v0));

	poly64x2_t v1_64 = vreinterpretq_p64_u8(vreinterpretq_u8_p128(v1_128));
	poly128_t v3_128 = vmull_p64(GET_P64_1(bconsts), GET_P64_0(v1_64));

	x0 = veorq_u8(vreinterpretq_u8_p64(v0), vreinterpretq_u8_p128(v3_128));

	uint8x16_t ext_v2 = vextq_u8(vreinterpretq_u8_p64(v7),
				     vreinterpretq_u8_p128(v1_128), 8);

	x0 = veorq_u8(x0, ext_v2);

	v0 = vreinterpretq_p64_u8(x0);
	return vgetq_lane_u64(vreinterpretq_u64_p64(v0), 1);
}