
Commit 63432fd

Demyan Shulhan authored and Eric Biggers committed
lib/crc: arm64: add NEON accelerated CRC64-NVMe implementation
Implement an optimized CRC64 (NVMe) algorithm for ARM64 using NEON Polynomial Multiply Long (PMULL) instructions. The generic shift-and-XOR software implementation is slow, which creates a bottleneck in NVMe and other storage subsystems. The acceleration is implemented using C intrinsics (<arm_neon.h>) rather than raw assembly for better readability and maintainability.

Key highlights of this implementation:

- Uses 4KB chunking inside scoped_ksimd() to avoid preemption latency spikes on large buffers.
- Pre-calculates and loads fold constants via vld1q_u64() to minimize register spilling.
- Benchmarks show the break-even point against the generic implementation is around 128 bytes. The PMULL path is enabled only for len >= 128.

Performance results (kunit crc_benchmark on Cortex-A72):

- Generic (len=4096): ~268 MB/s
- PMULL (len=4096): ~1556 MB/s (nearly 6x improvement)

Signed-off-by: Demian Shulhan <demyansh@gmail.com>
Link: https://lore.kernel.org/r/20260329074338.1053550-1-demyansh@gmail.com
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
1 parent 6e4d63e commit 63432fd

4 files changed

Lines changed: 116 additions & 1 deletion
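
For reference, the "generic shift-and-XOR" baseline that the commit message benchmarks against boils down to a bit-at-a-time loop like the sketch below. This is not code from the commit: the function name is hypothetical, and it assumes the bit-reflected CRC64-NVMe polynomial 0x9a6c9329ac4bc9b5 and the same raw (caller-inverted) CRC convention used by the generic and arch helpers.

```c
#include <linux/types.h>

/* Hypothetical bit-at-a-time reference, not part of this commit.
 * Assumes the reflected CRC64-NVMe polynomial 0x9a6c9329ac4bc9b5. */
static u64 crc64_nvme_bitwise(u64 crc, const u8 *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			/* Shift one bit out; XOR in the polynomial
			 * whenever the bit shifted out is set. */
			crc = (crc >> 1) ^
			      ((crc & 1) ? 0x9a6c9329ac4bc9b5ULL : 0);
	}
	return crc;
}
```

Every byte costs eight dependent shift/XOR steps here, which is why the PMULL path below, folding 16 bytes per loop iteration, wins by roughly 6x at 4 KiB.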


lib/crc/Kconfig

Lines changed: 1 addition & 0 deletions
```diff
@@ -82,6 +82,7 @@ config CRC64
 config CRC64_ARCH
 	bool
 	depends on CRC64 && CRC_OPTIMIZATIONS
+	default y if ARM64
 	default y if RISCV && RISCV_ISA_ZBC && 64BIT
 	default y if X86_64
```

lib/crc/Makefile

Lines changed: 7 additions & 1 deletion
```diff
@@ -38,9 +38,15 @@ obj-$(CONFIG_CRC64) += crc64.o
 crc64-y := crc64-main.o
 ifeq ($(CONFIG_CRC64_ARCH),y)
 CFLAGS_crc64-main.o += -I$(src)/$(SRCARCH)
+
+CFLAGS_REMOVE_arm64/crc64-neon-inner.o += -mgeneral-regs-only
+CFLAGS_arm64/crc64-neon-inner.o += -ffreestanding -march=armv8-a+crypto
+CFLAGS_arm64/crc64-neon-inner.o += -isystem $(shell $(CC) -print-file-name=include)
+crc64-$(CONFIG_ARM64) += arm64/crc64-neon-inner.o
+
 crc64-$(CONFIG_RISCV) += riscv/crc64_lsb.o riscv/crc64_msb.o
 crc64-$(CONFIG_X86) += x86/crc64-pclmul.o
-endif
+endif # CONFIG_CRC64_ARCH

 obj-y += tests/
```
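
A note on the flag juggling above: arm64 kernel code is normally built with -mgeneral-regs-only, which forbids the compiler from touching FP/SIMD registers, so that flag has to be removed for this one translation unit built from NEON intrinsics. -march=armv8-a+crypto enables PMULL, and -ffreestanding together with the -isystem line makes the compiler's own <arm_neon.h> visible without pulling in userspace headers. The same pattern is used elsewhere in the kernel for SIMD code written with intrinsics.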

lib/crc/arm64/crc64-neon-inner.c

Lines changed: 78 additions & 0 deletions
@@ -0,0 +1,78 @@
```c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Accelerated CRC64 (NVMe) using ARM NEON C intrinsics
 */

#include <linux/types.h>
#include <asm/neon-intrinsics.h>

u64 crc64_nvme_arm64_c(u64 crc, const u8 *p, size_t len);

#define GET_P64_0(v) ((poly64_t)vgetq_lane_u64(vreinterpretq_u64_p64(v), 0))
#define GET_P64_1(v) ((poly64_t)vgetq_lane_u64(vreinterpretq_u64_p64(v), 1))

/* x^191 mod G, x^127 mod G */
static const u64 fold_consts_val[2] = { 0xeadc41fd2ba3d420ULL,
					0x21e9761e252621acULL };
/* floor(x^127 / G), (G - x^64) / x */
static const u64 bconsts_val[2] = { 0x27ecfa329aef9f77ULL,
				    0x34d926535897936aULL };

u64 crc64_nvme_arm64_c(u64 crc, const u8 *p, size_t len)
{
	uint64x2_t v0_u64 = { crc, 0 };
	poly64x2_t v0 = vreinterpretq_p64_u64(v0_u64);
	poly64x2_t fold_consts =
		vreinterpretq_p64_u64(vld1q_u64(fold_consts_val));
	poly64x2_t v1 = vreinterpretq_p64_u8(vld1q_u8(p));

	v0 = vreinterpretq_p64_u8(veorq_u8(vreinterpretq_u8_p64(v0),
					   vreinterpretq_u8_p64(v1)));
	p += 16;
	len -= 16;

	do {
		v1 = vreinterpretq_p64_u8(vld1q_u8(p));

		poly128_t v2 = vmull_high_p64(fold_consts, v0);
		poly128_t v0_128 =
			vmull_p64(GET_P64_0(fold_consts), GET_P64_0(v0));

		uint8x16_t x0 = veorq_u8(vreinterpretq_u8_p128(v0_128),
					 vreinterpretq_u8_p128(v2));

		x0 = veorq_u8(x0, vreinterpretq_u8_p64(v1));
		v0 = vreinterpretq_p64_u8(x0);

		p += 16;
		len -= 16;
	} while (len >= 16);

	/* Multiply the 128-bit value by x^64 and reduce it back to 128 bits. */
	poly64x2_t v7 = vreinterpretq_p64_u64((uint64x2_t){ 0, 0 });
	poly128_t v1_128 = vmull_p64(GET_P64_1(fold_consts), GET_P64_0(v0));

	uint8x16_t ext_v0 =
		vextq_u8(vreinterpretq_u8_p64(v0), vreinterpretq_u8_p64(v7), 8);
	uint8x16_t x0 = veorq_u8(ext_v0, vreinterpretq_u8_p128(v1_128));

	v0 = vreinterpretq_p64_u8(x0);

	/* Final Barrett reduction */
	poly64x2_t bconsts = vreinterpretq_p64_u64(vld1q_u64(bconsts_val));

	v1_128 = vmull_p64(GET_P64_0(bconsts), GET_P64_0(v0));

	poly64x2_t v1_64 = vreinterpretq_p64_u8(vreinterpretq_u8_p128(v1_128));
	poly128_t v3_128 = vmull_p64(GET_P64_1(bconsts), GET_P64_0(v1_64));

	x0 = veorq_u8(vreinterpretq_u8_p64(v0), vreinterpretq_u8_p128(v3_128));

	uint8x16_t ext_v2 = vextq_u8(vreinterpretq_u8_p64(v7),
				     vreinterpretq_u8_p128(v1_128), 8);

	x0 = veorq_u8(x0, ext_v2);

	v0 = vreinterpretq_p64_u8(x0);
	return vgetq_lane_u64(vreinterpretq_u64_p64(v0), 1);
}
```
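
The structure of this function is the standard carryless-multiply folding scheme (as described in Intel's "Fast CRC Computation for Generic Polynomials Using PCLMULQDQ Instruction" white paper), expressed with NEON intrinsics. Each loop iteration folds the 128-bit accumulator across the next 16 message bytes: vmull_p64() multiplies the low half of v0 by x^191 mod G, vmull_high_p64() multiplies the high half by x^127 mod G, and XORing both products with the freshly loaded block yields a value congruent (mod G) to the old state advanced 128 bits through the message. After the loop, the low half is folded once more, which is equivalent to multiplying the state by x^64 and reducing back to 128 bits, and the Barrett step uses floor(x^127 / G) to form the quotient and (G - x^64)/x to subtract quotient-times-G, leaving the 64-bit remainder in the upper lane.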

lib/crc/arm64/crc64.h

Lines changed: 30 additions & 0 deletions
@@ -0,0 +1,30 @@
```c
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * CRC64 using ARM64 PMULL instructions
 */

#include <linux/cpufeature.h>
#include <asm/simd.h>
#include <linux/minmax.h>
#include <linux/sizes.h>

u64 crc64_nvme_arm64_c(u64 crc, const u8 *p, size_t len);

#define crc64_be_arch crc64_be_generic

static inline u64 crc64_nvme_arch(u64 crc, const u8 *p, size_t len)
{
	if (len >= 128 && cpu_have_named_feature(PMULL) &&
	    likely(may_use_simd())) {
		do {
			size_t chunk = min_t(size_t, len & ~15, SZ_4K);

			scoped_ksimd()
				crc = crc64_nvme_arm64_c(crc, p, chunk);

			p += chunk;
			len -= chunk;
		} while (len >= 128);
	}
	return crc64_nvme_generic(crc, p, len);
}
```
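
For context, this header is consumed by the generic lib/crc glue (crc64-main.c, which picks it up via the -I$(src)/$(SRCARCH) include path in the Makefile hunk above) and is not part of this diff. The caller-facing wrapper ends up looking roughly like the sketch below; the naming and the inversion convention (all-ones init and xorout, per the NVMe spec) are assumptions based on the existing lib/crc code, not something this commit adds:

```c
/* Hypothetical sketch of the caller-facing wrapper (not in this diff).
 * CRC64-NVMe inverts the CRC at entry and exit, so crc64_nvme_arch()
 * above only ever sees the raw folded value. */
static inline u64 crc64_nvme(u64 crc, const void *p, size_t size)
{
	if (IS_ENABLED(CONFIG_CRC64_ARCH))
		return ~crc64_nvme_arch(~crc, p, size);
	return ~crc64_nvme_generic(~crc, p, size);
}
```

Capping each scoped_ksimd() section at SZ_4K via the min_t() in crc64_nvme_arch() bounds how long the CPU stays inside a SIMD region on large buffers, at the cost of one extra begin/end pair per 4 KiB; this is the "preemption latency spikes" trade-off the commit message calls out.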
