1 // SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */
9 asmlinkage void poly1305_init_arm(void *ctx, const u8 key[16]);
10 asmlinkage void poly1305_blocks_arm(void *ctx, const u8 *inp, const size_t len,
12 asmlinkage void poly1305_emit_arm(void *ctx, u8 mac[16], const u32 nonce[4]);
13 asmlinkage void poly1305_blocks_neon(void *ctx, const u8 *inp, const size_t len,
15 asmlinkage void poly1305_emit_neon(void *ctx, u8 mac[16], const u32 nonce[4]);
17 static bool poly1305_use_neon __ro_after_init;
18 static bool *const poly1305_nobs[] __initconst = { &poly1305_use_neon };
20 static void __init poly1305_fpu_init(void)
22 #if defined(CONFIG_ZINC_ARCH_ARM64)
23 poly1305_use_neon = cpu_have_named_feature(ASIMD);
24 #elif defined(CONFIG_ZINC_ARCH_ARM)
25 poly1305_use_neon = elf_hwcap & HWCAP_NEON;
/* Layout of the accumulator state as the assembly code sees it.  The u32 h[5]
 * view holds the NEON base 2^26 limbs; the h0/h1/h2 view holds the scalar
 * base 2^64 (arm64) / base 2^32 (arm) representation. */
#if defined(CONFIG_ZINC_ARCH_ARM64)
struct poly1305_arch_internal {
	union {
		u32 h[5];
		struct {
			u64 h0, h1, h2;
		};
	};
	u64 is_base2_26;
	u64 r[2];
};
#elif defined(CONFIG_ZINC_ARCH_ARM)
struct poly1305_arch_internal {
	union {
		u32 h[5];
		struct {
			u64 h0, h1;
			u32 h2;
		} __packed;
	};
	u32 r[4];
	u32 is_base2_26;
};
#endif
/* The NEON code uses base 2^26, while the scalar code uses base 2^64 on 64-bit
 * and base 2^32 on 32-bit. If we hit the unfortunate situation of using NEON
 * and then having to go back to scalar -- because the user is silly and has
 * called the update function from two separate contexts -- then we need to
 * convert back to the original base before proceeding. The below function is
 * written for 64-bit integers, and so we have to swap words at the end on
 * big-endian 32-bit. It is possible to reason that the initial reduction below
 * is sufficient given the implementation invariants. However, for an avoidance
 * of doubt and because this is not performance critical, we do the full
 * reduction anyway.
 */
65 static void convert_to_base2_64(void *ctx)
67 struct poly1305_arch_internal *state = ctx;
70 if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !state->is_base2_26)
73 cy = state->h[0] >> 26; state->h[0] &= 0x3ffffff; state->h[1] += cy;
74 cy = state->h[1] >> 26; state->h[1] &= 0x3ffffff; state->h[2] += cy;
75 cy = state->h[2] >> 26; state->h[2] &= 0x3ffffff; state->h[3] += cy;
76 cy = state->h[3] >> 26; state->h[3] &= 0x3ffffff; state->h[4] += cy;
77 state->h0 = ((u64)state->h[2] << 52) | ((u64)state->h[1] << 26) | state->h[0];
78 state->h1 = ((u64)state->h[4] << 40) | ((u64)state->h[3] << 14) | (state->h[2] >> 12);
79 state->h2 = state->h[4] >> 24;
80 if (IS_ENABLED(CONFIG_ZINC_ARCH_ARM) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
81 state->h0 = rol64(state->h0, 32);
82 state->h1 = rol64(state->h1, 32);
84 #define ULT(a, b) ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1))
85 cy = (state->h2 >> 2) + (state->h2 & ~3ULL);
88 state->h1 += (cy = ULT(state->h0, cy));
89 state->h2 += ULT(state->h1, cy);
91 state->is_base2_26 = 0;
94 static inline bool poly1305_init_arch(void *ctx,
95 const u8 key[POLY1305_KEY_SIZE])
97 poly1305_init_arm(ctx, key);
101 static inline bool poly1305_blocks_arch(void *ctx, const u8 *inp,
102 size_t len, const u32 padbit,
103 simd_context_t *simd_context)
105 /* SIMD disables preemption, so relax after processing each page. */
106 BUILD_BUG_ON(PAGE_SIZE < POLY1305_BLOCK_SIZE ||
107 PAGE_SIZE % POLY1305_BLOCK_SIZE);
109 if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !poly1305_use_neon ||
110 !simd_use(simd_context)) {
111 convert_to_base2_64(ctx);
112 poly1305_blocks_arm(ctx, inp, len, padbit);
117 const size_t bytes = min_t(size_t, len, PAGE_SIZE);
119 poly1305_blocks_neon(ctx, inp, bytes, padbit);
124 simd_relax(simd_context);
129 static inline bool poly1305_emit_arch(void *ctx, u8 mac[POLY1305_MAC_SIZE],
131 simd_context_t *simd_context)
133 if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !poly1305_use_neon ||
134 !simd_use(simd_context)) {
135 convert_to_base2_64(ctx);
136 poly1305_emit_arm(ctx, mac, nonce);
138 poly1305_emit_neon(ctx, mac, nonce);