[PATCH 1/1][Xenial] crypto: arm64/sha - avoid non-standard inline asm tricks
dann frazier
dann.frazier at canonical.com
Fri Jan 15 23:32:31 UTC 2021
From: Ard Biesheuvel <ard.biesheuvel at linaro.org>
BugLink: https://bugs.launchpad.net/bugs/1907489
commit f4857f4c2ee9aa4e2aacac1a845352b00197fb57 upstream.
Replace the inline asm which exports struct offsets as ELF symbols
with proper const variables exposing the same values. This works
around an issue with Clang, which does not interpret the "i" (or "I")
constraints in the same way as GCC.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel at linaro.org>
Tested-by: Matthias Kaehlcke <mka at chromium.org>
Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
Signed-off-by: Nathan Chancellor <natechancellor at gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
Signed-off-by: Juerg Haefliger <juergh at canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza at canonical.com>
Signed-off-by: dann frazier <dann.frazier at canonical.com>
---
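Note for reviewers: the removed ASM_EXPORT macro emitted each struct offset
as a global ELF symbol through an inline asm immediate operand, and the "I"
constraint on that operand is where Clang and GCC disagree. The replacement
exposes the offsets as ordinary const variables, which the assembly now
loads at run time with ldr_l (the arm64 assembler macro that resolves a
symbol PC-relatively via an adrp/ldr pair). A minimal C sketch of the
before/after, condensed from the sha1 hunks below and assuming the
surrounding glue-file context (struct sha1_ce_state, u32, offsetof):

    /* before: export a compile-time constant as an ELF symbol; Clang
     * rejects some values of the "I" immediate constraint that GCC
     * accepts, which is the build failure being fixed */
    #define ASM_EXPORT(sym, val) \
            asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val));
    ASM_EXPORT(sha1_ce_offsetof_count,
               offsetof(struct sha1_ce_state, sst.count));

    /* after: a plain const object the asm routine loads via ldr_l */
    const u32 sha1_ce_offsetof_count =
            offsetof(struct sha1_ce_state, sst.count);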
arch/arm64/crypto/sha1-ce-core.S | 6 ++++--
arch/arm64/crypto/sha1-ce-glue.c | 11 +++--------
arch/arm64/crypto/sha2-ce-core.S | 6 ++++--
arch/arm64/crypto/sha2-ce-glue.c | 13 +++++--------
4 files changed, 16 insertions(+), 20 deletions(-)
diff --git a/arch/arm64/crypto/sha1-ce-core.S b/arch/arm64/crypto/sha1-ce-core.S
index c98e7e849f06..8550408735a0 100644
--- a/arch/arm64/crypto/sha1-ce-core.S
+++ b/arch/arm64/crypto/sha1-ce-core.S
@@ -82,7 +82,8 @@ ENTRY(sha1_ce_transform)
ldr dgb, [x0, #16]
/* load sha1_ce_state::finalize */
- ldr w4, [x0, #:lo12:sha1_ce_offsetof_finalize]
+ ldr_l w4, sha1_ce_offsetof_finalize, x4
+ ldr w4, [x0, x4]
/* load input */
0: ld1 {v8.4s-v11.4s}, [x1], #64
@@ -132,7 +133,8 @@ CPU_LE( rev32 v11.16b, v11.16b )
* the padding is handled by the C code in that case.
*/
cbz x4, 3f
- ldr x4, [x0, #:lo12:sha1_ce_offsetof_count]
+ ldr_l w4, sha1_ce_offsetof_count, x4
+ ldr x4, [x0, x4]
movi v9.2d, #0
mov x8, #0x80000000
movi v10.2d, #0
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index 01e48b8970b1..1b7b4684c35b 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -17,9 +17,6 @@
#include <linux/crypto.h>
#include <linux/module.h>
-#define ASM_EXPORT(sym, val) \
- asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val));
-
MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel at linaro.org>");
MODULE_LICENSE("GPL v2");
@@ -32,6 +29,9 @@ struct sha1_ce_state {
asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
int blocks);
+const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count);
+const u32 sha1_ce_offsetof_finalize = offsetof(struct sha1_ce_state, finalize);
+
static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
@@ -52,11 +52,6 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
struct sha1_ce_state *sctx = shash_desc_ctx(desc);
bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE) && len;
- ASM_EXPORT(sha1_ce_offsetof_count,
- offsetof(struct sha1_ce_state, sst.count));
- ASM_EXPORT(sha1_ce_offsetof_finalize,
- offsetof(struct sha1_ce_state, finalize));
-
/*
* Allow the asm code to perform the finalization if there is no
* partial data and the input is a round multiple of the block size.
diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/crypto/sha2-ce-core.S
index 01cfee066837..679c6c002f4f 100644
--- a/arch/arm64/crypto/sha2-ce-core.S
+++ b/arch/arm64/crypto/sha2-ce-core.S
@@ -88,7 +88,8 @@ ENTRY(sha2_ce_transform)
ld1 {dgav.4s, dgbv.4s}, [x0]
/* load sha256_ce_state::finalize */
- ldr w4, [x0, #:lo12:sha256_ce_offsetof_finalize]
+ ldr_l w4, sha256_ce_offsetof_finalize, x4
+ ldr w4, [x0, x4]
/* load input */
0: ld1 {v16.4s-v19.4s}, [x1], #64
@@ -136,7 +137,8 @@ CPU_LE( rev32 v19.16b, v19.16b )
* the padding is handled by the C code in that case.
*/
cbz x4, 3f
- ldr x4, [x0, #:lo12:sha256_ce_offsetof_count]
+ ldr_l w4, sha256_ce_offsetof_count, x4
+ ldr x4, [x0, x4]
movi v17.2d, #0
mov x8, #0x80000000
movi v18.2d, #0
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index 7a7f95b94869..356ca9397a86 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -17,9 +17,6 @@
#include <linux/crypto.h>
#include <linux/module.h>
-#define ASM_EXPORT(sym, val) \
- asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val));
-
MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel at linaro.org>");
MODULE_LICENSE("GPL v2");
@@ -32,6 +29,11 @@ struct sha256_ce_state {
asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
int blocks);
+const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
+ sst.count);
+const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
+ finalize);
+
static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
@@ -52,11 +54,6 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
struct sha256_ce_state *sctx = shash_desc_ctx(desc);
bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE) && len;
- ASM_EXPORT(sha256_ce_offsetof_count,
- offsetof(struct sha256_ce_state, sst.count));
- ASM_EXPORT(sha256_ce_offsetof_finalize,
- offsetof(struct sha256_ce_state, finalize));
-
/*
* Allow the asm code to perform the finalization if there is no
* partial data and the input is a round multiple of the block size.
--
2.30.0