arch/riscv/lib: update memmove and memcpy for big-endian

Change the shift patterns for the unaligned memory move and copy code
to deal with big-endian by defining macros that make the shift left and
shift right go the opposite way.
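
A minimal C sketch of the word-splicing step these macros affect (the
helper and its argument names are illustrative, not part of the patch;
it assumes 0 < off < XLEN, with off/rev matching the t3/t4 shift
amounts used in the loops below):

#include <stdint.h>

/* Combine the leftover bytes of the word at the lower address with
 * the leading bytes of the next word. On little-endian the lower
 * addresses sit in the least-significant bits; on big-endian the
 * significance is reversed, so the two shift directions swap. */
static uintptr_t splice(uintptr_t prev, uintptr_t next,
			unsigned off, unsigned rev)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	return (prev >> off) | (next << rev);
#else
	return (prev << off) | (next >> rev);
#endif
}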

Signed-off-by: Ben Dooks <ben.dooks@codethink.co.uk>
Reviewed-by: Leo Yu-Chi Liang <ycliang@andestech.com>
Commit 35d6caad6d (parent 909ceacf4a), authored by Ben Dooks,
2025-08-07 17:49:33 +01:00, committed by Leo Yu-Chi Liang.
2 changed files with 20 additions and 4 deletions

arch/riscv/lib/memcpy.S

@@ -125,6 +125,14 @@ WEAK(memcpy)
 .copy_end:
 	ret
 
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define M_SLL sll
+#define M_SRL srl
+#else
+#define M_SLL srl
+#define M_SRL sll
+#endif
+
 .Lmisaligned_word_copy:
 	/*
 	 * Misaligned word-wise copy.
@@ -144,10 +152,10 @@ WEAK(memcpy)
 	addi	t0, t0, -(SZREG-1)
 	/* At least one iteration will be executed here, no check */
 1:
-	srl	a4, a5, t3
+	M_SRL	a4, a5, t3
 	REG_L	a5, SZREG(a1)
 	addi	a1, a1, SZREG
-	sll	a2, a5, t4
+	M_SLL	a2, a5, t4
 	or	a2, a2, a4
 	REG_S	a2, 0(a0)
 	addi	a0, a0, SZREG
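
A hypothetical C model of the memcpy loop above (names are
illustrative; it assumes the source pointer was word-aligned by the
prologue, 0 < off < XLEN, and rev = XLEN - off):

#include <stddef.h>
#include <stdint.h>

static void misaligned_copy_fwd(uintptr_t *dst, const uintptr_t *src,
				size_t words, unsigned off, unsigned rev)
{
	uintptr_t prev = *src++;		/* a5, loaded before the loop */

	while (words--) {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
		uintptr_t part = prev >> off;	/* M_SRL a4, a5, t3 */
		prev = *src++;			/* REG_L a5, SZREG(a1) */
		*dst++ = part | (prev << rev);	/* M_SLL, or, REG_S */
#else
		uintptr_t part = prev << off;	/* M_SRL expands to sll */
		prev = *src++;
		*dst++ = part | (prev >> rev);	/* M_SLL expands to srl */
#endif
	}
}

Keeping a5 live across iterations means each stored word costs only one
new aligned load, which is the point of the shift-and-or splice.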

arch/riscv/lib/memmove.S

@@ -91,6 +91,14 @@ WEAK(memmove)
 	mv	a0, t0
 	ret
 
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define M_SLL sll
+#define M_SRL srl
+#else
+#define M_SLL srl
+#define M_SRL sll
+#endif
+
 .Lmisaligned_word_copy:
 	/*
 	 * Misaligned word-wise copy.
@@ -110,10 +118,10 @@ WEAK(memmove)
 	addi	t0, t0, SZREG-1
 	/* At least one iteration will be executed here, no check */
 1:
-	sll	a4, a5, t4
+	M_SLL	a4, a5, t4
 	addi	a1, a1, -SZREG
 	REG_L	a5, 0(a1)
-	srl	a2, a5, t3
+	M_SRL	a2, a5, t3
 	or	a2, a2, a4
 	addi	a0, a0, -SZREG
 	REG_S	a2, 0(a0)
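
The memmove loop runs backward (a0/a1 are decremented) so overlapping
regions with the destination above the source copy correctly; the same
shift swap applies. A hypothetical C model under the same assumptions
as before, with src and dst pointing just past their regions:

#include <stddef.h>
#include <stdint.h>

static void misaligned_copy_back(uintptr_t *dst, const uintptr_t *src,
				 size_t words, unsigned off, unsigned rev)
{
	uintptr_t next = *--src;		/* a5, loaded before the loop */

	while (words--) {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
		uintptr_t part = next << rev;	/* M_SLL a4, a5, t4 */
		next = *--src;			/* REG_L a5, 0(a1) */
		*--dst = part | (next >> off);	/* M_SRL, or, REG_S */
#else
		uintptr_t part = next >> rev;	/* M_SLL expands to srl */
		next = *--src;
		*--dst = part | (next << off);	/* M_SRL expands to sll */
#endif
	}
}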