- /*
- * arch/xtensa/lib/usercopy.S
- *
- * Copy to/from user space (derived from arch/xtensa/lib/hal/memcopy.S)
- *
- * DO NOT COMBINE this function with <arch/xtensa/lib/hal/memcopy.S>.
- * It needs to remain separate and distinct. The hal files are part
- * of the Xtensa link-time HAL, and those files may differ per
- * processor configuration. Patching the kernel for another
- * processor configuration includes replacing the hal files, and we
- * could lose the special functionality for accessing user-space
- * memory during such a patch. We sacrifice a little code space here
- * in favor of simpler code maintenance.
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file "COPYING" in the main directory of
- * this archive for more details.
- *
- * Copyright (C) 2002 Tensilica Inc.
- */
- /*
- * size_t __xtensa_copy_user (void *dst, const void *src, size_t len);
- *
- * The return value is the number of bytes not copied, so a return
- * value of zero means the copy succeeded completely.
- *
- * The general case algorithm is as follows:
- * If the destination and source are both aligned,
- * do 16B chunks with a loop, and then finish up with
- * 8B, 4B, 2B, and 1B copies conditional on the length.
- * If destination is aligned and source unaligned,
- * do the same, but use SRC to align the source data.
- * If destination is unaligned, align it by conditionally
- * copying 1B and 2B and then retest.
- * This code tries to use fall-through branches for the common
- * case of aligned destinations (except for the branches to
- * the alignment label).
- *
- * Register use:
- * a0/ return address
- * a1/ stack pointer
- * a2/ return value
- * a3/ src
- * a4/ length
- * a5/ dst
- * a6/ tmp
- * a7/ tmp
- * a8/ tmp
- * a9/ tmp
- * a10/ tmp
- * a11/ original length
- */
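- /*
- * A minimal C-level sketch of how a caller uses the "bytes not copied"
- * return convention. The wrapper name below is hypothetical and only
- * illustrative; it is not the kernel's actual uaccess code:
- *
- * #include <stddef.h>
- *
- * size_t __xtensa_copy_user(void *dst, const void *src, size_t len);
- *
- * long example_copy_from_user(void *to, const void *from, size_t n)
- * {
- *         size_t left = __xtensa_copy_user(to, from, n);
- *         return left ? -1 : 0;  // nonzero: 'left' bytes were not copied
- * }
- */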
- #include <variant/core.h>
- #ifdef __XTENSA_EB__
- #define ALIGN(R, W0, W1) src R, W0, W1
- #define SSA8(R) ssa8b R
- #else
- #define ALIGN(R, W0, W1) src R, W1, W0
- #define SSA8(R) ssa8l R
- #endif
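- /* SSA8 sets the shift-amount register (SAR) from the low two bits of the
- * source address; ALIGN then uses the funnel-shift 'src' instruction to
- * combine two consecutively loaded words into one aligned 32-bit value.
- * Only the operand order differs between big- and little-endian cores.
- */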
- /* Load or store instructions that may cause exceptions use the EX macro. */
- #define EX(insn,reg1,reg2,offset,handler) \
- 9: insn reg1, reg2, offset; \
- .section __ex_table, "a"; \
- .word 9b, handler; \
- .previous
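- /* For example, EX(l8ui, a6, a3, 0, l_fixup) expands to roughly:
- *
- * 9: l8ui a6, a3, 0
- * .section __ex_table, "a"
- * .word 9b, l_fixup
- * .previous
- *
- * so a fault on that load transfers control to l_fixup.
- */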
- .text
- .align 4
- .global __xtensa_copy_user
- .type __xtensa_copy_user,@function
- __xtensa_copy_user:
- entry sp, 16 # minimal stack frame
- # a2/ dst, a3/ src, a4/ len
- mov a5, a2 # copy dst so that a2 is return value
- mov a11, a4 # preserve original len for error case
- .Lcommon:
- bbsi.l a2, 0, .Ldst1mod2 # if dst is 1 mod 2
- bbsi.l a2, 1, .Ldst2mod4 # if dst is 2 mod 4
- .Ldstaligned: # return here from .Ldst1mod2/.Ldst2mod4 when dst is aligned
- srli a7, a4, 4 # number of loop iterations with 16B
- # per iteration
- movi a8, 3 # if source is also aligned,
- bnone a3, a8, .Laligned # then use word copy
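- # (a8 == 3 masks the two low address bits; bnone branches
- # when src has none of them set, i.e. src is word aligned)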
- SSA8( a3) # set shift amount from byte offset
- bnez a4, .Lsrcunaligned
- movi a2, 0 # return success for len==0
- retw
- /*
- * Destination is unaligned
- */
- .Ldst1mod2: # dst is only byte aligned
- bltui a4, 7, .Lbytecopy # do short copies byte by byte
- # copy 1 byte
- EX(l8ui, a6, a3, 0, l_fixup)
- addi a3, a3, 1
- EX(s8i, a6, a5, 0, s_fixup)
- addi a5, a5, 1
- addi a4, a4, -1
- bbci.l a5, 1, .Ldstaligned # if dst is now aligned, then
- # return to main algorithm
- .Ldst2mod4: # dst 16-bit aligned
- # copy 2 bytes
- bltui a4, 6, .Lbytecopy # do short copies byte by byte
- EX(l8ui, a6, a3, 0, l_fixup)
- EX(l8ui, a7, a3, 1, l_fixup)
- addi a3, a3, 2
- EX(s8i, a6, a5, 0, s_fixup)
- EX(s8i, a7, a5, 1, s_fixup)
- addi a5, a5, 2
- addi a4, a4, -2
- j .Ldstaligned # dst is now aligned, return to main algorithm
- /*
- * Byte by byte copy
- */
- .align 4
- .byte 0 # 1 mod 4 alignment for LOOPNEZ
- # (0 mod 4 alignment for LBEG)
- .Lbytecopy:
- #if XCHAL_HAVE_LOOPS
- loopnez a4, .Lbytecopydone
- #else /* !XCHAL_HAVE_LOOPS */
- beqz a4, .Lbytecopydone
- add a7, a3, a4 # a7 = end address for source
- #endif /* !XCHAL_HAVE_LOOPS */
- .Lnextbyte:
- EX(l8ui, a6, a3, 0, l_fixup)
- addi a3, a3, 1
- EX(s8i, a6, a5, 0, s_fixup)
- addi a5, a5, 1
- #if !XCHAL_HAVE_LOOPS
- blt a3, a7, .Lnextbyte
- #endif /* !XCHAL_HAVE_LOOPS */
- .Lbytecopydone:
- movi a2, 0 # return success for len bytes copied
- retw
- /*
- * Destination and source are word-aligned.
- */
- # copy 16 bytes per iteration for word-aligned dst and word-aligned src
- .align 4 # 1 mod 4 alignment for LOOPNEZ
- .byte 0 # (0 mod 4 alignment for LBEG)
- .Laligned:
- #if XCHAL_HAVE_LOOPS
- loopnez a7, .Loop1done
- #else /* !XCHAL_HAVE_LOOPS */
- beqz a7, .Loop1done
- slli a8, a7, 4
- add a8, a8, a3 # a8 = end of last 16B source chunk
- #endif /* !XCHAL_HAVE_LOOPS */
- .Loop1:
- EX(l32i, a6, a3, 0, l_fixup)
- EX(l32i, a7, a3, 4, l_fixup)
- EX(s32i, a6, a5, 0, s_fixup)
- EX(l32i, a6, a3, 8, l_fixup)
- EX(s32i, a7, a5, 4, s_fixup)
- EX(l32i, a7, a3, 12, l_fixup)
- EX(s32i, a6, a5, 8, s_fixup)
- addi a3, a3, 16
- EX(s32i, a7, a5, 12, s_fixup)
- addi a5, a5, 16
- #if !XCHAL_HAVE_LOOPS
- blt a3, a8, .Loop1
- #endif /* !XCHAL_HAVE_LOOPS */
- .Loop1done:
- bbci.l a4, 3, .L2
- # copy 8 bytes
- EX(l32i, a6, a3, 0, l_fixup)
- EX(l32i, a7, a3, 4, l_fixup)
- addi a3, a3, 8
- EX(s32i, a6, a5, 0, s_fixup)
- EX(s32i, a7, a5, 4, s_fixup)
- addi a5, a5, 8
- .L2:
- bbci.l a4, 2, .L3
- # copy 4 bytes
- EX(l32i, a6, a3, 0, l_fixup)
- addi a3, a3, 4
- EX(s32i, a6, a5, 0, s_fixup)
- addi a5, a5, 4
- .L3:
- bbci.l a4, 1, .L4
- # copy 2 bytes
- EX(l16ui, a6, a3, 0, l_fixup)
- addi a3, a3, 2
- EX(s16i, a6, a5, 0, s_fixup)
- addi a5, a5, 2
- .L4:
- bbci.l a4, 0, .L5
- # copy 1 byte
- EX(l8ui, a6, a3, 0, l_fixup)
- EX(s8i, a6, a5, 0, s_fixup)
- .L5:
- movi a2, 0 # return success for len bytes copied
- retw
- /*
- * Destination is aligned, Source is unaligned
- */
- .align 4
- .byte 0 # 1 mod 4 alignment for LOOPNEZ
- # (0 mod 4 alignment for LBEG)
- .Lsrcunaligned:
- # copy 16 bytes per iteration for word-aligned dst and unaligned src
- and a10, a3, a8 # save unalignment offset for below
- sub a3, a3, a10 # align a3 (to avoid sim warnings only; not needed for hardware)
- EX(l32i, a6, a3, 0, l_fixup) # load first word
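- # From here on, a6 always holds the most recently loaded source word;
- # each iteration loads one word ahead and uses ALIGN to funnel-shift
- # two adjacent words into the aligned value that gets stored.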
- #if XCHAL_HAVE_LOOPS
- loopnez a7, .Loop2done
- #else /* !XCHAL_HAVE_LOOPS */
- beqz a7, .Loop2done
- slli a12, a7, 4
- add a12, a12, a3 # a12 = end of last 16B source chunk
- #endif /* !XCHAL_HAVE_LOOPS */
- .Loop2:
- EX(l32i, a7, a3, 4, l_fixup)
- EX(l32i, a8, a3, 8, l_fixup)
- ALIGN( a6, a6, a7)
- EX(s32i, a6, a5, 0, s_fixup)
- EX(l32i, a9, a3, 12, l_fixup)
- ALIGN( a7, a7, a8)
- EX(s32i, a7, a5, 4, s_fixup)
- EX(l32i, a6, a3, 16, l_fixup)
- ALIGN( a8, a8, a9)
- EX(s32i, a8, a5, 8, s_fixup)
- addi a3, a3, 16
- ALIGN( a9, a9, a6)
- EX(s32i, a9, a5, 12, s_fixup)
- addi a5, a5, 16
- #if !XCHAL_HAVE_LOOPS
- blt a3, a12, .Loop2
- #endif /* !XCHAL_HAVE_LOOPS */
- .Loop2done:
- bbci.l a4, 3, .L12
- # copy 8 bytes
- EX(l32i, a7, a3, 4, l_fixup)
- EX(l32i, a8, a3, 8, l_fixup)
- ALIGN( a6, a6, a7)
- EX(s32i, a6, a5, 0, s_fixup)
- addi a3, a3, 8
- ALIGN( a7, a7, a8)
- EX(s32i, a7, a5, 4, s_fixup)
- addi a5, a5, 8
- mov a6, a8
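- # a8 was the last word loaded; keep it in a6 as the
- # "previous word" for the 4-byte case below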
- .L12:
- bbci.l a4, 2, .L13
- # copy 4 bytes
- EX(l32i, a7, a3, 4, l_fixup)
- addi a3, a3, 4
- ALIGN( a6, a6, a7)
- EX(s32i, a6, a5, 0, s_fixup)
- addi a5, a5, 4
- mov a6, a7
- .L13:
- add a3, a3, a10 # readjust a3 with correct misalignment
- bbci.l a4, 1, .L14
- # copy 2 bytes
- EX(l8ui, a6, a3, 0, l_fixup)
- EX(l8ui, a7, a3, 1, l_fixup)
- addi a3, a3, 2
- EX(s8i, a6, a5, 0, s_fixup)
- EX(s8i, a7, a5, 1, s_fixup)
- addi a5, a5, 2
- .L14:
- bbci.l a4, 0, .L15
- # copy 1 byte
- EX(l8ui, a6, a3, 0, l_fixup)
- EX(s8i, a6, a5, 0, s_fixup)
- .L15:
- movi a2, 0 # return success for len bytes copied
- retw
- .section .fixup, "ax"
- .align 4
- /* a2 = original dst; a5 = current dst; a11 = original len
- * bytes_copied = a5 - a2
- * retval = bytes_not_copied = original len - bytes_copied
- * retval = a11 - (a5 - a2)
- *
- * Clearing the remaining pieces of kernel memory plugs security
- * holes. This functionality is the equivalent of the *_zeroing
- * functions that some architectures provide.
- */
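- /* For example, if the caller asked for 100 bytes (a11 == 100) and a fault
- * occurred after 60 bytes had been stored (a5 - a2 == 60), both fixups
- * return 40; l_fixup additionally zeroes the remaining 40 destination
- * bytes via memset before returning.
- */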
- .Lmemset:
- .word memset
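- # literal pool entry holding the address of memset,
- # loaded with l32r and called via callx4 in l_fixup below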
- s_fixup:
- sub a2, a5, a2 /* a2 <-- bytes copied */
- sub a2, a11, a2 /* a2 <-- bytes not copied */
- retw
- l_fixup:
- sub a2, a5, a2 /* a2 <-- bytes copied */
- sub a2, a11, a2 /* a2 <-- bytes not copied == return value */
- /* void *memset(void *s, int c, size_t n); */
- mov a6, a5 /* s */
- movi a7, 0 /* c */
- mov a8, a2 /* n */
- l32r a4, .Lmemset
- callx4 a4
- /* Ignore memset return value in a6. */
- /* a2 still contains bytes not copied. */
- retw