Rev 286 | Rev 424 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | RSS feed
/****************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2006 by Thom Johansen
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#define ASM_FILE
#include "global.h"

/* ARMv4T doesn't switch the T bit when popping pc directly, we must use BX.
 * These macros emit a "pop into pc" style return that is safe everywhere:
 * on ARMv4 Thumb-interwork builds they pop into lr and return with bx,
 * otherwise they load pc directly. */
.macro ldmpc cond="", order="ia", regs
#if ARM_ARCH == 4 && defined(USE_THUMB)
    ldm\cond\order sp!, { \regs, lr }
    bx\cond lr
#else
    ldm\cond\order sp!, { \regs, pc }
#endif
.endm

.macro ldrpc cond=""
#if ARM_ARCH == 4 && defined(USE_THUMB)
    ldr\cond lr, [sp], #4
    bx\cond lr
#else
    ldr\cond pc, [sp], #4
#endif
.endm

    .section .icode,"ax",%progbits
    .align 2

/* The following code is based on code found in Linux kernel version 2.6.15.3
 * linux/arch/arm/lib/memset.S
 *
 * Copyright (C) 1995-2000 Russell King
 */

/* void *memset(void *dst, int c, size_t n)
 * In:       r0 = dst, r1 = fill byte, r2 = byte count
 * Out:      r0 = dst (unchanged, as the C standard requires)
 * Clobbers: r3, ip, flags; lr is saved on the stack around the
 *           64-byte block loop and restored before use of the tail path.
 * The buffer is filled BACKWARDS: r0 is first advanced to dst + n and
 * every store pre-decrements it, so r0 ends back at dst on return. */

/* Alignment stub, entered from memset below with r3 = (dst + n) & 3,
 * i.e. the number of bytes (1..3) to store to bring the downward-moving
 * pointer onto a word boundary. */
1:  cmp     r2, #4                  @ 1  do we have enough
    blt     5f                      @ 1  bytes to align with? (no: tail only)
    cmp     r3, #2                  @ 1  store 3/2/1 byte(s) as r3 dictates
    strgtb  r1, [r0, #-1]!          @ 1  r3 == 3
    strgeb  r1, [r0, #-1]!          @ 1  r3 >= 2
    strb    r1, [r0, #-1]!          @ 1  always at least one byte
    sub     r2, r2, r3              @ 1  r2 = r2 - r3
    b       2f

.global memset
.type memset,%function
memset:
    add     r0, r0, r2              @ we'll write backwards in memory
    ands    r3, r0, #3              @ 1  unaligned?
    bne     1b                      @ 1  yes: byte-fill down to a boundary
2:
/*
 * we know that the pointer in r0 is aligned to a word boundary.
 * Replicate the fill byte into all four byte lanes of r1 (and r3).
 */
    orr     r1, r1, r1, lsl #8
    orr     r1, r1, r1, lsl #16
    mov     r3, r1
    cmp     r2, #16
    blt     5f                      @ < 16 bytes left: word/byte tail only
/*
 * We need an extra register for this loop - save the return address and
 * use the LR
 */
    str     lr, [sp, #-4]!
    mov     ip, r1
    mov     lr, r1

3:  subs    r2, r2, #64
    stmgedb r0!, {r1, r3, ip, lr}   @ 64 bytes at a time.
    stmgedb r0!, {r1, r3, ip, lr}   @ (each stm stores 16 bytes,
    stmgedb r0!, {r1, r3, ip, lr}   @  descending, with writeback;
    stmgedb r0!, {r1, r3, ip, lr}   @  executed only while count >= 0)
    bgt     3b                      @ another full 64-byte chunk remains
    ldrpc   cond=eq                 @ Now <64 bytes to go. Count hit exactly
                                    @ zero: pop saved lr into pc and return.
/*
 * No need to correct the count; we're only testing bits from now on.
 * (r2 is negative by the unfilled amount; its low six bits still equal
 * the remaining byte count, so tst picks out the right sizes.)
 */
    tst     r2, #32
    stmnedb r0!, {r1, r3, ip, lr}
    stmnedb r0!, {r1, r3, ip, lr}
    tst     r2, #16
    stmnedb r0!, {r1, r3, ip, lr}
    ldr     lr, [sp], #4            @ restore return address pushed above

5:  tst     r2, #8                  @ also entered directly for short fills
    stmnedb r0!, {r1, r3}
    tst     r2, #4
    strne   r1, [r0, #-4]!
/*
 * When we get here, we've got less than 4 bytes to zero. We
 * may have an unaligned pointer as well.
 */
6:  tst     r2, #2
    strneb  r1, [r0, #-1]!
    strneb  r1, [r0, #-1]!
    tst     r2, #1
    strneb  r1, [r0, #-1]!
    bx      lr
.end:
    .size memset,.end-memset