Skip to content

Commit eec56b2

Browse files
committed
implement memset and memclr as well; reimplement memclr4 and memset4
1 parent 2c621d0 commit eec56b2

File tree

1 file changed

+40
-59
lines changed

1 file changed

+40
-59
lines changed

src/arm.rs

Lines changed: 40 additions & 59 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
use core::intrinsics;
22

33
#[cfg(feature = "mem")]
4-
use mem::{memmove, memset};
4+
use mem::memmove;
55

66
// NOTE This function and the ones below are implemented using assembly because they use a custom
77
// calling convention which can't be implemented using a normal Rust function
@@ -126,28 +126,11 @@ extern "C" {
126126
fn memset(dest: *mut u8, c: i32, n: usize) -> *mut u8;
127127
}
128128

129-
// FIXME: The `*4` and `*8` variants should be defined as aliases.
130-
131-
// Assembly optimized memclr{4,8} and memset{4,8}
132-
//
133-
// Original source: https://github.com/bobbl/libaeabi-cortexm0/blob/15e10736afed6f947fba4b5740274fec4f2c95ca/memset.S
134-
// Author: Jörg Mische <bobbl@gmx.de>
135-
// License: ISC
136-
//
137-
// Copyright (c) 2013 Jörg Mische <bobbl@gmx.de>
138-
//
139-
// Permission to use, copy, modify, and/or distribute this software for any
140-
// purpose with or without fee is hereby granted, provided that the above
141-
// copyright notice and this permission notice appear in all copies.
142-
//
143-
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
144-
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
145-
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
146-
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
147-
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
148-
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
149-
// OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
129+
// Assembly optimized memclr{,4,8} and memset{,4,8}
150130
global_asm!(r#"
131+
@ fn __aeabi_memclr(r0: *mut u8, r1: usize)
132+
.global __aeabi_memclr
133+
151134
@ fn __aeabi_memclr4(r0: *mut u8, r1: usize)
152135
@ r0 is 4-byte aligned
153136
.global __aeabi_memclr4
@@ -156,12 +139,9 @@ global_asm!(r"
156139
@ r0 is 8-byte aligned
157140
.global __aeabi_memclr8
158141
159-
__aeabi_memclr4:
160-
__aeabi_memclr8:
161-
@ r2 = r2 ^ r2 = 0
162-
eors r2, r2
163-
164-
@ tail call __aeabi_memset4(r0, r1, 0)
142+
@ fn __aeabi_memset(r0: *mut u8, r1: usize, r2: i32)
143+
@ r0 is 4-byte aligned
144+
.global __aeabi_memset
165145
166146
@ fn __aeabi_memset4(r0: *mut u8, r1: usize, r2: i32)
167147
@ r0 is 4-byte aligned
@@ -171,42 +151,53 @@ __aeabi_memclr8:
171151
@ r0 is 8-byte aligned
172152
.global __aeabi_memset8
173153
154+
__aeabi_memclr4:
155+
__aeabi_memclr8:
156+
@ r2 = r2 ^ r2 = 0
157+
eors r2, r2
158+
159+
@ tail call __aeabi_memset4(r0, r1, 0)
160+
174161
__aeabi_memset4:
175162
__aeabi_memset8:
176-
@ branch to '2' if r1 < 4
177-
subs r1, #4
178-
blo 2f
163+
cmp r1, #4
164+
blt __aeabi_memset
179165
180166
@ copy r2 lower byte into all the other bytes of r2
181-
@ e.g. r2 = 0xdead_beef -> r2 = 0xefef_efef
167+
@ e.g. r2 = 0xdeadbeef -> r2 = 0xefefefef
182168
lsls r2, r2, #24
183169
lsrs r3, r2, #8
184170
orrs r2, r3
185171
lsrs r3, r2, #16
186172
orrs r2, r3
187173
174+
b 2f
175+
188176
1: @ word-wise copy loop
189177
str r2, [r0]
190178
adds r0, #4
191-
subs r1, #4
192-
bhs 1b
179+
2: subs r1, #4
180+
bcc 1b
193181
194-
2: @ 3 bytes left to copy
195-
@ r1 = (original_r1 % 4) - 4
182+
adds r1, #4
183+
b __aeabi_memset
196184
197-
adds r1, #2 @ branch to '4' if (original_r1 % 4) == {0,1}
198-
blo 4f
199-
strb r2, [r0]
200-
strb r2, [r0, #1]
201-
beq 3f @ branch to '3' if (original_r1 % 4) == 2
202-
strb r2, [r0, #2]
203-
3: bx lr
185+
__aeabi_memclr:
186+
@ r2 = r2 ^ r2 = 0
187+
eors r2, r2
204188
205-
4: @ 1 byte left to copy
206-
adds r1, #1
207-
bne 5f @ branch to '5' if (original_r1 % 4) == 0
208-
strb r2, [r0]
209-
5: bx lr
189+
@ tail call __aeabi_memset(r0, r1, 0)
190+
191+
__aeabi_memset:
192+
cmp r1, #0
193+
beq 2f
194+
195+
1: strb r2, [r0]
196+
adds r0, #1
197+
subs r1, #1
198+
bne 1b
199+
200+
2: bx lr
210201
"#);
211202

212203
// Assembly optimized memcpy{,4,8}
@@ -252,6 +243,7 @@ __aeabi_memcpy:
252243
2: bx lr
253244
"#);
254245

246+
// FIXME: The `*4` and `*8` variants should be defined as aliases.
255247
#[cfg_attr(not(test), no_mangle)]
256248
pub unsafe extern "aapcs" fn __aeabi_memmove(dest: *mut u8, src: *const u8, n: usize) {
257249
memmove(dest, src, n);
@@ -264,14 +256,3 @@ pub unsafe extern "aapcs" fn __aeabi_memmove4(dest: *mut u8, src: *const u8, n:
264256
pub unsafe extern "aapcs" fn __aeabi_memmove8(dest: *mut u8, src: *const u8, n: usize) {
265257
memmove(dest, src, n);
266258
}
267-
268-
// Note the different argument order
269-
#[cfg_attr(not(test), no_mangle)]
270-
pub unsafe extern "aapcs" fn __aeabi_memset(dest: *mut u8, n: usize, c: i32) {
271-
memset(dest, c, n);
272-
}
273-
274-
#[cfg_attr(not(test), no_mangle)]
275-
pub unsafe extern "aapcs" fn __aeabi_memclr(dest: *mut u8, n: usize) {
276-
memset(dest, 0, n);
277-
}

0 commit comments

Comments
 (0)