Improved scrolling and added better memcpy/memmove functions written in assembly.
@@ -3,25 +3,64 @@
 .global memcpy
 .type memcpy @function
 
-// int memcpy(void* dest [%rdi], void* src [%rsi], size_t count [%rdx])
+// void* memcpy(void* dest [%rdi], void* src [%rsi], size_t count [%rdx])
 //
 memcpy:
-    cmpq %rdx, 4
-    jl memcpy_bytes
-    movq (%rsi), %rax
-    movq %rax, (%rdi)
-    addq $4, %rsi
-    addq $4, %rdi
-    subq $4, %rdx
-    jmp memcpy
+    movq %rdi, %rax         // return value
+memcpy_qwords:
+    cmpq $8, %rdx           // check if the remaining bytes are at least 8
+    jl memcpy_bytes         // if not, copy the remaining bytes one byte at a time
+    movq (%rsi), %rcx       // if yes, copy using movq
+    movq %rcx, (%rdi)
+    addq $8, %rsi
+    addq $8, %rdi
+    subq $8, %rdx
+    jmp memcpy_qwords
 memcpy_bytes:
-    cmpq %rdx, 0
+    cmpq $0, %rdx
     je memcpy_end
-    movb (%rsi), %ah
-    movb %ah, (%rdi)
+    movb (%rsi), %ch
+    movb %ch, (%rdi)
     addq $1, %rsi
     addq $1, %rdi
     subq $1, %rdx
     jmp memcpy_bytes
 memcpy_end:
     ret
+
+.global memmove
+.type memmove @function
+// void* memmove(void* dest [%rdi], void* src [%rsi], size_t count [%rdx])
+//
+memmove:
+    movq %rdi, %rax         // preserve dest as return value
+    cmpq %rsi, %rdi         // check if dest > src
+    jg memmove_backward     // if yes, do everything backwards
+    movq %rsi, %rcx         // check if (src - dest) < 8
+    subq %rdi, %rcx
+    cmpq $8, %rcx
+    jl memcpy_bytes         // if yes, we have to do a bytewise copy
+    jmp memcpy_qwords       // otherwise copy whole qwords
+memmove_backward:           // dest > src, copy backwards
+    addq %rdx, %rdi         // dest = dest + count
+    addq %rdx, %rsi         // src = src + count
+memmove_qwords:
+    cmpq $8, %rdx           // check if the remaining bytes are at least 8
+    jl memmove_bytes        // if not, copy the remaining bytes one byte at a time
+    movq -8(%rsi), %rcx     // if yes, copy using movq
+    movq %rcx, -8(%rdi)
+    subq $8, %rsi
+    subq $8, %rdi
+    subq $8, %rdx
+    jmp memmove_qwords
+memmove_bytes:
+    cmpq $0, %rdx
+    je memmove_end
+    movb -1(%rsi), %ch
+    movb %ch, -1(%rdi)
+    subq $1, %rsi
+    subq $1, %rdi
+    subq $1, %rdx
+    jmp memmove_bytes
+memmove_end:
+    ret
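For context, a minimal user-space sketch of the overlapping-copy case the new memmove_backward path exists to handle; it uses the C library's memmove purely as a stand-in for the kernel's implementation above:

#include <stdio.h>
#include <string.h>

int main(void) {
    // Shift six bytes one position to the right within the same buffer.
    // Source [buf, buf+6) and destination [buf+1, buf+7) overlap and
    // dest > src, so a correct memmove must copy from the end backwards;
    // a plain forward copy would overwrite source bytes before reading them.
    char buf[8] = "abcdef";
    memmove(buf + 1, buf, 6);
    printf("%s\n", buf + 1);   // prints "abcdef"
    return 0;
}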