Improved SCons script for building the userspace stdlib.

This commit is contained in:
2024-02-02 22:16:02 +01:00
parent a096458bdb
commit 3cd4fe9537
7 changed files with 18 additions and 18 deletions

View File

@@ -24,16 +24,14 @@ def get_crt_object(name: str) -> str:
# NOTE(review): this is a rendered diff hunk with the +/- markers stripped:
# the hard-coded 'src/crt/*.s' assignments below are the REMOVED lines and
# the kernel_env['CRT*_PATH'] assignments are their replacements — verify
# against the original commit before treating this as a runnable script.
crtbegin_o = get_crt_object('crtbegin.o')
crtend_o = get_crt_object('crtend.o')
crti_o = kernel_env.Object('src/crt/crti.s')
crtn_o = kernel_env.Object('src/crt/crtn.s')
crti_o = kernel_env.Object(kernel_env['CRTI_PATH'])
crtn_o = kernel_env.Object(kernel_env['CRTN_PATH'])
# Splice the CRT objects around the library group so the link order is
# crti -> crtbegin -> libs -> crtend -> crtn, with -lgcc resolved inside
# the --start-group/--end-group cycle.
kernel_env['LINKCOM'] = env['LINKCOM'].replace('$_LIBFLAGS', f'{crti_o[0].abspath} {crtbegin_o} -Wl,--start-group $_LIBFLAGS -Wl,--end-group -lgcc {crtend_o} {crtn_o[0].abspath}')
# ISR sources get -mgeneral-regs-only so interrupt handlers never touch
# SSE/FP state (which is not saved on the interrupt path).
kernel_isr_sources = env['KERNEL_ISR_SOURCES'] + Split('''
''')
kernel_sources = env['KERNEL_SOURCES'] + Split('''
src/cstdlib/memory.s
src/kernel/boot.s
src/kernel/startup.cpp
''') + [kernel_env.Object(f, CCFLAGS = kernel_env['CCFLAGS'] + ['-mgeneral-regs-only']) for f in kernel_isr_sources]

View File

@@ -1,16 +0,0 @@
/* x86_64 crti.s */
/* Prologue halves of _init/_fini.  The link order crti.o -> crtbegin.o ->
   objects -> crtend.o -> crtn.o concatenates the .init/.fini sections, so
   each fragment below is intentionally incomplete: crtn.s provides the
   matching epilogue and `ret`.  Do not add a `ret` here. */
.section .init
.global _init
.type _init, @function
_init:
/* open a standard frame; the matching pop/ret lives in crtn.s */
push %rbp
movq %rsp, %rbp
/* gcc will nicely put the contents of crtbegin.o's .init section here. */
.section .fini
.global _fini
.type _fini, @function
_fini:
/* same frame setup for the termination function */
push %rbp
movq %rsp, %rbp
/* gcc will nicely put the contents of crtbegin.o's .fini section here. */

View File

@@ -1,10 +0,0 @@
/* x86_64 crtn.s */
/* Epilogue halves of _init/_fini.  These close the frames opened in
   crti.s; the section contents contributed by crtbegin.o/crtend.o and by
   every object in between are spliced in before these instructions by
   the linker's section concatenation.  No labels by design. */
.section .init
/* gcc will nicely put the contents of crtend.o's .init section here. */
popq %rbp
ret
.section .fini
/* gcc will nicely put the contents of crtend.o's .fini section here. */
popq %rbp
ret

View File

@@ -1,66 +0,0 @@
.section .text

//----------------------------------------------------------------------
// void *memcpy(void *dest, const void *src, size_t count)
// ABI:   SysV AMD64 (leaf; no stack usage)
// In:    rdi = dest, rsi = src, rdx = count (regions must not overlap)
// Out:   rax = dest
// Clobb: rcx, rdi, rsi, rdx, flags
//
// Copies 8 bytes at a time while at least 8 remain, then finishes
// byte-by-byte.  memmove's forward path jumps into memcpy_qwords /
// memcpy_bytes, so these labels are part of the contract of this file.
//----------------------------------------------------------------------
.global memcpy
.type memcpy @function
memcpy:
	movq	%rdi, %rax		// return value = dest
memcpy_qwords:
	cmpq	$8, %rdx		// at least one full qword left?
	jb	memcpy_bytes		// unsigned: count is size_t (was jl)
	movq	(%rsi), %rcx		// *dest++ = *src++, 8 bytes at a time
	movq	%rcx, (%rdi)
	addq	$8, %rsi
	addq	$8, %rdi
	subq	$8, %rdx
	jmp	memcpy_qwords
memcpy_bytes:
	testq	%rdx, %rdx		// any tail bytes left?
	je	memcpy_end
	movb	(%rsi), %cl		// %cl, not %ch: avoid high-byte partial-reg use
	movb	%cl, (%rdi)
	addq	$1, %rsi
	addq	$1, %rdi
	subq	$1, %rdx
	jmp	memcpy_bytes
memcpy_end:
	ret

//----------------------------------------------------------------------
// void *memmove(void *dest, const void *src, size_t count)
// ABI:   SysV AMD64 (leaf; no stack usage)
// In:    rdi = dest, rsi = src, rdx = count (regions may overlap)
// Out:   rax = dest
// Clobb: rcx, rdi, rsi, rdx, flags
//
// dest <= src: copy forward.  If the gap (src - dest) is >= 8 the qword
// loop is safe even with overlap (each load precedes the store that
// could clobber it); otherwise fall back to bytewise forward copy.
// dest > src: copy backward from the end of both regions.
//----------------------------------------------------------------------
.global memmove
.type memmove @function
memmove:
	movq	%rdi, %rax		// preserve dest as return value
	cmpq	%rsi, %rdi		// dest > src?
	ja	memmove_backward	// unsigned pointer compare (was jg: signed,
					// wrong for upper-half/kernel addresses)
	movq	%rsi, %rcx		// rcx = src - dest (overlap gap)
	subq	%rdi, %rcx
	cmpq	$8, %rcx		// FIX: was `cmpq $8, %rdi` — compared the
					// dest pointer instead of the computed gap
	jb	memcpy_bytes		// gap < 8: qword loop would clobber src
	jmp	memcpy_qwords		// gap >= 8: safe to reuse memcpy's qword loop
memmove_backward:			// dest > src: walk down from the end
	addq	%rdx, %rdi		// rdi = dest + count (one past the end)
	addq	%rdx, %rsi		// rsi = src  + count (one past the end)
memmove_qwords:
	cmpq	$8, %rdx		// at least one full qword left?
	jb	memmove_bytes		// unsigned: count is size_t (was jl)
	movq	-8(%rsi), %rcx		// copy the qword just below the cursor
	movq	%rcx, -8(%rdi)
	subq	$8, %rsi
	subq	$8, %rdi
	subq	$8, %rdx
	jmp	memmove_qwords
memmove_bytes:
	testq	%rdx, %rdx		// any tail bytes left?
	je	memmove_end
	movb	-1(%rsi), %cl		// FIX: was (%rsi)/(%rdi) — pointers sit one
	movb	%cl, -1(%rdi)		// past the region, so that read/write was
					// off by one byte outside both buffers
	subq	$1, %rsi
	subq	$1, %rdi
	subq	$1, %rdx
	jmp	memmove_bytes
memmove_end:
	ret