aboutsummaryrefslogtreecommitdiff
path: root/src/boot.S
blob: 976a6f4ec1941a5483985be04f4bc7c5a8c4e136 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
/*  Declare a multiboot header that marks the program as a kernel.
 *  https://www.gnu.org/software/grub/manual/multiboot2/html_node/index.html
 *  The Multiboot2 header must be contained completely within the first 32kB, and must be 8 byte aligned.
 */
#define ASM_FILE
#include <multiboot2.h>

.section .multiboot.header, "a"
.align 8
header_start:
    .int MULTIBOOT2_HEADER_MAGIC        # magic value identifying a Multiboot2 header
    .int MULTIBOOT_ARCHITECTURE_I386    # requested architecture: i386 32-bit protected mode
    .int header_end - header_start      # total header length in bytes, including all tags
    # Checksum: the four 32-bit header fields (magic, architecture, length,
    # checksum) must sum to zero modulo 2^32.
    .int -(MULTIBOOT2_HEADER_MAGIC + MULTIBOOT_ARCHITECTURE_I386 + (header_end - header_start))

    # Information request tag: ask the bootloader to include the listed tag
    # types in the boot information structure. flags = 0 marks the request
    # as required (not optional). Each tag must start 8-byte aligned.
.align 8
information_tag_start:
    .short MULTIBOOT_HEADER_TAG_INFORMATION_REQUEST     # tag type
    .short 0                                            # flags: required
    .int information_tag_end - information_tag_start    # tag size in bytes
    .int MULTIBOOT_TAG_TYPE_MMAP                        # requested: the memory map
information_tag_end:

    # Module alignment tag: boot modules must be loaded page (4 KiB) aligned.
.align 8
module_tag_start:
    .short MULTIBOOT_HEADER_TAG_MODULE_ALIGN
    .short 0                                # flags: required
    .int module_tag_end - module_tag_start  # tag size in bytes
module_tag_end:

    # Terminating tag (type = end, flags = 0, size = 8) closes the tag list.
.align 8
    .short MULTIBOOT_HEADER_TAG_END
    .short 0
    .int 8
header_end:

    # Zero-initialized (@nobits: occupies no space in the binary) page table
    # used only during boot to identity-map the .multiboot.* sections, so
    # execution can continue at the physical address once paging is enabled.
.section .multiboot.pages, "aw", @nobits
.align 4096
bootstrap_page0:
    .skip 1024 * 4      # 1024 4-byte entries = one 4 KiB page table

/*  The stack on x86 must be 16-byte aligned according to the System V ABI standard and de-facto extensions. The
 *  compiler will assume the stack is properly aligned and failure to align the stack will result in undefined
 *  behavior.
 */
.section .stack, "aw", @nobits
.align 16
stack_bottom:
    .skip 16 * 1024     # 16 KiB kernel stack; grows downward from stack_top
stack_top:

    # Page directory and first page table for the kernel proper, exported
    # (.global) so the higher-level kernel code can manage mappings later.
.section .pages, "aw", @nobits
.align 4096
.global kernel_pagedir
kernel_pagedir:
    .skip 1024 * 4      # 1024 page-directory entries (each covers 4 MiB)
.global kernel_ptable0
kernel_ptable0:
    .skip 1024 * 4      # 1024 page-table entries (each covers 4 KiB)

/*
    The linker script specifies _start as the entry point to the kernel and the
    bootloader will jump to this position once the kernel has been loaded.
 */
.section .multiboot.text, "ax"
.global VADDR_OFFSET
    # VADDR_OFFSET is the value subtracted from a link-time (virtual) address
    # to obtain its physical load address. It is 0 for the identity-mapped
    # multiboot sections and is redefined (in _start) before the higher-half
    # kernel sections are mapped.
.set VADDR_OFFSET, 0

    # map begin, end, access
    #   Fill consecutive 4-byte page-table entries at %edi mapping the
    #   physical range [begin - VADDR_OFFSET, end - VADDR_OFFSET), OR-ing
    #   the given access bits into each entry.
    #   In:       %edi = address of the next page-table entry to write
    #             %ecx = entries still available in the table; `loop`
    #                    decrements it once per entry, so %ecx acts as a
    #                    budget shared across consecutive map invocations
    #                    and bounds the total number of entries written.
    #   Out:      %edi advanced past the written entries
    #   Clobbers: %esi, %edx, %ecx, flags
    #   NOTE(review): the range check uses jge (signed compare); this is only
    #   safe while the compared physical addresses stay below 2 GiB — confirm
    #   if the kernel is ever loaded higher.
.macro map begin, end, access
    mov $\begin - VADDR_OFFSET, %esi
1:
    cmpl $(\end - VADDR_OFFSET), %esi
    jge 2f              # move to next section

    movl %esi, %edx     # map physical address
    orl $\access, %edx  # access bits
    movl %edx, (%edi)

    addl $4096, %esi    # Size of page is 4096 bytes.
    addl $4, %edi       # Size of entries in kernel_ptable0 is 4 bytes.
    loop 1b             # Loop to the next entry if we haven't finished.
2:
.endm

.global _start
    .type _start, @function
    # Kernel entry point, reached from the bootloader in 32-bit protected
    # mode with paging disabled.
    #   %eax = multiboot2 magic value
    #   %ebx = physical address of the multiboot information structure
    # Both are left untouched here (this code and the map macro only clobber
    # %ecx, %edx, %esi, %edi) and are consumed later in kinit.
_start:
    cli                 # mask interrupts; no IDT exists yet

    # Identity-map the .multiboot.* sections (VADDR_OFFSET is still 0) into
    # bootstrap_page0 so execution can continue at the current physical
    # address the moment paging is switched on. 0x003 = present | writable.
    movl $bootstrap_page0, %edi
    movl $1024, %ecx
    map begin_multiboot, end_multiboot, 0x003

    # From here on, symbols are higher-half virtual addresses; subtracting
    # VADDR_OFFSET yields the physical load address.
.set VADDR_OFFSET, 0xc0000000 - 0x400000

    # Physical address of kernel_ptable0.
    movl $(kernel_ptable0 - VADDR_OFFSET), %edi
    # Map 1024 pages
    movl $1024, %ecx

    # Access bits: 0x001 = present (read-only), 0x003 = present | writable.
    map begin_text, end_text, 0x001
    map begin_rodata, end_rodata, 0x001
    map begin_data, end_data, 0x003
    map begin_bss, end_bss, 0x003

    # Enabling paging does not change the next instruction, which continues to be physical. Therefore, map the kernel
    # to both its physical address and to the higher half.
    # Use the page table at:
    #   - entry   1       starts 0x0040 0000      ends 0x007f ffff
    #   - entry 768       starts 0xc000 0000      ends 0xc03f ffff
    movl $(bootstrap_page0 + 0x003), kernel_pagedir - VADDR_OFFSET + 0 * 4
    movl $(kernel_ptable0 - VADDR_OFFSET + 0x003), kernel_pagedir - VADDR_OFFSET + 1 * 4
    movl $(kernel_ptable0 - VADDR_OFFSET + 0x003), kernel_pagedir - VADDR_OFFSET + 768 * 4

    # Set cr3 to the address of the kernel_pagedir.
    movl $(kernel_pagedir - VADDR_OFFSET), %ecx
    movl %ecx, %cr3

    # Enable paging and the write-protect bit.
    # cr0 bit 31 = PG (enable paging), bit 16 = WP (ring 0 honors read-only).
    movl %cr0, %ecx
    orl $0x80010000, %ecx
    movl %ecx, %cr0

    # Jump to higher half with an absolute jump.
    # (A plain jmp would be ip-relative and stay in the low mapping.)
    lea (kinit), %ecx
    jmp *%ecx

.section .text
.extern kernel_constructors
.extern kernel_main
    # kinit — higher-half kernel entry, reached by the absolute jump at the
    # end of _start. Paging is fully set up and enabled; we now execute at
    # virtual (0xc0000000-based) addresses.
    #   %eax = multiboot2 magic value (untouched since the bootloader)
    #   %ebx = physical address of the multiboot information structure
    # Never returns: after kernel_main, the CPU is halted forever.
kinit:
    # Unmap the identity mapping (page-directory entries 0 and 1) as it is
    # now unnecessary; only the higher-half mapping at entry 768 remains.
    movl $0, kernel_pagedir + 0 * 4
    movl $0, kernel_pagedir + 1 * 4
    # Reload cr3 to force a TLB flush so the changes take effect.
    movl %cr3, %ecx
    movl %ecx, %cr3

    mov $stack_top, %esp    # point the stack pointer to the 16-byte aligned stack

    /*
    This is a good place to initialize crucial processor state before the
    high-level kernel is entered. It's best to minimize the early
    environment where crucial features are offline. Note that the
    processor is not fully initialized yet: Features such as floating
    point instructions and instruction set extensions are not initialized
    yet. The GDT should be loaded here. C++ features such as global
    constructors and exceptions will require runtime support to work.
    */

    /*
    The ABI requires the stack to be 16-byte aligned at the time of each
    call instruction. stack_top is 16-byte aligned and we are about to
    push two 4-byte arguments, so bias %esp by 8 first; the two pushes
    then bring the total adjustment to 16 bytes and every call below is
    made on an aligned stack.
    */
    subl $8, %esp
    pushl %ebx      # arg 2: pointer to the multiboot information structure
    pushl %eax      # arg 1: the multiboot magic value
    call kernel_constructors

    # The same two arguments are still on the stack for kernel_main, and
    # the alignment argument above applies to this call as well.
    call kernel_main

    /*
    If the system has nothing more to do, put the computer into an
    infinite loop. To do that:
    1) Disable interrupts with cli (clear interrupt enable in eflags).
       They are already disabled by the bootloader, so this is not needed
       unless kernel_main enabled them and then returned (which is sort
       of nonsensical to do).
    2) Wait for the next interrupt to arrive with hlt (halt instruction).
       Since they are disabled, this will lock up the computer.
    3) Jump back to the hlt if it ever wakes up due to a non-maskable
       interrupt occurring or due to system management mode.
    */
    cli
hang:
    hlt
    jmp hang