diff --git a/linker.ld b/linker.ld
index f7559dd..d87d503 100644
--- a/linker.ld
+++ b/linker.ld
@@ -30,4 +30,6 @@ SECTIONS
     . = ALIGN(16);
     __stack_top = . + 0x10000;
+    . = ALIGN(0x1000);
+    PROVIDE(__memory_start = .);
 
 }
diff --git a/src/Fdt.zig b/src/Fdt.zig
index 89250f9..c62a030 100644
--- a/src/Fdt.zig
+++ b/src/Fdt.zig
@@ -28,7 +28,7 @@ pub const ReserveEntry = extern struct {
     address: u64,
     size: u64,
 
-    pub fn toNative(self: *const ReserveEntry) struct { address: u64, size: u64 } {
+    pub fn toNative(self: *const ReserveEntry) packed struct { address: u64, size: u64 } {
         return .{
             .address = std.mem.bigToNative(u64, self.address),
             .size = std.mem.bigToNative(u64, self.size),
@@ -580,7 +580,18 @@ pub fn chosen(self: *const Fdt) ?Node {
 }
 
 pub fn memory(self: *const Fdt) ?Node {
-    return self.findNode("/memory") orelse self.findFirstCompatible("memory");
+    // Per the devicetree spec, memory nodes are identified by
+    // device_type = "memory", not by node name or compatible string.
+    var n = self.nodes();
+    while (n.next()) |node| {
+        if (node.getProperty("device_type")) |dev_type| {
+            if (std.mem.eql(u8, dev_type.asString() orelse continue, "memory")) {
+                return node;
+            }
+        }
+    }
+
+    return null;
 }
 
 pub fn cpus(self: *const Fdt) ?Node {
@@ -626,7 +637,7 @@ pub const MemoryReservationIterator = struct {
     data: []const u8,
     pos: usize,
 
-    pub fn next(self: *MemoryReservationIterator) ?struct { address: u64, size: u64 } {
+    pub fn next(self: *MemoryReservationIterator) ?packed struct { address: u64, size: u64 } {
         if (self.pos + 16 > self.data.len) return null;
 
         const entry: *const ReserveEntry = @alignCast(@ptrCast(self.data.ptr + self.pos));
@@ -636,7 +647,7 @@ pub const MemoryReservationIterator = struct {
 
         // End marker is all zeros
         if (result.address == 0 and result.size == 0) return null;
 
-        return result;
+        return @bitCast(result);
     }
 };
@@ -652,5 +663,3 @@ pub fn parseReg(data: []const u8, address_cells: u32, size_cells: u32) RegIterat
         .pos = 0,
     };
 }
-
-
diff --git a/src/debug.zig b/src/debug.zig
new file mode 100644
index 0000000..0450d49
--- /dev/null
+++ b/src/debug.zig
@@ -0,0 +1,11 @@
+const Console = @import("drivers/Console.zig");
+
+var console: Console = undefined;
+
+pub fn init(c: Console) void {
+    console = c;
+}
+
+pub fn print(comptime s: []const u8, args: anytype) void {
+    console.print(s, args);
+}
diff --git a/src/main.zig b/src/main.zig
index f99108d..e40ff07 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -2,10 +2,11 @@ const std = @import("std");
 const isa = @import("riscv/isa.zig");
 const Fdt = @import("Fdt.zig");
 const Console = @import("drivers/Console.zig");
+const debug = @import("debug.zig");
+const mem = @import("mem.zig");
 
 const UART_BASE: usize = 0x10000000;
-
-var console: Console = undefined;
+const MEMORY_START = @extern([*]u8, .{ .name = "__memory_start" });
 
 fn uart_put(c: u8) void {
     const uart: *volatile u8 = @ptrFromInt(UART_BASE);
@@ -32,9 +33,32 @@ export fn kmain(hartid: u64, fdt_ptr: *const anyopaque) callconv(.c) noreturn {
         while (true) asm volatile ("wfi");
     };
 
-    console = Console.init(fdt).?;
+    const root = fdt.root().?;
 
-    console.print("booting hydra...\n", .{});
+    const console = Console.init(fdt).?;
+    debug.init(console);
+
+    debug.print("booting hydra...\n", .{});
+
+    var reservations = fdt.memoryReservations();
+
+    while (reservations.next()) |reservation| {
+        debug.print("0x{x}:0x{x}\n", .{ reservation.address, reservation.size });
+    }
+
+    const memory = fdt.memory().?;
+    var reg_iter = Fdt.parseReg(memory.getProperty("reg").?.data, root.addressCells(), root.sizeCells());
+    const reg = reg_iter.next().?;
+    const memory_end = reg.address + reg.size;
+
+    // TODO(review): the FDT reservations printed above (and the FDT blob
+    // itself) should be carved out of this range before the allocator owns it.
+    var buddy: mem.BuddyAllocator = .{};
+    buddy.init(MEMORY_START[0 .. memory_end - @intFromPtr(MEMORY_START)]);
+    debug.print("memory allocator initialized.\n", .{});
+
+    const allocator = buddy.allocator();
+    _ = allocator;
 
     while (true) {
         asm volatile ("wfi");
diff --git a/src/mem.zig b/src/mem.zig
new file mode 100644
index 0000000..95ea408
--- /dev/null
+++ b/src/mem.zig
@@ -0,0 +1,3 @@
+pub const BuddyAllocator = @import("mem/BuddyAllocator.zig");
+
+pub const PAGE_SIZE = 0x1000;
diff --git a/src/mem/BuddyAllocator.zig b/src/mem/BuddyAllocator.zig
new file mode 100644
index 0000000..6ae181b
--- /dev/null
+++ b/src/mem/BuddyAllocator.zig
@@ -0,0 +1,158 @@
+//! Intrusive binary-buddy page allocator: free blocks store their own
+//! freelist node (`Block`) in their first bytes, so no side tables are needed.
+const std = @import("std");
+const Allocator = std.mem.Allocator;
+const BuddyAllocator = @This();
+const mem = @import("../mem.zig");
+const math = std.math;
+
+// A level-L block is MIN_BLOCK_SIZE << L bytes and is always aligned to its size.
+const MIN_BLOCK_SIZE = mem.PAGE_SIZE;
+const MAX_LEVEL = 16;
+
+// Freelist node overlaid on the start of every *free* block.
+const Block = struct {
+    next: ?*Block,
+    level: u8,
+};
+
+freelist: [MAX_LEVEL + 1]?*Block = .{null} ** (MAX_LEVEL + 1),
+
+pub fn init(self: *BuddyAllocator, range: []u8) void {
+    self.freeRange(range.ptr, range.len);
+}
+
+fn getLevelSize(level: u8) usize {
+    return @as(usize, MIN_BLOCK_SIZE) << @intCast(level);
+}
+
+/// Seed the freelists from a raw range, greedily carving it into the largest
+/// blocks that both fit and are naturally aligned.
+pub fn freeRange(self: *BuddyAllocator, start: [*]u8, size: usize) void {
+    var current_addr = @intFromPtr(start);
+    const end_addr = current_addr + size;
+
+    current_addr = std.mem.alignForward(usize, current_addr, MIN_BLOCK_SIZE);
+
+    while (current_addr + getLevelSize(0) <= end_addr) {
+        var level: u8 = MAX_LEVEL;
+        while (level > 0) : (level -= 1) {
+            const blk_size = getLevelSize(level);
+            if (current_addr + blk_size <= end_addr and current_addr % blk_size == 0) {
+                break;
+            }
+        }
+
+        const b = @as(*Block, @ptrFromInt(current_addr));
+        b.* = .{ .next = self.freelist[level], .level = level };
+        self.freelist[level] = b;
+
+        current_addr += getLevelSize(level);
+    }
+}
+
+// A block's buddy is its sibling half: same size, address differing only in
+// the bit corresponding to the block size.
+fn getBuddy(block: *Block) *Block {
+    const addr = @intFromPtr(block);
+    const size = getLevelSize(block.level);
+    return @ptrFromInt(addr ^ size);
+}
+
+fn isFree(self: *BuddyAllocator, buddy: *Block, level: u8) bool {
+    var curr = self.freelist[level];
+    while (curr) |node| : (curr = node.next) {
+        if (node == buddy) return true;
+    }
+    return false;
+}
+
+// Pop a block of exactly `level`, splitting a larger block repeatedly if needed.
+fn allocBlock(self: *BuddyAllocator, level: u8) ?[*]u8 {
+    if (level > MAX_LEVEL) return null;
+
+    var target_level = level;
+    while (target_level <= MAX_LEVEL) : (target_level += 1) {
+        if (self.freelist[target_level]) |block| {
+            self.freelist[target_level] = block.next;
+
+            var b = block;
+            while (b.level > level) {
+                b.level -= 1;
+                const buddy = getBuddy(b);
+                buddy.* = .{ .next = self.freelist[b.level], .level = b.level };
+                self.freelist[b.level] = buddy;
+            }
+            return @ptrCast(b);
+        }
+    }
+    return null;
+}
+
+// Return a block to the freelist, coalescing with its buddy for as long as
+// the buddy is also free.
+fn freeBlock(self: *BuddyAllocator, addr: [*]u8, level: u8) void {
+    var b = @as(*Block, @alignCast(@ptrCast(addr)));
+    b.level = level;
+
+    while (b.level < MAX_LEVEL) {
+        const buddy = getBuddy(b);
+        if (!self.isFree(buddy, b.level)) break;
+
+        // Unlink the buddy from its freelist before merging.
+        var prev: ?*Block = null;
+        var curr = self.freelist[b.level];
+        while (curr) |node| : (curr = node.next) {
+            if (node == buddy) {
+                if (prev) |p| p.next = node.next else self.freelist[b.level] = node.next;
+                break;
+            }
+            prev = node;
+        }
+
+        // The merged block starts at the lower of the two addresses.
+        if (@intFromPtr(buddy) < @intFromPtr(b)) b = buddy;
+        b.level += 1;
+    }
+
+    b.next = self.freelist[b.level];
+    self.freelist[b.level] = b;
+}
+
+pub fn allocator(self: *BuddyAllocator) Allocator {
+    return .{
+        .ptr = self,
+        .vtable = &.{
+            .alloc = alloc,
+            .resize = Allocator.noResize,
+            .remap = Allocator.noRemap,
+            .free = free,
+        },
+    };
+}
+
+fn alloc(ctx: *anyopaque, len: usize, ptr_align: std.mem.Alignment, ret_addr: usize) ?[*]u8 {
+    _ = ret_addr;
+    const self: *BuddyAllocator = @ptrCast(@alignCast(ctx));
+
+    // Blocks are aligned to their own size, so satisfying an alignment
+    // request only requires the block to be at least that large.
+    const align_bytes = ptr_align.toByteUnits();
+    const actual_size = @max(@max(len, align_bytes), MIN_BLOCK_SIZE);
+    const level = math.log2_int_ceil(usize, (actual_size + MIN_BLOCK_SIZE - 1) / MIN_BLOCK_SIZE);
+
+    return self.allocBlock(@intCast(level));
+}
+
+fn free(ctx: *anyopaque, buf: []u8, buf_align: std.mem.Alignment, ret_addr: usize) void {
+    _ = ret_addr;
+    const self: *BuddyAllocator = @ptrCast(@alignCast(ctx));
+
+    // Must mirror the level computation in `alloc` exactly, including the
+    // alignment contribution, so we free at the level the block was split to.
+    const align_bytes = buf_align.toByteUnits();
+    const actual_size = @max(@max(buf.len, align_bytes), MIN_BLOCK_SIZE);
+    const level = math.log2_int_ceil(usize, (actual_size + MIN_BLOCK_SIZE - 1) / MIN_BLOCK_SIZE);
+
+    self.freeBlock(buf.ptr, @intCast(level));
+}