From a29536599343ab284df7ee6dbdb1a3fd6da78fe6 Mon Sep 17 00:00:00 2001 From: luccie-cmd Date: Tue, 5 Aug 2025 22:59:41 +0200 Subject: [PATCH] [MODS/WASM]: Get tables, elems, data and more VM instructions working --- modding/log.zig | 16 ++-- src/mods/Parser.zig | 157 ++++++++++++++++++++++++++++++---- src/mods/ir.zig | 41 ++++++++- src/mods/vm.zig | 200 +++++++++++++++++++++++++++++++++++--------- src/sideros.zig | 31 +++++++ 5 files changed, 381 insertions(+), 64 deletions(-) diff --git a/modding/log.zig b/modding/log.zig index 53298d2..acaa787 100644 --- a/modding/log.zig +++ b/modding/log.zig @@ -1,19 +1,19 @@ pub extern fn logErr( - format: [*:0]const u8, - ..., + string: *const u8, + len: u64, ) void; pub extern fn logWarn( - format: [*:0]const u8, - ..., + string: *const u8, + len: u64, ) void; pub extern fn logInfo( - format: [*:0]const u8, - ..., + string: *const u8, + len: u64, ) void; pub extern fn logDebug( - format: [*:0]const u8, - ..., + string: *const u8, + len: u64, ) void; diff --git a/src/mods/Parser.zig b/src/mods/Parser.zig index 7006638..0a18b2d 100644 --- a/src/mods/Parser.zig +++ b/src/mods/Parser.zig @@ -14,11 +14,14 @@ exports: vm.Exports, importCount: u32, exported_memory: u32, +parsedData: []u8, +tables: []Tabletype, +elems: [][]u32, globalValues: []vm.Value, globalTypes: []Globaltype, const Parser = @This(); -const PAGE_SIZE = 64_000; +const PAGE_SIZE = 65536; pub const Error = error{ OutOfMemory, @@ -41,12 +44,17 @@ pub const Error = error{ duplicated_funcsec, duplicated_typesec, duplicated_globalsec, + duplicated_tablesec, + duplicated_elemsec, unresolved_branch, unterminated_wasm, }; pub fn init(allocator: Allocator, bytes: []const u8) !Parser { return .{ + .elems = &.{}, + .tables = &.{}, + .parsedData = &.{}, .exported_memory = 0, .importCount = 0, .bytes = bytes, @@ -72,12 +80,14 @@ pub fn deinit(self: Parser) void { self.allocator.free(t.returns); } self.allocator.free(self.types); - self.allocator.free(self.functions); } pub fn module(self: *Parser) vm.Module { defer self.functions = &.{}; return .{ + .elems = self.elems, + .tables = self.tables, + .data = self.parsedData, .memory = .{ .min = self.memory.lim.min, .max = self.memory.lim.max, @@ -165,7 +175,7 @@ fn VectorFnResult(parse_fn: anytype) type { else => ret_type, }; } -fn parseVector(self: *Parser, parse_fn: anytype) ![]VectorFnResult(parse_fn) { +pub fn parseVector(self: *Parser, parse_fn: anytype) ![]VectorFnResult(parse_fn) { const n = try self.readU32(); const ret = try self.allocator.alloc(VectorFnResult(parse_fn), n); for (ret) |*i| { @@ -231,12 +241,12 @@ const Limits = struct { fn parseLimits(self: *Parser) !Limits { return switch (try self.readByte()) { 0x00 => .{ - .min = try self.readU32() * PAGE_SIZE, + .min = try self.readU32(), .max = null, }, 0x01 => .{ - .min = try self.readU32() * PAGE_SIZE, - .max = try self.readU32() * PAGE_SIZE, + .min = try self.readU32(), + .max = try self.readU32(), }, else => Error.invalid_limits, }; @@ -249,7 +259,7 @@ fn parseMemtype(self: *Parser) !Memtype { return .{ .lim = try self.parseLimits() }; } -const Tabletype = struct { +pub const Tabletype = struct { et: std.wasm.RefType, lim: Limits, }; @@ -359,7 +369,6 @@ fn parseImportsec(self: *Parser) !void { const end_idx = self.byte_idx + size; const imports = try self.parseVector(Parser.parseImport); - self.importCount = @intCast(imports.len); var index: u32 = 0; @@ -379,9 +388,17 @@ fn parseImportsec(self: *Parser) !void { } index += 1; }, + .mem => { + self.memory = i.importdesc.mem; + 
+                    // Imported memory limits are in pages; scale to bytes once, here.
+                    self.memory.lim.min *= PAGE_SIZE;
+                    if (self.memory.lim.max != null) {
+                        self.memory.lim.max.? *= PAGE_SIZE;
+                    }
+                },
                 else => std.debug.print("[TODO]: Handle import desc {any}\n", .{i.importdesc}),
             }
         }
+    self.importCount = index;
     defer self.allocator.free(imports);
 
     // TODO: run this check not only on debug
@@ -414,10 +431,31 @@ fn parseFuncsec(self: *Parser) !void {
     std.debug.assert(self.byte_idx == end_idx);
 }
 
+pub const Table = struct {
+    t: Tabletype,
+};
+
+fn parseTable(self: *Parser) !Table {
+    return .{
+        .t = try self.parseTabletype(),
+    };
+}
+
 fn parseTablesec(self: *Parser) !void {
-    self.warn("tablesec");
     const size = try self.readU32();
-    _ = try self.read(size);
+    const end_idx = self.byte_idx + size;
+
+    const tables = try self.parseVector(Parser.parseTable);
+    defer self.allocator.free(tables);
+
+    if (self.tables.len != 0) return Error.duplicated_tablesec;
+    self.tables = try self.allocator.alloc(Tabletype, tables.len);
+
+    for (tables, 0..) |t, i| {
+        self.tables[i] = t.t;
+    }
+
+    std.debug.assert(self.byte_idx == end_idx);
 }
 
 fn parseMemsec(self: *Parser) !void {
@@ -430,6 +468,10 @@ fn parseMemsec(self: *Parser) !void {
         // WTF?
     } else if (mems.len == 1) {
         self.memory = mems[0];
+        // Limits are parsed in pages; scale to bytes once, here.
+        self.memory.lim.min *= PAGE_SIZE;
+        if (self.memory.lim.max != null) {
+            self.memory.lim.max.? *= PAGE_SIZE;
+        }
     } else {
         std.debug.print("[WARN]: Parsing more than one memory is not yet supported\n", .{});
     }
@@ -526,10 +568,71 @@ fn parseStartsec(self: *Parser) !void {
     _ = try self.read(size);
 }
 
+const Elemmode = union(enum) {
+    Passive,
+    Active: struct {
+        tableidx: u32,
+        offset: vm.Value,
+    },
+    Declarative,
+};
+
+pub const Elem = struct {
+    indices: []u32,
+    elemMode: Elemmode,
+};
+
+fn parseElem(self: *Parser) !Elem {
+    const b: u32 = try self.readU32();
+    switch (b) {
+        // Flag 0: active segment for table 0, funcref, offset given by a constant expression.
+        0 => {
+            // if (try self.parseReftype() != std.wasm.RefType.funcref){
+            //     std.debug.panic("Active function index element table was not a function reference\n", .{});
+            // }
+            const elemMode: Elemmode = .{
+                .Active = .{
+                    .tableidx = 0,
+                    .offset = try vm.handleGlobalInit(self.allocator, try IR.parseGlobalExpr(self)),
+                },
+            };
+            const n = try self.readU32();
+            const indices: []u32 = try self.allocator.alloc(u32, n);
+            for (0..n) |i| {
+                indices[i] = try self.readU32();
+            }
+            return .{
+                .indices = indices,
+                .elemMode = elemMode,
+            };
+        },
+        else => {
+            std.debug.panic("TODO: Handle elem type {any}\n", .{b});
+        },
+    }
+}
+
 fn parseElemsec(self: *Parser) !void {
-    self.warn("elemsec");
     const size = try self.readU32();
-    _ = try self.read(size);
+    const end_idx = self.byte_idx + size;
+
+    const elems = try self.parseVector(Parser.parseElem);
+    defer self.allocator.free(elems);
+
+    self.elems = try self.allocator.alloc([]u32, elems.len);
+
+    for (elems) |elem| {
+        if (elem.elemMode != Elemmode.Active) {
+            std.debug.panic("No support for non active elements\n", .{});
+        }
+        const tab = self.tables[elem.elemMode.Active.tableidx];
+        self.elems[elem.elemMode.Active.tableidx] = try self.allocator.alloc(u32, tab.lim.min);
+        @memset(self.elems[elem.elemMode.Active.tableidx], 0);
+        for (elem.indices, 0..) |idx, i| {
+            self.elems[elem.elemMode.Active.tableidx][i + @as(usize, @intCast(elem.elemMode.Active.offset.i32))] = idx;
+        }
+        // The indices were copied into the table image above, so free the scratch slice.
+        self.allocator.free(elem.indices);
+    }
+
+    std.debug.assert(self.byte_idx == end_idx);
 }
 
 pub const Func = struct {
@@ -599,10 +702,36 @@ fn parseCodesec(self: *Parser) !void {
     std.debug.assert(self.byte_idx == end_idx);
 }
 
+pub const Data = struct {
+    offsetVal: vm.Value,
+    data: []u8,
+};
+
+fn parseData(self: *Parser) !Data {
+    const b: u32 = try self.readU32();
+    switch (b) {
+        // Flag 0: active segment for memory 0, offset given by a constant expression.
+        0 => {
+            return .{
+                .offsetVal = try vm.handleGlobalInit(self.allocator, try IR.parseGlobalExpr(self)),
+                .data = try self.parseVector(readByte),
+            };
+        },
+        else => {
+            std.debug.panic("TODO: Handle data type {any}\n", .{b});
+        },
+    }
+}
+
 fn parseDatasec(self: *Parser) !void {
-    self.warn("datasec");
     const size = try self.readU32();
-    _ = try self.read(size);
+    const end_idx = self.byte_idx + size;
+    const datas = try self.parseVector(Parser.parseData);
+    defer self.allocator.free(datas);
+    for (datas) |data| {
+        const offset: usize = @intCast(data.offsetVal.i32);
+        const end = offset + data.data.len;
+        // Grow (never shrink) the flat image, and zero the region realloc leaves uninitialized.
+        if (end > self.parsedData.len) {
+            const old_len = self.parsedData.len;
+            self.parsedData = try self.allocator.realloc(self.parsedData, end);
+            @memset(self.parsedData[old_len..], 0);
+        }
+        @memcpy(self.parsedData[offset..end], data.data);
+        self.allocator.free(data.data);
+    }
+    std.debug.assert(self.byte_idx == end_idx);
 }
 
 fn parseDatacountsec(self: *Parser) !void {
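
Note: parseDatasec above flattens every active data segment into one image that
Runtime.init later copies to offset 0 of linear memory. A standalone sketch of
that merge rule (hypothetical Segment/mergeSegments names; active segments with
i32 offsets assumed, as in the parser):

    const std = @import("std");

    const Segment = struct { offset: usize, bytes: []const u8 };

    // Grows the image so each segment fits, zeroes the gap realloc leaves
    // uninitialized, then copies the segment to its offset. Never shrinks,
    // so a later segment with a smaller end cannot truncate earlier data.
    fn mergeSegments(allocator: std.mem.Allocator, segments: []const Segment) ![]u8 {
        var image: []u8 = &.{};
        for (segments) |seg| {
            const end = seg.offset + seg.bytes.len;
            if (end > image.len) {
                const old_len = image.len;
                image = try allocator.realloc(image, end);
                @memset(image[old_len..], 0);
            }
            @memcpy(image[seg.offset..end], seg.bytes);
        }
        return image;
    }

    test mergeSegments {
        const segs = [_]Segment{
            .{ .offset = 8, .bytes = "world" },
            .{ .offset = 0, .bytes = "hi" },
        };
        const image = try mergeSegments(std.testing.allocator, &segs);
        defer std.testing.allocator.free(image);
        try std.testing.expectEqualStrings("world", image[8..13]);
        try std.testing.expectEqual(@as(u8, 0), image[2]); // gap stays zeroed
    }
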
diff --git a/src/mods/ir.zig b/src/mods/ir.zig
index 5ae26b6..07b6cc1 100644
--- a/src/mods/ir.zig
+++ b/src/mods/ir.zig
@@ -628,10 +628,10 @@ const IRParserState = struct {
             0x02...0x03 => self.parseBlock(b),
             0x04 => self.parseIf(),
             0x0C...0x0D => self.parseBranch(b),
-            0x0E => @panic("UNIMPLEMENTED"),
+            0x0E => self.parseTableBranch(b),
             0x0F => self.push(@enumFromInt(b), .{ .u64 = 0 }),
             0x10 => self.push(@enumFromInt(b), .{ .u32 = try self.parser.readU32() }),
-            0x11 => @panic("UNIMPLEMENTED"),
+            // call_indirect: the type index (y) comes first in the encoding, then the table index (x).
+            0x11 => self.push(@enumFromInt(b), .{ .indirect = .{ .y = try self.parser.readU32(), .x = try self.parser.readU32() } }),
             0xD0 => self.push(@enumFromInt(b), .{ .reftype = try self.parser.parseReftype() }),
             0xD1 => self.push(@enumFromInt(b), .{ .u64 = 0 }),
             0xD2 => self.push(@enumFromInt(b), .{ .u32 = try self.parser.readU32() }),
@@ -683,7 +683,14 @@ const IRParserState = struct {
         const n = try self.parser.readU32();
         try switch (n) {
             0...7 => self.push(@enumFromInt(0xD3 + @as(u8, @intCast(n))), .{ .u64 = 0 }),
-            8...11 => @panic("UNIMPLEMENTED"),
+            8...9 => @panic("UNIMPLEMENTED"),
+            10...11 => {
+                try self.push(@enumFromInt(0xD3 + @as(u8, @intCast(n))), .{ .u64 = 0 });
+                // Skip the reserved memory-index immediates: two bytes for
+                // memory.copy (n == 10), one byte for memory.fill (n == 11).
+                _ = try self.parser.readByte();
+                if (n == 10) {
+                    _ = try self.parser.readByte();
+                }
+            },
             12...17 => @panic("UNIMPLEMENTED"),
             else => {
                 std.log.err("Invalid misc instruction {d} at position {d}\n", .{ n, self.parser.byte_idx });
@@ -807,6 +814,18 @@ const IRParserState = struct {
         try self.push(@enumFromInt(b), .{ .u64 = 0 });
     }
 
+    fn parseTableBranch(self: *IRParserState, b: u8) !void {
+        const n = try self.parser.readU32();
+        const idxs = try self.allocator.alloc(u32, n);
+        // The label indices are copied into `branches`, so the scratch slice can be freed.
+        defer self.allocator.free(idxs);
+        for (idxs) |*i| {
+            i.* = try self.parser.readU32();
+            try self.branches.put(self.allocator, @intCast(self.opcodes.items.len), i.*);
+        }
+        try self.branches.put(self.allocator, @intCast(self.opcodes.items.len), try self.parser.readU32());
+        try self.push(@enumFromInt(b), .{ .u64 = 0 });
+    }
+
     fn parseVector(self: *IRParserState) !void {
         const n = try self.parser.readU32();
         try switch (n) {
@@ -859,3 +878,19 @@ pub fn parseGlobalExpr(parser: *Parser) !IR {
         .select_valtypes = &.{},
     };
 }
+
+pub fn parseSingleExpr(parser: *Parser) !IR {
+    var state = IRParserState{
+        .opcodes = .{},
+        .indices = .{},
+        .branches = .{},
+        .parser = parser,
+        .allocator = parser.allocator,
+    };
+    try state.parseExpression();
+    return .{
+        .opcodes = try state.opcodes.toOwnedSlice(state.allocator),
+        .indices = try state.indices.toOwnedSlice(state.allocator),
+        .select_valtypes = &.{},
+    };
+}
\ No newline at end of file
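
Note: the 10...11 arm in the misc (0xFC) decoder above skips the reserved
memory-index immediates of the binary format: memory.copy (n == 10) is encoded
as 0xFC 10 0x00 0x00 and memory.fill (n == 11) as 0xFC 11 0x00. A minimal
standalone sketch of just that rule (hypothetical skipMiscImmediates name;
single-memory modules assumed, so the reserved bytes must be zero):

    const std = @import("std");

    fn skipMiscImmediates(n: u32, bytes: []const u8, idx: *usize) !void {
        // memory.copy carries destination + source memory indices,
        // memory.fill a single memory index.
        const reserved: usize = switch (n) {
            10 => 2,
            11 => 1,
            else => 0,
        };
        for (0..reserved) |_| {
            if (idx.* >= bytes.len) return error.UnexpectedEof;
            if (bytes[idx.*] != 0) return error.InvalidMemoryIndex;
            idx.* += 1;
        }
    }

    test skipMiscImmediates {
        var idx: usize = 0;
        try skipMiscImmediates(10, &[_]u8{ 0, 0 }, &idx);
        try std.testing.expectEqual(@as(usize, 2), idx);
    }
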
diff --git a/src/mods/vm.zig b/src/mods/vm.zig
index b270978..cb1c355 100644
--- a/src/mods/vm.zig
+++ b/src/mods/vm.zig
@@ -56,11 +56,13 @@ pub const Module = struct {
     exports: Exports,
     exported_memory: u32,
     imported_funcs: u32,
+    data: []const u8,
+    tables: []Parser.Tabletype,
+    elems: [][]u32,
 
     pub fn deinit(self: Module, allocator: Allocator) void {
         // self.exports.deinit(allocator);
         for (self.functions) |f| {
-            std.debug.print("Freeing function parameters at {*}\n", .{f.func_type.parameters.ptr});
             allocator.free(f.func_type.parameters);
             allocator.free(f.func_type.returns);
             switch (f.typ) {
@@ -103,6 +105,8 @@ pub const Runtime = struct {
             std.log.warn("Growing memory is not yet supported, usign the minimum memory\n", .{});
         }
         const memory = try allocator.alloc(u8, max);
+        // Zero linear memory, then copy in the flattened data-segment image.
+        @memset(memory, 0);
+        @memcpy(memory[0..module.data.len], module.data);
         return Runtime{
             .module = module,
             .stack = try std.ArrayList(Value).initCapacity(allocator, 10),
@@ -112,7 +116,6 @@ pub const Runtime = struct {
     }
 
     pub fn deinit(self: *Runtime, allocator: Allocator) void {
-        self.module.deinit(allocator);
         self.stack.deinit();
         allocator.free(self.memory);
     }
+
+    // Pops the (ptr, len) pair pushed by the guest and prints the referenced
+    // guest memory with a level prefix, appending a trailing newline when missing.
+    fn logFromGuest(self: *Runtime, level: []const u8) void {
+        const size: usize = @intCast(self.stack.pop().?.i64);
+        const offset: usize = @intCast(self.stack.pop().?.i32);
+        const msg: []const u8 = self.memory[offset .. offset + size];
+        const nl: []const u8 = if (msg.len > 0 and msg[msg.len - 1] == '\n') "" else "\n";
+        std.debug.print("[{s}]: {s}{s}", .{ level, msg, nl });
+    }
@@ -131,7 +134,7 @@ pub const Runtime = struct {
                     continue;
                 },
                 .br_if => {
-                    if (self.stack.pop().?.i32 != 0) {
+                    if (self.stack.items[self.stack.items.len - 1].i32 != 0) {
                         frame.program_counter = index.u32;
                         continue;
                     }
@@ -140,13 +143,29 @@ pub const Runtime = struct {
                 .@"return" => break :loop,
                 .call => {
                     if (index.u32 == self.module.exports.logDebug) {
-                        std.debug.print("TODO: logDebug\n", .{});
+                        self.logFromGuest("DEBUG");
                     } else if (index.u32 == self.module.exports.logInfo) {
-                        std.debug.print("TODO: logInfo\n", .{});
+                        self.logFromGuest("INFO");
                     } else if (index.u32 == self.module.exports.logWarn) {
-                        std.debug.print("TODO: logWarn\n", .{});
+                        self.logFromGuest("WARN");
                    } else if (index.u32 == self.module.exports.logErr) {
-                        std.debug.print("TODO: logErr\n", .{});
+                        self.logFromGuest("ERROR");
                     } else {
                         var parameters = std.ArrayList(Value).init(allocator);
                         defer parameters.deinit();
@@ -156,7 +175,19 @@
                         try self.call(allocator, index.u32 - self.module.imported_funcs, parameters.items);
                     }
                 },
-                .call_indirect => @panic("UNIMPLEMENTED"),
+                .call_indirect => {
+                    // The runtime operand selects the entry in the table; the
+                    // type-index immediate (index.indirect.y) is not checked yet.
+                    const entry: usize = @intCast(self.stack.pop().?.i32);
+                    if (self.module.tables[index.indirect.x].et != std.wasm.RefType.funcref) {
+                        std.debug.panic("Table at index {any} is not a `funcref` table\n", .{index.indirect.x});
+                    }
+                    const funcIdx = self.module.elems[index.indirect.x][entry];
+                    var parameters = std.ArrayList(Value).init(allocator);
+                    defer parameters.deinit();
+                    for (self.module.functions[funcIdx - self.module.imported_funcs].func_type.parameters) |_| {
+                        try parameters.append(self.stack.pop().?);
+                    }
+                    try self.call(allocator, funcIdx - self.module.imported_funcs, parameters.items);
+                },
 
                 .refnull => @panic("UNIMPLEMENTED"),
                 .refisnull => @panic("UNIMPLEMENTED"),
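
Note: call_indirect above resolves the callee in two steps: the i32 popped at
run time selects an entry in the flattened funcref table, and the stored
function index is then invoked like a direct call. A sketch of the lookup rule
(hypothetical resolveIndirect name; the spec additionally requires a type check
against the type-index immediate, which the interpreter does not do yet):

    const std = @import("std");

    // `table` is the flattened element segment for one funcref table and
    // `entry` is the popped i32 operand. Out-of-bounds entries must trap.
    fn resolveIndirect(table: []const u32, entry: u32) !u32 {
        if (entry >= table.len) return error.UndefinedElement;
        return table[entry]; // function index to call
    }

    test resolveIndirect {
        const table = [_]u32{ 7, 9, 12 };
        try std.testing.expectEqual(@as(u32, 9), try resolveIndirect(&table, 1));
        try std.testing.expectError(error.UndefinedElement, resolveIndirect(&table, 3));
    }
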
@@ -194,27 +225,35 @@
 
                 // TODO(ernesto): This code is repeated...
                 .i32_load => {
-                    const start = index.memarg.alignment + index.memarg.offset;
+                    const offsetVal = self.stack.pop().?.i32;
+                    if (offsetVal < 0) {
+                        std.debug.panic("offsetVal is negative (val: {any} arg: {any})\n", .{ offsetVal, index.memarg });
+                    }
+                    const start = index.memarg.offset + @as(u32, @intCast(offsetVal));
                     const end = start + @sizeOf(i32);
                     try self.stack.append(.{ .i32 = std.mem.littleToNative(i32, std.mem.bytesAsValue(i32, self.memory[start..end]).*) });
                 },
                 .i64_load => {
-                    const start = index.memarg.alignment + index.memarg.offset;
+                    const start = index.memarg.offset + @as(u32, @intCast(self.stack.pop().?.i32));
                     const end = start + @sizeOf(i64);
                     try self.stack.append(.{ .i64 = std.mem.littleToNative(i64, std.mem.bytesAsValue(i64, self.memory[start..end]).*) });
                 },
                 .f32_load => {
-                    const start = index.memarg.alignment + index.memarg.offset;
+                    const start = index.memarg.offset + @as(u32, @intCast(self.stack.pop().?.i32));
                     const end = start + @sizeOf(f32);
                     try self.stack.append(.{ .f32 = std.mem.littleToNative(f32, std.mem.bytesAsValue(f32, self.memory[start..end]).*) });
                 },
                 .f64_load => {
-                    const start = index.memarg.alignment + index.memarg.offset;
+                    const start = index.memarg.offset + @as(u32, @intCast(self.stack.pop().?.i32));
                     const end = start + @sizeOf(f64);
                     try self.stack.append(.{ .f64 = std.mem.littleToNative(f64, std.mem.bytesAsValue(f64, self.memory[start..end]).*) });
                 },
                 .i32_load8_s => @panic("UNIMPLEMENTED"),
-                .i32_load8_u => @panic("UNIMPLEMENTED"),
+                .i32_load8_u => {
+                    const start = index.memarg.offset + @as(u32, @intCast(self.stack.pop().?.i32));
+                    // A single byte has no endianness; zero-extend it into an i32.
+                    try self.stack.append(.{ .i32 = self.memory[start] });
+                },
                 .i32_load16_s => @panic("UNIMPLEMENTED"),
                 .i32_load16_u => @panic("UNIMPLEMENTED"),
                 .i64_load8_s => @panic("UNIMPLEMENTED"),
@@ -222,24 +261,58 @@
                 .i64_load16_s => @panic("UNIMPLEMENTED"),
                 .i64_load16_u => @panic("UNIMPLEMENTED"),
                 .i64_load32_s => @panic("UNIMPLEMENTED"),
-                .i64_load32_u => @panic("UNIMPLEMENTED"),
-                .i32_store => @panic("UNIMPLEMENTED"),
-                .i64_store => {
-                    // TODO(ernesto): I'm pretty sure this is wrong
-                    const val = std.mem.nativeToLittle(i64, self.stack.pop().?.i64);
+                .i64_load32_u => {
+                    const start = index.memarg.offset + @as(u32, @intCast(self.stack.pop().?.i32));
+                    const raw_value = std.mem.readInt(u32, self.memory[start..][0..4], .little);
+                    try self.stack.append(.{ .i64 = raw_value }); // zero-extend
+                },
+                .i32_store => {
+                    const val = std.mem.nativeToLittle(i32, self.stack.pop().?.i32);
+                    const offsetVal = self.stack.pop().?.i32;
+                    if (offsetVal < 0) {
+                        std.debug.panic("offsetVal is negative (val: {any})\n", .{offsetVal});
+                    }
+                    const offset: u64 = @intCast(offsetVal);
+                    const start: usize = @intCast(@as(u64, index.memarg.offset) + offset);
+                    const end = start + @sizeOf(u32);
+                    @memcpy(self.memory[start..end], std.mem.asBytes(&val));
+                },
+                .i64_store => {
+                    const val = std.mem.nativeToLittle(i64, self.stack.pop().?.i64);
+                    const offsetVal = self.stack.pop().?.i32;
+                    if (offsetVal < 0) {
+                        std.debug.panic("offsetVal is negative (val: {any} ip: {any})\n", .{ offsetVal, frame.program_counter });
+                    }
+                    const offset: u64 = @intCast(offsetVal);
+                    const start: usize = @intCast(@as(u64, index.memarg.offset) + offset);
+                    const end = start + @sizeOf(u64);
+                    @memcpy(self.memory[start..end], std.mem.asBytes(&val));
+                },
                 .f32_store => @panic("UNIMPLEMENTED"),
                 .f64_store => @panic("UNIMPLEMENTED"),
-                .i32_store8 => @panic("UNIMPLEMENTED"),
-                .i32_store16 => @panic("UNIMPLEMENTED"),
+                .i32_store8 => {
+                    // Wasm wraps the operand, so truncate instead of @intCast (which would panic).
+                    const val = std.mem.nativeToLittle(i8, @as(i8, @truncate(self.stack.pop().?.i32)));
+                    const offsetVal = self.stack.pop().?.i32;
+                    if (offsetVal < 0) {
+                        std.debug.panic("offsetVal is negative (val: {any})\n", .{offsetVal});
+                    }
+                    const offset: u64 = @intCast(offsetVal);
+                    const start: usize = @intCast(@as(u64, index.memarg.offset) + offset);
+                    const end = start + @sizeOf(u8);
+                    @memcpy(self.memory[start..end], std.mem.asBytes(&val));
+                },
+                .i32_store16 => {
+                    const val = std.mem.nativeToLittle(i16, @as(i16, @truncate(self.stack.pop().?.i32)));
+                    const offsetVal = self.stack.pop().?.i32;
+                    if (offsetVal < 0) {
+                        std.debug.panic("offsetVal is negative (val: {any})\n", .{offsetVal});
+                    }
+                    const offset: u64 = @intCast(offsetVal);
+                    const start: usize = @intCast(@as(u64, index.memarg.offset) + offset);
+                    const end = start + @sizeOf(u16);
+                    @memcpy(self.memory[start..end], std.mem.asBytes(&val));
+                },
                 .i64_store8 => @panic("UNIMPLEMENTED"),
                 .i64_store16 => @panic("UNIMPLEMENTED"),
                 .i64_store32 => @panic("UNIMPLEMENTED"),
@@ -248,8 +321,18 @@
                 .memorygrow => @panic("UNIMPLEMENTED"),
                 .memoryinit => @panic("UNIMPLEMENTED"),
                 .datadrop => @panic("UNIMPLEMENTED"),
-                .memorycopy => @panic("UNIMPLEMENTED"),
-                .memoryfill => @panic("UNIMPLEMENTED"),
+                .memorycopy => {
+                    const bytes: usize = @intCast(self.stack.pop().?.i32);
+                    const source: usize = @intCast(self.stack.pop().?.i32);
+                    const dest: usize = @intCast(self.stack.pop().?.i32);
+                    // memory.copy permits overlapping ranges; @memcpy does not.
+                    if (dest <= source) {
+                        std.mem.copyForwards(u8, self.memory[dest .. dest + bytes], self.memory[source .. source + bytes]);
+                    } else {
+                        std.mem.copyBackwards(u8, self.memory[dest .. dest + bytes], self.memory[source .. source + bytes]);
+                    }
+                },
+                .memoryfill => {
+                    const bytes: usize = @intCast(self.stack.pop().?.i32);
+                    // The fill value is the low byte of the i32 operand.
+                    const val: u8 = @truncate(@as(u32, @bitCast(self.stack.pop().?.i32)));
+                    const dest: usize = @intCast(self.stack.pop().?.i32);
+                    @memset(self.memory[dest .. dest + bytes], val);
+                },
 
                 .i32_const => {
                     try self.stack.append(Value{ .i32 = frame.code.indices[frame.program_counter].i32 });
@@ -264,34 +347,46 @@
                     try self.stack.append(Value{ .i32 = @intCast(@as(u1, @bitCast(self.stack.pop().?.i32 == 0))) });
                 },
                 .i32_eq => @panic("UNIMPLEMENTED"),
-                .i32_ne => @panic("UNIMPLEMENTED"),
+                .i32_ne => {
+                    const a = self.stack.pop().?.i32;
+                    const b = self.stack.pop().?.i32;
+                    try self.stack.append(Value{ .i32 = @intCast(@as(u1, @bitCast(a != b))) });
+                },
                 .i32_lt_s => @panic("UNIMPLEMENTED"),
                 .i32_lt_u => {
-                    const b = self.stack.pop().?.i32;
-                    const a = self.stack.pop().?.i32;
-                    try self.stack.append(Value{ .i32 = @intCast(@as(u1, @bitCast(a < b))) });
+                    // _u comparisons operate on the unsigned interpretation of the bits.
+                    const a = @as(u32, @bitCast(self.stack.pop().?.i32));
+                    const b = @as(u32, @bitCast(self.stack.pop().?.i32));
+                    try self.stack.append(Value{ .i32 = @intCast(@as(u1, @bitCast(b < a))) });
                 },
                 .i32_gt_s => @panic("UNIMPLEMENTED"),
-                .i32_gt_u => @panic("UNIMPLEMENTED"),
+                .i32_gt_u => {
+                    const a = @as(u32, @bitCast(self.stack.pop().?.i32));
+                    const b = @as(u32, @bitCast(self.stack.pop().?.i32));
+                    try self.stack.append(Value{ .i32 = @intCast(@as(u1, @bitCast(b > a))) });
+                },
                 .i32_le_s => @panic("UNIMPLEMENTED"),
-                .i32_le_u => @panic("UNIMPLEMENTED"),
+                .i32_le_u => {
+                    const a = @as(u32, @bitCast(self.stack.pop().?.i32));
+                    const b = @as(u32, @bitCast(self.stack.pop().?.i32));
+                    try self.stack.append(Value{ .i32 = @intCast(@as(u1, @bitCast(b <= a))) });
+                },
                 .i32_ge_s => @panic("UNIMPLEMENTED"),
                 .i32_ge_u => {
-                    const b = self.stack.pop().?.i32;
-                    const a = self.stack.pop().?.i32;
-                    try self.stack.append(Value{ .i32 = @intCast(@as(u1, @bitCast(a >= b))) });
+                    const a = @as(u32, @bitCast(self.stack.pop().?.i32));
+                    const b = @as(u32, @bitCast(self.stack.pop().?.i32));
+                    try self.stack.append(Value{ .i32 = @intCast(@as(u1, @bitCast(b >= a))) });
                 },
                 .i64_eqz => {
-                    try self.stack.append(Value{ .i32 = @intCast(@as(u1, @bitCast(self.stack.pop().?.i32 == 0))) });
+                    try self.stack.append(Value{ .i32 = @intCast(@as(u1, @bitCast(self.stack.pop().?.i64 == 0))) });
                 },
                 .i64_eq => @panic("UNIMPLEMENTED"),
                 .i64_ne => @panic("UNIMPLEMENTED"),
                 .i64_lt_s => @panic("UNIMPLEMENTED"),
                 .i64_lt_u => {
-                    const b = self.stack.pop().?.i32;
-                    const a = self.stack.pop().?.i32;
-                    try self.stack.append(Value{ .i32 = @intCast(@as(u1, @bitCast(a < b))) });
+                    const a = @as(u64, @bitCast(self.stack.pop().?.i64));
+                    const b = @as(u64, @bitCast(self.stack.pop().?.i64));
+                    try self.stack.append(Value{ .i32 = @intCast(@as(u1, @bitCast(b < a))) });
                 },
                 .i64_gt_s => @panic("UNIMPLEMENTED"),
                 .i64_gt_u => @panic("UNIMPLEMENTED"),
@@ -323,23 +418,39 @@
                     try self.stack.append(Value{ .i32 = a + b });
                 },
                 .i32_sub => {
-                    const b = self.stack.pop().?.i32;
                     const a = self.stack.pop().?.i32;
-                    try self.stack.append(Value{ .i32 = a - b });
+                    const b = self.stack.pop().?.i32;
+                    // Wrapping arithmetic: wasm integer ops are modulo 2^32.
+                    try self.stack.append(Value{ .i32 = b -% a });
                 },
                 .i32_and => {
                     const a = self.stack.pop().?.i32;
                     const b = self.stack.pop().?.i32;
                     try self.stack.append(Value{ .i32 = a & b });
                 },
-                .i32_mul => @panic("UNIMPLEMENTED"),
+                .i32_mul => {
+                    const a = self.stack.pop().?.i32;
+                    const b = self.stack.pop().?.i32;
+                    try self.stack.append(Value{ .i32 = a *% b });
+                },
                 .i32_div_s => @panic("UNIMPLEMENTED"),
-                .i32_div_u => @panic("UNIMPLEMENTED"),
+                .i32_div_u => {
+                    const a_unsigned = @as(u32, @bitCast(self.stack.pop().?.i32));
+                    const b_unsigned = @as(u32, @bitCast(self.stack.pop().?.i32));
+                    try self.stack.append(Value{ .i32 = @bitCast(b_unsigned / a_unsigned) });
+                },
                 .i32_rem_s => @panic("UNIMPLEMENTED"),
                 .i32_rem_u => @panic("UNIMPLEMENTED"),
-                .i32_or => @panic("UNIMPLEMENTED"),
+                .i32_or => {
+                    const a = self.stack.pop().?.i32;
+                    const b = self.stack.pop().?.i32;
+                    try self.stack.append(Value{ .i32 = a | b });
+                },
                 .i32_xor => @panic("UNIMPLEMENTED"),
-                .i32_shl => @panic("UNIMPLEMENTED"),
+                .i32_shl => {
+                    // Wasm masks the shift count to the low 5 bits, so truncate
+                    // rather than @intCast, which would panic for counts >= 32.
+                    const a = @as(u5, @truncate(@as(u32, @bitCast(self.stack.pop().?.i32))));
+                    const b = self.stack.pop().?.i32;
+                    try self.stack.append(Value{ .i32 = b << a });
+                },
                 .i32_shr_s => @panic("UNIMPLEMENTED"),
                 .i32_shr_u => @panic("UNIMPLEMENTED"),
                 .i32_rotl => @panic("UNIMPLEMENTED"),
@@ -463,8 +574,19 @@
         }
     }
 
+    fn reverseSlice(slice: []Value) void {
+        if (slice.len < 2) return; // avoid index underflow on an empty slice
+        var i: usize = 0;
+        var j = slice.len - 1;
+        while (i < j) {
+            std.mem.swap(Value, &slice[i], &slice[j]);
+            i += 1;
+            j -= 1;
+        }
+    }
+
     pub fn call(self: *Runtime, allocator: Allocator, function: usize, parameters: []Value) AllocationError!void {
         const f = self.module.functions[function];
+        // Callers pass arguments in push order; the locals expect them reversed.
+        reverseSlice(parameters);
         switch (f.typ) {
             .internal => {
                 const ir: IR = f.typ.internal.ir;
diff --git a/src/sideros.zig b/src/sideros.zig
index 44f334b..68e869b 100644
--- a/src/sideros.zig
+++ b/src/sideros.zig
@@ -1,5 +1,6 @@
 pub const ecs = @import("ecs");
 pub const Renderer = @import("renderer");
+pub const mods = @import("mods");
 
 const api = @cImport({
     @cInclude("sideros_api.h");
@@ -12,6 +13,35 @@ const allocator = gpa.allocator();
 var pool: ecs.Pool = undefined;
 var renderer: Renderer = undefined;
 
+fn init_mods() void {
+    var global_runtime = mods.GlobalRuntime.init(allocator);
+    defer global_runtime.deinit();
+
+    // const file = std.fs.cwd().openFile("assets/core.wasm", .{}) catch @panic("Couldn't open assets/core.wasm");
+    const file = std.fs.cwd().openFile("./test.wasm", .{}) catch @panic("Couldn't open test.wasm");
+    const all = file.readToEndAlloc(allocator, 1_000_000) catch @panic("Unable to read the file"); // 1 MB
+    defer allocator.free(all);
+    var parser = mods.Parser.init(allocator, all) catch @panic("Failed to init parser");
+    defer parser.deinit();
+    parser.parseModule() catch |err| {
+        std.debug.panic("[ERROR]: error {any} at byte {x}(0x{x})\n", .{ err, parser.byte_idx, parser.bytes[parser.byte_idx] });
+    };
+    const module = parser.module();
+    defer module.deinit(allocator);
+
+    for (0..parser.globalTypes.len) |i| {
+        global_runtime.addGlobal(@intCast(i), parser.globalTypes[i], parser.globalValues[i]) catch @panic("Failed to add runtime global");
+    }
+
+    var runtime = mods.Runtime.init(allocator, module, &global_runtime) catch @panic("Failed to init runtime");
+    defer runtime.deinit(allocator);
+
+    var parameters = [_]mods.VM.Value{.{ .i32 = 17 }};
+    runtime.callExternal(allocator, .preinit, &parameters) catch @panic("Failed to call to preinit");
+    const result = runtime.stack.pop().?;
+    std.debug.print("Result of preinit: {any}\n", .{result});
+}
+
 export fn sideros_init(init: api.GameInit) callconv(.c) void {
     pool = ecs.Pool.init(allocator, .{
         .camera = .{
@@ -26,6 +56,7 @@ export fn sideros_init(init: api.GameInit) callconv(.c) void {
     pool.addSystemGroup(&[_]ecs.System{Renderer.render}, true) catch @panic("TODO: Gracefuly handle error");
     pool.resources.renderer = &renderer;
     pool.tick();
+    // init_mods();
 }
 
 export fn sideros_update(gameUpdate: api.GameUpdate) callconv(.c) void {
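
Note: with the varargs format strings gone from modding/log.zig, a guest mod
now passes an explicit pointer and length to the host, which reads that range
of guest memory directly. A minimal guest-side wrapper over the new ABI (a
sketch; the import path and wrapper names are assumptions):

    const log = @import("log.zig");

    /// Sends `msg` to the host, which reads `len` bytes of guest memory
    /// starting at `string` and prints them with an [INFO] prefix.
    pub fn info(msg: []const u8) void {
        log.logInfo(@ptrCast(msg.ptr), msg.len);
    }

    pub fn example() void {
        info("mod loaded"); // host prints: [INFO]: mod loaded
    }
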