From 38aa431e03b19897a7c3c8505f66f4305d8c29b2 Mon Sep 17 00:00:00 2001 From: Koakuma Date: Thu, 26 May 2022 19:48:50 +0700 Subject: [PATCH 01/20] stage2: sparc64: Fix CompareOperator <-> ICondition mapping --- src/arch/sparc64/bits.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/arch/sparc64/bits.zig b/src/arch/sparc64/bits.zig index f4226b49da9b..27c4a79c7d8e 100644 --- a/src/arch/sparc64/bits.zig +++ b/src/arch/sparc64/bits.zig @@ -644,7 +644,7 @@ pub const Instruction = union(enum) { .gt => .gu, .neq => .ne, .lt => .cs, - .lte => .le, + .lte => .leu, .eq => .eq, }; } From 093332c02ed828c6c24d6c7e113ae2804ba7e6f3 Mon Sep 17 00:00:00 2001 From: Koakuma Date: Thu, 26 May 2022 20:20:48 +0700 Subject: [PATCH 02/20] stage2: sparc64: Implement condition code spilling --- src/arch/sparc64/CodeGen.zig | 88 ++++++++++++++++++++++++++++-------- src/arch/sparc64/Emit.zig | 2 + src/arch/sparc64/Mir.zig | 20 ++++++++ 3 files changed, 90 insertions(+), 20 deletions(-) diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index eb9d9a4ad9c2..42dfc882546c 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -131,12 +131,18 @@ const MCValue = union(enum) { stack_offset: u32, /// The value is a pointer to one of the stack variables (payload is stack offset). ptr_stack_offset: u32, - /// The value is in the compare flags assuming an unsigned operation, - /// with this operator applied on top of it. - compare_flags_unsigned: math.CompareOperator, - /// The value is in the compare flags assuming a signed operation, - /// with this operator applied on top of it. - compare_flags_signed: math.CompareOperator, + /// The value is in the specified CCR assuming an unsigned operation, + /// with the operator applied on top of it. 
+ compare_flags_unsigned: struct { + cmp: math.CompareOperator, + ccr: Instruction.CCR, + }, + /// The value is in the specified CCR assuming an signed operation, + /// with the operator applied on top of it. + compare_flags_signed: struct { + cmp: math.CompareOperator, + ccr: Instruction.CCR, + }, fn isMemory(mcv: MCValue) bool { return switch (mcv) { @@ -1086,8 +1092,8 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { self.compare_flags_inst = inst; break :result switch (int_info.signedness) { - .signed => MCValue{ .compare_flags_signed = op }, - .unsigned => MCValue{ .compare_flags_unsigned = op }, + .signed => MCValue{ .compare_flags_signed = .{ .cmp = op, .ccr = .xcc } }, + .unsigned => MCValue{ .compare_flags_unsigned = .{ .cmp = op, .ccr = .xcc } }, }; } else { return self.fail("TODO SPARCv9 cmp for ints > 64 bits", .{}); @@ -1113,16 +1119,20 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { .tag = .bpcc, .data = .{ .branch_predict_int = .{ - .ccr = .xcc, + .ccr = switch (cond) { + .compare_flags_signed => |cmp_op| cmp_op.ccr, + .compare_flags_unsigned => |cmp_op| cmp_op.ccr, + else => unreachable, + }, .cond = switch (cond) { .compare_flags_signed => |cmp_op| blk: { // Here we map to the opposite condition because the jump is to the false branch. - const condition = Instruction.ICondition.fromCompareOperatorSigned(cmp_op); + const condition = Instruction.ICondition.fromCompareOperatorSigned(cmp_op.cmp); break :blk condition.negate(); }, .compare_flags_unsigned => |cmp_op| blk: { // Here we map to the opposite condition because the jump is to the false branch. 
- const condition = Instruction.ICondition.fromCompareOperatorUnsigned(cmp_op); + const condition = Instruction.ICondition.fromCompareOperatorUnsigned(cmp_op.cmp); break :blk condition.negate(); }, else => unreachable, @@ -2290,8 +2300,47 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void switch (mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. - .compare_flags_signed => return self.fail("TODO: genSetReg for compare_flags_signed", .{}), - .compare_flags_unsigned => return self.fail("TODO: genSetReg for compare_flags_unsigned", .{}), + .compare_flags_signed, + .compare_flags_unsigned, + => { + const condition = switch (mcv) { + .compare_flags_unsigned => |op| Instruction.ICondition.fromCompareOperatorUnsigned(op.cmp), + .compare_flags_signed => |op| Instruction.ICondition.fromCompareOperatorSigned(op.cmp), + else => unreachable, + }; + + const ccr = switch (mcv) { + .compare_flags_unsigned => |op| op.ccr, + .compare_flags_signed => |op| op.ccr, + else => unreachable, + }; + // TODO handle floating point CCRs + assert(ccr == .xcc or ccr == .icc); + + _ = try self.addInst(.{ + .tag = .mov, + .data = .{ + .arithmetic_2op = .{ + .is_imm = false, + .rs1 = reg, + .rs2_or_imm = .{ .rs2 = .g0 }, + }, + }, + }); + + _ = try self.addInst(.{ + .tag = .movcc, + .data = .{ + .conditional_move = .{ + .ccr = ccr, + .cond = .{ .icond = condition }, + .is_imm = true, + .rd = reg, + .rs2_or_imm = .{ .imm = 1 }, + }, + }, + }); + }, .undef => { if (!self.wantSafety()) return; // The already existing value will do just fine. 
@@ -2644,7 +2693,7 @@ fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue { } }, }); - return MCValue{ .compare_flags_unsigned = .gt }; + return MCValue{ .compare_flags_unsigned = .{ .cmp = .gt, .ccr = .xcc } }; } else { return self.fail("TODO isErr for errors with size > 8", .{}); } @@ -2658,8 +2707,8 @@ fn isNonErr(self: *Self, ty: Type, operand: MCValue) !MCValue { const is_err_result = try self.isErr(ty, operand); switch (is_err_result) { .compare_flags_unsigned => |op| { - assert(op == .gt); - return MCValue{ .compare_flags_unsigned = .lte }; + assert(op.cmp == .gt); + return MCValue{ .compare_flags_unsigned = .{ .cmp = .gt, .ccr = op.ccr } }; }, .immediate => |imm| { assert(imm == 0); @@ -3014,14 +3063,13 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void { fn spillCompareFlagsIfOccupied(self: *Self) !void { if (self.compare_flags_inst) |inst_to_save| { const mcv = self.getResolvedInstValue(inst_to_save); - switch (mcv) { + const new_mcv = switch (mcv) { .compare_flags_signed, .compare_flags_unsigned, - => {}, + => try self.allocRegOrMem(inst_to_save, true), else => unreachable, // mcv doesn't occupy the compare flags - } + }; - const new_mcv = try self.allocRegOrMem(inst_to_save, true); try self.setRegOrMem(self.air.typeOfIndex(inst_to_save), new_mcv, mcv); log.debug("spilling {d} to mcv {any}", .{ inst_to_save, new_mcv }); diff --git a/src/arch/sparc64/Emit.zig b/src/arch/sparc64/Emit.zig index 1b40cc6e215b..55de28d4b355 100644 --- a/src/arch/sparc64/Emit.zig +++ b/src/arch/sparc64/Emit.zig @@ -94,6 +94,8 @@ pub fn emitMir( .@"or" => try emit.mirArithmetic3Op(inst), + .movcc => @panic("TODO implement sparc64 movcc"), + .mulx => try emit.mirArithmetic3Op(inst), .nop => try emit.mirNop(), diff --git a/src/arch/sparc64/Mir.zig b/src/arch/sparc64/Mir.zig index 2ef66a1fa4da..dada29ac18aa 100644 --- a/src/arch/sparc64/Mir.zig +++ b/src/arch/sparc64/Mir.zig @@ -74,6 +74,10 @@ pub const Inst = struct { // TODO add other operations. 
@"or", + /// A.35 Move Integer Register on Condition (MOVcc) + /// This uses the conditional_move field. + movcc, + /// A.37 Multiply and Divide (64-bit) /// This uses the arithmetic_3op field. // TODO add other operations. @@ -216,6 +220,22 @@ pub const Inst = struct { inst: Index, }, + /// Conditional move. + /// if is_imm true then it uses the imm field of rs2_or_imm, + /// otherwise it uses rs2 field. + /// + /// Used by e.g. movcc + conditional_move: struct { + is_imm: bool, + ccr: Instruction.CCR, + cond: Instruction.Condition, + rd: Register, + rs2_or_imm: union { + rs2: Register, + imm: i11, + }, + }, + /// No additional data /// /// Used by e.g. flushw From 3d662cfaf4579cb01546c3e5123eece70fd60b9a Mon Sep 17 00:00:00 2001 From: Koakuma Date: Fri, 27 May 2022 19:53:16 +0700 Subject: [PATCH 03/20] stage2: sparc64: Implement airAddSubOverflow --- src/arch/sparc64/CodeGen.zig | 150 ++++++++++++++++++++++++++++++++--- src/arch/sparc64/Emit.zig | 1 + src/arch/sparc64/Mir.zig | 1 + 3 files changed, 143 insertions(+), 9 deletions(-) diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 42dfc882546c..088ee8dcdb8b 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -123,6 +123,16 @@ const MCValue = union(enum) { immediate: u64, /// The value is in a target-specific register. register: Register, + /// The value is a tuple { wrapped, overflow } where + /// wrapped is stored in the register and the overflow bit is + /// stored in the C (signed) or V (unsigned) flag of the CCR. + /// + /// This MCValue is only generated by a add_with_overflow or + /// sub_with_overflow instruction operating on 32- or 64-bit values. + register_with_overflow: struct { + reg: Register, + flag: struct { cond: Instruction.ICondition, ccr: Instruction.CCR }, + }, /// The value is in memory at a hard-coded address. /// If the type is a pointer, it means the pointer address is at this memory location. 
memory: u64, @@ -525,8 +535,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .trunc_float, => @panic("TODO try self.airUnaryMath(inst)"), - .add_with_overflow => @panic("TODO try self.airAddWithOverflow(inst)"), - .sub_with_overflow => @panic("TODO try self.airSubWithOverflow(inst)"), + .add_with_overflow => try self.airAddSubWithOverflow(inst), + .sub_with_overflow => try self.airAddSubWithOverflow(inst), .mul_with_overflow => @panic("TODO try self.airMulWithOverflow(inst)"), .shl_with_overflow => @panic("TODO try self.airShlWithOverflow(inst)"), @@ -684,6 +694,88 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { } } +fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { + const tag = self.air.instructions.items(.tag)[inst]; + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const lhs = try self.resolveInst(extra.lhs); + const rhs = try self.resolveInst(extra.rhs); + const lhs_ty = self.air.typeOf(extra.lhs); + const rhs_ty = self.air.typeOf(extra.rhs); + + switch (lhs_ty.zigTypeTag()) { + .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}), + .Int => { + const mod = self.bin_file.options.module.?; + assert(lhs_ty.eql(rhs_ty, mod)); + const int_info = lhs_ty.intInfo(self.target.*); + switch (int_info.bits) { + 32, 64 => { + // Only say yes if the operation is + // commutative, i.e. 
we can swap both of the + // operands + const lhs_immediate_ok = switch (tag) { + .add_with_overflow => lhs == .immediate and lhs.immediate <= std.math.maxInt(u12), + .sub_with_overflow => false, + else => unreachable, + }; + const rhs_immediate_ok = switch (tag) { + .add_with_overflow, + .sub_with_overflow, + => rhs == .immediate and rhs.immediate <= std.math.maxInt(u12), + else => unreachable, + }; + + const mir_tag: Mir.Inst.Tag = switch (tag) { + .add_with_overflow => .addcc, + .sub_with_overflow => .subcc, + else => unreachable, + }; + + try self.spillCompareFlagsIfOccupied(); + self.compare_flags_inst = inst; + + const dest = blk: { + if (rhs_immediate_ok) { + break :blk try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, null); + } else if (lhs_immediate_ok) { + // swap lhs and rhs + break :blk try self.binOpImmediate(mir_tag, rhs, lhs, rhs_ty, true, null); + } else { + break :blk try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, null); + } + }; + + const cond = switch (int_info.signedness) { + .unsigned => switch (tag) { + .add_with_overflow => Instruction.ICondition.cs, + .sub_with_overflow => Instruction.ICondition.cc, + else => unreachable, + }, + .signed => Instruction.ICondition.vs, + }; + + const ccr = switch (int_info.bits) { + 32 => Instruction.CCR.icc, + 64 => Instruction.CCR.xcc, + else => unreachable, + }; + + break :result MCValue{ .register_with_overflow = .{ + .reg = dest.register, + .flag = .{ .cond = cond, .ccr = ccr }, + } }; + }, + else => return self.fail("TODO overflow operations on other integer sizes", .{}), + } + }, + else => unreachable, + } + }; + return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); +} + fn airAlloc(self: *Self, inst: Air.Inst.Index) !void { const stack_offset = try self.allocMemPtr(inst); return self.finishAir(inst, .{ .ptr_stack_offset = stack_offset }, .{ .none, .none, .none }); @@ -955,13 +1047,6 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. 
switch (mc_arg) { .none => continue, - .undef => unreachable, - .immediate => unreachable, - .unreach => unreachable, - .dead => unreachable, - .memory => unreachable, - .compare_flags_signed => unreachable, - .compare_flags_unsigned => unreachable, .register => |reg| { try self.register_manager.getReg(reg, null); try self.genSetReg(arg_ty, reg, arg_mcv); @@ -972,6 +1057,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. .ptr_stack_offset => { return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{}); }, + else => unreachable, } } @@ -1894,6 +1980,7 @@ fn binOpImmediate( const mir_data: Mir.Inst.Data = switch (mir_tag) { .add, + .addcc, .mulx, .subcc, => .{ @@ -2010,6 +2097,7 @@ fn binOpRegister( const mir_data: Mir.Inst.Data = switch (mir_tag) { .add, + .addcc, .mulx, .subcc, => .{ @@ -2473,6 +2561,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void }, }); }, + .register_with_overflow => unreachable, .memory => |addr| { // The value is in memory at a hard-coded address. // If the type is a pointer, it means the pointer address is at this memory location. 
@@ -2519,6 +2608,47 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro return self.fail("TODO larger stack offsets", .{}); return self.genStore(reg, .sp, i13, simm13, abi_size); }, + .register_with_overflow => |rwo| { + const reg_lock = self.register_manager.lockReg(rwo.reg); + defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg); + + const wrapped_ty = ty.structFieldType(0); + try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg }); + + const overflow_bit_ty = ty.structFieldType(1); + const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, self.target.*)); + const cond_reg = try self.register_manager.allocReg(null, gp); + + // TODO handle floating point CCRs + assert(rwo.flag.ccr == .xcc or rwo.flag.ccr == .icc); + + _ = try self.addInst(.{ + .tag = .mov, + .data = .{ + .arithmetic_2op = .{ + .is_imm = false, + .rs1 = cond_reg, + .rs2_or_imm = .{ .rs2 = .g0 }, + }, + }, + }); + + _ = try self.addInst(.{ + .tag = .movcc, + .data = .{ + .conditional_move = .{ + .ccr = rwo.flag.ccr, + .cond = .{ .icond = rwo.flag.cond }, + .is_imm = true, + .rd = cond_reg, + .rs2_or_imm = .{ .imm = 1 }, + }, + }, + }); + try self.genSetStack(overflow_bit_ty, stack_offset - overflow_bit_offset, .{ + .register = cond_reg, + }); + }, .memory, .stack_offset => { switch (mcv) { .stack_offset => |off| { @@ -2760,6 +2890,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo .dead => unreachable, .compare_flags_unsigned, .compare_flags_signed, + .register_with_overflow, => unreachable, // cannot hold an address .immediate => |imm| try self.setRegOrMem(elem_ty, dst_mcv, .{ .memory = imm }), .ptr_stack_offset => |off| try self.setRegOrMem(elem_ty, dst_mcv, .{ .stack_offset = off }), @@ -3100,6 +3231,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type .dead => unreachable, .compare_flags_unsigned, .compare_flags_signed, + .register_with_overflow, 
=> unreachable, // cannot hold an address .immediate => |imm| { try self.setRegOrMem(value_ty, .{ .memory = imm }, value); diff --git a/src/arch/sparc64/Emit.zig b/src/arch/sparc64/Emit.zig index 55de28d4b355..2383e6c14601 100644 --- a/src/arch/sparc64/Emit.zig +++ b/src/arch/sparc64/Emit.zig @@ -79,6 +79,7 @@ pub fn emitMir( .dbg_epilogue_begin => try emit.mirDebugEpilogueBegin(), .add => try emit.mirArithmetic3Op(inst), + .addcc => @panic("TODO implement sparc64 addcc"), .bpr => try emit.mirConditionalBranch(inst), .bpcc => try emit.mirConditionalBranch(inst), diff --git a/src/arch/sparc64/Mir.zig b/src/arch/sparc64/Mir.zig index dada29ac18aa..36849eb48aad 100644 --- a/src/arch/sparc64/Mir.zig +++ b/src/arch/sparc64/Mir.zig @@ -42,6 +42,7 @@ pub const Inst = struct { /// This uses the arithmetic_3op field. // TODO add other operations. add, + addcc, /// A.3 Branch on Integer Register with Prediction (BPr) /// This uses the branch_predict_reg field. From 9db81fee5df6abcf1eb1212ecc005d22f7b2ca32 Mon Sep 17 00:00:00 2001 From: Koakuma Date: Fri, 27 May 2022 23:00:11 +0700 Subject: [PATCH 04/20] stage2: sparc64: Implement airStructFieldVal --- src/arch/sparc64/CodeGen.zig | 92 +++++++++++++++++++++++++++++++++--- 1 file changed, 85 insertions(+), 7 deletions(-) diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 088ee8dcdb8b..f63995cf5c15 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -592,7 +592,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ret_load => try self.airRetLoad(inst), .store => try self.airStore(inst), .struct_field_ptr=> @panic("TODO try self.airStructFieldPtr(inst)"), - .struct_field_val=> @panic("TODO try self.airStructFieldVal(inst)"), + .struct_field_val=> try self.airStructFieldVal(inst), .array_to_slice => @panic("TODO try self.airArrayToSlice(inst)"), .int_to_float => @panic("TODO try self.airIntToFloat(inst)"), .float_to_int => @panic("TODO try 
self.airFloatToInt(inst)"), @@ -1598,6 +1598,75 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void { return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } +fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const extra = self.air.extraData(Air.StructField, ty_pl.payload).data; + const operand = extra.struct_operand; + const index = extra.field_index; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const mcv = try self.resolveInst(operand); + const struct_ty = self.air.typeOf(operand); + const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*)); + + switch (mcv) { + .dead, .unreach => unreachable, + .stack_offset => |off| { + break :result MCValue{ .stack_offset = off - struct_field_offset }; + }, + .memory => |addr| { + break :result MCValue{ .memory = addr + struct_field_offset }; + }, + .register_with_overflow => |rwo| { + switch (index) { + 0 => { + // get wrapped value: return register + break :result MCValue{ .register = rwo.reg }; + }, + 1 => { + // TODO return special MCValue condition flags + // get overflow bit: set register to C flag + // resp. 
V flag + const dest_reg = try self.register_manager.allocReg(null, gp); + + // TODO handle floating point CCRs + assert(rwo.flag.ccr == .xcc or rwo.flag.ccr == .icc); + + _ = try self.addInst(.{ + .tag = .mov, + .data = .{ + .arithmetic_2op = .{ + .is_imm = false, + .rs1 = dest_reg, + .rs2_or_imm = .{ .rs2 = .g0 }, + }, + }, + }); + + _ = try self.addInst(.{ + .tag = .movcc, + .data = .{ + .conditional_move = .{ + .ccr = rwo.flag.ccr, + .cond = .{ .icond = rwo.flag.cond }, + .is_imm = true, + .rd = dest_reg, + .rs2_or_imm = .{ .imm = 1 }, + }, + }, + }); + + break :result MCValue{ .register = dest_reg }; + }, + else => unreachable, + } + }, + else => return self.fail("TODO implement codegen struct_field_val for {}", .{mcv}), + } + }; + + return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none }); +} + fn airSwitch(self: *Self, inst: Air.Inst.Index) !void { _ = self; _ = inst; @@ -1664,8 +1733,8 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u if (abi_align > self.stack_align) self.stack_align = abi_align; // TODO find a free slot instead of always appending - const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align); - self.next_stack_offset = offset + abi_size; + const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align) + abi_size; + self.next_stack_offset = offset; if (self.next_stack_offset > self.max_end_stack) self.max_end_stack = self.next_stack_offset; try self.stack.putNoClobber(self.gpa, offset, .{ @@ -2436,8 +2505,9 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa }); }, .ptr_stack_offset => |off| { - const simm13 = math.cast(u12, off + abi.stack_bias + abi.stack_reserved_area) orelse - return self.fail("TODO larger stack offsets", .{}); + const real_offset = off + abi.stack_bias + abi.stack_reserved_area; + const simm13 = math.cast(i13, real_offset) orelse + 
return self.fail("TODO larger stack offsets: {}", .{real_offset}); _ = try self.addInst(.{ .tag = .add, @@ -2571,7 +2641,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .stack_offset => |off| { const real_offset = off + abi.stack_bias + abi.stack_reserved_area; const simm13 = math.cast(i13, real_offset) orelse - return self.fail("TODO larger stack offsets", .{}); + return self.fail("TODO larger stack offsets: {}", .{real_offset}); try self.genLoad(reg, .sp, i13, simm13, ty.abiSize(self.target.*)); }, } @@ -2605,7 +2675,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro .register => |reg| { const real_offset = stack_offset + abi.stack_bias + abi.stack_reserved_area; const simm13 = math.cast(i13, real_offset) orelse - return self.fail("TODO larger stack offsets", .{}); + return self.fail("TODO larger stack offsets: {}", .{real_offset}); return self.genStore(reg, .sp, i13, simm13, abi_size); }, .register_with_overflow => |rwo| { @@ -3198,6 +3268,7 @@ fn spillCompareFlagsIfOccupied(self: *Self) !void { .compare_flags_signed, .compare_flags_unsigned, => try self.allocRegOrMem(inst_to_save, true), + .register_with_overflow => try self.allocRegOrMem(inst_to_save, false), else => unreachable, // mcv doesn't occupy the compare flags }; @@ -3208,6 +3279,13 @@ fn spillCompareFlagsIfOccupied(self: *Self) !void { try branch.inst_table.put(self.gpa, inst_to_save, new_mcv); self.compare_flags_inst = null; + + // TODO consolidate with register manager and spillInstruction + // this call should really belong in the register manager! 
+ switch (mcv) { + .register_with_overflow => |rwo| self.register_manager.freeReg(rwo.reg), + else => {}, + } } } From 3220e0b61c127b60188a040b5f4679765e8f6f40 Mon Sep 17 00:00:00 2001 From: Koakuma Date: Sat, 28 May 2022 09:05:57 +0700 Subject: [PATCH 05/20] stage2: sparc64: Proper handling of compare flags --- src/arch/sparc64/CodeGen.zig | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index f63995cf5c15..a4c6635b4c09 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -2029,6 +2029,7 @@ fn binOpImmediate( defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg); const dest_reg = switch (mir_tag) { + .cmp => undefined, // cmp has no destination register else => if (metadata) |md| blk: { if (lhs_is_register and self.reuseOperand( md.inst, @@ -2148,6 +2149,7 @@ fn binOpRegister( defer if (new_rhs_lock) |reg| self.register_manager.unlockReg(reg); const dest_reg = switch (mir_tag) { + .cmp => undefined, // cmp has no destination register else => if (metadata) |md| blk: { if (lhs_is_register and self.reuseOperand(md.inst, md.lhs, 0, lhs)) { break :blk lhs_reg; @@ -3064,6 +3066,10 @@ fn processDeath(self: *Self, inst: Air.Inst.Index) void { .register => |reg| { self.register_manager.freeReg(reg); }, + .register_with_overflow => |rwo| { + self.register_manager.freeReg(rwo.reg); + self.compare_flags_inst = null; + }, .compare_flags_signed, .compare_flags_unsigned => { self.compare_flags_inst = null; }, From 89b4195c69c53e8b1fdd3f5ddd2c9658a61daaac Mon Sep 17 00:00:00 2001 From: Koakuma Date: Sat, 28 May 2022 09:06:25 +0700 Subject: [PATCH 06/20] stage2: sparc64: Account for delay slot in airBlock --- src/arch/sparc64/CodeGen.zig | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index a4c6635b4c09..bc73ea02b33e 100644 --- a/src/arch/sparc64/CodeGen.zig +++ 
b/src/arch/sparc64/CodeGen.zig @@ -991,9 +991,14 @@ fn airBlock(self: *Self, inst: Air.Inst.Index) !void { const relocs = &self.blocks.getPtr(inst).?.relocs; if (relocs.items.len > 0 and relocs.items[relocs.items.len - 1] == self.mir_instructions.len - 1) { // If the last Mir instruction is the last relocation (which - // would just jump one instruction further), it can be safely + // would just jump two instruction further), it can be safely // removed - self.mir_instructions.orderedRemove(relocs.pop()); + const index = relocs.pop(); + + // First, remove the delay slot, then remove + // the branch instruction itself. + self.mir_instructions.orderedRemove(index + 1); + self.mir_instructions.orderedRemove(index); } for (relocs.items) |reloc| { try self.performReloc(reloc); From 2dfe307d6003f36279742621b8da29a162a7149e Mon Sep 17 00:00:00 2001 From: Koakuma Date: Wed, 1 Jun 2022 08:45:51 +0700 Subject: [PATCH 07/20] stage2: sparc64: Some bookkeeping fixes --- src/arch/sparc64/CodeGen.zig | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index bc73ea02b33e..616ff45b43e7 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -686,6 +686,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { // zig fmt: on } + assert(!self.register_manager.lockedRegsExist()); + if (std.debug.runtime_safety) { if (self.air_bookkeeping < old_air_bookkeeping + 1) { std.debug.panic("in codegen.zig, handling of AIR instruction %{d} ('{}') did not do proper bookkeeping. 
Look for a missing call to finishAir.", .{ inst, air_tags[inst] }); @@ -2915,7 +2917,7 @@ fn isNonErr(self: *Self, ty: Type, operand: MCValue) !MCValue { switch (is_err_result) { .compare_flags_unsigned => |op| { assert(op.cmp == .gt); - return MCValue{ .compare_flags_unsigned = .{ .cmp = .gt, .ccr = op.ccr } }; + return MCValue{ .compare_flags_unsigned = .{ .cmp = .lte, .ccr = op.ccr } }; }, .immediate => |imm| { assert(imm == 0); @@ -3176,7 +3178,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { const ref_int = @enumToInt(inst); if (ref_int < Air.Inst.Ref.typed_value_map.len) { const tv = Air.Inst.Ref.typed_value_map[ref_int]; - if (!tv.ty.hasRuntimeBits()) { + if (!tv.ty.hasRuntimeBits() and !tv.ty.isError()) { return MCValue{ .none = {} }; } return self.genTypedValue(tv); @@ -3184,7 +3186,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { // If the type has no codegen bits, no need to store it. const inst_ty = self.air.typeOf(inst); - if (!inst_ty.hasRuntimeBits()) + if (!inst_ty.hasRuntimeBits() and !inst_ty.isError()) return MCValue{ .none = {} }; const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); From 97c43afefe2938119b0c4d14032f9e4b41926716 Mon Sep 17 00:00:00 2001 From: Koakuma Date: Wed, 1 Jun 2022 08:58:14 +0700 Subject: [PATCH 08/20] stage2: sparc64: Spill CCR before doing calls --- src/arch/sparc64/CodeGen.zig | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 616ff45b43e7..5af1e856c889 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -1047,6 +1047,11 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. 
var info = try self.resolveCallingConventionValues(fn_ty, .caller); defer info.deinit(self); + + // CCR is volatile across function calls + // (SCD 2.4.1, page 3P-10) + try self.spillCompareFlagsIfOccupied(); + for (info.args) |mc_arg, arg_i| { const arg = args[arg_i]; const arg_ty = self.air.typeOf(arg); From 9ad74b60874447b81747651e28246d3f2168940a Mon Sep 17 00:00:00 2001 From: Koakuma Date: Wed, 1 Jun 2022 09:41:40 +0700 Subject: [PATCH 09/20] stage2: sparc64: Implement SPARCv9 addcc and movcc --- src/arch/sparc64/Emit.zig | 34 ++++++++++++++++++++++++++++++++-- src/arch/sparc64/bits.zig | 16 ++++++++++++++++ 2 files changed, 48 insertions(+), 2 deletions(-) diff --git a/src/arch/sparc64/Emit.zig b/src/arch/sparc64/Emit.zig index 2383e6c14601..44c14752e411 100644 --- a/src/arch/sparc64/Emit.zig +++ b/src/arch/sparc64/Emit.zig @@ -79,7 +79,7 @@ pub fn emitMir( .dbg_epilogue_begin => try emit.mirDebugEpilogueBegin(), .add => try emit.mirArithmetic3Op(inst), - .addcc => @panic("TODO implement sparc64 addcc"), + .addcc => try emit.mirArithmetic3Op(inst), .bpr => try emit.mirConditionalBranch(inst), .bpcc => try emit.mirConditionalBranch(inst), @@ -95,7 +95,7 @@ pub fn emitMir( .@"or" => try emit.mirArithmetic3Op(inst), - .movcc => @panic("TODO implement sparc64 movcc"), + .movcc => try emit.mirConditionalMove(inst), .mulx => try emit.mirArithmetic3Op(inst), @@ -212,6 +212,7 @@ fn mirArithmetic3Op(emit: *Emit, inst: Mir.Inst.Index) !void { const imm = data.rs2_or_imm.imm; switch (tag) { .add => try emit.writeInstruction(Instruction.add(i13, rs1, imm, rd)), + .addcc => try emit.writeInstruction(Instruction.addcc(i13, rs1, imm, rd)), .jmpl => try emit.writeInstruction(Instruction.jmpl(i13, rs1, imm, rd)), .ldub => try emit.writeInstruction(Instruction.ldub(i13, rs1, imm, rd)), .lduh => try emit.writeInstruction(Instruction.lduh(i13, rs1, imm, rd)), @@ -233,6 +234,7 @@ fn mirArithmetic3Op(emit: *Emit, inst: Mir.Inst.Index) !void { const rs2 = data.rs2_or_imm.rs2; 
switch (tag) { .add => try emit.writeInstruction(Instruction.add(Register, rs1, rs2, rd)), + .addcc => try emit.writeInstruction(Instruction.addcc(Register, rs1, rs2, rd)), .jmpl => try emit.writeInstruction(Instruction.jmpl(Register, rs1, rs2, rd)), .ldub => try emit.writeInstruction(Instruction.ldub(Register, rs1, rs2, rd)), .lduh => try emit.writeInstruction(Instruction.lduh(Register, rs1, rs2, rd)), @@ -297,6 +299,34 @@ fn mirConditionalBranch(emit: *Emit, inst: Mir.Inst.Index) !void { } } +fn mirConditionalMove(emit: *Emit, inst: Mir.Inst.Index) !void { + const tag = emit.mir.instructions.items(.tag)[inst]; + + switch (tag) { + .movcc => { + const data = emit.mir.instructions.items(.data)[inst].conditional_move; + if (data.is_imm) { + try emit.writeInstruction(Instruction.movcc( + i11, + data.cond, + data.ccr, + data.rs2_or_imm.imm, + data.rd, + )); + } else { + try emit.writeInstruction(Instruction.movcc( + Register, + data.cond, + data.ccr, + data.rs2_or_imm.rs2, + data.rd, + )); + } + }, + else => unreachable, + } +} + fn mirNop(emit: *Emit) !void { try emit.writeInstruction(Instruction.nop()); } diff --git a/src/arch/sparc64/bits.zig b/src/arch/sparc64/bits.zig index 27c4a79c7d8e..615c224ae7b7 100644 --- a/src/arch/sparc64/bits.zig +++ b/src/arch/sparc64/bits.zig @@ -1141,6 +1141,14 @@ pub const Instruction = union(enum) { }; } + pub fn addcc(comptime s2: type, rs1: Register, rs2: s2, rd: Register) Instruction { + return switch (s2) { + Register => format3a(0b10, 0b01_0000, rs1, rs2, rd), + i13 => format3b(0b10, 0b01_0000, rs1, rs2, rd), + else => unreachable, + }; + } + pub fn bpcc(cond: ICondition, annul: bool, pt: bool, ccr: CCR, disp: i21) Instruction { return format2c(0b001, .{ .icond = cond }, annul, pt, ccr, disp); } @@ -1197,6 +1205,14 @@ pub const Instruction = union(enum) { }; } + pub fn movcc(comptime s2: type, cond: Condition, ccr: CCR, rs2: s2, rd: Register) Instruction { + return switch (s2) { + Register => format4c(0b10_1100, cond, ccr, rs2, 
rd), + i11 => format4d(0b10_1100, cond, ccr, rs2, rd), + else => unreachable, + }; + } + pub fn mulx(comptime s2: type, rs1: Register, rs2: s2, rd: Register) Instruction { return switch (s2) { Register => format3a(0b10, 0b00_1001, rs1, rs2, rd), From 23150de9c41330351bc5f7b62418617d5714203d Mon Sep 17 00:00:00 2001 From: Koakuma Date: Wed, 1 Jun 2022 19:13:43 +0700 Subject: [PATCH 10/20] stage2: sparc64: Implement airNot --- src/arch/sparc64/CodeGen.zig | 122 ++++++++++++++++++++++++++++++++++- src/arch/sparc64/Emit.zig | 4 ++ src/arch/sparc64/Mir.zig | 9 +++ 3 files changed, 134 insertions(+), 1 deletion(-) diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 5af1e856c889..8e1e3678a938 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -586,7 +586,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .is_err_ptr => @panic("TODO try self.airIsErrPtr(inst)"), .load => try self.airLoad(inst), .loop => try self.airLoop(inst), - .not => @panic("TODO try self.airNot(inst)"), + .not => try self.airNot(inst), .ptrtoint => @panic("TODO try self.airPtrToInt(inst)"), .ret => try self.airRet(inst), .ret_load => try self.airRetLoad(inst), @@ -1507,6 +1507,126 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void { return self.finishAirBookkeeping(); } +fn airNot(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand = try self.resolveInst(ty_op.operand); + const operand_ty = self.air.typeOf(ty_op.operand); + switch (operand) { + .dead => unreachable, + .unreach => unreachable, + .compare_flags_unsigned => |op| { + const r = MCValue{ + .compare_flags_unsigned = .{ + .cmp = switch (op.cmp) { + .gte => .lt, + .gt => .lte, + .neq => .eq, + .lt => .gte, + .lte => .gt, + .eq => .neq, + }, + .ccr = op.ccr, + }, + }; + break :result r; + }, + 
.compare_flags_signed => |op| { + const r = MCValue{ + .compare_flags_signed = .{ + .cmp = switch (op.cmp) { + .gte => .lt, + .gt => .lte, + .neq => .eq, + .lt => .gte, + .lte => .gt, + .eq => .neq, + }, + .ccr = op.ccr, + }, + }; + break :result r; + }, + else => { + switch (operand_ty.zigTypeTag()) { + .Bool => { + // TODO convert this to mvn + and + const op_reg = switch (operand) { + .register => |r| r, + else => try self.copyToTmpRegister(operand_ty, operand), + }; + const reg_lock = self.register_manager.lockRegAssumeUnused(op_reg); + defer self.register_manager.unlockReg(reg_lock); + + const dest_reg = blk: { + if (operand == .register and self.reuseOperand(inst, ty_op.operand, 0, operand)) { + break :blk op_reg; + } + + const reg = try self.register_manager.allocReg(null, gp); + break :blk reg; + }; + + _ = try self.addInst(.{ + .tag = .xor, + .data = .{ + .arithmetic_3op = .{ + .is_imm = true, + .rd = dest_reg, + .rs1 = op_reg, + .rs2_or_imm = .{ .imm = 1 }, + }, + }, + }); + + break :result MCValue{ .register = dest_reg }; + }, + .Vector => return self.fail("TODO bitwise not for vectors", .{}), + .Int => { + const int_info = operand_ty.intInfo(self.target.*); + if (int_info.bits <= 64) { + const op_reg = switch (operand) { + .register => |r| r, + else => try self.copyToTmpRegister(operand_ty, operand), + }; + const reg_lock = self.register_manager.lockRegAssumeUnused(op_reg); + defer self.register_manager.unlockReg(reg_lock); + + const dest_reg = blk: { + if (operand == .register and self.reuseOperand(inst, ty_op.operand, 0, operand)) { + break :blk op_reg; + } + + const reg = try self.register_manager.allocReg(null, gp); + break :blk reg; + }; + + _ = try self.addInst(.{ + .tag = .not, + .data = .{ + .arithmetic_2op = .{ + .is_imm = false, + .rs1 = dest_reg, + .rs2_or_imm = .{ .rs2 = op_reg }, + }, + }, + }); + + try self.truncRegister(dest_reg, dest_reg, int_info.signedness, int_info.bits); + + break :result MCValue{ .register = dest_reg }; + } else { + 
return self.fail("TODO SPARCv9 not on integers > u64/i64", .{}); + } + }, + else => unreachable, + } + }, + } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +} + fn airRet(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); diff --git a/src/arch/sparc64/Emit.zig b/src/arch/sparc64/Emit.zig index 44c14752e411..2fcb5935855a 100644 --- a/src/arch/sparc64/Emit.zig +++ b/src/arch/sparc64/Emit.zig @@ -94,6 +94,8 @@ pub fn emitMir( .ldx => try emit.mirArithmetic3Op(inst), .@"or" => try emit.mirArithmetic3Op(inst), + .xor => @panic("TODO implement sparc64 xor"), + .xnor => @panic("TODO implement sparc64 xnor"), .movcc => try emit.mirConditionalMove(inst), @@ -128,6 +130,8 @@ pub fn emitMir( .cmp => try emit.mirArithmetic2Op(inst), .mov => try emit.mirArithmetic2Op(inst), + + .not => @panic("TODO implement sparc64 not"), } } } diff --git a/src/arch/sparc64/Mir.zig b/src/arch/sparc64/Mir.zig index 36849eb48aad..14867dde3060 100644 --- a/src/arch/sparc64/Mir.zig +++ b/src/arch/sparc64/Mir.zig @@ -74,6 +74,8 @@ pub const Inst = struct { /// This uses the arithmetic_3op field. // TODO add other operations. @"or", + xor, + xnor, /// A.35 Move Integer Register on Condition (MOVcc) /// This uses the conditional_move field. @@ -147,6 +149,13 @@ pub const Inst = struct { /// being the *destination* register. // TODO is it okay to abuse rs1 in this way? mov, // mov rs2/imm, rs1 -> or %g0, rs2/imm, rs1 + + /// Bitwise negation + /// This uses the arithmetic_2op field, with rs1 + /// being the *destination* register. + // TODO is it okay to abuse rs1 in this way? + // TODO this differs from official encoding for convenience, fix it later + not, // not rs2/imm, rs1 -> xnor %g0, rs2/imm, rs1 }; /// The position of an MIR instruction within the `Mir` instructions array. 
From 97f9bf7e90ab10119b8225799590886e911775db Mon Sep 17 00:00:00 2001 From: Koakuma Date: Wed, 1 Jun 2022 19:20:40 +0700 Subject: [PATCH 11/20] stage2: sparc64: Add BPr relocation to performReloc --- src/arch/sparc64/CodeGen.zig | 1 + 1 file changed, 1 insertion(+) diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 8e1e3678a938..8279c8e12326 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -3182,6 +3182,7 @@ fn performReloc(self: *Self, inst: Mir.Inst.Index) !void { const tag = self.mir_instructions.items(.tag)[inst]; switch (tag) { .bpcc => self.mir_instructions.items(.data)[inst].branch_predict_int.inst = @intCast(Mir.Inst.Index, self.mir_instructions.len), + .bpr => self.mir_instructions.items(.data)[inst].branch_predict_reg.inst = @intCast(Mir.Inst.Index, self.mir_instructions.len), else => unreachable, } } From 4d50e52c37cad45bd3d8fd520359cdaee70aaf1f Mon Sep 17 00:00:00 2001 From: Koakuma Date: Wed, 1 Jun 2022 19:29:09 +0700 Subject: [PATCH 12/20] stage2: sparc64: Implement SPARCv9 xor, xnor, & not --- src/arch/sparc64/Emit.zig | 12 +++++++++--- src/arch/sparc64/bits.zig | 16 ++++++++++++++++ 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/src/arch/sparc64/Emit.zig b/src/arch/sparc64/Emit.zig index 2fcb5935855a..5a082f163aed 100644 --- a/src/arch/sparc64/Emit.zig +++ b/src/arch/sparc64/Emit.zig @@ -94,8 +94,8 @@ pub fn emitMir( .ldx => try emit.mirArithmetic3Op(inst), .@"or" => try emit.mirArithmetic3Op(inst), - .xor => @panic("TODO implement sparc64 xor"), - .xnor => @panic("TODO implement sparc64 xnor"), + .xor => try emit.mirArithmetic3Op(inst), + .xnor => try emit.mirArithmetic3Op(inst), .movcc => try emit.mirConditionalMove(inst), @@ -131,7 +131,7 @@ pub fn emitMir( .mov => try emit.mirArithmetic2Op(inst), - .not => @panic("TODO implement sparc64 not"), + .not => try emit.mirArithmetic2Op(inst), } } } @@ -192,6 +192,7 @@ fn mirArithmetic2Op(emit: *Emit, inst: Mir.Inst.Index) !void 
{ .@"return" => try emit.writeInstruction(Instruction.@"return"(i13, rs1, imm)), .cmp => try emit.writeInstruction(Instruction.subcc(i13, rs1, imm, .g0)), .mov => try emit.writeInstruction(Instruction.@"or"(i13, .g0, imm, rs1)), + .not => try emit.writeInstruction(Instruction.xnor(i13, .g0, imm, rs1)), else => unreachable, } } else { @@ -200,6 +201,7 @@ fn mirArithmetic2Op(emit: *Emit, inst: Mir.Inst.Index) !void { .@"return" => try emit.writeInstruction(Instruction.@"return"(Register, rs1, rs2)), .cmp => try emit.writeInstruction(Instruction.subcc(Register, rs1, rs2, .g0)), .mov => try emit.writeInstruction(Instruction.@"or"(Register, .g0, rs2, rs1)), + .not => try emit.writeInstruction(Instruction.xnor(Register, .g0, rs2, rs1)), else => unreachable, } } @@ -223,6 +225,8 @@ fn mirArithmetic3Op(emit: *Emit, inst: Mir.Inst.Index) !void { .lduw => try emit.writeInstruction(Instruction.lduw(i13, rs1, imm, rd)), .ldx => try emit.writeInstruction(Instruction.ldx(i13, rs1, imm, rd)), .@"or" => try emit.writeInstruction(Instruction.@"or"(i13, rs1, imm, rd)), + .xor => try emit.writeInstruction(Instruction.xor(i13, rs1, imm, rd)), + .xnor => try emit.writeInstruction(Instruction.xnor(i13, rs1, imm, rd)), .mulx => try emit.writeInstruction(Instruction.mulx(i13, rs1, imm, rd)), .save => try emit.writeInstruction(Instruction.save(i13, rs1, imm, rd)), .restore => try emit.writeInstruction(Instruction.restore(i13, rs1, imm, rd)), @@ -245,6 +249,8 @@ fn mirArithmetic3Op(emit: *Emit, inst: Mir.Inst.Index) !void { .lduw => try emit.writeInstruction(Instruction.lduw(Register, rs1, rs2, rd)), .ldx => try emit.writeInstruction(Instruction.ldx(Register, rs1, rs2, rd)), .@"or" => try emit.writeInstruction(Instruction.@"or"(Register, rs1, rs2, rd)), + .xor => try emit.writeInstruction(Instruction.xor(Register, rs1, rs2, rd)), + .xnor => try emit.writeInstruction(Instruction.xnor(Register, rs1, rs2, rd)), .mulx => try emit.writeInstruction(Instruction.mulx(Register, rs1, rs2, rd)), .save 
=> try emit.writeInstruction(Instruction.save(Register, rs1, rs2, rd)), .restore => try emit.writeInstruction(Instruction.restore(Register, rs1, rs2, rd)), diff --git a/src/arch/sparc64/bits.zig b/src/arch/sparc64/bits.zig index 615c224ae7b7..7b0342e2a323 100644 --- a/src/arch/sparc64/bits.zig +++ b/src/arch/sparc64/bits.zig @@ -1205,6 +1205,22 @@ pub const Instruction = union(enum) { }; } + pub fn xor(comptime s2: type, rs1: Register, rs2: s2, rd: Register) Instruction { + return switch (s2) { + Register => format3a(0b10, 0b00_0011, rs1, rs2, rd), + i13 => format3b(0b10, 0b00_0011, rs1, rs2, rd), + else => unreachable, + }; + } + + pub fn xnor(comptime s2: type, rs1: Register, rs2: s2, rd: Register) Instruction { + return switch (s2) { + Register => format3a(0b10, 0b00_0111, rs1, rs2, rd), + i13 => format3b(0b10, 0b00_0111, rs1, rs2, rd), + else => unreachable, + }; + } + pub fn movcc(comptime s2: type, cond: Condition, ccr: CCR, rs2: s2, rd: Register) Instruction { return switch (s2) { Register => format4c(0b10_1100, cond, ccr, rs2, rd), From 8b70abfcc66e375d71acf1e753d3589e38ba1a3d Mon Sep 17 00:00:00 2001 From: Koakuma Date: Wed, 1 Jun 2022 19:32:14 +0700 Subject: [PATCH 13/20] stage2: sparc64: Fix & optimize 64-bit truncRegister --- src/arch/sparc64/CodeGen.zig | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 8279c8e12326..a195da231842 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -3569,11 +3569,13 @@ fn truncRegister( }); }, 64 => { + if (dest_reg == operand_reg) + return; // Copy register to itself; nothing to do. 
_ = try self.addInst(.{ .tag = .mov, .data = .{ .arithmetic_2op = .{ - .is_imm = true, + .is_imm = false, .rs1 = dest_reg, .rs2_or_imm = .{ .rs2 = operand_reg }, }, From 5d61f32887c553415215ceadf44cba1bebc7b9da Mon Sep 17 00:00:00 2001 From: Koakuma Date: Wed, 1 Jun 2022 19:54:22 +0700 Subject: [PATCH 14/20] stage2: sparc64: Implement airSlice --- src/arch/sparc64/CodeGen.zig | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index a195da231842..3accb2b6482b 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -517,7 +517,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .shl_sat => @panic("TODO try self.airShlSat(inst)"), .min => @panic("TODO try self.airMin(inst)"), .max => @panic("TODO try self.airMax(inst)"), - .slice => @panic("TODO try self.airSlice(inst)"), + .slice => try self.airSlice(inst), .sqrt, .sin, @@ -1647,6 +1647,26 @@ fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, .{ .ptr_stack_offset = stack_offset }, .{ .none, .none, .none }); } +fn airSlice(self: *Self, inst: Air.Inst.Index) !void { + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const ptr = try self.resolveInst(bin_op.lhs); + const ptr_ty = self.air.typeOf(bin_op.lhs); + const len = try self.resolveInst(bin_op.rhs); + const len_ty = self.air.typeOf(bin_op.rhs); + + const ptr_bits = self.target.cpu.arch.ptrBitWidth(); + const ptr_bytes = @divExact(ptr_bits, 8); + + const stack_offset = try self.allocMem(inst, ptr_bytes * 2, ptr_bytes * 2); + try self.genSetStack(ptr_ty, stack_offset, ptr); + try self.genSetStack(len_ty, stack_offset - ptr_bytes, len); + break :result MCValue{ .stack_offset = stack_offset }; + }; + return self.finishAir(inst, 
result, .{ bin_op.lhs, bin_op.rhs, .none }); +} + fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { const is_volatile = false; // TODO const bin_op = self.air.instructions.items(.data)[inst].bin_op; From c00d493a0001cbc9ebc0c02197ae70d921242b22 Mon Sep 17 00:00:00 2001 From: Koakuma Date: Thu, 2 Jun 2022 23:28:11 +0700 Subject: [PATCH 15/20] stage2: sparc64: Add some notes about stack space allocation --- src/arch/sparc64/abi.zig | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/arch/sparc64/abi.zig b/src/arch/sparc64/abi.zig index 6cdd183b36a0..a26ab9a20b7d 100644 --- a/src/arch/sparc64/abi.zig +++ b/src/arch/sparc64/abi.zig @@ -13,6 +13,11 @@ pub const stack_bias = 2047; // The first 128 bytes of the stack is reserved for register saving purposes. // The ABI also requires to reserve space in the stack for the first six // outgoing arguments, even though they are usually passed in registers. +// TODO Don't allocate the argument space in leaf functions +// TODO Save an RO copy of outgoing arguments in reserved area when building in Debug +// TODO Should we also save it in ReleaseSafe? Solaris and OpenBSD binaries seem to ship +// with argument copying enabled and it doesn't seem to give them big slowdowns so +// I guess it would be okay to do in ReleaseSafe? 
pub const stack_reserved_area = 128 + 48; // There are no callee-preserved registers since the windowing From 31f24dbc5544fbebbd6259ee272aa9a7244b4d87 Mon Sep 17 00:00:00 2001 From: Koakuma Date: Thu, 2 Jun 2022 23:34:21 +0700 Subject: [PATCH 16/20] stage2: sparc64: Implement airWrapErrUnionErr --- src/arch/sparc64/CodeGen.zig | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 3accb2b6482b..0cab96d609af 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -679,7 +679,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .wrap_optional => @panic("TODO try self.airWrapOptional(inst)"), .wrap_errunion_payload => @panic("TODO try self.airWrapErrUnionPayload(inst)"), - .wrap_errunion_err => @panic("TODO try self.airWrapErrUnionErr(inst)"), + .wrap_errunion_err => try self.airWrapErrUnionErr(inst), .wasm_memory_size => unreachable, .wasm_memory_grow => unreachable, @@ -1851,6 +1851,20 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } +/// E to E!T +fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const error_union_ty = self.air.getRefType(ty_op.ty); + const payload_ty = error_union_ty.errorUnionPayload(); + const mcv = try self.resolveInst(ty_op.operand); + if (!payload_ty.hasRuntimeBits()) break :result mcv; + + return self.fail("TODO implement wrap errunion error for non-empty payloads", .{}); + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +} + // Common helper functions /// Adds a Type to the .debug_info at the current position. 
The bytes will be populated later, From f87dd285bbbea90ee186550e5ca64b743b05451d Mon Sep 17 00:00:00 2001 From: Koakuma Date: Thu, 2 Jun 2022 23:45:52 +0700 Subject: [PATCH 17/20] stage2: sparc64: binOp/mul: Use template from `add` --- src/arch/sparc64/CodeGen.zig | 41 ++++++++++++++++++++++++------------ 1 file changed, 27 insertions(+), 14 deletions(-) diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 0cab96d609af..2b6945810c04 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -2036,21 +2036,34 @@ fn binOp( assert(lhs_ty.eql(rhs_ty, mod)); const int_info = lhs_ty.intInfo(self.target.*); if (int_info.bits <= 64) { - // If LHS is immediate, then swap it with RHS. - const lhs_is_imm = lhs == .immediate; - const new_lhs = if (lhs_is_imm) rhs else lhs; - const new_rhs = if (lhs_is_imm) lhs else rhs; - const new_lhs_ty = if (lhs_is_imm) rhs_ty else lhs_ty; - const new_rhs_ty = if (lhs_is_imm) lhs_ty else rhs_ty; - - // At this point, RHS might be an immediate - // If it's a power of two immediate then we emit an shl instead - // TODO add similar checks for LHS - if (new_rhs == .immediate and math.isPowerOfTwo(new_rhs.immediate)) { - return try self.binOp(.shl, new_lhs, .{ .immediate = math.log2(new_rhs.immediate) }, new_lhs_ty, Type.usize, metadata); - } + // Only say yes if the operation is + // commutative, i.e. 
we can swap both of the + // operands + const lhs_immediate_ok = switch (tag) { + .mul => lhs == .immediate and lhs.immediate <= std.math.maxInt(u12), + else => unreachable, + }; + const rhs_immediate_ok = switch (tag) { + .mul => rhs == .immediate and rhs.immediate <= std.math.maxInt(u12), + else => unreachable, + }; - return try self.binOpRegister(.mulx, new_lhs, new_rhs, new_lhs_ty, new_rhs_ty, metadata); + const mir_tag: Mir.Inst.Tag = switch (tag) { + .mul => .mulx, + else => unreachable, + }; + + if (rhs_immediate_ok) { + // At this point, rhs is an immediate + return try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, metadata); + } else if (lhs_immediate_ok) { + // swap lhs and rhs + // At this point, lhs is an immediate + return try self.binOpImmediate(mir_tag, rhs, lhs, rhs_ty, true, metadata); + } else { + // TODO convert large immediates to register before adding + return try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, metadata); + } } else { return self.fail("TODO binary operations on int with bits > 64", .{}); } From f6eb83c91cce2419a01f217b9d4c989e08e6e729 Mon Sep 17 00:00:00 2001 From: Koakuma Date: Mon, 6 Jun 2022 06:09:01 +0700 Subject: [PATCH 18/20] stage2: sparc64: Implement airArrayToSlice --- src/arch/sparc64/CodeGen.zig | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 2b6945810c04..c02d279631bd 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -593,7 +593,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .store => try self.airStore(inst), .struct_field_ptr=> @panic("TODO try self.airStructFieldPtr(inst)"), .struct_field_val=> try self.airStructFieldVal(inst), - .array_to_slice => @panic("TODO try self.airArrayToSlice(inst)"), + .array_to_slice => try self.airArrayToSlice(inst), .int_to_float => @panic("TODO try self.airIntToFloat(inst)"), .float_to_int => @panic("TODO 
try self.airFloatToInt(inst)"), .cmpxchg_strong => @panic("TODO try self.airCmpxchg(inst)"), @@ -783,6 +783,25 @@ fn airAlloc(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, .{ .ptr_stack_offset = stack_offset }, .{ .none, .none, .none }); } +fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const ptr_ty = self.air.typeOf(ty_op.operand); + const ptr = try self.resolveInst(ty_op.operand); + const array_ty = ptr_ty.childType(); + const array_len = @intCast(u32, array_ty.arrayLen()); + + const ptr_bits = self.target.cpu.arch.ptrBitWidth(); + const ptr_bytes = @divExact(ptr_bits, 8); + + const stack_offset = try self.allocMem(inst, ptr_bytes * 2, ptr_bytes * 2); + try self.genSetStack(ptr_ty, stack_offset, ptr); + try self.genSetStack(Type.initTag(.usize), stack_offset - ptr_bytes, .{ .immediate = array_len }); + break :result MCValue{ .stack_offset = stack_offset }; + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +} + fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Asm, ty_pl.payload); From ec7f2a105fa2b9c0d7a89e17f467c27008098c50 Mon Sep 17 00:00:00 2001 From: Koakuma Date: Mon, 6 Jun 2022 06:09:16 +0700 Subject: [PATCH 19/20] stage2: sparc64: Implement airPtrElemPtr --- src/arch/sparc64/CodeGen.zig | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index c02d279631bd..c0d76d5b9ba4 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -660,7 +660,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .slice_elem_val => try self.airSliceElemVal(inst), .slice_elem_ptr => @panic("TODO try self.airSliceElemPtr(inst)"), .ptr_elem_val 
=> @panic("TODO try self.airPtrElemVal(inst)"), - .ptr_elem_ptr => @panic("TODO try self.airPtrElemPtr(inst)"), + .ptr_elem_ptr => try self.airPtrElemPtr(inst), .constant => unreachable, // excluded from function bodies .const_ty => unreachable, // excluded from function bodies @@ -1646,6 +1646,13 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } +fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement ptr_elem_ptr for {}", .{self.target.cpu.arch}); + return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); +} + fn airRet(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); From 97d35a5147bb88c5b9ccf26c62bf303162fbf613 Mon Sep 17 00:00:00 2001 From: Koakuma Date: Mon, 6 Jun 2022 06:30:08 +0700 Subject: [PATCH 20/20] behaviortest: Skip 'align(N) on functions' on sparc64 for now --- test/behavior/align.zig | 1 + 1 file changed, 1 insertion(+) diff --git a/test/behavior/align.zig b/test/behavior/align.zig index ad35db21718f..056354f23772 100644 --- a/test/behavior/align.zig +++ b/test/behavior/align.zig @@ -505,6 +505,7 @@ test "align(N) on functions" { if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO // function alignment is a compile error on wasm32/wasm64 if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest;