all: zig fmt and rename "@xtoy" to "@YFromX"
Signed-off-by: Eric Joldasov <bratishkaerik@getgoogleoff.me>
BratishkaErik authored and andrewrk committed Jun 19, 2023
1 parent a6c8ee5 commit 50339f5
Showing 665 changed files with 6,214 additions and 5,899 deletions.
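For orientation, here is a minimal sketch (not part of the commit) of the rename pattern, using only builtins and the two-argument forms that appear in the diff below; all other names are illustrative:

    const std = @import("std");

    const Color = enum(u8) { red, green, blue };

    test "@xtoy builtins become @YFromX" {
        const flag: bool = true;
        var x: u32 = 7;

        // before: @boolToInt(flag)          after: @intFromBool(flag)
        const one: u8 = @intFromBool(flag);
        // before: @enumToInt(Color.green)   after: @intFromEnum(Color.green)
        const tag = @intFromEnum(Color.green);
        // before: @intToEnum(Color, 2)      after: @enumFromInt(Color, 2)
        const c = @enumFromInt(Color, 2);
        // before: @ptrToInt(&x)             after: @intFromPtr(&x)
        const addr = @intFromPtr(&x);
        // before: @intToPtr(*u32, addr)     after: @ptrFromInt(*u32, addr)
        const p = @ptrFromInt(*u32, addr);
        // before: @floatToInt(i32, 3.75)    after: @intFromFloat(i32, 3.75)
        const i = @intFromFloat(i32, 3.75);
        // before: @intToFloat(f64, tag)     after: @floatFromInt(f64, tag)
        const f = @floatFromInt(f64, tag);

        try std.testing.expectEqual(@as(u8, 1), one);
        try std.testing.expectEqual(Color.blue, c);
        try std.testing.expectEqual(@as(i32, 3), i);
        try std.testing.expectEqual(@as(f64, 1.0), f);
        p.* += 1;
        try std.testing.expectEqual(@as(u32, 8), x);
    }

Argument forms shown here match this point in the history; only the builtin names change.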
4 changes: 2 additions & 2 deletions CMakeLists.txt
@@ -376,7 +376,7 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixxfdi.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixxfsi.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixxfti.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/float_to_int.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/int_from_float.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatdidf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatdihf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatdisf.zig"
@@ -417,7 +417,7 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/getf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/gexf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/int.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/int_to_float.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/float_from_int.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/log.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/log10.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/log2.zig"
2 changes: 1 addition & 1 deletion build.zig
@@ -487,7 +487,7 @@ fn addWasiUpdateStep(b: *std.Build, version: [:0]const u8) !void {
.cpu_arch = .wasm32,
.os_tag = .wasi,
};
target.cpu_features_add.addFeature(@enumToInt(std.Target.wasm.Feature.bulk_memory));
target.cpu_features_add.addFeature(@intFromEnum(std.Target.wasm.Feature.bulk_memory));

const exe = addCompilerStep(b, .ReleaseSmall, target);

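A hedged sketch of the build.zig idiom changed above: the feature tag is turned into its integer index with @intFromEnum (previously @enumToInt) before being added to the CPU feature set. Everything outside the one changed line is a simplified reconstruction, not the commit's code:

    const std = @import("std");

    fn wasiSmallTarget() std.zig.CrossTarget {
        var target = std.zig.CrossTarget{
            .cpu_arch = .wasm32,
            .os_tag = .wasi,
        };
        // previously: @enumToInt(std.Target.wasm.Feature.bulk_memory)
        target.cpu_features_add.addFeature(@intFromEnum(std.Target.wasm.Feature.bulk_memory));
        return target;
    }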
150 changes: 75 additions & 75 deletions doc/langref.html.in

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions lib/compiler_rt.zig
@@ -55,7 +55,7 @@ comptime {
_ = @import("compiler_rt/trunctfdf2.zig");
_ = @import("compiler_rt/trunctfxf2.zig");

_ = @import("compiler_rt/float_to_int.zig");
_ = @import("compiler_rt/int_from_float.zig");
_ = @import("compiler_rt/fixhfsi.zig");
_ = @import("compiler_rt/fixhfdi.zig");
_ = @import("compiler_rt/fixhfti.zig");
@@ -87,7 +87,7 @@ comptime {
_ = @import("compiler_rt/fixunsxfdi.zig");
_ = @import("compiler_rt/fixunsxfti.zig");

_ = @import("compiler_rt/int_to_float.zig");
_ = @import("compiler_rt/float_from_int.zig");
_ = @import("compiler_rt/floatsihf.zig");
_ = @import("compiler_rt/floatsisf.zig");
_ = @import("compiler_rt/floatsidf.zig");
2 changes: 1 addition & 1 deletion lib/compiler_rt/aarch64_outline_atomics.zig
@@ -8,7 +8,7 @@ const always_has_lse = std.Target.aarch64.featureSetHas(builtin.cpu.features, .l
/// It is intentionally not exported in order to make the machine code that
/// uses it a statically predicted direct branch rather than using the PLT,
/// which ARM is concerned would have too much overhead.
var __aarch64_have_lse_atomics: u8 = @boolToInt(always_has_lse);
var __aarch64_have_lse_atomics: u8 = @intFromBool(always_has_lse);

fn __aarch64_cas1_relax() align(16) callconv(.Naked) void {
@setRuntimeSafety(false);
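The one-line change above is representative of every @boolToInt site in this commit: only the name changes, the 0-or-1 result is the same. A minimal sketch, with illustrative names:

    const std = @import("std");

    fn flagFromBool(cond: bool) u8 {
        // previously: @boolToInt(cond)
        return @intFromBool(cond);
    }

    test "flagFromBool yields 0 or 1" {
        try std.testing.expectEqual(@as(u8, 1), flagFromBool(true));
        try std.testing.expectEqual(@as(u8, 0), flagFromBool(false));
    }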
22 changes: 11 additions & 11 deletions lib/compiler_rt/atomics.zig
@@ -119,21 +119,21 @@ var spinlocks: SpinlockTable = SpinlockTable{};

fn __atomic_load(size: u32, src: [*]u8, dest: [*]u8, model: i32) callconv(.C) void {
_ = model;
var sl = spinlocks.get(@ptrToInt(src));
var sl = spinlocks.get(@intFromPtr(src));
defer sl.release();
@memcpy(dest[0..size], src);
}

fn __atomic_store(size: u32, dest: [*]u8, src: [*]u8, model: i32) callconv(.C) void {
_ = model;
var sl = spinlocks.get(@ptrToInt(dest));
var sl = spinlocks.get(@intFromPtr(dest));
defer sl.release();
@memcpy(dest[0..size], src);
}

fn __atomic_exchange(size: u32, ptr: [*]u8, val: [*]u8, old: [*]u8, model: i32) callconv(.C) void {
_ = model;
var sl = spinlocks.get(@ptrToInt(ptr));
var sl = spinlocks.get(@intFromPtr(ptr));
defer sl.release();
@memcpy(old[0..size], ptr);
@memcpy(ptr[0..size], val);
@@ -149,7 +149,7 @@ fn __atomic_compare_exchange(
) callconv(.C) i32 {
_ = success;
_ = failure;
var sl = spinlocks.get(@ptrToInt(ptr));
var sl = spinlocks.get(@intFromPtr(ptr));
defer sl.release();
for (ptr[0..size], 0..) |b, i| {
if (expected[i] != b) break;
@@ -168,7 +168,7 @@ fn __atomic_compare_exchange(
inline fn atomic_load_N(comptime T: type, src: *T, model: i32) T {
_ = model;
if (@sizeOf(T) > largest_atomic_size) {
var sl = spinlocks.get(@ptrToInt(src));
var sl = spinlocks.get(@intFromPtr(src));
defer sl.release();
return src.*;
} else {
@@ -199,7 +199,7 @@ fn __atomic_load_16(src: *u128, model: i32) callconv(.C) u128 {
inline fn atomic_store_N(comptime T: type, dst: *T, value: T, model: i32) void {
_ = model;
if (@sizeOf(T) > largest_atomic_size) {
var sl = spinlocks.get(@ptrToInt(dst));
var sl = spinlocks.get(@intFromPtr(dst));
defer sl.release();
dst.* = value;
} else {
@@ -230,9 +230,9 @@ fn __atomic_store_16(dst: *u128, value: u128, model: i32) callconv(.C) void {
fn wideUpdate(comptime T: type, ptr: *T, val: T, update: anytype) T {
const WideAtomic = std.meta.Int(.unsigned, smallest_atomic_fetch_exch_size * 8);

const addr = @ptrToInt(ptr);
const addr = @intFromPtr(ptr);
const wide_addr = addr & ~(@as(T, smallest_atomic_fetch_exch_size) - 1);
const wide_ptr = @alignCast(smallest_atomic_fetch_exch_size, @intToPtr(*WideAtomic, wide_addr));
const wide_ptr = @alignCast(smallest_atomic_fetch_exch_size, @ptrFromInt(*WideAtomic, wide_addr));

const inner_offset = addr & (@as(T, smallest_atomic_fetch_exch_size) - 1);
const inner_shift = @intCast(std.math.Log2Int(T), inner_offset * 8);
@@ -255,7 +255,7 @@ fn wideUpdate(comptime T: type, ptr: *T, val: T, update: anytype) T {
inline fn atomic_exchange_N(comptime T: type, ptr: *T, val: T, model: i32) T {
_ = model;
if (@sizeOf(T) > largest_atomic_size) {
var sl = spinlocks.get(@ptrToInt(ptr));
var sl = spinlocks.get(@intFromPtr(ptr));
defer sl.release();
const value = ptr.*;
ptr.* = val;
@@ -305,7 +305,7 @@ inline fn atomic_compare_exchange_N(
_ = success;
_ = failure;
if (@sizeOf(T) > largest_atomic_size) {
var sl = spinlocks.get(@ptrToInt(ptr));
var sl = spinlocks.get(@intFromPtr(ptr));
defer sl.release();
const value = ptr.*;
if (value == expected.*) {
@@ -362,7 +362,7 @@ inline fn fetch_op_N(comptime T: type, comptime op: std.builtin.AtomicRmwOp, ptr
};

if (@sizeOf(T) > largest_atomic_size) {
var sl = spinlocks.get(@ptrToInt(ptr));
var sl = spinlocks.get(@intFromPtr(ptr));
defer sl.release();

const value = ptr.*;
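The atomics fallbacks above all share one pattern: when a type is too wide for a native atomic, the object's address is taken with @intFromPtr (previously @ptrToInt) and used to select a lock from a table. A simplified sketch of that address-keyed locking, with a stand-in lock table rather than the real SpinlockTable:

    const std = @import("std");

    const lock_count = 16;
    var locks = [_]std.Thread.Mutex{.{}} ** lock_count;

    fn lockFor(ptr: anytype) *std.Thread.Mutex {
        // previously: @ptrToInt(ptr)
        const addr = @intFromPtr(ptr);
        return &locks[addr % lock_count];
    }

    test "address-keyed locking" {
        var value: u64 = 0;
        const lock = lockFor(&value);
        lock.lock();
        defer lock.unlock();
        value += 1;
        try std.testing.expectEqual(@as(u64, 1), value);
    }

wideUpdate above additionally reconstructs an aligned pointer from such an address with @ptrFromInt (previously @intToPtr).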
2 changes: 1 addition & 1 deletion lib/compiler_rt/clear_cache.zig
@@ -63,7 +63,7 @@ fn clear_cache(start: usize, end: usize) callconv(.C) void {
.addr = start,
.len = end - start,
};
const result = sysarch(ARM_SYNC_ICACHE, @ptrToInt(&arg));
const result = sysarch(ARM_SYNC_ICACHE, @intFromPtr(&arg));
std.debug.assert(result == 0);
exportIt();
},
8 changes: 4 additions & 4 deletions lib/compiler_rt/cmpdf2.zig
@@ -26,7 +26,7 @@ comptime {
/// Note that this matches the definition of `__ledf2`, `__eqdf2`, `__nedf2`, `__cmpdf2`,
/// and `__ltdf2`.
fn __cmpdf2(a: f64, b: f64) callconv(.C) i32 {
return @enumToInt(comparef.cmpf2(f64, comparef.LE, a, b));
return @intFromEnum(comparef.cmpf2(f64, comparef.LE, a, b));
}

/// "These functions return a value less than or equal to zero if neither argument is NaN,
@@ -56,13 +56,13 @@ pub fn __ltdf2(a: f64, b: f64) callconv(.C) i32 {
}

fn __aeabi_dcmpeq(a: f64, b: f64) callconv(.AAPCS) i32 {
return @boolToInt(comparef.cmpf2(f64, comparef.LE, a, b) == .Equal);
return @intFromBool(comparef.cmpf2(f64, comparef.LE, a, b) == .Equal);
}

fn __aeabi_dcmplt(a: f64, b: f64) callconv(.AAPCS) i32 {
return @boolToInt(comparef.cmpf2(f64, comparef.LE, a, b) == .Less);
return @intFromBool(comparef.cmpf2(f64, comparef.LE, a, b) == .Less);
}

fn __aeabi_dcmple(a: f64, b: f64) callconv(.AAPCS) i32 {
return @boolToInt(comparef.cmpf2(f64, comparef.LE, a, b) != .Greater);
return @intFromBool(comparef.cmpf2(f64, comparef.LE, a, b) != .Greater);
}
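The cmp* files follow one pattern: a three-way comparison enum is handed back to C either as its raw tag via @intFromEnum (previously @enumToInt) or collapsed to 0/1 via @intFromBool (previously @boolToInt). A sketch with a stand-in enum, since comparef.LE itself is not shown in this diff:

    const std = @import("std");

    const Cmp = enum(i32) { Less = -1, Equal = 0, Greater = 1 };

    fn cmp(a: f64, b: f64) Cmp {
        if (a < b) return .Less;
        if (a > b) return .Greater;
        return .Equal; // NaN handling omitted in this sketch
    }

    fn cmpAsInt(a: f64, b: f64) i32 {
        // previously: @enumToInt(cmp(a, b))
        return @intFromEnum(cmp(a, b));
    }

    fn eqAsInt(a: f64, b: f64) i32 {
        // previously: @boolToInt(cmp(a, b) == .Equal)
        return @intFromBool(cmp(a, b) == .Equal);
    }

    test "comparison results as integers" {
        try std.testing.expectEqual(@as(i32, -1), cmpAsInt(1.0, 2.0));
        try std.testing.expectEqual(@as(i32, 1), eqAsInt(2.0, 2.0));
    }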
2 changes: 1 addition & 1 deletion lib/compiler_rt/cmphf2.zig
@@ -20,7 +20,7 @@ comptime {
/// Note that this matches the definition of `__lehf2`, `__eqhf2`, `__nehf2`, `__cmphf2`,
/// and `__lthf2`.
fn __cmphf2(a: f16, b: f16) callconv(.C) i32 {
return @enumToInt(comparef.cmpf2(f16, comparef.LE, a, b));
return @intFromEnum(comparef.cmpf2(f16, comparef.LE, a, b));
}

/// "These functions return a value less than or equal to zero if neither argument is NaN,
8 changes: 4 additions & 4 deletions lib/compiler_rt/cmpsf2.zig
@@ -26,7 +26,7 @@ comptime {
/// Note that this matches the definition of `__lesf2`, `__eqsf2`, `__nesf2`, `__cmpsf2`,
/// and `__ltsf2`.
fn __cmpsf2(a: f32, b: f32) callconv(.C) i32 {
return @enumToInt(comparef.cmpf2(f32, comparef.LE, a, b));
return @intFromEnum(comparef.cmpf2(f32, comparef.LE, a, b));
}

/// "These functions return a value less than or equal to zero if neither argument is NaN,
@@ -56,13 +56,13 @@ pub fn __ltsf2(a: f32, b: f32) callconv(.C) i32 {
}

fn __aeabi_fcmpeq(a: f32, b: f32) callconv(.AAPCS) i32 {
return @boolToInt(comparef.cmpf2(f32, comparef.LE, a, b) == .Equal);
return @intFromBool(comparef.cmpf2(f32, comparef.LE, a, b) == .Equal);
}

fn __aeabi_fcmplt(a: f32, b: f32) callconv(.AAPCS) i32 {
return @boolToInt(comparef.cmpf2(f32, comparef.LE, a, b) == .Less);
return @intFromBool(comparef.cmpf2(f32, comparef.LE, a, b) == .Less);
}

fn __aeabi_fcmple(a: f32, b: f32) callconv(.AAPCS) i32 {
return @boolToInt(comparef.cmpf2(f32, comparef.LE, a, b) != .Greater);
return @intFromBool(comparef.cmpf2(f32, comparef.LE, a, b) != .Greater);
}
16 changes: 8 additions & 8 deletions lib/compiler_rt/cmptf2.zig
@@ -34,7 +34,7 @@ comptime {
/// Note that this matches the definition of `__letf2`, `__eqtf2`, `__netf2`, `__cmptf2`,
/// and `__lttf2`.
fn __cmptf2(a: f128, b: f128) callconv(.C) i32 {
return @enumToInt(comparef.cmpf2(f128, comparef.LE, a, b));
return @intFromEnum(comparef.cmpf2(f128, comparef.LE, a, b));
}

/// "These functions return a value less than or equal to zero if neither argument is NaN,
@@ -71,34 +71,34 @@ const SparcFCMP = enum(i32) {
};

fn _Qp_cmp(a: *const f128, b: *const f128) callconv(.C) i32 {
return @enumToInt(comparef.cmpf2(f128, SparcFCMP, a.*, b.*));
return @intFromEnum(comparef.cmpf2(f128, SparcFCMP, a.*, b.*));
}

fn _Qp_feq(a: *const f128, b: *const f128) callconv(.C) bool {
return @intToEnum(SparcFCMP, _Qp_cmp(a, b)) == .Equal;
return @enumFromInt(SparcFCMP, _Qp_cmp(a, b)) == .Equal;
}

fn _Qp_fne(a: *const f128, b: *const f128) callconv(.C) bool {
return @intToEnum(SparcFCMP, _Qp_cmp(a, b)) != .Equal;
return @enumFromInt(SparcFCMP, _Qp_cmp(a, b)) != .Equal;
}

fn _Qp_flt(a: *const f128, b: *const f128) callconv(.C) bool {
return @intToEnum(SparcFCMP, _Qp_cmp(a, b)) == .Less;
return @enumFromInt(SparcFCMP, _Qp_cmp(a, b)) == .Less;
}

fn _Qp_fgt(a: *const f128, b: *const f128) callconv(.C) bool {
return @intToEnum(SparcFCMP, _Qp_cmp(a, b)) == .Greater;
return @enumFromInt(SparcFCMP, _Qp_cmp(a, b)) == .Greater;
}

fn _Qp_fge(a: *const f128, b: *const f128) callconv(.C) bool {
return switch (@intToEnum(SparcFCMP, _Qp_cmp(a, b))) {
return switch (@enumFromInt(SparcFCMP, _Qp_cmp(a, b))) {
.Equal, .Greater => true,
.Less, .Unordered => false,
};
}

fn _Qp_fle(a: *const f128, b: *const f128) callconv(.C) bool {
return switch (@intToEnum(SparcFCMP, _Qp_cmp(a, b))) {
return switch (@enumFromInt(SparcFCMP, _Qp_cmp(a, b))) {
.Equal, .Less => true,
.Greater, .Unordered => false,
};
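The _Qp_* wrappers above show the reverse direction: an i32 comparison code is mapped back to an enum with @enumFromInt (previously @intToEnum), which at this point in the history still takes the destination type as its first argument. A sketch with illustrative tag values, since SparcFCMP's body is elided above:

    const std = @import("std");

    const SparcFCMP = enum(i32) { Equal = 0, Less = 1, Greater = 2, Unordered = 3 };

    fn isEqual(code: i32) bool {
        // previously: @intToEnum(SparcFCMP, code) == .Equal
        return @enumFromInt(SparcFCMP, code) == .Equal;
    }

    test "enum from comparison code" {
        try std.testing.expect(isEqual(0));
        try std.testing.expect(!isEqual(2));
    }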
2 changes: 1 addition & 1 deletion lib/compiler_rt/cmpxf2.zig
@@ -20,7 +20,7 @@ comptime {
/// Note that this matches the definition of `__lexf2`, `__eqxf2`, `__nexf2`, `__cmpxf2`,
/// and `__ltxf2`.
fn __cmpxf2(a: f80, b: f80) callconv(.C) i32 {
return @enumToInt(comparef.cmp_f80(comparef.LE, a, b));
return @intFromEnum(comparef.cmp_f80(comparef.LE, a, b));
}

/// "These functions return a value less than or equal to zero if neither argument is NaN,
4 changes: 2 additions & 2 deletions lib/compiler_rt/comparef.zig
@@ -77,7 +77,7 @@ pub inline fn cmp_f80(comptime RT: type, a: f80, b: f80) RT {
if ((a_rep.fraction | b_rep.fraction) | ((a_rep.exp | b_rep.exp) & special_exp) == 0)
return .Equal;

if (@boolToInt(a_rep.exp == b_rep.exp) & @boolToInt(a_rep.fraction == b_rep.fraction) != 0) {
if (@intFromBool(a_rep.exp == b_rep.exp) & @intFromBool(a_rep.fraction == b_rep.fraction) != 0) {
return .Equal;
} else if (a_rep.exp & sign_bit != b_rep.exp & sign_bit) {
// signs are different
@@ -109,7 +109,7 @@ pub inline fn unordcmp(comptime T: type, a: T, b: T) i32 {
const aAbs: rep_t = @bitCast(rep_t, a) & absMask;
const bAbs: rep_t = @bitCast(rep_t, b) & absMask;

return @boolToInt(aAbs > infRep or bAbs > infRep);
return @intFromBool(aAbs > infRep or bAbs > infRep);
}

test {
4 changes: 2 additions & 2 deletions lib/compiler_rt/divdf3.zig
@@ -199,7 +199,7 @@ inline fn div(a: f64, b: f64) f64 {
} else if (writtenExponent < 1) {
if (writtenExponent == 0) {
// Check whether the rounded result is normal.
const round = @boolToInt((residual << 1) > bSignificand);
const round = @intFromBool((residual << 1) > bSignificand);
// Clear the implicit bit.
var absResult = quotient & significandMask;
// Round.
@@ -213,7 +213,7 @@
// code to round them correctly.
return @bitCast(f64, quotientSign);
} else {
const round = @boolToInt((residual << 1) > bSignificand);
const round = @intFromBool((residual << 1) > bSignificand);
// Clear the implicit bit
var absResult = quotient & significandMask;
// Insert the exponent
4 changes: 2 additions & 2 deletions lib/compiler_rt/divsf3.zig
@@ -179,7 +179,7 @@ inline fn div(a: f32, b: f32) f32 {
} else if (writtenExponent < 1) {
if (writtenExponent == 0) {
// Check whether the rounded result is normal.
const round = @boolToInt((residual << 1) > bSignificand);
const round = @intFromBool((residual << 1) > bSignificand);
// Clear the implicit bit.
var absResult = quotient & significandMask;
// Round.
@@ -193,7 +193,7 @@
// code to round them correctly.
return @bitCast(f32, quotientSign);
} else {
const round = @boolToInt((residual << 1) > bSignificand);
const round = @intFromBool((residual << 1) > bSignificand);
// Clear the implicit bit
var absResult = quotient & significandMask;
// Insert the exponent
4 changes: 2 additions & 2 deletions lib/compiler_rt/divtf3.zig
@@ -214,7 +214,7 @@ inline fn div(a: f128, b: f128) f128 {
} else if (writtenExponent < 1) {
if (writtenExponent == 0) {
// Check whether the rounded result is normal.
const round = @boolToInt((residual << 1) > bSignificand);
const round = @intFromBool((residual << 1) > bSignificand);
// Clear the implicit bit.
var absResult = quotient & significandMask;
// Round.
@@ -228,7 +228,7 @@
// code to round them correctly.
return @bitCast(f128, quotientSign);
} else {
const round = @boolToInt((residual << 1) >= bSignificand);
const round = @intFromBool((residual << 1) >= bSignificand);
// Clear the implicit bit
var absResult = quotient & significandMask;
// Insert the exponent
2 changes: 1 addition & 1 deletion lib/compiler_rt/divxf3.zig
@@ -195,7 +195,7 @@ pub fn __divxf3(a: f80, b: f80) callconv(.C) f80 {
// code to round them correctly.
return @bitCast(T, quotientSign);
} else {
const round = @boolToInt(residual > (bSignificand >> 1));
const round = @intFromBool(residual > (bSignificand >> 1));
// Insert the exponent
var absResult = quotient | (@intCast(Z, writtenExponent) << significandBits);
// Round
8 changes: 4 additions & 4 deletions lib/compiler_rt/exp.zig
@@ -74,12 +74,12 @@ pub fn expf(x_: f32) callconv(.C) f32 {
if (hx > 0x3EB17218) {
// |x| > 1.5 * ln2
if (hx > 0x3F851592) {
k = @floatToInt(i32, invln2 * x + half[@intCast(usize, sign)]);
k = @intFromFloat(i32, invln2 * x + half[@intCast(usize, sign)]);
} else {
k = 1 - sign - sign;
}

const fk = @intToFloat(f32, k);
const fk = @floatFromInt(f32, k);
hi = x - fk * ln2hi;
lo = fk * ln2lo;
x = hi - lo;
@@ -157,12 +157,12 @@ pub fn exp(x_: f64) callconv(.C) f64 {
if (hx > 0x3FD62E42) {
// |x| >= 1.5 * ln2
if (hx > 0x3FF0A2B2) {
k = @floatToInt(i32, invln2 * x + half[@intCast(usize, sign)]);
k = @intFromFloat(i32, invln2 * x + half[@intCast(usize, sign)]);
} else {
k = 1 - sign - sign;
}

const dk = @intToFloat(f64, k);
const dk = @floatFromInt(f64, k);
hi = x - dk * ln2hi;
lo = dk * ln2lo;
x = hi - lo;
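The exp routines above use the remaining pair of renames: @intFromFloat (previously @floatToInt) truncates a float to an integer, and @floatFromInt (previously @intToFloat) converts it back, both still taking the destination type as the first argument here. A minimal round-trip sketch, not the libm argument reduction itself:

    const std = @import("std");

    fn truncateAndBack(x: f32) f32 {
        // previously: @floatToInt(i32, x)
        const k = @intFromFloat(i32, x);
        // previously: @intToFloat(f32, k)
        return @floatFromInt(f32, k);
    }

    test "truncateAndBack drops the fraction" {
        try std.testing.expectEqual(@as(f32, 3.0), truncateAndBack(3.75));
        try std.testing.expectEqual(@as(f32, -2.0), truncateAndBack(-2.5));
    }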