Skip to content

Commit 13d4f0e

Browse files
authored
remove hand-coded methods for (U)Int128 on 32-bit systems (#53867)
I believe this code existed to work around bugs that LLVM used to have with 128-bit numbers on 32-bit systems, but I'm not entirely sure.
1 parent a82a28f commit 13d4f0e

File tree

1 file changed

+6
-158
lines changed

1 file changed

+6
-158
lines changed

base/int.jl

Lines changed: 6 additions & 158 deletions
Original file line numberDiff line numberDiff line change
@@ -843,166 +843,14 @@ widemul(x::Bool,y::Number) = x * y
843843
widemul(x::Number,y::Bool) = x * y
844844

845845

846-
## wide multiplication, Int128 multiply and divide ##
847-
848-
if Core.sizeof(Int) == 4
849-
function widemul(u::Int64, v::Int64)
850-
local u0::UInt64, v0::UInt64, w0::UInt64
851-
local u1::Int64, v1::Int64, w1::UInt64, w2::Int64, t::UInt64
852-
853-
u0 = u & 0xffffffff; u1 = u >> 32
854-
v0 = v & 0xffffffff; v1 = v >> 32
855-
w0 = u0 * v0
856-
t = reinterpret(UInt64, u1) * v0 + (w0 >>> 32)
857-
w2 = reinterpret(Int64, t) >> 32
858-
w1 = u0 * reinterpret(UInt64, v1) + (t & 0xffffffff)
859-
hi = u1 * v1 + w2 + (reinterpret(Int64, w1) >> 32)
860-
lo = w0 & 0xffffffff + (w1 << 32)
861-
return Int128(hi) << 64 + Int128(lo)
862-
end
863-
864-
function widemul(u::UInt64, v::UInt64)
865-
local u0::UInt64, v0::UInt64, w0::UInt64
866-
local u1::UInt64, v1::UInt64, w1::UInt64, w2::UInt64, t::UInt64
867-
868-
u0 = u & 0xffffffff; u1 = u >>> 32
869-
v0 = v & 0xffffffff; v1 = v >>> 32
870-
w0 = u0 * v0
871-
t = u1 * v0 + (w0 >>> 32)
872-
w2 = t >>> 32
873-
w1 = u0 * v1 + (t & 0xffffffff)
874-
hi = u1 * v1 + w2 + (w1 >>> 32)
875-
lo = w0 & 0xffffffff + (w1 << 32)
876-
return UInt128(hi) << 64 + UInt128(lo)
877-
end
878-
879-
function *(u::Int128, v::Int128)
880-
u0 = u % UInt64; u1 = Int64(u >> 64)
881-
v0 = v % UInt64; v1 = Int64(v >> 64)
882-
lolo = widemul(u0, v0)
883-
lohi = widemul(reinterpret(Int64, u0), v1)
884-
hilo = widemul(u1, reinterpret(Int64, v0))
885-
t = reinterpret(UInt128, hilo) + (lolo >>> 64)
886-
w1 = reinterpret(UInt128, lohi) + (t & 0xffffffffffffffff)
887-
return Int128(lolo & 0xffffffffffffffff) + reinterpret(Int128, w1) << 64
888-
end
889-
890-
function *(u::UInt128, v::UInt128)
891-
u0 = u % UInt64; u1 = UInt64(u>>>64)
892-
v0 = v % UInt64; v1 = UInt64(v>>>64)
893-
lolo = widemul(u0, v0)
894-
lohi = widemul(u0, v1)
895-
hilo = widemul(u1, v0)
896-
t = hilo + (lolo >>> 64)
897-
w1 = lohi + (t & 0xffffffffffffffff)
898-
return (lolo & 0xffffffffffffffff) + UInt128(w1) << 64
899-
end
900-
901-
function _setbit(x::UInt128, i)
902-
# faster version of `return x | (UInt128(1) << i)`
903-
j = i >> 5
904-
y = UInt128(one(UInt32) << (i & 0x1f))
905-
if j == 0
906-
return x | y
907-
elseif j == 1
908-
return x | (y << 32)
909-
elseif j == 2
910-
return x | (y << 64)
911-
elseif j == 3
912-
return x | (y << 96)
913-
end
914-
return x
915-
end
846+
# Int128 multiply and divide
847+
*(x::T, y::T) where {T<:Union{Int128,UInt128}} = mul_int(x, y)
916848

917-
function divrem(x::UInt128, y::UInt128)
918-
iszero(y) && throw(DivideError())
919-
if (x >> 64) % UInt64 == 0
920-
if (y >> 64) % UInt64 == 0
921-
# fast path: upper 64 bits are zero, so we can fallback to UInt64 division
922-
q64, x64 = divrem(x % UInt64, y % UInt64)
923-
return UInt128(q64), UInt128(x64)
924-
else
925-
# this implies y>x, so
926-
return zero(UInt128), x
927-
end
928-
end
929-
n = leading_zeros(y) - leading_zeros(x)
930-
q = zero(UInt128)
931-
ys = y << n
932-
while n >= 0
933-
# ys == y * 2^n
934-
if ys <= x
935-
x -= ys
936-
q = _setbit(q, n)
937-
if (x >> 64) % UInt64 == 0
938-
# exit early, similar to above fast path
939-
if (y >> 64) % UInt64 == 0
940-
q64, x64 = divrem(x % UInt64, y % UInt64)
941-
q |= q64
942-
x = UInt128(x64)
943-
end
944-
return q, x
945-
end
946-
end
947-
ys >>>= 1
948-
n -= 1
949-
end
950-
return q, x
951-
end
849+
div(x::Int128, y::Int128) = checked_sdiv_int(x, y)
850+
div(x::UInt128, y::UInt128) = checked_udiv_int(x, y)
952851

953-
function div(x::Int128, y::Int128)
954-
(x == typemin(Int128)) & (y == -1) && throw(DivideError())
955-
return Int128(div(BigInt(x), BigInt(y)))::Int128
956-
end
957-
div(x::UInt128, y::UInt128) = divrem(x, y)[1]
958-
959-
function rem(x::Int128, y::Int128)
960-
return Int128(rem(BigInt(x), BigInt(y)))::Int128
961-
end
962-
963-
function rem(x::UInt128, y::UInt128)
964-
iszero(y) && throw(DivideError())
965-
if (x >> 64) % UInt64 == 0
966-
if (y >> 64) % UInt64 == 0
967-
# fast path: upper 64 bits are zero, so we can fallback to UInt64 division
968-
return UInt128(rem(x % UInt64, y % UInt64))
969-
else
970-
# this implies y>x, so
971-
return x
972-
end
973-
end
974-
n = leading_zeros(y) - leading_zeros(x)
975-
ys = y << n
976-
while n >= 0
977-
# ys == y * 2^n
978-
if ys <= x
979-
x -= ys
980-
if (x >> 64) % UInt64 == 0
981-
# exit early, similar to above fast path
982-
if (y >> 64) % UInt64 == 0
983-
x = UInt128(rem(x % UInt64, y % UInt64))
984-
end
985-
return x
986-
end
987-
end
988-
ys >>>= 1
989-
n -= 1
990-
end
991-
return x
992-
end
993-
994-
function mod(x::Int128, y::Int128)
995-
return Int128(mod(BigInt(x), BigInt(y)))::Int128
996-
end
997-
else
998-
*(x::T, y::T) where {T<:Union{Int128,UInt128}} = mul_int(x, y)
999-
1000-
div(x::Int128, y::Int128) = checked_sdiv_int(x, y)
1001-
div(x::UInt128, y::UInt128) = checked_udiv_int(x, y)
1002-
1003-
rem(x::Int128, y::Int128) = checked_srem_int(x, y)
1004-
rem(x::UInt128, y::UInt128) = checked_urem_int(x, y)
1005-
end
852+
rem(x::Int128, y::Int128) = checked_srem_int(x, y)
853+
rem(x::UInt128, y::UInt128) = checked_urem_int(x, y)
1006854

1007855
# issue #15489: since integer ops are unchecked, they shouldn't check promotion
1008856
for op in (:+, :-, :*, :&, :|, :xor)

0 commit comments

Comments (0)