Skip to content

Conversation

christiangnrd
Copy link
Member

@christiangnrd christiangnrd commented Mar 27, 2025

Remaining issues:

  • return_type failure (Fixed by 22a58d3):
julia> Base.Experimental.@MethodTable method_table

julia> interp = GPUCompiler.GPUInterpreter(; method_table, token=:whatever, inf_params=Core.Compiler.InferenceParams(), opt_params=Core.Compiler.OptimizationParams())
GPUCompiler.GPUInterpreter(0x00000000000096dd, Compiler.CachedMethodTable{Compiler.OverlayMethodTable}(0 entries), :whatever, Compiler.InferenceResult[], Compiler.InferenceParams(3, 4, 8, 32, 3, true, false, false, false, false), Compiler.OptimizationParams(true, 100, 1000, 250, 32, true, false, false))

julia> Core.Compiler.return_type(interp, Tuple{typeof(identity), Int64})
Union{}

julia> Core.Compiler.return_type(Tuple{typeof(identity), Int64})
Int64
  • show of CompilerJob broken with new binding semantics. (Seems fixed as of 1.12.0-rc1)
  UndefVarError: `f` not defined in `Main.Testsuite`
  Suggestion: define the const at top-level before running function that uses it (stricter Julia v1.12+ rule).
  Note: the binding state changed since the error occurred (was: 11, now: 0).
  Stacktrace:
    [1] is_global_function(tn::Core.TypeName, globname::Symbol)
      @ Base ./show.jl:1046
    [2] show_type_name(io::IOContext{IOBuffer}, tn::Core.TypeName)
      @ Base ./show.jl:1080
    [3] _show_type(io::IOContext{IOBuffer}, x::Type)
      @ Base ./show.jl:973
    [4] show(io::IOContext{IOBuffer}, x::Type)
      @ Base ./show.jl:970
    [5] show_typeparams(io::IOContext{IOBuffer}, env::Core.SimpleVector, orig::Core.SimpleVector, wheres::Vector{TypeVar})
      @ Base ./show.jl:727
    [6] show_datatype(io::IOContext{IOBuffer}, x::DataType, wheres::Vector{TypeVar})
      @ Base ./show.jl:1219
    [7] show_datatype
      @ ./show.jl:1127 [inlined]
    [8] _show_type(io::IOContext{IOBuffer}, x::Type)
      @ Base ./show.jl:978
    [9] show(io::IOContext{IOBuffer}, x::Type)
      @ Base ./show.jl:970
   [10] print(io::IOContext{IOBuffer}, x::Type)
      @ Base ./strings/io.jl:35
   [11] print(::IOContext{IOBuffer}, ::String, ::Type, ::String)
      @ Base ./strings/io.jl:46
   [12] print_within_stacktrace(::IOContext{IOBuffer}, ::String, ::Vararg{Any}; color::Symbol, bold::Bool)
      @ Base ./show.jl:0
   [13] print_within_stacktrace
      @ ./show.jl:2577 [inlined]
   [14] show_signature_function(io::IOContext{IOBuffer}, ft::Any, demangle::Bool, fargname::String, html::Bool, qualified::Bool)
      @ Base ./show.jl:2571
   [15] show_tuple_as_call(out::IOContext{IOBuffer}, name::Symbol, sig::Type; demangle::Bool, kwargs::Nothing, argnames::Nothing, qualified::Bool, hasfirst::Bool)
      @ Base ./show.jl:2605
   [16] show_tuple_as_call
      @ ./show.jl:2585 [inlined]
   [17] show_mi(io::IOContext{IOBuffer}, mi::Core.MethodInstance, from_stackframe::Bool)
      @ Base ./show.jl:1388
   [18] show_mi
      @ ./show.jl:1381 [inlined]
   [19] show(io::IOContext{IOBuffer}, mi::Core.MethodInstance)
      @ Base ./show.jl:1368
   [20] _show_default(io::IOBuffer, x::Any)
      @ Base ./show.jl:504
   [21] show_default
      @ ./show.jl:487 [inlined]
   [22] show
      @ ./show.jl:482 [inlined]
   [23] print(io::IOBuffer, x::GPUCompiler.CompilerJob{GPUCompiler.MetalCompilerTarget, Metal.MetalCompilerParams})
      @ Base ./strings/io.jl:35
   [24] print_to_string(::String, ::GPUCompiler.CompilerJob{GPUCompiler.MetalCompilerTarget, Metal.MetalCompilerParams})
      @ Base ./strings/io.jl:151
   [25] string(::String, ::GPUCompiler.CompilerJob{GPUCompiler.MetalCompilerTarget, Metal.MetalCompilerParams})
      @ Base ./strings/io.jl:193
   [26] macro expansion
      @ ~/.julia/packages/ObjectiveC/TgrW6/src/os.jl:313 [inlined]
   [27] compile(job::GPUCompiler.CompilerJob)
      @ Metal ~/Julia/pkg/Metal/src/compiler/compilation.jl:103
   [28] actual_compilation(cache::Dict{Any, Any}, src::Core.MethodInstance, world::UInt64, cfg::GPUCompiler.CompilerConfig{GPUCompiler.MetalCompilerTarget, Metal.MetalCompilerParams}, compiler::typeof(Metal.compile), linker::typeof(Metal.link))
      @ GPUCompiler ~/.julia/packages/GPUCompiler/Emuht/src/execution.jl:245
   [29] cached_compilation(cache::Dict{Any, Any}, src::Core.MethodInstance, cfg::GPUCompiler.CompilerConfig{GPUCompiler.MetalCompilerTarget, Metal.MetalCompilerParams}, compiler::Function, linker::Function)
      @ GPUCompiler ~/.julia/packages/GPUCompiler/Emuht/src/execution.jl:159
   [30] macro expansion
      @ ~/Julia/pkg/Metal/src/compiler/execution.jl:189 [inlined]
   [31] macro expansion
      @ ./lock.jl:376 [inlined]
   [32] mtlfunction(f::Main.Testsuite.var"#gpu_context_kernel#39"{Main.Testsuite.var"#f#38"}, tt::Type{Tuple{KernelAbstractions.CompilerMetadata{KernelAbstractions.NDIteration.DynamicSize, KernelAbstractions.NDIteration.DynamicCheck, Nothing, CartesianIndices{1, Tuple{Base.OneTo{Int64}}}, KernelAbstractions.NDIteration.NDRange{1, KernelAbstractions.NDIteration.DynamicSize, KernelAbstractions.NDIteration.DynamicSize, CartesianIndices{1, Tuple{Base.OneTo{Int64}}}, CartesianIndices{1, Tuple{Base.OneTo{Int64}}}}}, MtlDeviceVector{Int64, 1}}}; name::Nothing, kwargs::@Kwargs{})
      @ Metal ~/Julia/pkg/Metal/src/compiler/execution.jl:184
   [33] mtlfunction
      @ ~/Julia/pkg/Metal/src/compiler/execution.jl:182 [inlined]
   [34] macro expansion
      @ ~/Julia/pkg/Metal/src/compiler/execution.jl:85 [inlined]
   [35] (::KernelAbstractions.Kernel{MetalBackend, KernelAbstractions.NDIteration.DynamicSize, KernelAbstractions.NDIteration.DynamicSize, Main.Testsuite.var"#gpu_context_kernel#39"{Main.Testsuite.var"#f#38"}})(args::MtlVector{Int64, Metal.PrivateStorage}; ndrange::Tuple{Int64}, workgroupsize::Nothing)
      @ Metal.MetalKernels ~/Julia/pkg/Metal/src/MetalKernels.jl:113
   [36] macro expansion
      @ ~/.julia/packages/KernelAbstractions/C3nYQ/test/test.jl:282 [inlined]
   [37] macro expansion
      @ ~/Julia/depot/juliaup/julia-1.12-nightly/share/julia/stdlib/v1.12/Test/src/Test.jl:1771 [inlined]
   [38] unittest_testsuite(Backend::var"#4#5", backend_str::String, backend_mod::Module, BackendArrayT::Type; skip_tests::Set{String})
      @ Main.Testsuite ~/.julia/packages/KernelAbstractions/C3nYQ/test/test.jl:280
   [39] macro expansion
      @ ~/.julia/packages/KernelAbstractions/C3nYQ/test/testsuite.jl:44 [inlined]
   [40] macro expansion
      @ ~/Julia/depot/juliaup/julia-1.12-nightly/share/julia/stdlib/v1.12/Test/src/Test.jl:1771 [inlined]
   [41] testsuite(backend::Function, backend_str::String, backend_mod::Module, AT::Type, DAT::Type; skip_tests::Set{String})
      @ Main.Testsuite ~/.julia/packages/KernelAbstractions/C3nYQ/test/testsuite.jl:17
   [42] top-level scope
      @ ~/Julia/pkg/Metal/test/kernelabstractions.jl:6
  • diag(A::MtlMatrix{Float32, Metal.PrivateStorage}, k::Int64) uses i128 with --check-bounds=yes, probably needs a quirk. (Fixed by f5eec2e)
  Reason: unsupported use of i128 value
  Stacktrace:
    [1] <=
      @ ./int.jl:520
    [2] <=
      @ ./promotion.jl:489
    [3] >=
      @ ./operators.jl:472
    [4] checkbounds
      @ ./range.jl:959
    [5] checkbounds
      @ ./abstractarray.jl:699
    [6] _getindex
      @ ./range.jl:935
    [7] getindex
      @ ./array.jl:3134
    [8] macro expansion
      @ ~/.julia/packages/GPUArrays/uiVyU/src/host/indexing.jl:92
    [9] getindex_generated
      @ ~/.julia/packages/GPUArrays/uiVyU/src/host/indexing.jl:89
   [10] macro expansion
      @ ~/.julia/packages/KernelAbstractions/C3nYQ/src/macros.jl:322
   [11] gpu_getindex_kernel
      @ ./none:0

from

   [24] Kernel
      @ ~/Julia/pkg/Metal/src/MetalKernels.jl:109 [inlined]
   [25] vectorized_getindex!
      @ ~/.julia/packages/GPUArrays/uiVyU/src/host/indexing.jl:75 [inlined]
   [26] vectorized_getindex
      @ ~/.julia/packages/GPUArrays/uiVyU/src/host/indexing.jl:82 [inlined]
   [27] _getindex
      @ ~/.julia/packages/GPUArrays/uiVyU/src/host/indexing.jl:21 [inlined]
   [28] getindex
      @ ~/.julia/packages/GPUArrays/uiVyU/src/host/indexing.jl:17 [inlined]
   [29] diag(A::MtlMatrix{Float32, Metal.PrivateStorage}, k::Int64)
      @ LinearAlgebra ~/Julia/depot/juliaup/julia-1.12-nightly/share/julia/stdlib/v1.12/LinearAlgebra/src/dense.jl:291
   [30] LinearAlgebra.Tridiagonal(A::MtlMatrix{Float32, Metal.PrivateStorage})
      @ LinearAlgebra ~/Julia/depot/juliaup/julia-1.12-nightly/share/julia/stdlib/v1.12/LinearAlgebra/src/tridiag.jl:586
   [31] macro expansion
      @ ~/Julia/depot/juliaup/julia-1.12-nightly/share/julia/stdlib/v1.12/Test/src/Test.jl:676 [inlined]
   [32] macro expansion
      @ ~/.julia/packages/KernelAbstractions/C3nYQ/test/test.jl:85 [inlined]
   [33] macro expansion
      @ ~/.julia/packages/KernelAbstractions/C3nYQ/test/testsuite.jl:18 [inlined]
   [34] macro expansion
      @ ~/Julia/depot/juliaup/julia-1.12-nightly/share/julia/stdlib/v1.12/Test/src/Test.jl:1771 [inlined]
   [35] macro expansion
      @ ~/.julia/packages/KernelAbstractions/C3nYQ/test/testsuite.jl:17 [inlined]
   [36] unittest_testsuite(Backend::var"#4#5", backend_str::String, backend_mod::Module, BackendArrayT::Type; skip_tests::Set{String})
      @ Main.Testsuite ~/.julia/packages/KernelAbstractions/C3nYQ/test/test.jl:75
  • mul! dispatch failure. (Fixed in GPUArrays v11.2.5)
Error During Test at /Users/tim/.julia/packages/KernelAbstractions/C3nYQ/examples/matmul.jl:36
  Test threw exception
  Expression: isapprox(output, a * b)
  ArgumentError: cannot take the CPU address of a MtlMatrix{Float32, Metal.PrivateStorage}
  Stacktrace:
    [1] unsafe_convert(::Type{Ptr{Float32}}, x::MtlMatrix{Float32, Metal.PrivateStorage})
      @ Metal ~/Julia/pkg/Metal/src/array.jl:266
    [2] gemm!(transA::Char, transB::Char, alpha::Float32, A::MtlMatrix{Float32, Metal.PrivateStorage}, B::MtlMatrix{Float32, Metal.PrivateStorage}, beta::Float32, C::MtlMatrix{Float32, Metal.PrivateStorage})
      @ LinearAlgebra.BLAS ~/Julia/depot/juliaup/julia-1.12-nightly/share/julia/stdlib/v1.12/LinearAlgebra/src/blas.jl:1648
    [3] gemm_wrapper!(C::MtlMatrix{Float32, Metal.PrivateStorage}, tA::Char, tB::Char, A::MtlMatrix{Float32, Metal.PrivateStorage}, B::MtlMatrix{Float32, Metal.PrivateStorage}, α::Bool, β::Bool)
      @ LinearAlgebra ~/Julia/depot/juliaup/julia-1.12-nightly/share/julia/stdlib/v1.12/LinearAlgebra/src/matmul.jl:808
    [4] _syrk_herk_gemm_wrapper!(C::MtlMatrix{Float32, Metal.PrivateStorage}, tA::Char, tB::Char, A::MtlMatrix{Float32, Metal.PrivateStorage}, B::MtlMatrix{Float32, Metal.PrivateStorage}, α::Bool, β::Bool, ::Val{LinearAlgebra.BlasFlag.GEMM})
      @ LinearAlgebra ~/Julia/depot/juliaup/julia-1.12-nightly/share/julia/stdlib/v1.12/LinearAlgebra/src/matmul.jl:527
    [5] generic_matmatmul_wrapper!(C::MtlMatrix{Float32, Metal.PrivateStorage}, tA::Char, tB::Char, A::MtlMatrix{Float32, Metal.PrivateStorage}, B::MtlMatrix{Float32, Metal.PrivateStorage}, α::Bool, β::Bool, val::Val{LinearAlgebra.BlasFlag.GEMM})
      @ LinearAlgebra ~/Julia/depot/juliaup/julia-1.12-nightly/share/julia/stdlib/v1.12/LinearAlgebra/src/matmul.jl:507
    [6] _mul!
      @ ~/Julia/depot/juliaup/julia-1.12-nightly/share/julia/stdlib/v1.12/LinearAlgebra/src/matmul.jl:328 [inlined]
    [7] mul!
      @ ~/Julia/depot/juliaup/julia-1.12-nightly/share/julia/stdlib/v1.12/LinearAlgebra/src/matmul.jl:297 [inlined]
    [8] mul!
      @ ~/Julia/depot/juliaup/julia-1.12-nightly/share/julia/stdlib/v1.12/LinearAlgebra/src/matmul.jl:265 [inlined]
    [9] *(A::MtlMatrix{Float32, Metal.PrivateStorage}, B::MtlMatrix{Float32, Metal.PrivateStorage})
      @ LinearAlgebra ~/Julia/depot/juliaup/julia-1.12-nightly/share/julia/stdlib/v1.12/LinearAlgebra/src/matmul.jl:136
   [10] top-level scope
      @ ~/.julia/packages/KernelAbstractions/C3nYQ/examples/matmul.jl:515

Copy link

codecov bot commented Mar 27, 2025

Codecov Report

✅ All modified and coverable lines are covered by tests.
✅ Project coverage is 80.31%. Comparing base (bc1d702) to head (b853262).
⚠️ Report is 1 commit behind head on main.

Additional details and impacted files
@@            Coverage Diff             @@
##             main     #576      +/-   ##
==========================================
+ Coverage   79.58%   80.31%   +0.73%     
==========================================
  Files          61       61              
  Lines        2738     2738              
==========================================
+ Hits         2179     2199      +20     
+ Misses        559      539      -20     

☔ View full report in Codecov by Sentry.
📢 Have feedback on the report? Share it here.

🚀 New features to boost your workflow:
  • ❄️ Test Analytics: Detect flaky tests, report on failures, and find test suite problems.

Copy link
Contributor

@github-actions github-actions bot left a comment

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Metal Benchmarks

Benchmark suite Current: b853262 Previous: bc1d702 Ratio
latency/precompile 10670477833.5 ns 10844034395.5 ns 0.98
latency/ttfp 3998126542 ns 4062392000 ns 0.98
latency/import 1295514896 ns 1323297834 ns 0.98
integration/metaldevrt 897333 ns 897084 ns 1.00
integration/byval/slices=1 1619875 ns 1649541.5 ns 0.98
integration/byval/slices=3 8863292 ns 20432291 ns 0.43
integration/byval/reference 1602500 ns 1620917 ns 0.99
integration/byval/slices=2 2676604 ns 2762083 ns 0.97
kernel/indexing 626708 ns 561625 ns 1.12
kernel/indexing_checked 623875 ns 568020.5 ns 1.10
kernel/launch 8958 ns 8291 ns 1.08
array/construct 5958 ns 6167 ns 0.97
array/broadcast 629875 ns 525521 ns 1.20
array/random/randn/Float32 805959 ns 904792 ns 0.89
array/random/randn!/Float32 624041 ns 603125 ns 1.03
array/random/rand!/Int64 554833 ns 556042 ns 1.00
array/random/rand!/Float32 586458 ns 549375 ns 1.07
array/random/rand/Int64 770833 ns 920291.5 ns 0.84
array/random/rand/Float32 607458 ns 868312.5 ns 0.70
array/accumulate/Int64/1d 1334750 ns 1391041.5 ns 0.96
array/accumulate/Int64/dims=1 1924521 ns 1938459 ns 0.99
array/accumulate/Int64/dims=2 2192583 ns 2314708 ns 0.95
array/accumulate/Int64/dims=1L 11842709 ns 12532979.5 ns 0.94
array/accumulate/Int64/dims=2L 9799875 ns 9877687.5 ns 0.99
array/accumulate/Float32/1d 1214500 ns 1176667 ns 1.03
array/accumulate/Float32/dims=1 1632395.5 ns 1686625 ns 0.97
array/accumulate/Float32/dims=2 1970167 ns 2110166.5 ns 0.93
array/accumulate/Float32/dims=1L 9980749.5 ns 10483479.5 ns 0.95
array/accumulate/Float32/dims=2L 7368667 ns 7583083 ns 0.97
array/reductions/reduce/Int64/1d 1536646 ns 1280666 ns 1.20
array/reductions/reduce/Int64/dims=1 1121854 ns 1173000 ns 0.96
array/reductions/reduce/Int64/dims=2 1275541 ns 1305833 ns 0.98
array/reductions/reduce/Int64/dims=1L 2063542 ns 2058250 ns 1.00
array/reductions/reduce/Int64/dims=2L 3552562.5 ns 3592333 ns 0.99
array/reductions/reduce/Float32/1d 1054167 ns 811062.5 ns 1.30
array/reductions/reduce/Float32/dims=1 863875 ns 848229 ns 1.02
array/reductions/reduce/Float32/dims=2 801083 ns 747229 ns 1.07
array/reductions/reduce/Float32/dims=1L 1377896 ns 1415271 ns 0.97
array/reductions/reduce/Float32/dims=2L 1872917 ns 1896625 ns 0.99
array/reductions/mapreduce/Int64/1d 1559437.5 ns 1293666.5 ns 1.21
array/reductions/mapreduce/Int64/dims=1 1127000 ns 1174292 ns 0.96
array/reductions/mapreduce/Int64/dims=2 1290625 ns 1310916 ns 0.98
array/reductions/mapreduce/Int64/dims=1L 2086708.5 ns 2042625 ns 1.02
array/reductions/mapreduce/Int64/dims=2L 3568854 ns 3598750 ns 0.99
array/reductions/mapreduce/Float32/1d 1050604 ns 830417 ns 1.27
array/reductions/mapreduce/Float32/dims=1 858583 ns 850729.5 ns 1.01
array/reductions/mapreduce/Float32/dims=2 824417 ns 750792 ns 1.10
array/reductions/mapreduce/Float32/dims=1L 1373792 ns 1403500 ns 0.98
array/reductions/mapreduce/Float32/dims=2L 1881666 ns 1906291 ns 0.99
array/private/copyto!/gpu_to_gpu 652875 ns 576250 ns 1.13
array/private/copyto!/cpu_to_gpu 833375 ns 671312 ns 1.24
array/private/copyto!/gpu_to_cpu 829875 ns 675875 ns 1.23
array/private/iteration/findall/int 1709000 ns 1647812.5 ns 1.04
array/private/iteration/findall/bool 1498833.5 ns 1560583 ns 0.96
array/private/iteration/findfirst/int 1977000 ns 1804521.5 ns 1.10
array/private/iteration/findfirst/bool 1800667 ns 1741750 ns 1.03
array/private/iteration/scalar 3995667 ns 2804750 ns 1.42
array/private/iteration/logical 2697334 ns 2711583 ns 0.99
array/private/iteration/findmin/1d 2056625 ns 1938375 ns 1.06
array/private/iteration/findmin/2d 1588708 ns 1607812.5 ns 0.99
array/private/copy 594708 ns 841875 ns 0.71
array/shared/copyto!/gpu_to_gpu 83458 ns 82041 ns 1.02
array/shared/copyto!/cpu_to_gpu 82458 ns 85084 ns 0.97
array/shared/copyto!/gpu_to_cpu 82625 ns 80125 ns 1.03
array/shared/iteration/findall/int 1667895.5 ns 1650167 ns 1.01
array/shared/iteration/findall/bool 1514791 ns 1573833 ns 0.96
array/shared/iteration/findfirst/int 1424625 ns 1462979.5 ns 0.97
array/shared/iteration/findfirst/bool 1411292 ns 1415896 ns 1.00
array/shared/iteration/scalar 151375 ns 161084 ns 0.94
array/shared/iteration/logical 2538979.5 ns 2510521 ns 1.01
array/shared/iteration/findmin/1d 1643292 ns 1589708 ns 1.03
array/shared/iteration/findmin/2d 1597958 ns 1627562.5 ns 0.98
array/shared/copy 250833 ns 209625 ns 1.20
array/permutedims/4d 2501083 ns 2614646 ns 0.96
array/permutedims/2d 1256917 ns 1280958 ns 0.98
array/permutedims/3d 1807021 ns 1954875 ns 0.92
metal/synchronization/stream 14083 ns 15375 ns 0.92
metal/synchronization/context 14791.5 ns 16417 ns 0.90

This comment was automatically generated by workflow using github-action-benchmark.

@maleadt
Copy link
Member

maleadt commented Mar 27, 2025

Oh right, I forgot we don't actually support 1.12 yet. I guess we need to update the downgrader...

@christiangnrd christiangnrd force-pushed the christiangnrd-patch-1 branch from f5bfc05 to 6801da3 Compare May 23, 2025 02:03
Copy link
Member

@maleadt maleadt left a comment

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Not sure this is valuable without the bumped downgrader; now you're just removing opaque pointer testing from the CI roster without a replacement that will ever pass.

So I wouldn't merge this until we have that JLL at the least.

@christiangnrd christiangnrd marked this pull request as draft May 23, 2025 10:46
@christiangnrd christiangnrd force-pushed the christiangnrd-patch-1 branch 2 times, most recently from d5ecb95 to 02eab26 Compare June 11, 2025 13:34
@maleadt maleadt changed the title Test 1.12-nightly Updates for Julia 1.12 Jun 14, 2025
@maleadt maleadt force-pushed the christiangnrd-patch-1 branch from 02eab26 to 74a91e7 Compare June 14, 2025 09:21
@christiangnrd christiangnrd force-pushed the christiangnrd-patch-1 branch from 74a91e7 to 21faf97 Compare June 25, 2025 15:41
@maleadt
Copy link
Member

maleadt commented Jun 26, 2025

FWIW, simply rebasing this PR is not going to help. It needs somebody to go in and look at the specific failures, which I currently do not have the time for.

@christiangnrd christiangnrd force-pushed the christiangnrd-patch-1 branch 2 times, most recently from d72818a to df8ec84 Compare September 11, 2025 17:37
@christiangnrd christiangnrd linked an issue Sep 17, 2025 that may be closed by this pull request
@christiangnrd christiangnrd marked this pull request as ready for review September 23, 2025 14:22
Copy link
Contributor

github-actions bot commented Sep 23, 2025

Your PR requires formatting changes to meet the project's style guidelines.
Please consider running Runic (git runic main) to apply these changes.

Click here to view the suggested changes.
diff --git a/src/compiler/reflection.jl b/src/compiler/reflection.jl
index 91c4848e..198fc872 100644
--- a/src/compiler/reflection.jl
+++ b/src/compiler/reflection.jl
@@ -69,5 +69,5 @@ function return_type(@nospecialize(func), @nospecialize(tt))
     job = CompilerJob(source, config)
     interp = GPUCompiler.get_interpreter(job)
     sig = Base.signature_type(func, tt)
-    Core.Compiler._return_type(interp, sig)
+    return Core.Compiler._return_type(interp, sig)
 end
diff --git a/src/device/quirks.jl b/src/device/quirks.jl
index 08394d35..c1fda1c2 100644
--- a/src/device/quirks.jl
+++ b/src/device/quirks.jl
@@ -85,7 +85,7 @@ end
 end
 
 @static if VERSION >= v"1.12.0-DEV.1736" # Partially reverts JuliaLang/julia PR #56750
-    let BitInteger64 = Union{Int64,UInt64}
+    let BitInteger64 = Union{Int64, UInt64}
         @device_override function Base.checkbounds(::Type{Bool}, v::StepRange{<:BitInteger64, <:BitInteger64}, i::BitInteger64)
             @inline
             return checkindex(Bool, eachindex(IndexLinear(), v), i)
diff --git a/test/array.jl b/test/array.jl
index e64dbf9b..08424c36 100644
--- a/test/array.jl
+++ b/test/array.jl
@@ -56,7 +56,7 @@ end
     @test Adapt.adapt(MtlArray{Float16}, Float64[1]) isa MtlArray{Float16}
 
     # Test a few explicitly unsupported types
-    @test_throws "MtlArray only supports element types that are stored inline" MtlArray(BigInt[1])
+        @test_throws "MtlArray only supports element types that are stored inline" MtlArray(BigInt[1])
     @test_throws "Metal does not support Float64 values" MtlArray(Float64[1])
     @test_throws "Metal does not support Int128 values" MtlArray(Int128[1])
     @test_throws "Metal does not support UInt128 values" MtlArray(UInt128[1])

@christiangnrd christiangnrd force-pushed the christiangnrd-patch-1 branch 2 times, most recently from 4e02b75 to 22a58d3 Compare September 30, 2025 16:29
@christiangnrd christiangnrd force-pushed the christiangnrd-patch-1 branch from 22a58d3 to b853262 Compare October 5, 2025 19:46
Copy link
Member

@maleadt maleadt left a comment

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks for looking into this!

@maleadt maleadt merged commit 46d8ad0 into main Oct 6, 2025
7 checks passed
@maleadt maleadt deleted the christiangnrd-patch-1 branch October 6, 2025 11:21
This was referenced Oct 6, 2025
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment

Labels

None yet

Projects

None yet

Development

Successfully merging this pull request may close these issues.

1.12 Support

2 participants