diff --git a/base/compiler/inferencestate.jl b/base/compiler/inferencestate.jl index 3765cc34d6869a..727ba1e4d2cb6b 100644 --- a/base/compiler/inferencestate.jl +++ b/base/compiler/inferencestate.jl @@ -315,7 +315,7 @@ mutable struct InferenceState dont_work_on_me = false parent = nothing - valid_worlds = WorldRange(src.min_world, src.max_world == typemax(UInt) ? get_world_counter() : src.max_world) + valid_worlds = WorldRange(1, get_world_counter()) bestguess = Bottom exc_bestguess = Bottom ipo_effects = EFFECTS_TOTAL @@ -335,13 +335,21 @@ mutable struct InferenceState InferenceParams(interp).unoptimize_throw_blocks && mark_throw_blocks!(src, handler_at) !iszero(cache_mode & CACHE_MODE_LOCAL) && push!(get_inference_cache(interp), result) - return new( + this = new( linfo, world, mod, sptypes, slottypes, src, cfg, method_info, currbb, currpc, ip, handlers, handler_at, ssavalue_uses, bb_vartables, ssavaluetypes, stmt_edges, stmt_info, pclimitations, limitations, cycle_backedges, callers_in_cycle, dont_work_on_me, parent, result, unreachable, valid_worlds, bestguess, exc_bestguess, ipo_effects, restrict_abstract_call_sites, cache_mode, insert_coverage, interp) + + # Apply generated function restrictions + if src.min_world != 1 || src.max_world != typemax(UInt) + # From generated functions + this.valid_worlds = WorldRange(src.min_world, src.max_world) + end + + return this end end @@ -796,7 +804,7 @@ function IRInterpretationState(interp::AbstractInterpreter, method_info = MethodInfo(src) ir = inflate_ir(src, mi) return IRInterpretationState(interp, method_info, ir, mi, argtypes, world, - src.min_world, src.max_world) + code.min_world, code.max_world) end # AbsIntState diff --git a/base/compiler/optimize.jl b/base/compiler/optimize.jl index 2e8c6f913d3650..7ddb1b736cd77c 100644 --- a/base/compiler/optimize.jl +++ b/base/compiler/optimize.jl @@ -107,19 +107,17 @@ is_declared_noinline(@nospecialize src::MaybeCompressed) = # OptimizationState # ##################### -is_source_inferred(@nospecialize src::MaybeCompressed) = - ccall(:jl_ir_flag_inferred, Bool, (Any,), src) - function inlining_policy(interp::AbstractInterpreter, @nospecialize(src), @nospecialize(info::CallInfo), stmt_flag::UInt32) if isa(src, MaybeCompressed) - is_source_inferred(src) || return nothing src_inlineable = is_stmt_inline(stmt_flag) || is_inlineable(src) return src_inlineable ? src : nothing elseif isa(src, IRCode) return src elseif isa(src, SemiConcreteResult) return src + elseif isa(src, CodeInstance) + return inlining_policy(interp, src.inferred, info, stmt_flag) end return nothing end @@ -222,7 +220,6 @@ end function ir_to_codeinf!(src::CodeInfo, ir::IRCode) replace_code_newstyle!(src, ir) widen_all_consts!(src) - src.inferred = true return src end @@ -240,8 +237,6 @@ function widen_all_consts!(src::CodeInfo) end end - src.rettype = widenconst(src.rettype) - return src end diff --git a/base/compiler/ssair/legacy.jl b/base/compiler/ssair/legacy.jl index 3e9a4e2a746dc3..b1ce14f28cf142 100644 --- a/base/compiler/ssair/legacy.jl +++ b/base/compiler/ssair/legacy.jl @@ -55,8 +55,6 @@ Mainly used for testing or interactive use. 
inflate_ir(ci::CodeInfo, linfo::MethodInstance) = inflate_ir!(copy(ci), linfo) inflate_ir(ci::CodeInfo, sptypes::Vector{VarState}, argtypes::Vector{Any}) = inflate_ir!(copy(ci), sptypes, argtypes) function inflate_ir(ci::CodeInfo) - parent = ci.parent - isa(parent, MethodInstance) && return inflate_ir(ci, parent) # XXX the length of `ci.slotflags` may be different from the actual number of call # arguments, but we really don't know that information in this case argtypes = Any[ Any for i = 1:length(ci.slotflags) ] diff --git a/base/compiler/typeinfer.jl b/base/compiler/typeinfer.jl index afae4abc2a07dd..ccb236c079d194 100644 --- a/base/compiler/typeinfer.jl +++ b/base/compiler/typeinfer.jl @@ -231,8 +231,6 @@ function finish!(interp::AbstractInterpreter, caller::InferenceState) result.src = opt = ir_to_codeinf!(opt) end if opt isa CodeInfo - opt.min_world = first(valid_worlds) - opt.max_world = last(valid_worlds) caller.src = opt else # In this case `caller.src` is invalid for clients (such as `typeinf_ext`) to use @@ -278,8 +276,8 @@ function is_result_constabi_eligible(result::InferenceResult) result_type = result.result return isa(result_type, Const) && is_foldable_nothrow(result.ipo_effects) && is_inlineable_constant(result_type.val) end -function CodeInstance(interp::AbstractInterpreter, result::InferenceResult, - valid_worlds::WorldRange) +function CodeInstance(interp::AbstractInterpreter, result::InferenceResult; + can_discard_trees=may_discard_trees(interp)) local const_flags::Int32 result_type = result.result @assert !(result_type === nothing || result_type isa LimitedAccuracy) @@ -312,11 +310,11 @@ function CodeInstance(interp::AbstractInterpreter, result::InferenceResult, end end relocatability = 0x0 - if const_flags == 0x3 && may_discard_trees(interp) + if const_flags == 0x3 && can_discard_trees inferred_result = nothing relocatability = 0x1 else - inferred_result = transform_result_for_cache(interp, result.linfo, valid_worlds, result) + inferred_result = transform_result_for_cache(interp, result.linfo, result.valid_worlds, result, can_discard_trees) if isa(inferred_result, String) t = @_gc_preserve_begin inferred_result relocatability = unsafe_load(unsafe_convert(Ptr{UInt8}, inferred_result), Core.sizeof(inferred_result)) @@ -328,19 +326,19 @@ function CodeInstance(interp::AbstractInterpreter, result::InferenceResult, # relocatability = isa(inferred_result, String) ? 
inferred_result[end] : UInt8(0) return CodeInstance(result.linfo, widenconst(result_type), widenconst(result.exc_result), rettype_const, inferred_result, - const_flags, first(valid_worlds), last(valid_worlds), + const_flags, first(result.valid_worlds), last(result.valid_worlds), # TODO: Actually do something with non-IPO effects encode_effects(result.ipo_effects), encode_effects(result.ipo_effects), result.analysis_results, relocatability) end -function maybe_compress_codeinfo(interp::AbstractInterpreter, linfo::MethodInstance, ci::CodeInfo) +function maybe_compress_codeinfo(interp::AbstractInterpreter, linfo::MethodInstance, ci::CodeInfo, + can_discard_trees::Bool=may_discard_trees(interp)) def = linfo.def isa(def, Method) || return ci # don't compress toplevel code - if may_discard_trees(interp) - cache_the_tree = ci.inferred && (is_inlineable(ci) || isa_compileable_sig(linfo.specTypes, linfo.sparam_vals, def)) - else - cache_the_tree = true + cache_the_tree = true + if can_discard_trees + cache_the_tree = is_inlineable(ci) || isa_compileable_sig(linfo.specTypes, linfo.sparam_vals, def) end if cache_the_tree if may_compress(interp) @@ -357,11 +355,12 @@ function maybe_compress_codeinfo(interp::AbstractInterpreter, linfo::MethodInsta end function transform_result_for_cache(interp::AbstractInterpreter, - linfo::MethodInstance, valid_worlds::WorldRange, result::InferenceResult) + linfo::MethodInstance, valid_worlds::WorldRange, result::InferenceResult, + can_discard_trees::Bool=may_discard_trees(interp)) inferred_result = result.src if inferred_result isa CodeInfo uncompressed = inferred_result - inferred_result = maybe_compress_codeinfo(interp, linfo, inferred_result) + inferred_result = maybe_compress_codeinfo(interp, linfo, inferred_result, can_discard_trees) result.is_src_volatile |= uncompressed !== inferred_result end # The global cache can only handle objects that codegen understands @@ -372,23 +371,30 @@ function transform_result_for_cache(interp::AbstractInterpreter, end function cache_result!(interp::AbstractInterpreter, result::InferenceResult) - valid_worlds = result.valid_worlds - if last(valid_worlds) == get_world_counter() + if last(result.valid_worlds) == get_world_counter() # if we've successfully recorded all of the backedges in the global reverse-cache, # we can now widen our applicability in the global cache too - valid_worlds = WorldRange(first(valid_worlds), typemax(UInt)) + result.valid_worlds = WorldRange(first(result.valid_worlds), typemax(UInt)) end # check if the existing linfo metadata is also sufficient to describe the current inference result # to decide if it is worth caching this mi = result.linfo already_inferred = already_inferred_quick_test(interp, mi) - if !already_inferred && haskey(WorldView(code_cache(interp), valid_worlds), mi) - already_inferred = true + cache = WorldView(code_cache(interp), result.valid_worlds) + if !already_inferred && haskey(cache, mi) + ci = cache[mi] + # Even if we already have a CI for this, it's possible that the new CI has more + # information (E.g. because the source was limited before, but is no longer - this + # happens during bootstrap). In that case, allow the result to be recached. 
+        if result.src === nothing || (ci.inferred !== nothing || ci.invoke != C_NULL)
+            already_inferred = true
+        end
     end
     # TODO: also don't store inferred code if we've previously decided to interpret this function
     if !already_inferred
-        code_cache(interp)[mi] = ci = CodeInstance(interp, result, valid_worlds)
+        code_cache(interp)[mi] = ci = CodeInstance(interp, result)
+        result.ci = ci
         if track_newly_inferred[]
             m = mi.def
             if isa(m, Method) && m.module != Core
@@ -529,7 +535,6 @@ function finish(me::InferenceState, interp::AbstractInterpreter)
     end
     if me.src.edges !== nothing
         append!(s_edges, me.src.edges::Vector)
-        me.src.edges = nothing
     end
     # inspect whether our inference had a limited result accuracy,
     # else it may be suitable to cache
@@ -635,13 +640,6 @@ function record_slot_assign!(sv::InferenceState)
     return nothing
 end
 
-function record_bestguess!(sv::InferenceState)
-    bestguess = sv.bestguess
-    @assert !(bestguess isa LimitedAccuracy)
-    sv.src.rettype = bestguess
-    return nothing
-end
-
 # find the dominating assignment to the slot `id` in the block containing statement `idx`,
 # returns `nothing` otherwise
 function find_dominating_assignment(id::Int, idx::Int, sv::InferenceState)
@@ -670,8 +668,6 @@ function type_annotate!(interp::AbstractInterpreter, sv::InferenceState)
     # to hold all of the items assigned into it
     record_slot_assign!(sv)
 
-    record_bestguess!(sv)
-
     # annotate variables load types
     src = sv.src
     stmts = src.code
@@ -872,9 +868,8 @@ function typeinf_edge(interp::AbstractInterpreter, method::Method, @nospecialize
         exc_bestguess = refine_exception_type(frame.exc_bestguess, effects)
         # propagate newly inferred source to the inliner, allowing efficient inlining w/o deserialization:
         # note that this result is cached globally exclusively, we can use this local result destructively
-        volatile_inf_result = isinferred && let inferred_src = result.src
-            isa(inferred_src, CodeInfo) && (is_inlineable(inferred_src) || force_inline)
-        end ? VolatileInferenceResult(result) : nothing
+        volatile_inf_result = isinferred && (force_inline || inlining_policy(interp, result.src, NoCallInfo(), IR_FLAG_NULL) !== nothing) ?
+            VolatileInferenceResult(result) : nothing
         return EdgeCallResult(frame.bestguess, exc_bestguess, edge, effects, volatile_inf_result)
     elseif frame === true
         # unresolvable cycle
@@ -909,7 +904,15 @@ end
 
 #### entry points for inferring a MethodInstance given a type signature ####
 
-function codeinfo_for_const(interp::AbstractInterpreter, mi::MethodInstance, worlds::WorldRange, @nospecialize(val))
+"""
+    codeinfo_for_const(interp::AbstractInterpreter, mi::MethodInstance, @nospecialize(val))
+
+Return a fake CodeInfo that just contains `return \$val`. This function is used in various reflection APIs when asking
+for the code of a function that inference has found to just return a constant. For such functions, no code is actually
+stored - the constant is used directly. However, because this is an ABI implementation detail, it is nice to maintain
+consistency and just synthesize a CodeInfo when the reflection APIs ask for them - this function does that.
+""" +function codeinfo_for_const(interp::AbstractInterpreter, mi::MethodInstance, @nospecialize(val)) method = mi.def::Method tree = ccall(:jl_new_code_info_uninit, Ref{CodeInfo}, ()) tree.code = Any[ ReturnNode(quoted(val)) ] @@ -921,14 +924,24 @@ function codeinfo_for_const(interp::AbstractInterpreter, mi::MethodInstance, wor tree.linetable = LineInfoNode[LineInfoNode(method.module, method.name, method.file, method.line, Int32(0))] tree.ssaflags = UInt32[0] set_inlineable!(tree, true) - tree.parent = mi - tree.rettype = Core.Typeof(val) - tree.min_world = worlds.min_world - tree.max_world = worlds.max_world - tree.inferred = true return tree end +""" + codeinstance_for_const_with_code(interp::AbstractInterpreter, code::CodeInstance) + +Given a constabi `CodeInstance`, create another (uncached) CodeInstance that contains the dummy code created +by [`codeinfo_for_const`](@ref) for use in reflection functions that require this. See [`codeinfo_for_const`](@ref) for +more details. +""" +function codeinstance_for_const_with_code(interp::AbstractInterpreter, code::CodeInstance) + src = codeinfo_for_const(interp, code.def, code.rettype_const) + return CodeInstance(code.def, code.rettype, code.exctype, code.rettype_const, src, + Int32(0x3), code.min_world, code.max_world, + code.ipo_purity_bits, code.purity_bits, code.analysis_results, + code.relocatability) +end + result_is_constabi(interp::AbstractInterpreter, run_optimizer::Bool, result::InferenceResult) = run_optimizer && may_discard_trees(interp) && is_result_constabi_eligible(result) @@ -944,7 +957,7 @@ function typeinf_code(interp::AbstractInterpreter, mi::MethodInstance, run_optim is_inferred(frame) || return nothing, Any if result_is_constabi(interp, run_optimizer, frame.result) rt = frame.result.result::Const - return codeinfo_for_const(interp, frame.linfo, frame.result.valid_worlds, rt.val), widenconst(rt) + return codeinfo_for_const(interp, frame.linfo, rt.val), widenconst(rt) end code = frame.src rt = widenconst(ignorelimited(frame.result.result)) @@ -1001,36 +1014,61 @@ function typeinf_frame(interp::AbstractInterpreter, mi::MethodInstance, run_opti return frame end +# We only care about types/effects, no source required +const SOURCE_MODE_NOT_REQUIRED = 0x0 + +# We need something that can be invoked or compiled (i.e. constabi or inferred) +const SOURCE_MODE_ABI = 0x1 + +# We need source, even for constabi +const SOURCE_MODE_FORCE_SOURCE = 0x2 + +function ci_has_source(code::CodeInstance) + inf = @atomic :monotonic code.inferred + return isa(inf, CodeInfo) || isa(inf, String) +end + +""" + ci_has_abi(code::CodeInstance) + +Determine whether this CodeInstance is something that could be invoked if we gave it +to the runtime system (either because it already has an ->invoke ptr, or because it +has source that could be compiled). 
+"""
+function ci_has_abi(code::CodeInstance)
+    ci_has_source(code) && return true
+    return code.invoke !== C_NULL
+end
+
+function ci_meets_requirement(code::CodeInstance, source_mode::UInt8)
+    source_mode == SOURCE_MODE_NOT_REQUIRED && return true
+    source_mode == SOURCE_MODE_ABI && return ci_has_abi(code)
+    source_mode == SOURCE_MODE_FORCE_SOURCE && return ci_has_source(code)
+    return false
+end
+
 # compute (and cache) an inferred AST and return type
-function typeinf_ext(interp::AbstractInterpreter, mi::MethodInstance)
-    method = mi.def::Method
+function typeinf_ext(interp::AbstractInterpreter, mi::MethodInstance, source_mode::UInt8)
     start_time = ccall(:jl_typeinf_timing_begin, UInt64, ())
     code = get(code_cache(interp), mi, nothing)
     if code isa CodeInstance
         # see if this code already exists in the cache
-        inf = @atomic :monotonic code.inferred
-        if use_const_api(code)
-            ccall(:jl_typeinf_timing_end, Cvoid, (UInt64,), start_time)
-            return codeinfo_for_const(interp, mi, WorldRange(code.min_world, code.max_world), code.rettype_const)
-        elseif isa(inf, CodeInfo)
-            ccall(:jl_typeinf_timing_end, Cvoid, (UInt64,), start_time)
-            if !(inf.min_world == code.min_world &&
-                 inf.max_world == code.max_world &&
-                 inf.rettype === code.rettype)
-                inf = copy(inf)
-                inf.min_world = code.min_world
-                inf.max_world = code.max_world
-                inf.rettype = code.rettype
-            end
-            return inf
-        elseif isa(inf, String)
+        if source_mode == SOURCE_MODE_FORCE_SOURCE && use_const_api(code)
+            code = codeinstance_for_const_with_code(interp, code)
+        end
+        if ci_meets_requirement(code, source_mode)
             ccall(:jl_typeinf_timing_end, Cvoid, (UInt64,), start_time)
-            inf = _uncompressed_ir(code, inf)
-            return inf
+            return code
         end
     end
-    if ccall(:jl_get_module_infer, Cint, (Any,), method.module) == 0 && !generating_output(#=incremental=#false)
-        return retrieve_code_info(mi, get_world_counter(interp))
+    def = mi.def
+    if isa(def, Method)
+        if ccall(:jl_get_module_infer, Cint, (Any,), def.module) == 0 && !generating_output(#=incremental=#false)
+            src = retrieve_code_info(mi, get_world_counter(interp))
+            return CodeInstance(mi, Any, Any, nothing, src, Int32(0),
+                get_world_counter(interp), get_world_counter(interp),
+                UInt32(0), UInt32(0), nothing, UInt8(0))
+        end
     end
     lock_mi_inference(interp, mi)
     result = InferenceResult(mi, typeinf_lattice(interp))
@@ -1038,11 +1076,20 @@ function typeinf_ext(interp::AbstractInterpreter, mi::MethodInstance)
     frame === nothing && return nothing
     typeinf(interp, frame)
     ccall(:jl_typeinf_timing_end, Cvoid, (UInt64,), start_time)
-    if result_is_constabi(interp, true, frame.result)
-        return codeinfo_for_const(interp, frame.linfo, frame.result.valid_worlds, frame.result.result.val)
+    if isdefined(result, :ci)
+        if ci_meets_requirement(result.ci, source_mode)
+            # Inference result was cacheable and is in global cache. Return it.
+            return result.ci
+        elseif use_const_api(result.ci)
+            code = codeinstance_for_const_with_code(interp, result.ci)
+            @assert ci_meets_requirement(code, source_mode)
+            return code
+        end
     end
-    frame.src.inferred || return nothing
-    return frame.src
+    # Inference result is not cacheable, or it was cacheable but we did not want to
+    # store the source in the cache; either way, the caller wanted it (e.g. for reflection).
+    # We construct a new CodeInstance for it that is not part of the cache hierarchy.
+ return CodeInstance(interp, result, can_discard_trees=source_mode != SOURCE_MODE_FORCE_SOURCE) end # compute (and cache) an inferred AST and return the inferred return type @@ -1070,27 +1117,9 @@ function typeinf_type(interp::AbstractInterpreter, mi::MethodInstance) end # This is a bridge for the C code calling `jl_typeinf_func()` -typeinf_ext_toplevel(mi::MethodInstance, world::UInt) = typeinf_ext_toplevel(NativeInterpreter(world), mi) -function typeinf_ext_toplevel(interp::AbstractInterpreter, mi::MethodInstance) - if isa(mi.def, Method) - # method lambda - infer this specialization via the method cache - src = typeinf_ext(interp, mi) - else - src = mi.uninferred::CodeInfo - if !src.inferred - # toplevel lambda - infer directly - start_time = ccall(:jl_typeinf_timing_begin, UInt64, ()) - if !src.inferred - result = InferenceResult(mi, typeinf_lattice(interp)) - frame = InferenceState(result, src, #=cache_mode=#:global, interp) - typeinf(interp, frame) - @assert is_inferred(frame) # TODO: deal with this better - src = frame.src - end - ccall(:jl_typeinf_timing_end, Cvoid, (UInt64,), start_time) - end - end - return src +typeinf_ext_toplevel(mi::MethodInstance, world::UInt, source_mode::UInt8) = typeinf_ext_toplevel(NativeInterpreter(world), mi, source_mode) +function typeinf_ext_toplevel(interp::AbstractInterpreter, mi::MethodInstance, source_mode::UInt8) + return typeinf_ext(interp, mi, source_mode) end function return_type(@nospecialize(f), t::DataType) # this method has a special tfunc diff --git a/base/compiler/types.jl b/base/compiler/types.jl index b98cf09ff7cf1b..4d2fc9718bf890 100644 --- a/base/compiler/types.jl +++ b/base/compiler/types.jl @@ -88,6 +88,7 @@ mutable struct InferenceResult effects::Effects # if optimization is finished analysis_results::AnalysisResults # AnalysisResults with e.g. result::ArgEscapeCache if optimized, otherwise NULL_ANALYSIS_RESULTS is_src_volatile::Bool # `src` has been cached globally as the compressed format already, allowing `src` to be used destructively + ci::CodeInstance # CodeInstance if this result has been added to the cache function InferenceResult(linfo::MethodInstance, cache_argtypes::Vector{Any}, overridden_by_const::BitVector) # def = linfo.def # nargs = def isa Method ? 
Int(def.nargs) : 0 diff --git a/base/compiler/utilities.jl b/base/compiler/utilities.jl index 368395e7140541..25e29efd85aed0 100644 --- a/base/compiler/utilities.jl +++ b/base/compiler/utilities.jl @@ -128,25 +128,27 @@ function get_staged(mi::MethodInstance, world::UInt) end function retrieve_code_info(linfo::MethodInstance, world::UInt) - m = linfo.def::Method + def = linfo.def + if !isa(def, Method) + return linfo.uninferred + end c = nothing - if isdefined(m, :generator) + if isdefined(def, :generator) # user code might throw errors – ignore them c = get_staged(linfo, world) end - if c === nothing && isdefined(m, :source) - src = m.source + if c === nothing && isdefined(def, :source) + src = def.source if src === nothing # can happen in images built with --strip-ir return nothing elseif isa(src, String) - c = _uncompressed_ir(m, src) + c = _uncompressed_ir(def, src) else c = copy(src::CodeInfo) end end if c isa CodeInfo - c.parent = linfo return c end return nothing @@ -439,7 +441,9 @@ function find_ssavalue_uses(e::Expr, uses::Vector{BitSet}, line::Int) end function find_ssavalue_uses(e::PhiNode, uses::Vector{BitSet}, line::Int) - for val in e.values + for i = 1:length(e.values) + isassigned(e.values, i) || continue + val = e.values[i] if isa(val, SSAValue) push!(uses[val.id], line) end diff --git a/base/opaque_closure.jl b/base/opaque_closure.jl index cb3c00b128dcb6..9891db20249fb8 100644 --- a/base/opaque_closure.jl +++ b/base/opaque_closure.jl @@ -68,19 +68,12 @@ function Core.OpaqueClosure(ir::IRCode, @nospecialize env...; src.slotnames = fill(:none, nargs+1) src.slotflags = fill(zero(UInt8), length(ir.argtypes)) src.slottypes = copy(ir.argtypes) - src.rettype = rt src = Core.Compiler.ir_to_codeinf!(src, ir) return generate_opaque_closure(sig, Union{}, rt, src, nargs, isva, env...; do_compile) end -function Core.OpaqueClosure(src::CodeInfo, @nospecialize env...) - src.inferred || throw(ArgumentError("Expected inferred src::CodeInfo")) - mi = src.parent::Core.MethodInstance - sig = Base.tuple_type_tail(mi.specTypes) - method = mi.def::Method - nargs = method.nargs-1 - isva = method.isva - return generate_opaque_closure(sig, Union{}, src.rettype, src, nargs, isva, env...) +function Core.OpaqueClosure(src::CodeInfo, @nospecialize env...; rettype, sig, nargs, isva=false) + return generate_opaque_closure(sig, Union{}, rettype, src, nargs, isva, env...) end function generate_opaque_closure(@nospecialize(sig), @nospecialize(rt_lb), @nospecialize(rt_ub), @@ -88,7 +81,8 @@ function generate_opaque_closure(@nospecialize(sig), @nospecialize(rt_lb), @nosp mod::Module=@__MODULE__, lineno::Int=0, file::Union{Nothing,Symbol}=nothing, + isinferred::Bool=true, do_compile::Bool=true) - return ccall(:jl_new_opaque_closure_from_code_info, Any, (Any, Any, Any, Any, Any, Cint, Any, Cint, Cint, Any, Cint), - sig, rt_lb, rt_ub, mod, src, lineno, file, nargs, isva, env, do_compile) + return ccall(:jl_new_opaque_closure_from_code_info, Any, (Any, Any, Any, Any, Any, Cint, Any, Cint, Cint, Any, Cint, Cint), + sig, rt_lb, rt_ub, mod, src, lineno, file, nargs, isva, env, do_compile, isinferred) end diff --git a/base/reflection.jl b/base/reflection.jl index 31e9d6de3a70cf..a93acc84df2ebf 100644 --- a/base/reflection.jl +++ b/base/reflection.jl @@ -1313,7 +1313,7 @@ uncompressed_ir(m::Method) = isdefined(m, :source) ? _uncompressed_ir(m, m.sourc isdefined(m, :generator) ? 
error("Method is @generated; try `code_lowered` instead.") : error("Code for this Method is not available.") _uncompressed_ir(m::Method, s::CodeInfo) = copy(s) -_uncompressed_ir(m::Method, s::String) = ccall(:jl_uncompress_ir, Any, (Any, Ptr{Cvoid}, Any), m, C_NULL, s)::CodeInfo +_uncompressed_ir(m::Method, s::String) = ccall(:jl_uncompress_ir, Any, (Any, Any), m, s)::CodeInfo _uncompressed_ir(ci::Core.CodeInstance, s::String) = ccall(:jl_uncompress_ir, Any, (Any, Any, Any), ci.def.def::Method, ci, s)::CodeInfo # for backwards compat const uncompressed_ast = uncompressed_ir @@ -1615,21 +1615,24 @@ function code_typed_by_type(@nospecialize(tt::Type); return asts end -function code_typed_opaque_closure(@nospecialize(oc::Core.OpaqueClosure); - debuginfo::Symbol=:default, _...) +function get_oc_code_rt(@nospecialize(oc::Core.OpaqueClosure)) ccall(:jl_is_in_pure_context, Bool, ()) && error("code reflection cannot be used from generated functions") m = oc.source if isa(m, Method) code = _uncompressed_ir(m, m.source) - debuginfo === :none && remove_linenums!(code) - # intersect the declared return type and the inferred return type (if available) - rt = typeintersect(code.rettype, typeof(oc).parameters[2]) - return Any[code => rt] + return code => typeof(oc).parameters[2] else error("encountered invalid Core.OpaqueClosure object") end end +function code_typed_opaque_closure(@nospecialize(oc::Core.OpaqueClosure); + debuginfo::Symbol=:default, _...) + (code, rt) = get_oc_code_rt(oc) + debuginfo === :none && remove_linenums!(code) + return Any[code=>rt] +end + """ code_ircode(f, [types]) diff --git a/src/aotcompile.cpp b/src/aotcompile.cpp index 7b51579ce430fa..19afd8ebf0ca77 100644 --- a/src/aotcompile.cpp +++ b/src/aotcompile.cpp @@ -301,21 +301,14 @@ static void jl_ci_cache_lookup(const jl_cgparams_t &cgparams, jl_method_instance if ((jl_value_t*)*src_out == jl_nothing) *src_out = NULL; if (*src_out && jl_is_method(def)) - *src_out = jl_uncompress_ir(def, codeinst, (jl_value_t*)*src_out); + *src_out = jl_uncompress_ir(def, (jl_value_t*)*src_out); } if (*src_out == NULL || !jl_is_code_info(*src_out)) { if (cgparams.lookup != jl_rettype_inferred_addr) { jl_error("Refusing to automatically run type inference with custom cache lookup."); } else { - *src_out = jl_type_infer(mi, world, 0); - if (*src_out) { - codeinst = jl_get_codeinst_for_src(mi, *src_out); - if ((*src_out)->inferred) { - jl_value_t *null = nullptr; - jl_atomic_cmpswap_relaxed(&codeinst->inferred, &null, jl_nothing); - } - } + *ci_out = jl_type_infer(mi, world, 0, SOURCE_MODE_ABI); } } *ci_out = codeinst; @@ -374,11 +367,11 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm size_t compile_for[] = { jl_typeinf_world, _world }; for (int worlds = 0; worlds < 2; worlds++) { JL_TIMING(NATIVE_AOT, NATIVE_Codegen); - params.world = compile_for[worlds]; - if (!params.world) + params.min_world = params.max_world = compile_for[worlds]; + if (!params.min_world) continue; // Don't emit methods for the typeinf_world with extern policy - if (policy != CompilationPolicy::Default && params.world == jl_typeinf_world) + if (policy != CompilationPolicy::Default && params.min_world == jl_typeinf_world) continue; size_t i, l; for (i = 0, l = jl_array_nrows(methods); i < l; i++) { @@ -395,10 +388,10 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm // if this method is generally visible to the current compilation world, // and this is either the primary world, or not applicable in the 
primary world // then we want to compile and emit this - if (jl_atomic_load_relaxed(&mi->def.method->primary_world) <= params.world && params.world <= jl_atomic_load_relaxed(&mi->def.method->deleted_world)) { + if (jl_atomic_load_relaxed(&mi->def.method->primary_world) <= params.min_world && params.max_world <= jl_atomic_load_relaxed(&mi->def.method->deleted_world)) { // find and prepare the source code to compile jl_code_instance_t *codeinst = NULL; - jl_ci_cache_lookup(*cgparams, mi, params.world, &codeinst, &src); + jl_ci_cache_lookup(*cgparams, mi, params.min_world, &codeinst, &src); if (src && !params.compiled_functions.count(codeinst)) { // now add it to our compilation results JL_GC_PROMISE_ROOTED(codeinst->rettype); @@ -1951,13 +1944,10 @@ extern "C" JL_DLLEXPORT_CODEGEN jl_code_info_t *jl_gdbdumpcode(jl_method_instanc src = (jl_code_info_t*)jl_atomic_load_relaxed(&codeinst->inferred); if ((jl_value_t*)src != jl_nothing && !jl_is_code_info(src) && jl_is_method(mi->def.method)) { JL_GC_PUSH2(&codeinst, &src); - src = jl_uncompress_ir(mi->def.method, codeinst, (jl_value_t*)src); + src = jl_uncompress_ir(mi->def.method, (jl_value_t*)src); JL_GC_POP(); } } - if (!src || (jl_value_t*)src == jl_nothing) { - src = jl_type_infer(mi, world, 0); - } return src; } @@ -1970,7 +1960,7 @@ extern "C" JL_DLLEXPORT_CODEGEN void jl_get_llvmf_defn_impl(jl_llvmf_dump_t* dump, jl_method_instance_t *mi, size_t world, char getwrapper, char optimize, const jl_cgparams_t params) { if (jl_is_method(mi->def.method) && mi->def.method->source == NULL && - mi->def.method->generator == NULL) { + mi->def.method->generator == NULL && !mi->def.method->is_for_opaque_closure) { // not a generic function dump->F = NULL; return; @@ -1981,34 +1971,23 @@ void jl_get_llvmf_defn_impl(jl_llvmf_dump_t* dump, jl_method_instance_t *mi, siz jl_code_info_t *src = NULL; jl_code_instance_t *codeinst = NULL; JL_GC_PUSH3(&src, &jlrettype, &codeinst); - if (jl_is_method(mi->def.method) && mi->def.method->source != NULL && mi->def.method->source != jl_nothing && jl_ir_flag_inferred(mi->def.method->source)) { - // uninferred opaque closure - src = (jl_code_info_t*)mi->def.method->source; - if (src && !jl_is_code_info(src)) - src = jl_uncompress_ir(mi->def.method, NULL, (jl_value_t*)src); + jl_value_t *ci = params.lookup(mi, world, world); + if (ci && ci != jl_nothing) { + codeinst = (jl_code_instance_t*)ci; + src = (jl_code_info_t*)jl_atomic_load_relaxed(&codeinst->inferred); } - else { - jl_value_t *ci = params.lookup(mi, world, world); - if (ci != jl_nothing) { - codeinst = (jl_code_instance_t*)ci; + if (!src || (jl_value_t*)src == jl_nothing) { + codeinst = jl_type_infer(mi, world, 0, SOURCE_MODE_FORCE_SOURCE); + if (codeinst) { src = (jl_code_info_t*)jl_atomic_load_relaxed(&codeinst->inferred); - if ((jl_value_t*)src != jl_nothing && !jl_is_code_info(src) && jl_is_method(mi->def.method)) - src = jl_uncompress_ir(mi->def.method, codeinst, (jl_value_t*)src); - jlrettype = codeinst->rettype; - codeinst = NULL; // not needed outside of this branch - } - if (!src || (jl_value_t*)src == jl_nothing) { - src = jl_type_infer(mi, world, 0); - if (src) - jlrettype = src->rettype; - else if (jl_is_method(mi->def.method)) { - src = mi->def.method->generator ? 
jl_code_for_staged(mi, world) : (jl_code_info_t*)mi->def.method->source;
-                if (src && (jl_value_t*)src != jl_nothing && !jl_is_code_info(src) && jl_is_method(mi->def.method))
-                    src = jl_uncompress_ir(mi->def.method, NULL, (jl_value_t*)src);
-            }
-            // TODO: use mi->uninferred
         }
     }
+    if (src) {
+        if ((jl_value_t*)src != jl_nothing && !jl_is_code_info(src) && jl_is_method(mi->def.method))
+            src = jl_uncompress_ir(mi->def.method, (jl_value_t*)src);
+        jlrettype = codeinst->rettype;
+    }
+    codeinst = NULL; // not needed outside of this branch
 
     // emit this function into a new llvm module
     if (src && jl_is_code_info(src)) {
@@ -2023,7 +2002,7 @@ void jl_get_llvmf_defn_impl(jl_llvmf_dump_t* dump, jl_method_instance_t *mi, siz
             return std::make_pair(M.getDataLayout(), Triple(M.getTargetTriple()));
         });
         jl_codegen_params_t output(*ctx, std::move(target_info.first), std::move(target_info.second));
-        output.world = world;
+        output.min_world = output.max_world = world;
         output.params = &params;
         output.imaging_mode = imaging_default();
         // This would be nice, but currently it causes some assembly regressions that make printed output
diff --git a/src/ast.c b/src/ast.c
index 8816c905ee1011..bd3460cbe7fd28 100644
--- a/src/ast.c
+++ b/src/ast.c
@@ -945,11 +945,6 @@ JL_DLLEXPORT jl_value_t *jl_copy_ast(jl_value_t *expr)
             new_ci->ssaflags = jl_array_copy(new_ci->ssaflags);
             jl_gc_wb(new_ci, new_ci->ssaflags);
 
-            if (new_ci->edges != jl_nothing) {
-                new_ci->edges = (jl_value_t*)jl_array_copy((jl_array_t*)new_ci->edges);
-                jl_gc_wb(new_ci, new_ci->edges);
-            }
-
             if (jl_is_array(new_ci->ssavaluetypes)) {
                 new_ci->ssavaluetypes = (jl_value_t*)jl_array_copy((jl_array_t*)new_ci->ssavaluetypes);
                 jl_gc_wb(new_ci, new_ci->ssavaluetypes);
diff --git a/src/codegen-stubs.c b/src/codegen-stubs.c
index 26b8226b6e4be1..dda9bbee184a08 100644
--- a/src/codegen-stubs.c
+++ b/src/codegen-stubs.c
@@ -38,17 +38,16 @@ JL_DLLEXPORT void jl_register_fptrs_fallback(uint64_t image_base, const struct _
     (void)image_base; (void)fptrs; (void)linfos; (void)n;
 }
 
-JL_DLLEXPORT jl_code_instance_t *jl_generate_fptr_fallback(jl_method_instance_t *mi JL_PROPAGATES_ROOT, size_t world)
-{
-    return NULL;
-}
-
 JL_DLLEXPORT void jl_generate_fptr_for_unspecialized_fallback(jl_code_instance_t *unspec)
 {
     jl_atomic_store_release(&unspec->invoke, &jl_fptr_interpret_call);
 }
 
-JL_DLLEXPORT void jl_generate_fptr_for_oc_wrapper_fallback(jl_code_instance_t *unspec) UNAVAILABLE
+JL_DLLEXPORT void jl_compile_codeinst_fallback(jl_code_instance_t *unspec)
+{
+    // Do nothing. The caller will notice that we failed to provide an ->invoke and trigger
+    // appropriate fallbacks.
+} JL_DLLEXPORT uint32_t jl_get_LLVM_VERSION_fallback(void) { diff --git a/src/codegen.cpp b/src/codegen.cpp index c91e70b5e508ea..ceb2915a747dab 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -1857,7 +1857,8 @@ class jl_codectx_t { jl_value_t *rettype = NULL; jl_code_info_t *source = NULL; jl_array_t *code = NULL; - size_t world = 0; + size_t min_world = 0; + size_t max_world = -1; const char *name = NULL; StringRef file{}; ssize_t *line = NULL; @@ -1884,7 +1885,8 @@ class jl_codectx_t { : builder(llvmctx), emission_context(params), call_targets(), - world(params.world), + min_world(params.min_world), + max_world(params.max_world), use_cache(params.cache), external_linkage(params.external_linkage), params(params.params) { } @@ -4665,7 +4667,7 @@ static jl_cgval_t emit_invoke(jl_codectx_t &ctx, const jl_cgval_t &lival, const } } else { - jl_value_t *ci = ctx.params->lookup(mi, ctx.world, ctx.world); // TODO: need to use the right pair world here + jl_value_t *ci = ctx.params->lookup(mi, ctx.min_world, ctx.max_world); if (ci != jl_nothing) { jl_code_instance_t *codeinst = (jl_code_instance_t*)ci; auto invoke = jl_atomic_load_acquire(&codeinst->invoke); @@ -5666,7 +5668,7 @@ static std::pair get_oc_function(jl_codectx_t &ctx, jl_met sigtype = jl_apply_tuple_type_v(jl_svec_data(sig_args), nsig); jl_method_instance_t *mi = jl_specializations_get_linfo(closure_method, sigtype, jl_emptysvec); - jl_code_instance_t *ci = (jl_code_instance_t*)jl_rettype_inferred_addr(mi, ctx.world, ctx.world); + jl_code_instance_t *ci = (jl_code_instance_t*)jl_rettype_inferred_addr(mi, ctx.min_world, ctx.max_world); if (ci == NULL || (jl_value_t*)ci == jl_nothing) { JL_GC_POP(); @@ -5682,7 +5684,7 @@ static std::pair get_oc_function(jl_codectx_t &ctx, jl_met if (it == ctx.emission_context.compiled_functions.end()) { ++EmittedOpaqueClosureFunctions; - jl_code_info_t *ir = jl_uncompress_ir(closure_method, ci, (jl_value_t*)inferred); + jl_code_info_t *ir = jl_uncompress_ir(closure_method, (jl_value_t*)inferred); JL_GC_PUSH1(&ir); // TODO: Emit this inline and outline it late using LLVM's coroutine support. orc::ThreadSafeModule closure_m = jl_create_ts_module( @@ -6478,7 +6480,7 @@ static Function* gen_cfun_wrapper( jl_codectx_t ctx(M->getContext(), params); ctx.f = cw; - ctx.world = world; + ctx.min_world = ctx.max_world = world; ctx.name = name; ctx.funcName = name; @@ -7156,7 +7158,7 @@ static Function *gen_invoke_wrapper(jl_method_instance_t *lam, jl_value_t *jlret ctx.f = w; ctx.linfo = lam; ctx.rettype = jlretty; - ctx.world = 0; + ctx.min_world = ctx.max_world = 0; BasicBlock *b0 = BasicBlock::Create(ctx.builder.getContext(), "top", w); ctx.builder.SetInsertPoint(b0); @@ -7623,8 +7625,6 @@ static jl_llvm_functions_t bool specsig, needsparams; std::tie(specsig, needsparams) = uses_specsig(lam, jlrettype, params.params->prefer_specsig); - if (!src->inferred) - specsig = false; // step 3. 
some variable analysis size_t i; @@ -7659,7 +7659,7 @@ static jl_llvm_functions_t jl_varinfo_t &varinfo = ctx.slots[i]; uint8_t flags = jl_array_uint8_ref(src->slotflags, i); varinfo.isSA = (jl_vinfo_sa(flags) != 0) || varinfo.isArgument; - varinfo.usedUndef = (jl_vinfo_usedundef(flags) != 0) || (!varinfo.isArgument && !src->inferred); + varinfo.usedUndef = (jl_vinfo_usedundef(flags) != 0) || !varinfo.isArgument; if (!varinfo.isArgument) { varinfo.value = mark_julia_type(ctx, (Value*)NULL, false, (jl_value_t*)jl_any_type); } @@ -9236,7 +9236,7 @@ jl_llvm_functions_t jl_emit_codeinst( return jl_emit_oc_wrapper(m, params, codeinst->def, codeinst->rettype); } if (src && (jl_value_t*)src != jl_nothing && jl_is_method(def)) - src = jl_uncompress_ir(def, codeinst, (jl_value_t*)src); + src = jl_uncompress_ir(def, (jl_value_t*)src); if (!src || !jl_is_code_info(src)) { JL_GC_POP(); m = orc::ThreadSafeModule(); @@ -9264,7 +9264,7 @@ jl_llvm_functions_t jl_emit_codeinst( jl_add_code_in_flight(f, codeinst, DL); } - if (params.world) {// don't alter `inferred` when the code is not directly being used + if (params.min_world) {// don't alter `inferred` when the code is not directly being used jl_value_t *inferred = jl_atomic_load_relaxed(&codeinst->inferred); // don't change inferred state if (inferred) { @@ -9289,6 +9289,7 @@ jl_llvm_functions_t jl_emit_codeinst( // because we already emitted LLVM code from it and the native // Julia-level optimization will never need to see it else if (jl_is_method(def) && // don't delete toplevel code + def->source != NULL && // don't delete code from optimized opaque closures that can't be reconstructed inferred != jl_nothing && // and there is something to delete (test this before calling jl_ir_inlining_cost) !effects_foldable(codeinst->ipo_purity_bits) && // don't delete code we may want for irinterp ((jl_ir_inlining_cost(inferred) == UINT16_MAX) || // don't delete inlineable code @@ -9350,17 +9351,9 @@ void jl_compile_workqueue( // method body. 
See #34993 if (policy != CompilationPolicy::Default && jl_atomic_load_relaxed(&codeinst->inferred) == jl_nothing) { - src = jl_type_infer(codeinst->def, jl_atomic_load_acquire(&jl_world_counter), 0); - if (src) { - orc::ThreadSafeModule result_m = - jl_create_ts_module(name_from_method_instance(codeinst->def), - params.tsctx, params.DL, params.TargetTriple); - auto decls = jl_emit_code(result_m, codeinst->def, src, src->rettype, params); - if (result_m) - it = params.compiled_functions.insert(std::make_pair(codeinst, std::make_pair(std::move(result_m), std::move(decls)))).first; - } + codeinst = jl_type_infer(codeinst->def, jl_atomic_load_acquire(&jl_world_counter), 0, SOURCE_MODE_FORCE_SOURCE); } - else { + if (codeinst) { orc::ThreadSafeModule result_m = jl_create_ts_module(name_from_method_instance(codeinst->def), params.tsctx, params.DL, params.TargetTriple); diff --git a/src/gf.c b/src/gf.c index efe624f518a2b6..55e6523f4ac0c2 100644 --- a/src/gf.c +++ b/src/gf.c @@ -135,7 +135,7 @@ static int speccache_eq(size_t idx, const void *ty, jl_value_t *data, uint_t hv) // get or create the MethodInstance for a specialization static jl_method_instance_t *jl_specializations_get_linfo_(jl_method_t *m JL_PROPAGATES_ROOT, jl_value_t *type, jl_svec_t *sparams, jl_method_instance_t *mi_insert) { - if (m->sig == (jl_value_t*)jl_anytuple_type && jl_atomic_load_relaxed(&m->unspecialized) != NULL && m != jl_opaque_closure_method) + if (m->sig == (jl_value_t*)jl_anytuple_type && jl_atomic_load_relaxed(&m->unspecialized) != NULL && m != jl_opaque_closure_method && !m->is_for_opaque_closure) return jl_atomic_load_relaxed(&m->unspecialized); // handle builtin methods jl_value_t *ut = jl_is_unionall(type) ? jl_unwrap_unionall(type) : type; JL_TYPECHK(specializations, datatype, ut); @@ -336,7 +336,7 @@ jl_datatype_t *jl_mk_builtin_func(jl_datatype_t *dt, const char *name, jl_fptr_a // returns the inferred source, and may cache the result in mi // if successful, also updates the mi argument to describe the validity of this src // if inference doesn't occur (or can't finish), returns NULL instead -jl_code_info_t *jl_type_infer(jl_method_instance_t *mi, size_t world, int force) +jl_code_instance_t *jl_type_infer(jl_method_instance_t *mi, size_t world, int force, uint8_t source_mode) { if (jl_typeinf_func == NULL) return NULL; @@ -350,16 +350,17 @@ jl_code_info_t *jl_type_infer(jl_method_instance_t *mi, size_t world, int force) if ((ct->reentrant_timing & 0b1111) >= 0b110) return NULL; - jl_code_info_t *src = NULL; + jl_code_instance_t *ci = NULL; #ifdef ENABLE_INFERENCE if (mi->inInference && !force) return NULL; JL_TIMING(INFERENCE, INFERENCE); jl_value_t **fargs; - JL_GC_PUSHARGS(fargs, 3); + JL_GC_PUSHARGS(fargs, 4); fargs[0] = (jl_value_t*)jl_typeinf_func; fargs[1] = (jl_value_t*)mi; fargs[2] = jl_box_ulong(world); + fargs[3] = jl_box_uint8(source_mode); jl_timing_show_method_instance(mi, JL_TIMING_DEFAULT_BLOCK); #ifdef TRACE_INFERENCE @@ -385,7 +386,7 @@ jl_code_info_t *jl_type_infer(jl_method_instance_t *mi, size_t world, int force) // allocate another bit for the counter. 
ct->reentrant_timing += 0b10; JL_TRY { - src = (jl_code_info_t*)jl_apply(fargs, 3); + ci = (jl_code_instance_t*)jl_apply(fargs, 4); } JL_CATCH { jl_value_t *e = jl_current_exception(); @@ -402,7 +403,7 @@ jl_code_info_t *jl_type_infer(jl_method_instance_t *mi, size_t world, int force) jl_printf((JL_STREAM*)STDERR_FILENO, "\n"); jlbacktrace(); // written to STDERR_FILENO } - src = NULL; + ci = NULL; #ifndef JL_NDEBUG abort(); #endif @@ -415,13 +416,13 @@ jl_code_info_t *jl_type_infer(jl_method_instance_t *mi, size_t world, int force) #endif errno = last_errno; - if (src && !jl_is_code_info(src)) { - src = NULL; + if (ci && !jl_is_code_instance(ci)) { + ci = NULL; } JL_GC_POP(); #endif - return src; + return ci; } JL_DLLEXPORT jl_value_t *jl_call_in_typeinf_world(jl_value_t **args, int nargs) @@ -434,21 +435,6 @@ JL_DLLEXPORT jl_value_t *jl_call_in_typeinf_world(jl_value_t **args, int nargs) return ret; } -JL_DLLEXPORT jl_value_t *jl_rettype_inferred(jl_method_instance_t *mi, size_t min_world, size_t max_world) JL_NOTSAFEPOINT -{ - jl_code_instance_t *codeinst = jl_atomic_load_relaxed(&mi->cache); - while (codeinst) { - if (jl_atomic_load_relaxed(&codeinst->min_world) <= min_world && max_world <= jl_atomic_load_relaxed(&codeinst->max_world)) { - jl_value_t *code = jl_atomic_load_relaxed(&codeinst->inferred); - if (code && (code == jl_nothing || jl_ir_flag_inferred(code))) - return (jl_value_t*)codeinst; - } - codeinst = jl_atomic_load_relaxed(&codeinst->next); - } - return (jl_value_t*)jl_nothing; -} -JL_DLLEXPORT jl_value_t *(*const jl_rettype_inferred_addr)(jl_method_instance_t *mi, size_t min_world, size_t max_world) JL_NOTSAFEPOINT = jl_rettype_inferred; - JL_DLLEXPORT jl_code_instance_t *jl_get_method_inferred( jl_method_instance_t *mi JL_PROPAGATES_ROOT, jl_value_t *rettype, @@ -470,16 +456,6 @@ JL_DLLEXPORT jl_code_instance_t *jl_get_method_inferred( return codeinst; } -JL_DLLEXPORT jl_code_instance_t *jl_get_codeinst_for_src( - jl_method_instance_t *mi JL_PROPAGATES_ROOT, jl_code_info_t *src) -{ - // TODO: copy backedges from src to mi - size_t max_world = src->max_world; - if (max_world >= jl_atomic_load_acquire(&jl_world_counter)) - max_world = ~(size_t)0; - return jl_get_method_inferred(mi, src->rettype, src->min_world, max_world); -} - JL_DLLEXPORT jl_code_instance_t *jl_new_codeinst( jl_method_instance_t *mi, jl_value_t *rettype, jl_value_t *exctype, jl_value_t *inferred_const, jl_value_t *inferred, @@ -676,7 +652,7 @@ JL_DLLEXPORT void jl_set_typeinf_func(jl_value_t *f) for (i = 0, l = jl_array_nrows(unspec); i < l; i++) { jl_method_instance_t *mi = (jl_method_instance_t*)jl_array_ptr_ref(unspec, i); if (jl_rettype_inferred(mi, world, world) == jl_nothing) - jl_type_infer(mi, world, 1); + jl_type_infer(mi, world, 1, SOURCE_MODE_NOT_REQUIRED); } JL_GC_POP(); } @@ -2380,6 +2356,21 @@ jl_method_instance_t *jl_get_unspecialized(jl_method_t *def JL_PROPAGATES_ROOT) return unspec; } +JL_DLLEXPORT jl_value_t *jl_rettype_inferred(jl_method_instance_t *mi, size_t min_world, size_t max_world) JL_NOTSAFEPOINT +{ + jl_code_instance_t *codeinst = jl_atomic_load_relaxed(&mi->cache); + while (codeinst) { + if (jl_atomic_load_relaxed(&codeinst->min_world) <= min_world && max_world <= jl_atomic_load_relaxed(&codeinst->max_world)) { + jl_value_t *code = jl_atomic_load_relaxed(&codeinst->inferred); + if (code) + return (jl_value_t*)codeinst; + } + codeinst = jl_atomic_load_relaxed(&codeinst->next); + } + return (jl_value_t*)jl_nothing; +} +JL_DLLEXPORT jl_value_t *(*const 
jl_rettype_inferred_addr)(jl_method_instance_t *mi, size_t min_world, size_t max_world) JL_NOTSAFEPOINT = jl_rettype_inferred;
+
 
 jl_code_instance_t *jl_method_compiled(jl_method_instance_t *mi, size_t world)
 {
@@ -2394,6 +2385,20 @@ jl_code_instance_t *jl_method_compiled(jl_method_instance_t *mi, size_t world)
     return NULL;
 }
 
+jl_code_instance_t *jl_method_inferred_with_source(jl_method_instance_t *mi JL_PROPAGATES_ROOT, size_t world)
+{
+    jl_code_instance_t *codeinst = jl_atomic_load_relaxed(&mi->cache);
+    while (codeinst) {
+        if (jl_atomic_load_relaxed(&codeinst->min_world) <= world && world <= jl_atomic_load_relaxed(&codeinst->max_world)) {
+            jl_value_t *code = jl_atomic_load_relaxed(&codeinst->inferred);
+            if (code && code != jl_nothing)
+                return codeinst;
+        }
+        codeinst = jl_atomic_load_relaxed(&codeinst->next);
+    }
+    return NULL;
+}
+
 jl_mutex_t precomp_statement_out_lock;
 
 static void record_precompile_statement(jl_method_instance_t *mi)
@@ -2542,7 +2547,41 @@ jl_code_instance_t *jl_compile_method_internal(jl_method_instance_t *mi, size_t
         }
     }
 
-    codeinst = jl_generate_fptr(mi, world);
+    // Ok, compilation is enabled. We'll need to try to compile something (probably).
+    // Try to find a codeinst we have already inferred (e.g. while we were compiling
+    // something else).
+    codeinst = jl_method_inferred_with_source(mi, world);
+
+    // Everything from here on is considered (user-facing) compile time
+    uint64_t start = jl_typeinf_timing_begin();
+    int is_recompile = jl_atomic_load_relaxed(&mi->cache) != NULL;
+
+    // This codeinst hasn't been previously inferred; do that now
+    if (!codeinst) {
+        // Don't bother inferring toplevel thunks or macros - the performance cost of inference is likely
+        // to significantly exceed the actual runtime.
+        int should_skip_inference = !jl_is_method(mi->def.method) || jl_symbol_name(mi->def.method->name)[0] == '@';
+
+        if (!should_skip_inference) {
+            codeinst = jl_type_infer(mi, world, 0, SOURCE_MODE_ABI);
+        }
+    }
+
+    if (codeinst) {
+        if (jl_atomic_load_acquire(&codeinst->invoke) != NULL) {
+            jl_typeinf_timing_end(start, is_recompile);
+            // Already compiled - e.g. constabi, or compiled by a different thread while we were waiting.
+            return codeinst;
+        }
+
+        jl_compile_codeinst(codeinst);
+
+        if (jl_atomic_load_relaxed(&codeinst->invoke) == NULL) {
+            // Something went wrong. Bail to the fallback path.
+ codeinst = NULL; + } + } if (!codeinst) { jl_method_instance_t *unspec = jl_get_unspecialized_from_mi(mi); jl_code_instance_t *ucache = jl_get_method_inferred(unspec, (jl_value_t*)jl_any_type, 1, ~(size_t)0); @@ -2560,6 +2599,7 @@ jl_code_instance_t *jl_compile_method_internal(jl_method_instance_t *mi, size_t if (ucache_invoke != jl_fptr_sparam && ucache_invoke != jl_fptr_interpret_call) { // only these care about the exact specTypes, otherwise we can use it directly + jl_typeinf_timing_end(start, is_recompile); return ucache; } codeinst = jl_new_codeinst(mi, (jl_value_t*)jl_any_type, (jl_value_t*)jl_any_type, NULL, NULL, @@ -2583,6 +2623,7 @@ jl_code_instance_t *jl_compile_method_internal(jl_method_instance_t *mi, size_t record_precompile_statement(mi); } jl_atomic_store_relaxed(&codeinst->precompile, 1); + jl_typeinf_timing_end(start, is_recompile); return codeinst; } @@ -2814,7 +2855,7 @@ static void _generate_from_hint(jl_method_instance_t *mi, size_t world) { jl_value_t *codeinst = jl_rettype_inferred(mi, world, world); if (codeinst == jl_nothing) { - (void)jl_type_infer(mi, world, 1); + (void)jl_type_infer(mi, world, 1, SOURCE_MODE_NOT_REQUIRED); codeinst = jl_rettype_inferred(mi, world, world); } if (codeinst != jl_nothing) { @@ -2855,10 +2896,10 @@ JL_DLLEXPORT void jl_compile_method_instance(jl_method_instance_t *mi, jl_tuplet JL_GC_POP(); jl_atomic_store_relaxed(&mi2->precompiled, 1); if (jl_rettype_inferred(mi2, world, world) == jl_nothing) - (void)jl_type_infer(mi2, world, 1); + (void)jl_type_infer(mi2, world, 1, SOURCE_MODE_NOT_REQUIRED); if (jl_typeinf_func && jl_atomic_load_relaxed(&mi->def.method->primary_world) <= tworld) { if (jl_rettype_inferred(mi2, tworld, tworld) == jl_nothing) - (void)jl_type_infer(mi2, tworld, 1); + (void)jl_type_infer(mi2, tworld, 1, SOURCE_MODE_NOT_REQUIRED); } } } @@ -4021,7 +4062,7 @@ JL_DLLEXPORT uint64_t jl_typeinf_timing_begin(void) return jl_hrtime(); } -JL_DLLEXPORT void jl_typeinf_timing_end(uint64_t start) +JL_DLLEXPORT void jl_typeinf_timing_end(uint64_t start, int is_recompile) { if (!start) return; @@ -4030,6 +4071,9 @@ JL_DLLEXPORT void jl_typeinf_timing_end(uint64_t start) if (jl_atomic_load_relaxed(&jl_measure_compile_time_enabled)) { uint64_t inftime = jl_hrtime() - start; jl_atomic_fetch_add_relaxed(&jl_cumulative_compile_time, inftime); + if (is_recompile) { + jl_atomic_fetch_add_relaxed(&jl_cumulative_recompile_time, inftime); + } } } diff --git a/src/interpreter.c b/src/interpreter.c index 5102d1417c9399..ffc8c6d8c12ea0 100644 --- a/src/interpreter.c +++ b/src/interpreter.c @@ -696,7 +696,7 @@ jl_code_info_t *jl_code_for_interpreter(jl_method_instance_t *mi, size_t world) } if (src && (jl_value_t*)src != jl_nothing) { JL_GC_PUSH1(&src); - src = jl_uncompress_ir(mi->def.method, NULL, (jl_value_t*)src); + src = jl_uncompress_ir(mi->def.method, (jl_value_t*)src); jl_atomic_store_release(&mi->uninferred, (jl_value_t*)src); jl_gc_wb(mi, src); JL_GC_POP(); @@ -758,7 +758,7 @@ JL_DLLEXPORT const jl_callptr_t jl_fptr_interpret_call_addr = &jl_fptr_interpret jl_value_t *jl_interpret_opaque_closure(jl_opaque_closure_t *oc, jl_value_t **args, size_t nargs) { jl_method_t *source = oc->source; - jl_code_info_t *code = jl_uncompress_ir(source, NULL, (jl_value_t*)source->source); + jl_code_info_t *code = jl_uncompress_ir(source, (jl_value_t*)source->source); interpreter_state *s; unsigned nroots = jl_source_nslots(code) + jl_source_nssavalues(code) + 2; jl_task_t *ct = jl_current_task; diff --git a/src/ircode.c b/src/ircode.c index 
09f039db76424b..8765bd43a2da47 100644 --- a/src/ircode.c +++ b/src/ircode.c @@ -457,11 +457,10 @@ static void jl_encode_value_(jl_ircode_state *s, jl_value_t *v, int as_literal) } } -static jl_code_info_flags_t code_info_flags(uint8_t inferred, uint8_t propagate_inbounds, uint8_t has_fcall, +static jl_code_info_flags_t code_info_flags(uint8_t propagate_inbounds, uint8_t has_fcall, uint8_t nospecializeinfer, uint8_t inlining, uint8_t constprop) { jl_code_info_flags_t flags; - flags.bits.inferred = inferred; flags.bits.propagate_inbounds = propagate_inbounds; flags.bits.has_fcall = has_fcall; flags.bits.nospecializeinfer = nospecializeinfer; @@ -824,7 +823,7 @@ JL_DLLEXPORT jl_string_t *jl_compress_ir(jl_method_t *m, jl_code_info_t *code) 1 }; - jl_code_info_flags_t flags = code_info_flags(code->inferred, code->propagate_inbounds, code->has_fcall, + jl_code_info_flags_t flags = code_info_flags(code->propagate_inbounds, code->has_fcall, code->nospecializeinfer, code->inlining, code->constprop); write_uint8(s.s, flags.packed); static_assert(sizeof(flags.packed) == IR_DATASIZE_FLAGS, "ir_datasize_flags is mismatched with the actual size"); @@ -899,7 +898,7 @@ JL_DLLEXPORT jl_string_t *jl_compress_ir(jl_method_t *m, jl_code_info_t *code) return v; } -JL_DLLEXPORT jl_code_info_t *jl_uncompress_ir(jl_method_t *m, jl_code_instance_t *metadata, jl_string_t *data) +JL_DLLEXPORT jl_code_info_t *jl_uncompress_ir(jl_method_t *m, jl_string_t *data) { if (jl_is_code_info(data)) return (jl_code_info_t*)data; @@ -925,7 +924,6 @@ JL_DLLEXPORT jl_code_info_t *jl_uncompress_ir(jl_method_t *m, jl_code_instance_t flags.packed = read_uint8(s.s); code->inlining = flags.bits.inlining; code->constprop = flags.bits.constprop; - code->inferred = flags.bits.inferred; code->propagate_inbounds = flags.bits.propagate_inbounds; code->has_fcall = flags.bits.has_fcall; code->nospecializeinfer = flags.bits.nospecializeinfer; @@ -975,27 +973,10 @@ JL_DLLEXPORT jl_code_info_t *jl_uncompress_ir(jl_method_t *m, jl_code_instance_t jl_gc_enable(en); JL_UNLOCK(&m->writelock); // Might GC JL_GC_POP(); - if (metadata) { - code->min_world = jl_atomic_load_relaxed(&metadata->min_world); - // n.b. 
this should perhaps be capped to jl_world_counter max here, since we don't have backedges on it after return - code->max_world = jl_atomic_load_relaxed(&metadata->max_world); - code->rettype = metadata->rettype; - code->parent = metadata->def; - } return code; } -JL_DLLEXPORT uint8_t jl_ir_flag_inferred(jl_string_t *data) -{ - if (jl_is_code_info(data)) - return ((jl_code_info_t*)data)->inferred; - assert(jl_is_string(data)); - jl_code_info_flags_t flags; - flags.packed = jl_string_data(data)[ir_offset_flags]; - return flags.bits.inferred; -} - JL_DLLEXPORT uint8_t jl_ir_flag_inlining(jl_string_t *data) { if (jl_is_code_info(data)) diff --git a/src/jitlayers.cpp b/src/jitlayers.cpp index cb6267866eac17..cd3188da7baa5d 100644 --- a/src/jitlayers.cpp +++ b/src/jitlayers.cpp @@ -184,9 +184,7 @@ static orc::ThreadSafeModule jl_get_globals_module(orc::ThreadSafeContext &ctx, static jl_callptr_t _jl_compile_codeinst( jl_code_instance_t *codeinst, jl_code_info_t *src, - size_t world, - orc::ThreadSafeContext context, - bool is_recompile) + orc::ThreadSafeContext context) { // caller must hold codegen_lock // and have disabled finalizers @@ -196,23 +194,14 @@ static jl_callptr_t _jl_compile_codeinst( start_time = jl_hrtime(); assert(jl_is_code_instance(codeinst)); -#ifndef NDEBUG - size_t max_world = jl_atomic_load_relaxed(&codeinst->max_world); - assert(jl_atomic_load_relaxed(&codeinst->min_world) <= world && (max_world >= world || max_world == 0) && - "invalid world for method-instance"); -#endif JL_TIMING(CODEINST_COMPILE, CODEINST_COMPILE); -#ifdef USE_TRACY - if (is_recompile) { - TracyCZoneColor(JL_TIMING_DEFAULT_BLOCK->tracy_ctx, 0xFFA500); - } -#endif jl_callptr_t fptr = NULL; // emit the code in LLVM IR form jl_codegen_params_t params(std::move(context), jl_ExecutionEngine->getDataLayout(), jl_ExecutionEngine->getTargetTriple()); // Locks the context params.cache = true; - params.world = world; + params.min_world = codeinst->min_world; + params.max_world = codeinst->max_world; params.imaging_mode = imaging_default(); params.debug_level = jl_options.debug_level; { @@ -468,92 +457,18 @@ void jl_extern_c_impl(jl_value_t *declrt, jl_tupletype_t *sigt) jl_error("@ccallable was already defined for this method name"); } -// this compiles li and emits fptr extern "C" JL_DLLEXPORT_CODEGEN -jl_code_instance_t *jl_generate_fptr_impl(jl_method_instance_t *mi JL_PROPAGATES_ROOT, size_t world) +void jl_compile_codeinst_impl(jl_code_instance_t *ci) { - auto ct = jl_current_task; - bool timed = (ct->reentrant_timing & 1) == 0; - if (timed) - ct->reentrant_timing |= 1; - uint64_t compiler_start_time = 0; - uint8_t measure_compile_time_enabled = jl_atomic_load_relaxed(&jl_measure_compile_time_enabled); - bool is_recompile = false; - if (measure_compile_time_enabled) - compiler_start_time = jl_hrtime(); - // if we don't have any decls already, try to generate it now - jl_code_info_t *src = NULL; - jl_code_instance_t *codeinst = NULL; - JL_GC_PUSH2(&src, &codeinst); - JL_LOCK(&jl_codegen_lock); // also disables finalizers, to prevent any unexpected recursion - jl_value_t *ci = jl_rettype_inferred_addr(mi, world, world); - if (ci != jl_nothing) - codeinst = (jl_code_instance_t*)ci; - if (codeinst) { - src = (jl_code_info_t*)jl_atomic_load_relaxed(&codeinst->inferred); - if ((jl_value_t*)src == jl_nothing) - src = NULL; - else if (jl_is_method(mi->def.method)) - src = jl_uncompress_ir(mi->def.method, codeinst, (jl_value_t*)src); - } - else { - // identify whether this is an invalidated method that is being 
recompiled - is_recompile = jl_atomic_load_relaxed(&mi->cache) != NULL; - } - if (src == NULL && jl_is_method(mi->def.method) && - jl_symbol_name(mi->def.method->name)[0] != '@') { - if (mi->def.method->source != jl_nothing) { - // If the caller didn't provide the source and IR is available, - // see if it is inferred, or try to infer it for ourself. - // (but don't bother with typeinf on macros or toplevel thunks) - src = jl_type_infer(mi, world, 0); - codeinst = nullptr; - } - } - jl_code_instance_t *compiled = jl_method_compiled(mi, world); - if (compiled) { - codeinst = compiled; - } - else if (src && jl_is_code_info(src)) { - if (!codeinst) { - codeinst = jl_get_codeinst_for_src(mi, src); - if (src->inferred) { - jl_value_t *null = nullptr; - jl_atomic_cmpswap_relaxed(&codeinst->inferred, &null, jl_nothing); - } - } - ++SpecFPtrCount; - _jl_compile_codeinst(codeinst, src, world, *jl_ExecutionEngine->getContext(), is_recompile); - if (jl_atomic_load_relaxed(&codeinst->invoke) == NULL) - codeinst = NULL; - } - else { - codeinst = NULL; - } - JL_UNLOCK(&jl_codegen_lock); - if (timed) { - if (measure_compile_time_enabled) { - uint64_t t_comp = jl_hrtime() - compiler_start_time; - if (is_recompile) { - jl_atomic_fetch_add_relaxed(&jl_cumulative_recompile_time, t_comp); - } - jl_atomic_fetch_add_relaxed(&jl_cumulative_compile_time, t_comp); - } - ct->reentrant_timing &= ~1ull; - } - JL_GC_POP(); - return codeinst; -} - -extern "C" JL_DLLEXPORT_CODEGEN -void jl_generate_fptr_for_oc_wrapper_impl(jl_code_instance_t *oc_wrap) -{ - if (jl_atomic_load_relaxed(&oc_wrap->invoke) != NULL) { + if (jl_atomic_load_relaxed(&ci->invoke) != NULL) { return; } JL_LOCK(&jl_codegen_lock); - if (jl_atomic_load_relaxed(&oc_wrap->invoke) == NULL) { - _jl_compile_codeinst(oc_wrap, NULL, 1, *jl_ExecutionEngine->getContext(), 0); + if (jl_atomic_load_relaxed(&ci->invoke) == NULL) { + ++SpecFPtrCount; + uint64_t start = jl_typeinf_timing_begin(); + _jl_compile_codeinst(ci, NULL, *jl_ExecutionEngine->getContext()); + jl_typeinf_timing_end(start, 0); } JL_UNLOCK(&jl_codegen_lock); // Might GC } @@ -580,7 +495,7 @@ void jl_generate_fptr_for_unspecialized_impl(jl_code_instance_t *unspec) if (jl_is_method(def)) { src = (jl_code_info_t*)def->source; if (src && (jl_value_t*)src != jl_nothing) - src = jl_uncompress_ir(def, NULL, (jl_value_t*)src); + src = jl_uncompress_ir(def, (jl_value_t*)src); } else { src = (jl_code_info_t*)jl_atomic_load_relaxed(&unspec->def->uninferred); @@ -589,7 +504,7 @@ void jl_generate_fptr_for_unspecialized_impl(jl_code_instance_t *unspec) if (src) { assert(jl_is_code_info(src)); ++UnspecFPtrCount; - _jl_compile_codeinst(unspec, src, jl_atomic_load_relaxed(&unspec->min_world), *jl_ExecutionEngine->getContext(), 0); + _jl_compile_codeinst(unspec, src, *jl_ExecutionEngine->getContext()); } jl_callptr_t null = nullptr; // if we hit a codegen bug (or ran into a broken generated function or llvmcall), fall back to the interpreter as a last resort @@ -613,7 +528,7 @@ jl_value_t *jl_dump_method_asm_impl(jl_method_instance_t *mi, size_t world, char emit_mc, char getwrapper, const char* asm_variant, const char *debuginfo, char binary) { // printing via disassembly - jl_code_instance_t *codeinst = jl_generate_fptr(mi, world); + jl_code_instance_t *codeinst = jl_compile_method_internal(mi, world); if (codeinst) { uintptr_t fptr = (uintptr_t)jl_atomic_load_acquire(&codeinst->invoke); if (getwrapper) @@ -634,26 +549,11 @@ jl_value_t *jl_dump_method_asm_impl(jl_method_instance_t *mi, size_t world, 
         JL_LOCK(&jl_codegen_lock); // also disables finalizers, to prevent any unexpected recursion
         specfptr = (uintptr_t)jl_atomic_load_relaxed(&codeinst->specptr.fptr);
         if (specfptr == 0) {
-            jl_code_info_t *src = jl_type_infer(mi, world, 0);
-            JL_GC_PUSH1(&src);
-            jl_method_t *def = mi->def.method;
-            if (jl_is_method(def)) {
-                if (!src) {
-                    // TODO: jl_code_for_staged can throw
-                    src = def->generator ? jl_code_for_staged(mi, world) : (jl_code_info_t*)def->source;
-                }
-                if (src && (jl_value_t*)src != jl_nothing)
-                    src = jl_uncompress_ir(mi->def.method, codeinst, (jl_value_t*)src);
-            }
-            fptr = (uintptr_t)jl_atomic_load_acquire(&codeinst->invoke);
-            specfptr = (uintptr_t)jl_atomic_load_relaxed(&codeinst->specptr.fptr);
-            if (src && jl_is_code_info(src)) {
-                if (fptr == (uintptr_t)jl_fptr_const_return_addr && specfptr == 0) {
-                    fptr = (uintptr_t)_jl_compile_codeinst(codeinst, src, world, *jl_ExecutionEngine->getContext(), 0);
-                    (void)fptr; // silence unused variable warning
-                    specfptr = (uintptr_t)jl_atomic_load_relaxed(&codeinst->specptr.fptr);
-                }
-            }
+            jl_code_instance_t *forced_ci = jl_type_infer(mi, world, 0, SOURCE_MODE_FORCE_SOURCE);
+            JL_GC_PUSH1(&forced_ci);
+            // Force compile of this codeinst even though it already has an ->invoke
+            _jl_compile_codeinst(forced_ci, NULL, *jl_ExecutionEngine->getContext());
+            specfptr = (uintptr_t)jl_atomic_load_relaxed(&forced_ci->specptr.fptr);
             JL_GC_POP();
         }
         JL_UNLOCK(&jl_codegen_lock);
diff --git a/src/jitlayers.h b/src/jitlayers.h
index 3da4c28af66011..8b4baebc6dc02a 100644
--- a/src/jitlayers.h
+++ b/src/jitlayers.h
@@ -247,7 +247,8 @@ struct jl_codegen_params_t {
    std::unique_ptr<Module> _shared_module;
    inline Module &shared_module();
    // inputs
-    size_t world = 0;
+    size_t min_world = 0;
+    size_t max_world = -1;
    const jl_cgparams_t *params = &jl_default_cgparams;
    bool cache = false;
    bool external_linkage = false;
diff --git a/src/jl_exported_funcs.inc b/src/jl_exported_funcs.inc
index f9587d33518a2b..99502bb837b6ac 100644
--- a/src/jl_exported_funcs.inc
+++ b/src/jl_exported_funcs.inc
@@ -271,7 +271,6 @@
     XX(jl_ios_buffer_n) \
     XX(jl_ios_fd) \
     XX(jl_ios_get_nbyte_int) \
-    XX(jl_ir_flag_inferred) \
     XX(jl_ir_flag_has_fcall) \
     XX(jl_ir_flag_inlining) \
     XX(jl_ir_inlining_cost) \
@@ -541,9 +540,8 @@
     YY(jl_init_codegen) \
     YY(jl_getFunctionInfo) \
     YY(jl_register_fptrs) \
-    YY(jl_generate_fptr) \
     YY(jl_generate_fptr_for_unspecialized) \
-    YY(jl_generate_fptr_for_oc_wrapper) \
+    YY(jl_compile_codeinst) \
     YY(jl_compile_extern_c) \
     YY(jl_teardown_codegen) \
     YY(jl_jit_total_bytes) \
diff --git a/src/jltypes.c b/src/jltypes.c
index 84e90303affaa4..a69c7356222c1e 100644
--- a/src/jltypes.c
+++ b/src/jltypes.c
@@ -3141,22 +3141,19 @@ void jl_init_types(void) JL_GC_DISABLED
     jl_code_info_type =
         jl_new_datatype(jl_symbol("CodeInfo"), core,
                         jl_any_type, jl_emptysvec,
-                        jl_perm_symsvec(22,
+                        jl_perm_symsvec(19,
                             "code",
                             "codelocs",
                             "ssavaluetypes",
                             "ssaflags",
-                            "method_for_inference_limit_heuristics",
                             "linetable",
                             "slotnames",
                             "slotflags",
                             "slottypes",
-                            "rettype",
-                            "parent",
+                            "method_for_inference_limit_heuristics",
                             "edges",
                             "min_world",
                             "max_world",
-                            "inferred",
                             "propagate_inbounds",
                             "has_fcall",
                             "nospecializeinfer",
@@ -3164,31 +3161,28 @@ void jl_init_types(void) JL_GC_DISABLED
                             "constprop",
                             "purity",
                             "inlining_cost"),
-                        jl_svec(22,
+                        jl_svec(19,
                             jl_array_any_type,
                             jl_array_int32_type,
                             jl_any_type,
                             jl_array_uint32_type,
                             jl_any_type,
-                            jl_any_type,
                             jl_array_symbol_type,
                             jl_array_uint8_type,
                             jl_any_type,
                             jl_any_type,
                             jl_any_type,
-                            jl_any_type,
                             jl_ulong_type,
                             jl_ulong_type,
                             jl_bool_type,
                             jl_bool_type,
                             jl_bool_type,
-                            jl_bool_type,
                             jl_uint8_type,
                             jl_uint8_type,
                             jl_uint16_type,
                             jl_uint16_type),
                         jl_emptysvec,
-                        0, 1, 22);
+                        0, 1, 19);
 
     jl_method_type =
         jl_new_datatype(jl_symbol("Method"), core,
diff --git a/src/julia.h b/src/julia.h
index 43df87fd452663..f2f4b737ca53cd 100644
--- a/src/julia.h
+++ b/src/julia.h
@@ -289,19 +289,21 @@ typedef struct _jl_code_info_t {
     // 1 << 11 = :inaccessiblemem_or_argmemonly
     // 1 << 12-19 = callsite effects overrides
     // miscellaneous data:
-    jl_value_t *method_for_inference_limit_heuristics; // optional method used during inference
     jl_value_t *linetable; // Table of locations [TODO: make this volatile like slotnames]
     jl_array_t *slotnames; // names of local variables
     jl_array_t *slotflags; // local var bit flags
     // the following are optional transient properties (not preserved by compression--as they typically get stored elsewhere):
     jl_value_t *slottypes; // inferred types of slots
-    jl_value_t *rettype;
-    jl_method_instance_t *parent; // context (optionally, if available, otherwise nothing)
+
+    // These may be used by generated functions to further constrain the resulting inputs.
+    // They are not used by any other part of the system and may be moved elsewhere in the
+    // future.
+    jl_value_t *method_for_inference_limit_heuristics; // optional method used during inference
     jl_value_t *edges; // forward edges to method instances that must be invalidated
     size_t min_world;
     size_t max_world;
+
     // various boolean properties:
-    uint8_t inferred;
     uint8_t propagate_inbounds;
     uint8_t has_fcall;
     uint8_t nospecializeinfer;
@@ -422,7 +424,13 @@ typedef struct _jl_code_instance_t {
     jl_value_t *rettype; // return type for fptr
     jl_value_t *exctype; // thrown type for fptr
     jl_value_t *rettype_const; // inferred constant return value, or null
-    _Atomic(jl_value_t *) inferred; // inferred jl_code_info_t (may be compressed), or jl_nothing, or null
+
+    // Inferred result. When part of the runtime cache, either
+    // - A jl_code_info_t (may be compressed) containing the inferred IR
+    // - jl_nothing, indicating that inference was completed, but the result was
+    //   deleted to save space.
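+    //   (a caller that later needs the IR again, e.g. via SOURCE_MODE_FORCE_SOURCE,
+    //    has to re-run inference to reconstruct it)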
+    // - null, indicating that inference was not yet completed or did not succeed
+    _Atomic(jl_value_t *) inferred;
     //TODO: jl_array_t *edges; // stored information about edges from this object
     //TODO: uint8_t absolute_max; // whether true max world is unknown
@@ -2122,8 +2130,7 @@ JL_DLLEXPORT jl_value_t *jl_copy_ast(jl_value_t *expr JL_MAYBE_UNROOTED);
 
 // IR representation
 JL_DLLEXPORT jl_value_t *jl_compress_ir(jl_method_t *m, jl_code_info_t *code);
-JL_DLLEXPORT jl_code_info_t *jl_uncompress_ir(jl_method_t *m, jl_code_instance_t *metadata, jl_value_t *data);
-JL_DLLEXPORT uint8_t jl_ir_flag_inferred(jl_value_t *data) JL_NOTSAFEPOINT;
+JL_DLLEXPORT jl_code_info_t *jl_uncompress_ir(jl_method_t *m, jl_value_t *data);
 JL_DLLEXPORT uint8_t jl_ir_flag_inlining(jl_value_t *data) JL_NOTSAFEPOINT;
 JL_DLLEXPORT uint8_t jl_ir_flag_has_fcall(jl_value_t *data) JL_NOTSAFEPOINT;
 JL_DLLEXPORT uint16_t jl_ir_inlining_cost(jl_value_t *data) JL_NOTSAFEPOINT;
diff --git a/src/julia_internal.h b/src/julia_internal.h
index 94b7e5609f06d7..88c906550611e4 100644
--- a/src/julia_internal.h
+++ b/src/julia_internal.h
@@ -269,6 +269,9 @@ static inline uint64_t cycleclock(void) JL_NOTSAFEPOINT
 
 #include "timing.h"
 
+extern JL_DLLEXPORT uint64_t jl_typeinf_timing_begin(void);
+extern JL_DLLEXPORT void jl_typeinf_timing_end(uint64_t start, int is_recompile);
+
 // Global *atomic* integers controlling *process-wide* measurement of compilation time.
 extern JL_DLLEXPORT _Atomic(uint8_t) jl_measure_compile_time_enabled;
 extern JL_DLLEXPORT _Atomic(uint64_t) jl_cumulative_compile_time;
@@ -624,7 +627,6 @@ STATIC_INLINE jl_value_t *undefref_check(jl_datatype_t *dt, jl_value_t *v) JL_NO
 // -- helper types -- //
 
 typedef struct {
-    uint8_t inferred:1;
     uint8_t propagate_inbounds:1;
     uint8_t has_fcall:1;
     uint8_t nospecializeinfer:1;
@@ -639,7 +641,10 @@ typedef union {
 
 // -- functions -- //
 
-JL_DLLEXPORT jl_code_info_t *jl_type_infer(jl_method_instance_t *li, size_t world, int force);
+#define SOURCE_MODE_NOT_REQUIRED 0x0
+#define SOURCE_MODE_ABI 0x1
+#define SOURCE_MODE_FORCE_SOURCE 0x2
+JL_DLLEXPORT jl_code_instance_t *jl_type_infer(jl_method_instance_t *li, size_t world, int force, uint8_t source_mode);
 JL_DLLEXPORT jl_code_instance_t *jl_compile_method_internal(jl_method_instance_t *meth JL_PROPAGATES_ROOT, size_t world);
 JL_DLLEXPORT jl_code_instance_t *jl_get_method_inferred(
     jl_method_instance_t *mi JL_PROPAGATES_ROOT, jl_value_t *rettype,
@@ -813,6 +818,8 @@ JL_DLLEXPORT jl_value_t *jl_as_global_root(jl_value_t *val, int insert) JL_GLOBA
 jl_opaque_closure_t *jl_new_opaque_closure(jl_tupletype_t *argt, jl_value_t *rt_lb, jl_value_t *rt_ub,
     jl_value_t *source, jl_value_t **env, size_t nenv, int do_compile);
+jl_method_t *jl_make_opaque_closure_method(jl_module_t *module, jl_value_t *name,
+    int nargs, jl_value_t *functionloc, jl_value_t *uninferred_source, int isva);
 JL_DLLEXPORT int jl_is_valid_oc_argtype(jl_tupletype_t *argt, jl_method_t *source);
 
 // Each tuple can exist in one of 4 Vararg states:
@@ -991,6 +998,7 @@ JL_DLLEXPORT jl_method_instance_t *jl_get_specialization1(jl_tupletype_t *types,
 jl_method_instance_t *jl_get_specialized(jl_method_t *m, jl_value_t *types, jl_svec_t *sp);
 JL_DLLEXPORT jl_value_t *jl_rettype_inferred(jl_method_instance_t *li JL_PROPAGATES_ROOT, size_t min_world, size_t max_world);
 JL_DLLEXPORT jl_code_instance_t *jl_method_compiled(jl_method_instance_t *mi JL_PROPAGATES_ROOT, size_t world) JL_NOTSAFEPOINT;
+JL_DLLEXPORT jl_code_instance_t *jl_method_inferred_with_source(jl_method_instance_t *mi JL_PROPAGATES_ROOT, size_t world) JL_NOTSAFEPOINT;
 JL_DLLEXPORT jl_value_t *jl_methtable_lookup(jl_methtable_t *mt JL_PROPAGATES_ROOT, jl_value_t *type, size_t world);
 JL_DLLEXPORT jl_method_instance_t *jl_specializations_get_linfo(
     jl_method_t *m JL_PROPAGATES_ROOT, jl_value_t *type, jl_svec_t *sparams);
@@ -1691,9 +1699,8 @@ JL_DLLEXPORT uint32_t jl_crc32c(uint32_t crc, const char *buf, size_t len);
 
 #define IR_FLAG_INBOUNDS 0x01
 
-JL_DLLIMPORT jl_code_instance_t *jl_generate_fptr(jl_method_instance_t *mi JL_PROPAGATES_ROOT, size_t world);
 JL_DLLIMPORT void jl_generate_fptr_for_unspecialized(jl_code_instance_t *unspec);
-JL_DLLIMPORT void jl_generate_fptr_for_oc_wrapper(jl_code_instance_t *unspec);
+JL_DLLIMPORT void jl_compile_codeinst(jl_code_instance_t *unspec);
 JL_DLLIMPORT int jl_compile_extern_c(LLVMOrcThreadSafeModuleRef llvmmod, void *params, void *sysimg, jl_value_t *declrt, jl_value_t *sigt);
 typedef struct {
diff --git a/src/method.c b/src/method.c
index 3d3cc4cb7ea4e3..0900eb6838d371 100644
--- a/src/method.c
+++ b/src/method.c
@@ -20,9 +20,6 @@ extern jl_value_t *jl_builtin_tuple;
 jl_methtable_t *jl_kwcall_mt;
 jl_method_t *jl_opaque_closure_method;
 
-jl_method_t *jl_make_opaque_closure_method(jl_module_t *module, jl_value_t *name,
-    int nargs, jl_value_t *functionloc, jl_code_info_t *ci, int isva);
-
 static void check_c_types(const char *where, jl_value_t *rt, jl_value_t *at)
 {
     if (jl_is_svec(rt))
@@ -124,7 +121,7 @@ static jl_value_t *resolve_globals(jl_value_t *expr, jl_module_t *module, jl_sve
         }
         else if (!jl_is_long(oc_nargs)) {
             jl_type_error("opaque_closure_method", (jl_value_t*)jl_long_type, oc_nargs);
         }
-        jl_method_t *m = jl_make_opaque_closure_method(module, name, jl_unbox_long(oc_nargs), functionloc, (jl_code_info_t*)ci, isva);
+        jl_method_t *m = jl_make_opaque_closure_method(module, name, jl_unbox_long(oc_nargs), functionloc, ci, isva);
         return (jl_value_t*)m;
     }
     if (e->head == jl_cfunction_sym) {
@@ -511,15 +508,12 @@ JL_DLLEXPORT jl_code_info_t *jl_new_code_info_uninit(void)
     src->slotflags = NULL;
     src->slotnames = NULL;
     src->slottypes = jl_nothing;
-    src->parent = (jl_method_instance_t*)jl_nothing;
-    src->rettype = (jl_value_t*)jl_any_type;
     src->min_world = 1;
     src->max_world = ~(size_t)0;
-    src->inferred = 0;
+    src->edges = jl_nothing;
     src->propagate_inbounds = 0;
     src->has_fcall = 0;
     src->nospecializeinfer = 0;
-    src->edges = jl_nothing;
     src->constprop = 0;
     src->inlining = 0;
     src->purity.bits = 0;
@@ -910,7 +904,7 @@ void push_edge(jl_array_t *list, jl_value_t *invokesig, jl_method_instance_t *ca
 // method definition ----------------------------------------------------------
 
 jl_method_t *jl_make_opaque_closure_method(jl_module_t *module, jl_value_t *name,
-    int nargs, jl_value_t *functionloc, jl_code_info_t *ci, int isva)
+    int nargs, jl_value_t *functionloc, jl_value_t *uninferred_source, int isva)
 {
     jl_method_t *m = jl_new_method_uninit(module);
     JL_GC_PUSH1(&m);
@@ -929,7 +923,8 @@ jl_method_t *jl_make_opaque_closure_method(jl_module_t *module, jl_value_t *name
     jl_value_t *file = jl_linenode_file(functionloc);
     m->file = jl_is_symbol(file) ? (jl_sym_t*)file : jl_empty_sym;
     m->line = jl_linenode_line(functionloc);
-    jl_method_set_source(m, ci);
+    if (jl_is_code_info(uninferred_source))
+        jl_method_set_source(m, (jl_code_info_t*)uninferred_source);
     JL_GC_POP();
     return m;
 }
diff --git a/src/opaque_closure.c b/src/opaque_closure.c
index 0b0d1052bd4495..2e10ce32850c48 100644
--- a/src/opaque_closure.c
+++ b/src/opaque_closure.c
@@ -107,7 +107,7 @@ static jl_opaque_closure_t *new_opaque_closure(jl_tupletype_t *argt, jl_value_t
         // OC wrapper methods are not world dependent
         ci = jl_get_method_inferred(mi_generic, selected_rt, 1, ~(size_t)0);
         if (!jl_atomic_load_acquire(&ci->invoke))
-            jl_generate_fptr_for_oc_wrapper(ci);
+            jl_compile_codeinst(ci);
         specptr = jl_atomic_load_relaxed(&ci->specptr.fptr);
     }
     jl_opaque_closure_t *oc = (jl_opaque_closure_t*)jl_gc_alloc(ct->ptls, sizeof(jl_opaque_closure_t), oc_type);
@@ -131,20 +131,15 @@ jl_opaque_closure_t *jl_new_opaque_closure(jl_tupletype_t *argt, jl_value_t *rt_
     return oc;
 }
 
-jl_method_t *jl_make_opaque_closure_method(jl_module_t *module, jl_value_t *name,
-    int nargs, jl_value_t *functionloc, jl_code_info_t *ci, int isva);
-
 JL_DLLEXPORT jl_opaque_closure_t *jl_new_opaque_closure_from_code_info(jl_tupletype_t *argt, jl_value_t *rt_lb, jl_value_t *rt_ub,
-    jl_module_t *mod, jl_code_info_t *ci, int lineno, jl_value_t *file, int nargs, int isva, jl_value_t *env, int do_compile)
+    jl_module_t *mod, jl_code_info_t *ci, int lineno, jl_value_t *file, int nargs, int isva, jl_value_t *env, int do_compile, int isinferred)
 {
-    if (!ci->inferred)
-        jl_error("CodeInfo must already be inferred");
     jl_value_t *root = NULL, *sigtype = NULL;
     jl_code_instance_t *inst = NULL;
     JL_GC_PUSH3(&root, &sigtype, &inst);
     root = jl_box_long(lineno);
     root = jl_new_struct(jl_linenumbernode_type, root, file);
-    jl_method_t *meth = jl_make_opaque_closure_method(mod, jl_nothing, nargs, root, ci, isva);
+    jl_method_t *meth = jl_make_opaque_closure_method(mod, jl_nothing, nargs, root, isinferred ? jl_nothing : (jl_value_t*)ci, isva);
     root = (jl_value_t*)meth;
     size_t world = jl_current_task->world_age;
     // these are only legal in the current world since they are not in any tables
diff --git a/src/precompile_utils.c b/src/precompile_utils.c
index 338c62b7e2d72d..37269981eb3d0e 100644
--- a/src/precompile_utils.c
+++ b/src/precompile_utils.c
@@ -186,7 +186,6 @@ static int precompile_enq_specialization_(jl_method_instance_t *mi, void *closur
             jl_value_t *inferred = jl_atomic_load_relaxed(&codeinst->inferred);
             if (inferred &&
                 inferred != jl_nothing &&
-                jl_ir_flag_inferred(inferred) &&
                 (jl_ir_inlining_cost(inferred) == UINT16_MAX)) {
                 do_compile = 1;
             }
diff --git a/src/staticdata.c b/src/staticdata.c
index e7ebcea7bd0619..dbfc55f51aee74 100644
--- a/src/staticdata.c
+++ b/src/staticdata.c
@@ -2330,7 +2330,7 @@ static jl_value_t *strip_codeinfo_meta(jl_method_t *m, jl_value_t *ci_, int orig
     int compressed = 0;
     if (!jl_is_code_info(ci_)) {
         compressed = 1;
-        ci = jl_uncompress_ir(m, NULL, (jl_value_t*)ci_);
+        ci = jl_uncompress_ir(m, (jl_value_t*)ci_);
     }
     else {
         ci = (jl_code_info_t*)ci_;
diff --git a/src/toplevel.c b/src/toplevel.c
index e4438da991e1ee..8f67a0e1aa3e74 100644
--- a/src/toplevel.c
+++ b/src/toplevel.c
@@ -929,7 +929,7 @@ jl_value_t *jl_toplevel_eval_flex(jl_module_t *JL_NONNULL m, jl_value_t *e, int
         size_t world = jl_atomic_load_acquire(&jl_world_counter);
         ct->world_age = world;
         if (!has_defs && jl_get_module_infer(m) != 0) {
-            (void)jl_type_infer(mfunc, world, 0);
+            (void)jl_type_infer(mfunc, world, 0, SOURCE_MODE_NOT_REQUIRED);
         }
         result = jl_invoke(/*func*/NULL, /*args*/NULL, /*nargs*/0, mfunc);
         ct->world_age = last_age;
@@ -1010,10 +1010,10 @@ JL_DLLEXPORT jl_value_t *jl_infer_thunk(jl_code_info_t *thk, jl_module_t *m)
     JL_GC_PUSH1(&li);
     jl_resolve_globals_in_ir((jl_array_t*)thk->code, m, NULL, 0);
     jl_task_t *ct = jl_current_task;
-    jl_code_info_t *src = jl_type_infer(li, ct->world_age, 0);
+    jl_code_instance_t *ci = jl_type_infer(li, ct->world_age, 0, SOURCE_MODE_NOT_REQUIRED);
     JL_GC_POP();
-    if (src)
-        return src->rettype;
+    if (ci)
+        return ci->rettype;
     return (jl_value_t*)jl_any_type;
 }
diff --git a/stdlib/InteractiveUtils/src/codeview.jl b/stdlib/InteractiveUtils/src/codeview.jl
index 4e5141c0de08e7..f46db9e890b245 100644
--- a/stdlib/InteractiveUtils/src/codeview.jl
+++ b/stdlib/InteractiveUtils/src/codeview.jl
@@ -54,6 +54,75 @@ function is_expected_union(u::Union)
     return true
 end
 
+function print_warntype_codeinfo(io::IO, src::Core.CodeInfo, @nospecialize(rettype), nargs::Integer; lineprinter)
+    if src.slotnames !== nothing
+        slotnames = Base.sourceinfo_slotnames(src)
+        io = IOContext(io, :SOURCE_SLOTNAMES => slotnames)
+        slottypes = src.slottypes
+        nargs > 0 && println(io, "Arguments")
+        for i = 1:length(slotnames)
+            if i == nargs + 1
+                println(io, "Locals")
+            end
+            print(io, "  ", slotnames[i])
+            if isa(slottypes, Vector{Any})
+                warntype_type_printer(io; type=slottypes[i], used=true)
+            end
+            println(io)
+        end
+    end
+    print(io, "Body")
+    warntype_type_printer(io; type=rettype, used=true)
+    println(io)
+    irshow_config = Base.IRShow.IRShowConfig(lineprinter(src), warntype_type_printer)
+    Base.IRShow.show_ir(io, src, irshow_config)
+    println(io)
+end
+
+function print_warntype_mi(io::IO, mi::Core.MethodInstance)
+    println(io, mi)
+    print(io, "  from ")
+    println(io, mi.def)
+    if !isempty(mi.sparam_vals)
+        println(io, "Static Parameters")
+        sig = mi.def.sig
+        warn_color = Base.warn_color() # more mild user notification
+        for i = 1:length(mi.sparam_vals)
+            sig = sig::UnionAll
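+            # `mi.def.sig` is a chain of `UnionAll`s, one binding per static
+            # parameter, so peeling one layer per iteration lines `sig.var`
+            # up with `mi.sparam_vals[i]`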
+            name = sig.var.name
+            val = mi.sparam_vals[i]
+            print_highlighted(io::IO, v::String, color::Symbol) =
+                if highlighting[:warntype]
+                    Base.printstyled(io, v; color)
+                else
+                    Base.print(io, v)
+                end
+            if val isa TypeVar
+                if val.lb === Union{}
+                    print(io, "  ", name, " <: ")
+                    print_highlighted(io, "$(val.ub)", warn_color)
+                elseif val.ub === Any
+                    print(io, "  ", sig.var.name, " >: ")
+                    print_highlighted(io, "$(val.lb)", warn_color)
+                else
+                    print(io, "  ")
+                    print_highlighted(io, "$(val.lb)", warn_color)
+                    print(io, " <: ", sig.var.name, " <: ")
+                    print_highlighted(io, "$(val.ub)", warn_color)
+                end
+            elseif val isa typeof(Vararg)
+                print(io, "  ", name, "::")
+                print_highlighted(io, "Int", warn_color)
+            else
+                print(io, "  ", sig.var.name, " = ")
+                print_highlighted(io, "$(val)", :cyan) # show the "good" type
+            end
+            println(io)
+            sig = sig.body
+        end
+    end
+end
+
 """
     code_warntype([io::IO], f, types; debuginfo=:default)
 
@@ -75,84 +144,27 @@ See [`@code_warntype`](@ref man-code-warntype) for more information.
 See also: [`@code_warntype`](@ref), [`code_typed`](@ref), [`code_lowered`](@ref), [`code_llvm`](@ref), [`code_native`](@ref).
 """
 function code_warntype(io::IO, @nospecialize(f), @nospecialize(t=Base.default_tt(f));
+                       world=Base.get_world_counter(),
+                       interp::Core.Compiler.AbstractInterpreter=Core.Compiler.NativeInterpreter(world),
                        debuginfo::Symbol=:default, optimize::Bool=false, kwargs...)
+    (ccall(:jl_is_in_pure_context, Bool, ()) || world == typemax(UInt)) &&
+        error("code reflection cannot be used from generated functions")
     debuginfo = Base.IRShow.debuginfo(debuginfo)
     lineprinter = Base.IRShow.__debuginfo[debuginfo]
-    for (src, rettype) in code_typed(f, t; optimize, kwargs...)
-        if !(src isa Core.CodeInfo)
-            println(io, src)
-            println(io, "  failed to infer")
-            continue
-        end
-        lambda_io::IOContext = io
-        p = src.parent
-        nargs::Int = 0
-        if p isa Core.MethodInstance
-            println(io, p)
-            print(io, "  from ")
-            println(io, p.def)
-            p.def isa Method && (nargs = p.def.nargs)
-            if !isempty(p.sparam_vals)
-                println(io, "Static Parameters")
-                sig = p.def.sig
-                warn_color = Base.warn_color() # more mild user notification
-                for i = 1:length(p.sparam_vals)
-                    sig = sig::UnionAll
-                    name = sig.var.name
-                    val = p.sparam_vals[i]
-                    print_highlighted(io::IO, v::String, color::Symbol) =
-                        if highlighting[:warntype]
-                            Base.printstyled(io, v; color)
-                        else
-                            Base.print(io, v)
-                        end
-                    if val isa TypeVar
-                        if val.lb === Union{}
-                            print(io, "  ", name, " <: ")
-                            print_highlighted(io, "$(val.ub)", warn_color)
-                        elseif val.ub === Any
-                            print(io, "  ", sig.var.name, " >: ")
-                            print_highlighted(io, "$(val.lb)", warn_color)
-                        else
-                            print(io, "  ")
-                            print_highlighted(io, "$(val.lb)", warn_color)
-                            print(io, " <: ", sig.var.name, " <: ")
-                            print_highlighted(io, "$(val.ub)", warn_color)
-                        end
-                    elseif val isa typeof(Vararg)
-                        print(io, "  ", name, "::")
-                        print_highlighted(io, "Int", warn_color)
-                    else
-                        print(io, "  ", sig.var.name, " = ")
-                        print_highlighted(io, "$(val)", :cyan) # show the "good" type
-                    end
-                    println(io)
-                    sig = sig.body
-                end
-            end
-        end
-        if src.slotnames !== nothing
-            slotnames = Base.sourceinfo_slotnames(src)
-            lambda_io = IOContext(lambda_io, :SOURCE_SLOTNAMES => slotnames)
-            slottypes = src.slottypes
-            nargs > 0 && println(io, "Arguments")
-            for i = 1:length(slotnames)
-                if i == nargs + 1
-                    println(io, "Locals")
-                end
-                print(io, "  ", slotnames[i])
-                if isa(slottypes, Vector{Any})
-                    warntype_type_printer(io; type=slottypes[i], used=true)
-                end
-                println(io)
-            end
-        end
-        print(io, "Body")
-        warntype_type_printer(io; type=rettype, used=true)
-        println(io)
-        irshow_config = Base.IRShow.IRShowConfig(lineprinter(src), warntype_type_printer)
-        Base.IRShow.show_ir(lambda_io, src, irshow_config)
-        println(io)
+    nargs = 0
+    if isa(f, Core.OpaqueClosure)
+        isa(f.source, Method) && (nargs = f.nargs)
+        print_warntype_codeinfo(io, Base.code_typed_opaque_closure(f)[1]..., nargs; lineprinter)
+        return nothing
+    end
+    matches = Base._methods_by_ftype(Base.to_tuple_type(Base.signature_type(f, t)), #=lim=#-1, world)::Vector
+    for match in matches
+        match = match::Core.MethodMatch
+        (src, rettype) = Core.Compiler.typeinf_code(interp, match, optimize)
+        mi = Core.Compiler.specialize_method(match)
+        mi.def isa Method && (nargs = mi.def.nargs)
+        print_warntype_mi(io, mi)
+        print_warntype_codeinfo(io, src, rettype, nargs; lineprinter)
     end
     nothing
 end
@@ -188,7 +200,7 @@ function _dump_function(@nospecialize(f), @nospecialize(t), native::Bool, wrappe
     else
         world = UInt64(f.world)
         tt = Base.to_tuple_type(t)
-        if Core.Compiler.is_source_inferred(f.source.source)
+        if !isdefined(f.source, :source)
             # OC was constructed from inferred source. There's only one
             # specialization and we can't infer anything more precise either.
             world = f.source.primary_world
diff --git a/stdlib/InteractiveUtils/test/runtests.jl b/stdlib/InteractiveUtils/test/runtests.jl
index cb0621516189af..fa684494954721 100644
--- a/stdlib/InteractiveUtils/test/runtests.jl
+++ b/stdlib/InteractiveUtils/test/runtests.jl
@@ -347,9 +347,9 @@ _true = true
 # and show that we can still work around it
 @noinline g_broken_code() = _true ? 0 : h_broken_code()
 @noinline h_broken_code() = (g_broken_code(); f_broken_code())
-let err = tempname(),
+let errf = tempname(),
     old_stderr = stderr,
-    new_stderr = open(err, "w")
+    new_stderr = open(errf, "w")
     try
         redirect_stderr(new_stderr)
         println(new_stderr, "start")
@@ -362,7 +362,7 @@ let err = tempname(),
     finally
         redirect_stderr(old_stderr)
         close(new_stderr)
-        let errstr = read(err, String)
+        let errstr = read(errf, String)
             @test startswith(errstr, """start
             end
             Internal error: encountered unexpected error during compilation of f_broken_code:
@@ -370,7 +370,7 @@ let err = tempname(),
             """) || errstr
             @test !endswith(errstr, "\nend\n") || errstr
         end
-        rm(err)
+        rm(errf)
     end
 end
 end
@@ -378,9 +378,11 @@ end
 # Issue #33163
 A33163(x; y) = x + y
 B33163(x) = x
-@test (@code_typed A33163(1, y=2))[1].inferred
-@test !(@code_typed optimize=false A33163(1, y=2))[1].inferred
-@test !(@code_typed optimize=false B33163(1))[1].inferred
+let
+    @code_typed A33163(1, y=2)[1]
+    @code_typed optimize=false A33163(1, y=2)[1]
+    @code_typed optimize=false B33163(1)[1]
+end
 
 @test_throws MethodError (@code_lowered wrongkeyword=true 3 + 4)
 
@@ -414,10 +416,11 @@ a14637 = A14637(0)
 @test (@code_typed max.(1 .+ 3, 5 - 7))[2] == Int
 f36261(x,y) = 3x + 4y
 A36261 = Float64[1.0, 2.0, 3.0]
-@test (@code_typed f36261.(A36261, pi))[1].inferred
-@test (@code_typed f36261.(A36261, 1 .+ pi))[1].inferred
-@test (@code_typed f36261.(A36261, 1 + pi))[1].inferred
-
+let
+    @code_typed f36261.(A36261, pi)[1]
+    @code_typed f36261.(A36261, 1 .+ pi)[1]
+    @code_typed f36261.(A36261, 1 + pi)[1]
+end
 
 module ReflectionTest
 using Test, Random, InteractiveUtils
@@ -643,7 +646,7 @@ end
 
 # macro options should accept both literals and variables
 let opt = false
-    @test !(first(@code_typed optimize=opt sum(1:10)).inferred)
+    first(@code_typed optimize=opt sum(1:10))
 end
 
 @testset "@time_imports" begin
diff --git a/stdlib/Serialization/src/Serialization.jl b/stdlib/Serialization/src/Serialization.jl
index b03936afd5c03c..6170d0cfe26c53 100644
--- a/stdlib/Serialization/src/Serialization.jl
+++ b/stdlib/Serialization/src/Serialization.jl
@@ -80,7 +80,7 @@ const TAGS = Any[
 const NTAGS = length(TAGS)
 @assert NTAGS == 255
 
-const ser_version = 26 # do not make changes without bumping the version #!
+const ser_version = 27 # do not make changes without bumping the version #!
 
 format_version(::AbstractSerializer) = ser_version
 format_version(s::Serializer) = s.version
@@ -1201,26 +1201,35 @@ function deserialize(s::AbstractSerializer, ::Type{CodeInfo})
     if pre_12
         ci.slotflags = deserialize(s)
     else
-        ci.method_for_inference_limit_heuristics = deserialize(s)
+        if format_version(s) <= 26
+            ci.method_for_inference_limit_heuristics = deserialize(s)
+        end
         ci.linetable = deserialize(s)
     end
     ci.slotnames = deserialize(s)
     if !pre_12
         ci.slotflags = deserialize(s)
         ci.slottypes = deserialize(s)
-        ci.rettype = deserialize(s)
-        ci.parent = deserialize(s)
-        world_or_edges = deserialize(s)
-        pre_13 = isa(world_or_edges, Integer)
-        if pre_13
-            ci.min_world = world_or_edges
+        if format_version(s) <= 26
+            deserialize(s) # rettype
+            deserialize(s) # parent
+            world_or_edges = deserialize(s)
+            pre_13 = isa(world_or_edges, Integer)
+            if pre_13
+                ci.min_world = world_or_edges
+            else
+                ci.edges = world_or_edges
+                ci.min_world = reinterpret(UInt, deserialize(s))
+                ci.max_world = reinterpret(UInt, deserialize(s))
+            end
         else
-            ci.edges = world_or_edges
+            ci.method_for_inference_limit_heuristics = deserialize(s)
+            ci.edges = deserialize(s)
             ci.min_world = reinterpret(UInt, deserialize(s))
             ci.max_world = reinterpret(UInt, deserialize(s))
         end
     end
-    ci.inferred = deserialize(s)
+    #ci.inferred = deserialize(s)
     if format_version(s) < 22
         inlining_cost = deserialize(s)
         if isa(inlining_cost, Bool)
diff --git a/test/compiler/AbstractInterpreter.jl b/test/compiler/AbstractInterpreter.jl
index 121e7fad55c90b..f37182d60f3f0b 100644
--- a/test/compiler/AbstractInterpreter.jl
+++ b/test/compiler/AbstractInterpreter.jl
@@ -438,7 +438,7 @@ function custom_lookup(mi::MethodInstance, min_world::UInt, max_world::UInt)
     for inf_result in CONST_INVOKE_INTERP.inf_cache
         if inf_result.linfo === mi
             if CC.any(inf_result.overridden_by_const)
-                return CodeInstance(CONST_INVOKE_INTERP, inf_result, inf_result.valid_worlds)
+                return CodeInstance(CONST_INVOKE_INTERP, inf_result)
             end
         end
     end
diff --git a/test/compiler/inference.jl b/test/compiler/inference.jl
index a797467ff61735..71636deccc7ad4 100644
--- a/test/compiler/inference.jl
+++ b/test/compiler/inference.jl
@@ -675,7 +675,6 @@ end
 function test_inferred_static(arrow::Pair, all_ssa)
     code, rt = arrow
     @test isdispatchelem(rt)
-    @test code.inferred
     for i = 1:length(code.code)
         e = code.code[i]
         test_inferred_static(e)
@@ -1698,7 +1697,6 @@ let linfo = get_linfo(Base.convert, Tuple{Type{Int64}, Int32}),
     @test opt.src !== linfo.def.source
     @test length(opt.src.slotflags) == linfo.def.nargs <= length(opt.src.slotnames)
     @test opt.src.ssavaluetypes isa Vector{Any}
-    @test !opt.src.inferred
     @test opt.mod === Base
 end
 
diff --git a/test/compiler/inline.jl b/test/compiler/inline.jl
index 0ef7f6def1057c..14edac727022f7 100644
--- a/test/compiler/inline.jl
+++ b/test/compiler/inline.jl
@@ -630,8 +630,8 @@ g41299(f::Tf, args::Vararg{Any,N}) where {Tf,N} = f(args...)
 # idempotency of callsite inlining
 function getcache(mi::Core.MethodInstance)
     cache = Core.Compiler.code_cache(Core.Compiler.NativeInterpreter())
-    codeinf = Core.Compiler.get(cache, mi, nothing)
-    return isnothing(codeinf) ? nothing : codeinf
+    codeinst = Core.Compiler.get(cache, mi, nothing)
+    return isnothing(codeinst) ? nothing : codeinst
 end
 @noinline f42078(a) = sum(sincos(a))
 let
@@ -649,8 +649,8 @@ let
 end
 let # make sure to discard the inferred source
     mi = only(methods(f42078)).specializations::Core.MethodInstance
-    codeinf = getcache(mi)::Core.CodeInstance
-    @atomic codeinf.inferred = nothing
+    codeinst = getcache(mi)::Core.CodeInstance
+    @atomic codeinst.inferred = nothing
 end
 let # inference should re-infer `f42078(::Int)` and we should get the same code
diff --git a/test/copy.jl b/test/copy.jl
index 633beee5f2af32..11cfbb9f14d76a 100644
--- a/test/copy.jl
+++ b/test/copy.jl
@@ -213,11 +213,13 @@ end
 @testset "copying CodeInfo" begin
     _testfunc() = nothing
     ci,_ = code_typed(_testfunc, ())[1]
-    ci.edges = [_testfunc]
+    if isdefined(ci, :edges)
+        ci.edges = [_testfunc]
 
-    ci2 = copy(ci)
-    # Test that edges are not shared
-    @test ci2.edges !== ci.edges
+        ci2 = copy(ci)
+        # Test that edges are not shared
+        @test ci2.edges !== ci.edges
+    end
 end
 
 @testset "issue #34025" begin
diff --git a/test/opaque_closure.jl b/test/opaque_closure.jl
index 856253ecd5a8d2..e9f1601019a594 100644
--- a/test/opaque_closure.jl
+++ b/test/opaque_closure.jl
@@ -189,8 +189,6 @@ let ci = @code_lowered const_int()
     cig.slotnames = Symbol[Symbol("#self#")]
     cig.slottypes = Any[Any]
     cig.slotflags = UInt8[0x00]
-    @assert cig.min_world == UInt(1)
-    @assert cig.max_world == typemax(UInt)
     return cig
 end
 end
@@ -246,8 +244,8 @@ end
 
 # constructing an opaque closure from IRCode
 let src = first(only(code_typed(+, (Int, Int))))
-    ir = Core.Compiler.inflate_ir(src)
-    @test OpaqueClosure(src)(40, 2) == 42
+    ir = Core.Compiler.inflate_ir(src, Core.Compiler.VarState[], src.slottypes)
+    @test OpaqueClosure(src; sig=Tuple{Int, Int}, rettype=Int, nargs=2)(40, 2) == 42
     oc = OpaqueClosure(ir)
     @test oc(40, 2) == 42
     @test isa(oc, OpaqueClosure{Tuple{Int,Int}, Int})
@@ -266,11 +264,11 @@ end
 let src = code_typed((Int,Int)) do x, y...
         return (x, y)
     end |> only |> first
-    let oc = OpaqueClosure(src)
+    let oc = OpaqueClosure(src; rettype=Tuple{Int, Tuple{Int}}, sig=Tuple{Int, Int}, nargs=2, isva=true)
         @test oc(1,2) === (1,(2,))
         @test_throws MethodError oc(1,2,3)
     end
-    ir = Core.Compiler.inflate_ir(src)
+    ir = Core.Compiler.inflate_ir(src, Core.Compiler.VarState[], src.slottypes)
     let oc = OpaqueClosure(ir; isva=true)
         @test oc(1,2) === (1,(2,))
         @test_throws MethodError oc(1,2,3)
diff --git a/test/worlds.jl b/test/worlds.jl
index 8e820bdab88df4..6dd4a93f918498 100644
--- a/test/worlds.jl
+++ b/test/worlds.jl
@@ -265,8 +265,7 @@ function equal(ci1::Core.CodeInfo, ci2::Core.CodeInfo)
            ci1.linetable == ci2.linetable &&
            ci1.slotnames == ci2.slotnames &&
            ci1.slotflags == ci2.slotflags &&
-           ci1.slottypes == ci2.slottypes &&
-           ci1.rettype == ci2.rettype
+           ci1.slottypes == ci2.slottypes
 end
 equal(p1::Pair, p2::Pair) = p1.second == p2.second && equal(p1.first, p2.first)
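
With `rettype` and `parent` removed from `CodeInfo`, a typed `CodeInfo` no longer carries its own ABI, so callers must now supply it explicitly. A minimal sketch of the resulting usage, mirroring the updated `test/opaque_closure.jl` cases above (the keyword names `sig`, `rettype`, and `nargs` are taken from those tests; this assumes a session running on this branch):

```julia
# Obtain typed IR for `+(::Int, ::Int)`. The signature, return type, and
# argument count are passed explicitly, since the CodeInfo cannot provide them.
src = first(only(code_typed(+, (Int, Int))))
oc = Core.OpaqueClosure(src; sig=Tuple{Int,Int}, rettype=Int, nargs=2)
@assert oc(40, 2) == 42
```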