diff --git a/src/maglev/arm/maglev-assembler-arm.cc b/src/maglev/arm/maglev-assembler-arm.cc
index b2e95913e1c4..0afbac4eb5bb 100644
--- a/src/maglev/arm/maglev-assembler-arm.cc
+++ b/src/maglev/arm/maglev-assembler-arm.cc
@@ -79,21 +79,64 @@ void MaglevAssembler::Allocate(RegisterSnapshot register_snapshot,
   bind(*done);
 }
 
+void MaglevAssembler::OSRPrologue(Graph* graph) {
+  ScratchRegisterScope temps(this);
+  Register scratch = temps.Acquire();
+
+  DCHECK(graph->is_osr());
+  CHECK(!graph->has_recursive_calls());
+
+  uint32_t source_frame_size =
+      graph->min_maglev_stackslots_for_unoptimized_frame_size();
+
+  if (v8_flags.maglev_assert_stack_size && v8_flags.debug_code) {
+    add(scratch, sp,
+        Operand(source_frame_size * kSystemPointerSize +
+                StandardFrameConstants::kFixedFrameSizeFromFp));
+    cmp(scratch, fp);
+    Assert(eq, AbortReason::kOsrUnexpectedStackSize);
+  }
+
+  uint32_t target_frame_size =
+      graph->tagged_stack_slots() + graph->untagged_stack_slots();
+  CHECK_LE(source_frame_size, target_frame_size);
+
+  if (source_frame_size < target_frame_size) {
+    ASM_CODE_COMMENT_STRING(this, "Growing frame for OSR");
+    uint32_t additional_tagged =
+        source_frame_size < graph->tagged_stack_slots()
+            ? graph->tagged_stack_slots() - source_frame_size
+            : 0;
+    if (additional_tagged) {
+      Move(scratch, 0);
+    }
+    for (size_t i = 0; i < additional_tagged; ++i) {
+      Push(scratch);
+    }
+    uint32_t size_so_far = source_frame_size + additional_tagged;
+    CHECK_LE(size_so_far, target_frame_size);
+    if (size_so_far < target_frame_size) {
+      sub(sp, sp,
+          Operand((target_frame_size - size_so_far) * kSystemPointerSize));
+    }
+  }
+}
+
 void MaglevAssembler::Prologue(Graph* graph) {
   ScratchRegisterScope temps(this);
   temps.Include({r4, r8});
 
-  if (!graph->is_osr()) {
-    BailoutIfDeoptimized();
-  }
-  CHECK_IMPLIES(graph->is_osr(), !graph->has_recursive_calls());
+  DCHECK(!graph->is_osr());
+
+  BailoutIfDeoptimized();
+
   if (graph->has_recursive_calls()) {
     bind(code_gen_state()->entry_label());
   }
 
   // Tiering support.
   // TODO(jgruber): Extract to a builtin.
-  if (v8_flags.turbofan && !graph->is_osr()) {
+  if (v8_flags.turbofan) {
     ScratchRegisterScope temps(this);
     Register flags = temps.Acquire();
     Register feedback_vector = temps.Acquire();
@@ -115,47 +158,6 @@ void MaglevAssembler::Prologue(Graph* graph) {
         deferred_flags_need_processing);
   }
 
-  if (graph->is_osr()) {
-    Register scratch = temps.Acquire();
-
-    uint32_t source_frame_size =
-        graph->min_maglev_stackslots_for_unoptimized_frame_size();
-
-    if (v8_flags.maglev_assert_stack_size && v8_flags.debug_code) {
-      add(scratch, sp,
-          Operand(source_frame_size * kSystemPointerSize +
-                  StandardFrameConstants::kFixedFrameSizeFromFp),
-          SetCC);
-      cmp(scratch, fp);
-      Assert(eq, AbortReason::kOsrUnexpectedStackSize);
-    }
-
-    uint32_t target_frame_size =
-        graph->tagged_stack_slots() + graph->untagged_stack_slots();
-    CHECK_LE(source_frame_size, target_frame_size);
-
-    if (source_frame_size < target_frame_size) {
-      ASM_CODE_COMMENT_STRING(this, "Growing frame for OSR");
-      uint32_t additional_tagged =
-          source_frame_size < graph->tagged_stack_slots()
-              ? graph->tagged_stack_slots() - source_frame_size
-              : 0;
-      if (additional_tagged) {
-        Move(scratch, 0);
-      }
-      for (size_t i = 0; i < additional_tagged; ++i) {
-        Push(scratch);
-      }
-      uint32_t size_so_far = source_frame_size + additional_tagged;
-      CHECK_LE(size_so_far, target_frame_size);
-      if (size_so_far < target_frame_size) {
-        sub(sp, sp,
-            Operand((target_frame_size - size_so_far) * kSystemPointerSize));
-      }
-    }
-    return;
-  }
-
   EnterFrame(StackFrame::MAGLEV);
   // Save arguments in frame.
   // TODO(leszeks): Consider eliding this frame if we don't make any calls
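The OSR prologue's job is the same on every architecture: the function is entered on top of the still-live unoptimized frame, so instead of building a fresh frame it only grows the existing one to the size the Maglev code expects, zero-filling the tagged portion so the GC never scans stale slots. A minimal sketch of that slot arithmetic outside of any assembler (struct and function names here are illustrative, not V8's):

```cpp
#include <cassert>
#include <cstdint>

// Illustrative only: mirrors the arithmetic in OSRPrologue above.
struct OsrFrameGrowth {
  uint32_t tagged_slots_to_push;  // must be zeroed so the GC can scan them
  uint32_t untagged_slots;        // claimed with a bare sp adjustment
};

OsrFrameGrowth ComputeOsrFrameGrowth(uint32_t source_frame_size,
                                     uint32_t tagged_stack_slots,
                                     uint32_t untagged_stack_slots) {
  uint32_t target_frame_size = tagged_stack_slots + untagged_stack_slots;
  assert(source_frame_size <= target_frame_size);
  // Slots the source frame already provides count against the tagged region
  // first; only the shortfall needs to be pushed as zeros.
  uint32_t additional_tagged = source_frame_size < tagged_stack_slots
                                   ? tagged_stack_slots - source_frame_size
                                   : 0;
  uint32_t size_so_far = source_frame_size + additional_tagged;
  return {additional_tagged, target_frame_size - size_so_far};
}
```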
diff --git a/src/maglev/arm64/maglev-assembler-arm64.cc b/src/maglev/arm64/maglev-assembler-arm64.cc
index 5c6b662761ec..69a5d3c9257b 100644
--- a/src/maglev/arm64/maglev-assembler-arm64.cc
+++ b/src/maglev/arm64/maglev-assembler-arm64.cc
@@ -79,6 +79,50 @@ void MaglevAssembler::Allocate(RegisterSnapshot register_snapshot,
   bind(*done);
 }
 
+void MaglevAssembler::OSRPrologue(Graph* graph) {
+  DCHECK(graph->is_osr());
+  CHECK(!graph->has_recursive_calls());
+
+  uint32_t source_frame_size =
+      graph->min_maglev_stackslots_for_unoptimized_frame_size();
+
+  static_assert(StandardFrameConstants::kFixedSlotCount % 2 == 1);
+  if (source_frame_size % 2 == 0) source_frame_size++;
+
+  if (v8_flags.maglev_assert_stack_size && v8_flags.debug_code) {
+    ScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    Add(scratch, sp,
+        source_frame_size * kSystemPointerSize +
+            StandardFrameConstants::kFixedFrameSizeFromFp);
+    Cmp(scratch, fp);
+    Assert(eq, AbortReason::kOsrUnexpectedStackSize);
+  }
+
+  uint32_t target_frame_size =
+      graph->tagged_stack_slots() + graph->untagged_stack_slots();
+  CHECK_EQ(target_frame_size % 2, 1);
+  CHECK_LE(source_frame_size, target_frame_size);
+  if (source_frame_size < target_frame_size) {
+    ASM_CODE_COMMENT_STRING(this, "Growing frame for OSR");
+    uint32_t additional_tagged =
+        source_frame_size < graph->tagged_stack_slots()
+            ? graph->tagged_stack_slots() - source_frame_size
+            : 0;
+    uint32_t additional_tagged_double =
+        additional_tagged / 2 + additional_tagged % 2;
+    for (size_t i = 0; i < additional_tagged_double; ++i) {
+      Push(xzr, xzr);
+    }
+    uint32_t size_so_far = source_frame_size + additional_tagged_double * 2;
+    CHECK_LE(size_so_far, target_frame_size);
+    if (size_so_far < target_frame_size) {
+      Sub(sp, sp,
+          Immediate((target_frame_size - size_so_far) * kSystemPointerSize));
+    }
+  }
+}
+
 void MaglevAssembler::Prologue(Graph* graph) {
   ScratchRegisterScope temps(this);
   // We add two extra registers to the scope. Ideally we could add all the
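The arm64 version adds one wrinkle on top of the shared logic: sp must stay 16-byte aligned, so slot counts are kept odd (the fixed frame contributes an odd slot count, and odd plus odd is a multiple of two slots) and zeroed slots are pushed two at a time via Push(xzr, xzr). A sketch of just that rounding (illustrative, not V8 code):

```cpp
#include <cstdint>

// Illustrative only: pad the source frame to an odd slot count so that,
// together with the fixed frame's odd count, sp stays 16-byte aligned.
uint32_t AlignSourceFrameSize(uint32_t source_frame_size) {
  return source_frame_size % 2 == 0 ? source_frame_size + 1
                                    : source_frame_size;
}

// Zeroed tagged slots are pushed in pairs, so round up to whole pushes.
uint32_t PairsOfZeroSlots(uint32_t additional_tagged) {
  return additional_tagged / 2 + additional_tagged % 2;
}
```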
@@ -90,19 +134,18 @@ void MaglevAssembler::Prologue(Graph* graph) {
   // used registers manually.
   temps.Include({x14, x15});
 
-  if (!graph->is_osr()) {
-    CallTarget();
-    BailoutIfDeoptimized();
-  }
+  DCHECK(!graph->is_osr());
+
+  CallTarget();
+  BailoutIfDeoptimized();
 
-  CHECK_IMPLIES(graph->is_osr(), !graph->has_recursive_calls());
   if (graph->has_recursive_calls()) {
     BindCallTarget(code_gen_state()->entry_label());
   }
 
   // Tiering support.
   // TODO(jgruber): Extract to a builtin.
-  if (v8_flags.turbofan && !graph->is_osr()) {
+  if (v8_flags.turbofan) {
     ScratchRegisterScope temps(this);
     Register flags = temps.Acquire();
     Register feedback_vector = temps.Acquire();
@@ -124,47 +167,6 @@ void MaglevAssembler::Prologue(Graph* graph) {
         deferred_flags_need_processing);
   }
 
-  if (graph->is_osr()) {
-    uint32_t source_frame_size =
-        graph->min_maglev_stackslots_for_unoptimized_frame_size();
-
-    static_assert(StandardFrameConstants::kFixedSlotCount % 2 == 1);
-    if (source_frame_size % 2 == 0) source_frame_size++;
-
-    if (v8_flags.maglev_assert_stack_size && v8_flags.debug_code) {
-      Register scratch = temps.Acquire();
-      Add(scratch, sp,
-          source_frame_size * kSystemPointerSize +
-              StandardFrameConstants::kFixedFrameSizeFromFp);
-      Cmp(scratch, fp);
-      Assert(eq, AbortReason::kOsrUnexpectedStackSize);
-    }
-
-    uint32_t target_frame_size =
-        graph->tagged_stack_slots() + graph->untagged_stack_slots();
-    CHECK_EQ(target_frame_size % 2, 1);
-    CHECK_LE(source_frame_size, target_frame_size);
-    if (source_frame_size < target_frame_size) {
-      ASM_CODE_COMMENT_STRING(this, "Growing frame for OSR");
-      uint32_t additional_tagged =
-          source_frame_size < graph->tagged_stack_slots()
-              ? graph->tagged_stack_slots() - source_frame_size
-              : 0;
-      uint32_t additional_tagged_double =
-          additional_tagged / 2 + additional_tagged % 2;
-      for (size_t i = 0; i < additional_tagged_double; ++i) {
-        Push(xzr, xzr);
-      }
-      uint32_t size_so_far = source_frame_size + additional_tagged_double * 2;
-      CHECK_LE(size_so_far, target_frame_size);
-      if (size_so_far < target_frame_size) {
-        Sub(sp, sp,
-            Immediate((target_frame_size - size_so_far) * kSystemPointerSize));
-      }
-    }
-    return;
-  }
-
   EnterFrame(StackFrame::MAGLEV);
 
   // Save arguments in frame.
diff --git a/src/maglev/maglev-assembler.h b/src/maglev/maglev-assembler.h
index 22106d7c8b12..4e909a269c93 100644
--- a/src/maglev/maglev-assembler.h
+++ b/src/maglev/maglev-assembler.h
@@ -486,6 +486,7 @@ class MaglevAssembler : public MacroAssembler {
   template <typename... T>
   inline void PushReverse(T... vals);
 
+  void OSRPrologue(Graph* graph);
   void Prologue(Graph* graph);
 
   inline void FinishCode();
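Splitting the prologue also lets the assertions become unconditional: the old CHECK_IMPLIES(graph->is_osr(), !graph->has_recursive_calls()) guard turns into a DCHECK(!graph->is_osr()) in Prologue and a CHECK(!graph->has_recursive_calls()) in OSRPrologue. The same shape in plain asserts (illustrative, not the V8 macros):

```cpp
#include <cassert>

// Illustrative only: one conditional invariant becomes two unconditional
// ones once each entry point knows which case it handles.
void Prologue(bool is_osr) {
  assert(!is_osr);  // callers route OSR graphs to OSRPrologue instead
}

void OSRPrologue(bool is_osr, bool has_recursive_calls) {
  assert(is_osr);
  assert(!has_recursive_calls);  // was: CHECK_IMPLIES(is_osr, !recursive)
}
```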
diff --git a/src/maglev/maglev-code-generator.cc b/src/maglev/maglev-code-generator.cc
index 78413292b93f..d53c82e094cc 100644
--- a/src/maglev/maglev-code-generator.cc
+++ b/src/maglev/maglev-code-generator.cc
@@ -710,7 +710,11 @@ class MaglevCodeGeneratingNodeProcessor {
       __ DebugBreak();
     }
 
-    __ Prologue(graph);
+    if (graph->is_osr()) {
+      __ OSRPrologue(graph);
+    } else {
+      __ Prologue(graph);
+    }
   }
 
   void PostProcessGraph(Graph* graph) {}
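The x64 hunk below checks the incoming frame size by materializing rbp - rsp and comparing it against the expected distance; the arm ports compute sp + expected and compare against fp instead. The invariant both directions assert, as a standalone predicate (illustrative; assumes 8-byte system pointers):

```cpp
#include <cstdint>

// Illustrative only: what kOsrUnexpectedStackSize guards. On entry from the
// unoptimized frame, the distance from fp down to sp must equal the source
// frame's spill slots plus the fixed slots sitting between fp and them.
bool OsrStackSizeMatches(uintptr_t fp, uintptr_t sp,
                         uint32_t source_frame_size,
                         uint32_t fixed_frame_size_from_fp) {
  constexpr uint32_t kSystemPointerSize = 8;  // assumption: 64-bit target
  return fp - sp == source_frame_size * kSystemPointerSize +
                        fixed_frame_size_from_fp;
}
```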
diff --git a/src/maglev/x64/maglev-assembler-x64.cc b/src/maglev/x64/maglev-assembler-x64.cc
index 206b96304b0e..419cc47258eb 100644
--- a/src/maglev/x64/maglev-assembler-x64.cc
+++ b/src/maglev/x64/maglev-assembler-x64.cc
@@ -394,12 +394,50 @@ void MaglevAssembler::TryChangeFloat64ToIndex(Register result,
   Jump(fail);
 }
 
-void MaglevAssembler::Prologue(Graph* graph) {
-  if (!graph->is_osr()) {
-    BailoutIfDeoptimized(rbx);
+void MaglevAssembler::OSRPrologue(Graph* graph) {
+  DCHECK(graph->is_osr());
+  CHECK(!graph->has_recursive_calls());
+
+  uint32_t source_frame_size =
+      graph->min_maglev_stackslots_for_unoptimized_frame_size();
+
+  if (v8_flags.maglev_assert_stack_size && v8_flags.debug_code) {
+    movq(kScratchRegister, rbp);
+    subq(kScratchRegister, rsp);
+    cmpq(kScratchRegister,
+         Immediate(source_frame_size * kSystemPointerSize +
+                   StandardFrameConstants::kFixedFrameSizeFromFp));
+    Assert(equal, AbortReason::kOsrUnexpectedStackSize);
+  }
+
+  uint32_t target_frame_size =
+      graph->tagged_stack_slots() + graph->untagged_stack_slots();
+  CHECK_LE(source_frame_size, target_frame_size);
+
+  if (source_frame_size < target_frame_size) {
+    ASM_CODE_COMMENT_STRING(this, "Growing frame for OSR");
+    Move(kScratchRegister, 0);
+    uint32_t additional_tagged =
+        source_frame_size < graph->tagged_stack_slots()
+            ? graph->tagged_stack_slots() - source_frame_size
+            : 0;
+    for (size_t i = 0; i < additional_tagged; ++i) {
+      pushq(kScratchRegister);
+    }
+    uint32_t size_so_far = source_frame_size + additional_tagged;
+    CHECK_LE(size_so_far, target_frame_size);
+    if (size_so_far < target_frame_size) {
+      subq(rsp,
+           Immediate((target_frame_size - size_so_far) * kSystemPointerSize));
+    }
   }
+}
+
+void MaglevAssembler::Prologue(Graph* graph) {
+  DCHECK(!graph->is_osr());
+
+  BailoutIfDeoptimized(rbx);
 
-  CHECK_IMPLIES(graph->is_osr(), !graph->has_recursive_calls());
   if (graph->has_recursive_calls()) {
     bind(code_gen_state()->entry_label());
   }
@@ -407,7 +445,7 @@ void MaglevAssembler::Prologue(Graph* graph) {
   // Tiering support.
   // TODO(jgruber): Extract to a builtin (the tiering prologue is ~230 bytes
   // per Maglev code object on x64).
-  if (v8_flags.turbofan && !graph->is_osr()) {
+  if (v8_flags.turbofan) {
     // Scratch registers. Don't clobber regs related to the calling
     // convention (e.g. kJavaScriptCallArgCountRegister). Keep up-to-date
     // with deferred flags code.
@@ -432,43 +470,6 @@ void MaglevAssembler::Prologue(Graph* graph) {
         deferred_flags_need_processing);
   }
 
-  if (graph->is_osr()) {
-    uint32_t source_frame_size =
-        graph->min_maglev_stackslots_for_unoptimized_frame_size();
-
-    if (v8_flags.maglev_assert_stack_size && v8_flags.debug_code) {
-      movq(kScratchRegister, rbp);
-      subq(kScratchRegister, rsp);
-      cmpq(kScratchRegister,
-           Immediate(source_frame_size * kSystemPointerSize +
-                     StandardFrameConstants::kFixedFrameSizeFromFp));
-      Assert(equal, AbortReason::kOsrUnexpectedStackSize);
-    }
-
-    uint32_t target_frame_size =
-        graph->tagged_stack_slots() + graph->untagged_stack_slots();
-    CHECK_LE(source_frame_size, target_frame_size);
-
-    if (source_frame_size < target_frame_size) {
-      ASM_CODE_COMMENT_STRING(this, "Growing frame for OSR");
-      Move(kScratchRegister, 0);
-      uint32_t additional_tagged =
-          source_frame_size < graph->tagged_stack_slots()
-              ? graph->tagged_stack_slots() - source_frame_size
-              : 0;
-      for (size_t i = 0; i < additional_tagged; ++i) {
-        pushq(kScratchRegister);
-      }
-      uint32_t size_so_far = source_frame_size + additional_tagged;
-      CHECK_LE(size_so_far, target_frame_size);
-      if (size_so_far < target_frame_size) {
-        subq(rsp,
-             Immediate((target_frame_size - size_so_far) * kSystemPointerSize));
-      }
-    }
-    return;
-  }
-
   EnterFrame(StackFrame::MAGLEV);
   // Save arguments in frame.
   // TODO(leszeks): Consider eliding this frame if we don't make any calls
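A worked example with hypothetical numbers, building on the ComputeOsrFrameGrowth sketch after the arm diff above: a source frame providing 6 slots entering a graph that wants 9 tagged and 4 untagged slots pushes three zeroed slots and then drops sp by four more.

```cpp
#include <cassert>

int main() {
  // Hypothetical slot counts, not taken from any real graph.
  OsrFrameGrowth g = ComputeOsrFrameGrowth(/*source_frame_size=*/6,
                                           /*tagged_stack_slots=*/9,
                                           /*untagged_stack_slots=*/4);
  assert(g.tagged_slots_to_push == 3);  // three zeroed, GC-scannable pushes
  assert(g.untagged_slots == 4);        // one sub(sp, 4 * kSystemPointerSize)
  return 0;
}
```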