[maglev] Add a FunctionEntryStackCheck trampoline
This reduces the size of Maglev-generated code by moving the
function-entry stack-check slow path into a shared trampoline builtin,
instead of emitting it inline in every compiled function.
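
For context, the per-function cost drops to a single compare plus, on the slow
path, one builtin call. A minimal sketch of the resulting call-site shape
(JumpIf, Move, CallBuiltin, and the use of kReturnRegister0 as the Smi input
register are assumptions; only the builtin names and FunctionEntryStackCheck
come from this diff):

  // Fast path, emitted inline in every Maglev prologue.
  Condition enough_stack = __ FunctionEntryStackCheck(stack_check_offset);
  Label done;
  __ JumpIf(enough_stack, &done);
  // Slow path, now shared: pass the gap as a Smi and call the trampoline
  // (variant selection is shown in the graph-builder sketch at the end).
  __ Move(kReturnRegister0, Smi::FromInt(stack_check_offset));
  __ CallBuiltin(Builtin::kMaglevFunctionEntryStackCheck_WithoutNewTarget);
  __ bind(&done);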

Bug: v8:7700
Change-Id: Ie2ab3c7e9addd43df8888b51ae21f3ec89243ddb
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4661667
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Auto-Submit: Victor Gomes <victorgomes@chromium.org>
Commit-Queue: Victor Gomes <victorgomes@chromium.org>
Cr-Commit-Position: refs/heads/main@{#88672}
victorgomes authored and V8 LUCI CQ committed Jul 5, 2023
1 parent c2860e1 commit e9320c5
Showing 20 changed files with 241 additions and 132 deletions.
25 changes: 25 additions & 0 deletions src/builtins/arm/builtins-arm.cc
@@ -1815,6 +1815,31 @@ void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
D::MaybeTargetCodeRegister());
}

#ifdef V8_ENABLE_MAGLEV

void Builtins::Generate_MaglevFunctionEntryStackCheck(MacroAssembler* masm,
bool save_new_target) {
// Input (r0): Stack size (Smi).
// This builtin can be invoked just after Maglev's prologue.
// All registers are available, except (possibly) new.target.
ASM_CODE_COMMENT(masm);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ AssertSmi(r0);
if (save_new_target) {
__ Push(kJavaScriptCallNewTargetRegister);
}
__ Push(r0);
__ CallRuntime(Runtime::kStackGuardWithGap, 1);
if (save_new_target) {
__ Pop(kJavaScriptCallNewTargetRegister);
}
}
__ Ret();
}

#endif // V8_ENABLE_MAGLEV

// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
28 changes: 28 additions & 0 deletions src/builtins/arm64/builtins-arm64.cc
@@ -2055,6 +2055,34 @@ void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
D::MaybeTargetCodeRegister());
}

#ifdef V8_ENABLE_MAGLEV

// static
void Builtins::Generate_MaglevFunctionEntryStackCheck(MacroAssembler* masm,
bool save_new_target) {
// Input (x0): Stack size (Smi).
// This builtin can be invoked just after Maglev's prologue.
// All registers are available, except (possibly) new.target.
ASM_CODE_COMMENT(masm);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ AssertSmi(x0);
if (save_new_target) {
__ AssertSmiOrHeapObjectInCompressionCage(
kJavaScriptCallNewTargetRegister);
__ Push(kJavaScriptCallNewTargetRegister, padreg);
}
__ PushArgument(x0);
__ CallRuntime(Runtime::kStackGuardWithGap, 1);
if (save_new_target) {
__ Pop(padreg, kJavaScriptCallNewTargetRegister);
}
}
__ Ret();
}

#endif // V8_ENABLE_MAGLEV

// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
2 changes: 2 additions & 0 deletions src/builtins/builtins-definitions.h
@@ -200,6 +200,8 @@ namespace internal {
\
/* Maglev Compiler */ \
ASM(MaglevOnStackReplacement, OnStackReplacement) \
ASM(MaglevFunctionEntryStackCheck_WithoutNewTarget, Void) \
ASM(MaglevFunctionEntryStackCheck_WithNewTarget, Void) \
\
/* Code life-cycle */ \
TFC(CompileLazy, JSTrampoline) \
18 changes: 18 additions & 0 deletions src/builtins/builtins-internal-gen.cc
@@ -1368,6 +1368,24 @@ void Builtins::Generate_MaglevOnStackReplacement(MacroAssembler* masm) {
}
#endif // V8_TARGET_ARCH_X64

#ifndef V8_ENABLE_MAGLEV
// static
void Builtins::Generate_MaglevFunctionEntryStackCheck(MacroAssembler* masm,
bool save_new_target) {
masm->Trap();
}
#endif // V8_ENABLE_MAGLEV

void Builtins::Generate_MaglevFunctionEntryStackCheck_WithoutNewTarget(
MacroAssembler* masm) {
Generate_MaglevFunctionEntryStackCheck(masm, false);
}

void Builtins::Generate_MaglevFunctionEntryStackCheck_WithNewTarget(
MacroAssembler* masm) {
Generate_MaglevFunctionEntryStackCheck(masm, true);
}

// ES6 [[Get]] operation.
TF_BUILTIN(GetProperty, CodeStubAssembler) {
auto object = Parameter<Object>(Descriptor::kObject);
3 changes: 3 additions & 0 deletions src/builtins/builtins.h
@@ -290,6 +290,9 @@ class Builtins {
CallOrConstructMode mode,
Handle<Code> code);

static void Generate_MaglevFunctionEntryStackCheck(MacroAssembler* masm,
bool save_new_target);

enum class InterpreterEntryTrampolineMode {
// The version of InterpreterEntryTrampoline used by default.
kDefault,
28 changes: 28 additions & 0 deletions src/builtins/x64/builtins-x64.cc
@@ -2740,6 +2740,34 @@ void Builtins::Generate_MaglevOnStackReplacement(MacroAssembler* masm) {
D::MaybeTargetCodeRegister());
}

#ifdef V8_ENABLE_MAGLEV

// static
void Builtins::Generate_MaglevFunctionEntryStackCheck(MacroAssembler* masm,
bool save_new_target) {
// Input (rax): Stack size (Smi).
// This builtin can be invoked just after Maglev's prologue.
// All registers are available, except (possibly) new.target.
ASM_CODE_COMMENT(masm);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ AssertSmi(rax);
if (save_new_target) {
__ AssertSmiOrHeapObjectInCompressionCage(
kJavaScriptCallNewTargetRegister);
__ Push(kJavaScriptCallNewTargetRegister);
}
__ Push(rax);
__ CallRuntime(Runtime::kStackGuardWithGap, 1);
if (save_new_target) {
__ Pop(kJavaScriptCallNewTargetRegister);
}
}
__ Ret();
}

#endif // V8_ENABLE_MAGLEV

#if V8_ENABLE_WEBASSEMBLY

// Returns the offset beyond the last saved FP register.
13 changes: 13 additions & 0 deletions src/codegen/arm64/macro-assembler-arm64.cc
@@ -1685,6 +1685,19 @@ void MacroAssembler::AssertBoundFunction(Register object) {
Check(eq, AbortReason::kOperandIsNotABoundFunction);
}

void MacroAssembler::AssertSmiOrHeapObjectInCompressionCage(Register object) {
if (!v8_flags.debug_code) return;
ASM_CODE_COMMENT(this);
Label is_smi;
B(&is_smi, CheckSmi(object));
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
sub(temp, object, kPtrComprCageBaseRegister);
Cmp(temp, Immediate(UINT32_MAX));
Check(lo, AbortReason::kObjectNotTagged);
bind(&is_smi);
}

void MacroAssembler::AssertGeneratorObject(Register object) {
if (!v8_flags.debug_code) return;
ASM_CODE_COMMENT(this);
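
The new assert encodes a simple invariant: under pointer compression every
heap object lives in a 4 GB cage based at kPtrComprCageBaseRegister, so a
tagged value is plausible if it is a smi (low tag bit clear) or if its offset
from the cage base fits in 32 bits. A stand-alone model of the predicate
(plain C++; the function name and smi-tag constant are illustrative, not V8
API):

  #include <cstdint>

  bool SmiOrHeapObjectInCompressionCage(uintptr_t value, uintptr_t cage_base) {
    if ((value & 1) == 0) return true;        // smi: low tag bit is 0
    return (value - cage_base) < UINT32_MAX;  // unsigned, mirrors Check(lo/below)
  }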
5 changes: 5 additions & 0 deletions src/codegen/arm64/macro-assembler-arm64.h
@@ -1946,6 +1946,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
// enabled via --debug-code.
void AssertUndefinedOrAllocationSite(Register object) NOOP_UNLESS_DEBUG_CODE;

// Abort execution if the argument is neither a smi nor in the pointer
// compression cage, enabled via --debug-code.
void AssertSmiOrHeapObjectInCompressionCage(Register object)
NOOP_UNLESS_DEBUG_CODE;

// ---- Calling / Jumping helpers ----

void CallRuntime(const Runtime::Function* f, int num_arguments);
13 changes: 13 additions & 0 deletions src/codegen/x64/macro-assembler-x64.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <climits>
#include <cstdint>

#if V8_TARGET_ARCH_X64
@@ -2945,6 +2946,18 @@ void MacroAssembler::AssertCode(Register object) {
Check(equal, AbortReason::kOperandIsNotACode);
}

void MacroAssembler::AssertSmiOrHeapObjectInCompressionCage(Register object) {
if (!v8_flags.debug_code) return;
ASM_CODE_COMMENT(this);
Label is_smi;
j(CheckSmi(object), &is_smi, Label::kNear);
movq(kScratchRegister, object);
subq(kScratchRegister, kPtrComprCageBaseRegister);
cmpq(kScratchRegister, Immediate(UINT32_MAX));
Check(below, AbortReason::kObjectNotTagged);
bind(&is_smi);
}

void MacroAssembler::AssertConstructor(Register object) {
if (!v8_flags.debug_code) return;
ASM_CODE_COMMENT(this);
5 changes: 5 additions & 0 deletions src/codegen/x64/macro-assembler-x64.h
@@ -534,6 +534,11 @@ class V8_EXPORT_PRIVATE MacroAssembler
// --debug-code.
void AssertCode(Register object) NOOP_UNLESS_DEBUG_CODE;

// Abort execution if the argument is neither a smi nor in the pointer
// compression cage, enabled via --debug-code.
void AssertSmiOrHeapObjectInCompressionCage(Register object)
NOOP_UNLESS_DEBUG_CODE;

// Print a message to stdout and abort execution.
void Abort(AbortReason msg);

14 changes: 14 additions & 0 deletions src/maglev/arm/maglev-assembler-arm-inl.h
@@ -991,6 +991,20 @@ inline void MaglevAssembler::AssertStackSizeCorrect() {
}
}

inline Condition MaglevAssembler::FunctionEntryStackCheck(
int stack_check_offset) {
ScratchRegisterScope temps(this);
Register stack_cmp_reg = sp;
if (stack_check_offset > kStackLimitSlackForDeoptimizationInBytes) {
stack_cmp_reg = temps.Acquire();
sub(stack_cmp_reg, sp, Operand(stack_check_offset));
}
Register interrupt_stack_limit = temps.Acquire();
LoadStackLimit(interrupt_stack_limit, StackLimitKind::kInterruptStackLimit);
cmp(stack_cmp_reg, interrupt_stack_limit);
return kUnsignedGreaterThanEqual;
}

inline void MaglevAssembler::FinishCode() { CheckConstPool(true, false); }

template <typename NodeT>
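
Note the direction of the returned condition: kUnsignedGreaterThanEqual holds
when the probe register (sp, or sp minus the offset once that exceeds the
deoptimization slack) is at or above the interrupt stack limit, i.e. when the
check passes. Since the interrupt limit is equal to or tighter than the real
stack limit, one unsigned compare covers both, as the comments in the deleted
per-architecture code below spell out. A plain-C++ model of the predicate
(illustrative only):

  // true => prologue may proceed; false => call the stack-check trampoline.
  bool EnoughStack(uintptr_t sp, uintptr_t interrupt_limit,
                   uintptr_t stack_check_offset, uintptr_t deopt_slack) {
    uintptr_t probe =
        stack_check_offset > deopt_slack ? sp - stack_check_offset : sp;
    return probe >= interrupt_limit;  // kUnsignedGreaterThanEqual
  }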
44 changes: 0 additions & 44 deletions src/maglev/arm/maglev-ir-arm.cc
@@ -842,50 +842,6 @@ void ReduceInterruptBudgetForReturn::GenerateCode(
amount());
}

int FunctionEntryStackCheck::MaxCallStackArgs() const { return 1; }
void FunctionEntryStackCheck::SetValueLocationConstraints() {
set_temporaries_needed(2);
}
void FunctionEntryStackCheck::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
// Stack check. This folds the checks for both the interrupt stack limit
// check and the real stack limit into one by just checking for the
// interrupt limit. The interrupt limit is either equal to the real
// stack limit or tighter. By ensuring we have space until that limit
// after building the frame we can quickly precheck both at once.
MaglevAssembler::ScratchRegisterScope temps(masm);
const int stack_check_offset = masm->code_gen_state()->stack_check_offset();
Register stack_cmp_reg = sp;
if (stack_check_offset > kStackLimitSlackForDeoptimizationInBytes) {
stack_cmp_reg = temps.Acquire();
__ sub(stack_cmp_reg, sp, Operand(stack_check_offset));
}
Register interrupt_stack_limit = temps.Acquire();
__ LoadStackLimit(interrupt_stack_limit,
StackLimitKind::kInterruptStackLimit);
__ cmp(stack_cmp_reg, interrupt_stack_limit);

ZoneLabelRef deferred_call_stack_guard_return(masm);
__ JumpToDeferredIf(
lo,
[](MaglevAssembler* masm, FunctionEntryStackCheck* node,
ZoneLabelRef done, int stack_check_offset) {
ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
{
SaveRegisterStateForCall save_register_state(
masm, node->register_snapshot());
// Push the frame size
__ Push(Smi::FromInt(stack_check_offset));
__ CallRuntime(Runtime::kStackGuardWithGap, 1);
save_register_state.DefineSafepointWithLazyDeopt(
node->lazy_deopt_info());
}
__ b(*done);
},
this, deferred_call_stack_guard_return, stack_check_offset);
__ bind(*deferred_call_stack_guard_return);
}

// ---
// Control nodes
// ---
14 changes: 14 additions & 0 deletions src/maglev/arm64/maglev-assembler-arm64-inl.h
@@ -1028,6 +1028,20 @@ inline void MaglevAssembler::AssertStackSizeCorrect() {
}
}

inline Condition MaglevAssembler::FunctionEntryStackCheck(
int stack_check_offset) {
ScratchRegisterScope temps(this);
Register stack_cmp_reg = sp;
if (stack_check_offset > kStackLimitSlackForDeoptimizationInBytes) {
stack_cmp_reg = temps.Acquire();
Sub(stack_cmp_reg, sp, stack_check_offset);
}
Register interrupt_stack_limit = temps.Acquire();
LoadStackLimit(interrupt_stack_limit, StackLimitKind::kInterruptStackLimit);
Cmp(stack_cmp_reg, interrupt_stack_limit);
return kUnsignedGreaterThanEqual;
}

inline void MaglevAssembler::FinishCode() {
ForceConstantPoolEmissionWithoutJump();
}
44 changes: 0 additions & 44 deletions src/maglev/arm64/maglev-ir-arm64.cc
@@ -750,50 +750,6 @@ void ReduceInterruptBudgetForReturn::GenerateCode(
amount());
}

int FunctionEntryStackCheck::MaxCallStackArgs() const { return 1; }
void FunctionEntryStackCheck::SetValueLocationConstraints() {
set_temporaries_needed(2);
}
void FunctionEntryStackCheck::GenerateCode(MaglevAssembler* masm,
const ProcessingState& state) {
// Stack check. This folds the checks for both the interrupt stack limit
// check and the real stack limit into one by just checking for the
// interrupt limit. The interrupt limit is either equal to the real
// stack limit or tighter. By ensuring we have space until that limit
// after building the frame we can quickly precheck both at once.
MaglevAssembler::ScratchRegisterScope temps(masm);
const int stack_check_offset = masm->code_gen_state()->stack_check_offset();
Register stack_cmp_reg = sp;
if (stack_check_offset > kStackLimitSlackForDeoptimizationInBytes) {
stack_cmp_reg = temps.Acquire();
__ Sub(stack_cmp_reg, sp, stack_check_offset);
}
Register interrupt_stack_limit = temps.Acquire();
__ LoadStackLimit(interrupt_stack_limit,
StackLimitKind::kInterruptStackLimit);
__ Cmp(stack_cmp_reg, interrupt_stack_limit);

ZoneLabelRef deferred_call_stack_guard_return(masm);
__ JumpToDeferredIf(
lo,
[](MaglevAssembler* masm, FunctionEntryStackCheck* node,
ZoneLabelRef done, int stack_check_offset) {
ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
{
SaveRegisterStateForCall save_register_state(
masm, node->register_snapshot());
// Push the frame size
__ Push(Smi::FromInt(stack_check_offset));
__ CallRuntime(Runtime::kStackGuardWithGap, 1);
save_register_state.DefineSafepointWithLazyDeopt(
node->lazy_deopt_info());
}
__ B(*done);
},
this, deferred_call_stack_guard_return, stack_check_offset);
__ bind(*deferred_call_stack_guard_return);
}

// ---
// Control nodes
// ---
1 change: 1 addition & 0 deletions src/maglev/maglev-assembler.h
@@ -496,6 +496,7 @@ class MaglevAssembler : public MacroAssembler {
inline void FinishCode();

inline void AssertStackSizeCorrect();
inline Condition FunctionEntryStackCheck(int stack_check_offset);

inline void SetMapAsRoot(Register object, RootIndex map);

4 changes: 3 additions & 1 deletion src/maglev/maglev-graph-builder.h
@@ -269,7 +269,9 @@ class MaglevGraphBuilder {
// Don't use the AddNewNode helper for the function entry stack check, so
// that we can set a custom deopt frame on it.
FunctionEntryStackCheck* function_entry_stack_check =
FunctionEntryStackCheck::New(zone(), {});
NodeBase::New<FunctionEntryStackCheck>(
zone(), {},
bytecode().incoming_new_target_or_generator_register().is_valid());
new (function_entry_stack_check->lazy_deopt_info()) LazyDeoptInfo(
zone(), GetDeoptFrameForEntryStackCheck(),
interpreter::Register::invalid_value(), 0, compiler::FeedbackSource());
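
The extra constructor argument records whether this bytecode receives
new.target (or a generator object) in a register; at code-generation time that
flag presumably selects between the _WithNewTarget and _WithoutNewTarget
trampolines declared in builtins-definitions.h above, so new.target is only
saved and restored when it is actually live. A hedged sketch of the selection
(the accessor name new_target_is_valid() is an assumption, not part of this
diff):

  __ CallBuiltin(node->new_target_is_valid()
                     ? Builtin::kMaglevFunctionEntryStackCheck_WithNewTarget
                     : Builtin::kMaglevFunctionEntryStackCheck_WithoutNewTarget);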