Skip to content

Commit

Permalink
[x64] Optimize feedback vector flag test
Browse files Browse the repository at this point in the history
This reduces the size of generated maglev code by avoiding
loading the feedback vector flags into a register; the flags
word is now tested directly in memory.

Bug: v8:7700
Change-Id: Id79ac947d7eb9d46c770b1bf7316275cca3e95c4
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4665584
Commit-Queue: Victor Gomes <victorgomes@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/main@{#88675}
  • Loading branch information
victorgomes authored and V8 LUCI CQ committed Jul 5, 2023
1 parent 27490a2 commit 8361a77
Show file tree
Hide file tree
Showing 4 changed files with 28 additions and 32 deletions.
21 changes: 9 additions & 12 deletions src/builtins/x64/builtins-x64.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1076,10 +1076,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(

// Check the tiering state.
Label flags_need_processing;
Register flags = rcx;
__ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
flags, feedback_vector, CodeKind::INTERPRETED_FUNCTION,
&flags_need_processing);
__ CheckFeedbackVectorFlagsAndJumpIfNeedsProcessing(
feedback_vector, CodeKind::INTERPRETED_FUNCTION, &flags_need_processing);

ResetFeedbackVectorOsrUrgency(masm, feedback_vector, kScratchRegister);

Expand Down Expand Up @@ -1241,7 +1239,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(

#ifndef V8_JITLESS
__ bind(&flags_need_processing);
__ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector, closure);
__ OptimizeCodeOrTailCallOptimizedCodeSlot(feedback_vector, closure);

__ bind(&is_baseline);
{
Expand All @@ -1259,8 +1257,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ j(not_equal, &install_baseline_code);

// Check the tiering state.
__ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
flags, feedback_vector, CodeKind::BASELINE, &flags_need_processing);
__ CheckFeedbackVectorFlagsAndJumpIfNeedsProcessing(
feedback_vector, CodeKind::BASELINE, &flags_need_processing);

// Load the baseline code into the closure.
__ Move(rcx, kInterpreterBytecodeArrayRegister);
Expand Down Expand Up @@ -1566,12 +1564,11 @@ void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
// static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
Register feedback_vector = r8;
Register flags = rcx;
Register return_address = r15;

#ifdef DEBUG
for (auto reg : BaselineOutOfLinePrologueDescriptor::registers()) {
DCHECK(!AreAliased(feedback_vector, flags, return_address, reg));
DCHECK(!AreAliased(feedback_vector, return_address, reg));
}
#endif

Expand All @@ -1589,8 +1586,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {

// Check the tiering state.
Label flags_need_processing;
__ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
flags, feedback_vector, CodeKind::BASELINE, &flags_need_processing);
__ CheckFeedbackVectorFlagsAndJumpIfNeedsProcessing(
feedback_vector, CodeKind::BASELINE, &flags_need_processing);

ResetFeedbackVectorOsrUrgency(masm, feedback_vector, kScratchRegister);

Expand Down Expand Up @@ -1669,7 +1666,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// return since we may do a runtime call along the way that requires the
// stack to only contain valid frames.
__ Drop(1);
__ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector, closure,
__ OptimizeCodeOrTailCallOptimizedCodeSlot(feedback_vector, closure,
JumpMode::kPushAndReturn);
__ Trap();
}
Expand Down
21 changes: 11 additions & 10 deletions src/codegen/x64/macro-assembler-x64.cc
Original file line number Diff line number Diff line change
Expand Up @@ -909,41 +909,42 @@ void MacroAssembler::ReplaceClosureCodeWithOptimizedCode(

// Read off the flags in the feedback vector and check if there
// is optimized code or a tiering state that needs to be processed.
void MacroAssembler::LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
Register flags, Register feedback_vector, CodeKind current_code_kind,
void MacroAssembler::CheckFeedbackVectorFlagsAndJumpIfNeedsProcessing(
Register feedback_vector, CodeKind current_code_kind,
Label* flags_need_processing) {
ASM_CODE_COMMENT(this);
DCHECK(CodeKindCanTierUp(current_code_kind));
movzxwl(flags, FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset));
uint32_t kFlagsMask = FeedbackVector::kFlagsTieringStateIsAnyRequested |
FeedbackVector::kFlagsMaybeHasTurbofanCode |
FeedbackVector::kFlagsLogNextExecution;
if (current_code_kind != CodeKind::MAGLEV) {
kFlagsMask |= FeedbackVector::kFlagsMaybeHasMaglevCode;
}
testw(flags, Immediate(kFlagsMask));
testw(FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset),
Immediate(kFlagsMask));
j(not_zero, flags_need_processing);
}

void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot(
Register flags, Register feedback_vector, Register closure,
JumpMode jump_mode) {
Register feedback_vector, Register closure, JumpMode jump_mode) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(flags, feedback_vector, closure));
DCHECK(!AreAliased(feedback_vector, closure));
Label maybe_has_optimized_code, maybe_needs_logging;
// Check if optimized code is available.
testl(flags, Immediate(FeedbackVector::kFlagsTieringStateIsAnyRequested));
testw(FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset),
Immediate(FeedbackVector::kFlagsTieringStateIsAnyRequested));
j(zero, &maybe_needs_logging);

GenerateTailCallToReturnedCode(Runtime::kCompileOptimized, jump_mode);

bind(&maybe_needs_logging);
testl(flags, Immediate(FeedbackVector::LogNextExecutionBit::kMask));
testw(FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset),
Immediate(FeedbackVector::LogNextExecutionBit::kMask));
j(zero, &maybe_has_optimized_code);
GenerateTailCallToReturnedCode(Runtime::kFunctionLogNextExecution, jump_mode);

bind(&maybe_has_optimized_code);
Register optimized_code_entry = flags;
Register optimized_code_entry = kJavaScriptCallCodeStartRegister;
LoadTaggedField(
optimized_code_entry,
FieldOperand(feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset));
Expand Down
6 changes: 3 additions & 3 deletions src/codegen/x64/macro-assembler-x64.h
Original file line number Diff line number Diff line change
Expand Up @@ -871,11 +871,11 @@ class V8_EXPORT_PRIVATE MacroAssembler
Register slot_address);
void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id,
JumpMode jump_mode = JumpMode::kJump);
void LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
Register flags, Register feedback_vector, CodeKind current_code_kind,
void CheckFeedbackVectorFlagsAndJumpIfNeedsProcessing(
Register feedback_vector, CodeKind current_code_kind,
Label* flags_need_processing);
void OptimizeCodeOrTailCallOptimizedCodeSlot(
Register flags, Register feedback_vector, Register closure,
Register feedback_vector, Register closure,
JumpMode jump_mode = JumpMode::kJump);

// Abort execution if argument is not a Constructor, enabled via --debug-code.
Expand Down
12 changes: 5 additions & 7 deletions src/maglev/x64/maglev-assembler-x64.cc
Original file line number Diff line number Diff line change
Expand Up @@ -449,25 +449,23 @@ void MaglevAssembler::Prologue(Graph* graph) {
// Scratch registers. Don't clobber regs related to the calling
// convention (e.g. kJavaScriptCallArgCountRegister). Keep up-to-date
// with deferred flags code.
Register flags = rcx;
Register feedback_vector = r9;

Label* deferred_flags_need_processing = MakeDeferredCode(
[](MaglevAssembler* masm, Register flags, Register feedback_vector) {
[](MaglevAssembler* masm, Register feedback_vector) {
ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
// TODO(leszeks): This could definitely be a builtin that we
// tail-call.
__ OptimizeCodeOrTailCallOptimizedCodeSlot(
flags, feedback_vector, kJSFunctionRegister, JumpMode::kJump);
feedback_vector, kJSFunctionRegister, JumpMode::kJump);
__ Trap();
},
flags, feedback_vector);
feedback_vector);

Move(feedback_vector,
compilation_info()->toplevel_compilation_unit()->feedback().object());
LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
flags, feedback_vector, CodeKind::MAGLEV,
deferred_flags_need_processing);
CheckFeedbackVectorFlagsAndJumpIfNeedsProcessing(
feedback_vector, CodeKind::MAGLEV, deferred_flags_need_processing);
}

EnterFrame(StackFrame::MAGLEV);
Expand Down

0 comments on commit 8361a77

Please sign in to comment.