
Commit 0657abd

Update to jdk-20+13
27971: failed to build after JDK-8290025
27970: LA port of 8282410: Remove SA ProcDebugger support
27969: LA port of 8292890: Remove PrintTouchedMethodsAtExit and LogTouchedMethods
27968: LA port of 8290025: Remove the Sweeper
1 parent 9b721b1 commit 0657abd

24 files changed (+248, -268 lines)

src/hotspot/cpu/loongarch/c1_MacroAssembler_loongarch_64.cpp

Lines changed: 1 addition & 1 deletion
@@ -290,7 +290,7 @@ void C1_MacroAssembler::build_frame(int framesize, int bang_size_in_bytes) {

   // Insert nmethod entry barrier into frame.
   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
-  bs->nmethod_entry_barrier(this);
+  bs->nmethod_entry_barrier(this, NULL /* slow_path */, NULL /* continuation */, NULL /* guard */);
 }

 void C1_MacroAssembler::remove_frame(int framesize) {

src/hotspot/cpu/loongarch/c2_MacroAssembler_loongarch.cpp

Lines changed: 14 additions & 0 deletions
@@ -1731,3 +1731,17 @@ bool C2_MacroAssembler::in_scratch_emit_size() {
   }
   return MacroAssembler::in_scratch_emit_size();
 }
+
+void C2_MacroAssembler::emit_entry_barrier_stub(C2EntryBarrierStub* stub) {
+  bind(stub->slow_path());
+  call_long(StubRoutines::la::method_entry_barrier());
+  b(stub->continuation());
+
+  bind(stub->guard());
+  relocate(entry_guard_Relocation::spec());
+  emit_int32(0); // nmethod guard value
+}
+
+int C2_MacroAssembler::entry_barrier_stub_size() {
+  return 5 * 4;
+}
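With C2, the guard word and the runtime call live in this out-of-line stub, so the inline fast path only needs a compare-and-branch. Below is a minimal standalone sketch (plain C++, not part of this commit; the function names are stand-ins) of the control flow the stub implements: the method entry barrier runs only when the guard word differs from the thread's disarmed value, after which execution continues into the method body.

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for the method_entry_barrier runtime stub.
static void method_entry_barrier_runtime() { std::puts("slow path: run entry barrier"); }

static void nmethod_entry(uint32_t guard_word, uint32_t thread_disarmed_value) {
  if (guard_word != thread_disarmed_value) {   // fast path: bne to stub->slow_path()
    method_entry_barrier_runtime();            // stub: call_long(method_entry_barrier)
  }                                            // stub: b back to stub->continuation()
  std::puts("continue into the method body");
}

int main() {
  nmethod_entry(/*guard*/ 0, /*disarmed*/ 0);  // disarmed: skip the stub
  nmethod_entry(/*guard*/ 1, /*disarmed*/ 0);  // armed: take the stub
  return 0;
}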

src/hotspot/cpu/loongarch/c2_MacroAssembler_loongarch.hpp

Lines changed: 2 additions & 2 deletions
@@ -47,8 +47,8 @@
 #define T8 RT8

  public:
-  void emit_entry_barrier_stub(C2EntryBarrierStub* stub) {}
-  static int entry_barrier_stub_size() { return 0; }
+  void emit_entry_barrier_stub(C2EntryBarrierStub* stub);
+  static int entry_barrier_stub_size();

   void cmp_branch_short(int flag, Register op1, Register op2, Label& L, bool is_signed);
   void cmp_branch_long(int flag, Register op1, Register op2, Label* L, bool is_signed);

src/hotspot/cpu/loongarch/frame_loongarch.cpp

Lines changed: 1 addition & 6 deletions
@@ -162,16 +162,11 @@ bool frame::safe_for_sender(JavaThread *thread) {
   }

   // We must always be able to find a recognizable pc
-  CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
+  CodeBlob* sender_blob = CodeCache::find_blob(sender_pc);
   if (sender_pc == NULL || sender_blob == NULL) {
     return false;
   }

-  // Could be a zombie method
-  if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
-    return false;
-  }
-
   // Could just be some random pointer within the codeBlob
   if (!sender_blob->code_contains(sender_pc)) {
     return false;

src/hotspot/cpu/loongarch/frame_loongarch.inline.hpp

Lines changed: 2 additions & 4 deletions
@@ -112,10 +112,8 @@ inline frame::frame(intptr_t* ptr_sp, intptr_t* ptr_fp) {
   _pc = (address)(ptr_sp[-1]);

   // Here's a sticky one. This constructor can be called via AsyncGetCallTrace
-  // when last_Java_sp is non-null but the pc fetched is junk. If we are truly
-  // unlucky the junk value could be to a zombied method and we'll die on the
-  // find_blob call. This is also why we can have no asserts on the validity
-  // of the pc we find here. AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler
+  // when last_Java_sp is non-null but the pc fetched is junk.
+  // AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler
   // -> pd_last_frame should use a specialized version of pd_last_frame which could
   // call a specilaized frame constructor instead of this one.
   // Then we could use the assert below. However this assert is of somewhat dubious

src/hotspot/cpu/loongarch/gc/g1/g1BarrierSetAssembler_loongarch.cpp

Lines changed: 2 additions & 14 deletions
@@ -192,12 +192,7 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
   __ b(done);

   __ bind(runtime);
-  // save the live input values
-  if (tosca_live) __ push(V0);
-
-  if (obj != noreg && obj != V0) __ push(obj);
-
-  if (pre_val != V0) __ push(pre_val);
+  __ push_call_clobbered_registers();

   // Calling the runtime using the regular call_VM_leaf mechanism generates
   // code (generated by InterpreterMacroAssember::call_VM_leaf_base)
@@ -220,14 +215,7 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
     __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
   }

-  // save the live input values
-  if (pre_val != V0)
-    __ pop(pre_val);
-
-  if (obj != noreg && obj != V0)
-    __ pop(obj);
-
-  if (tosca_live) __ pop(V0);
+  __ pop_call_clobbered_registers();

   __ bind(done);
 }

src/hotspot/cpu/loongarch/gc/shared/barrierSetAssembler_loongarch.cpp

Lines changed: 80 additions & 15 deletions
@@ -217,32 +217,97 @@ void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm,
   __ st_d(t1, Address(TREG, JavaThread::allocated_bytes_offset()));
 }

-void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) {
+static volatile uint32_t _patching_epoch = 0;
+
+address BarrierSetAssembler::patching_epoch_addr() {
+  return (address)&_patching_epoch;
+}
+
+void BarrierSetAssembler::increment_patching_epoch() {
+  Atomic::inc(&_patching_epoch);
+}
+
+void BarrierSetAssembler::clear_patching_epoch() {
+  _patching_epoch = 0;
+}
+
+void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slow_path, Label* continuation, Label* guard) {
   BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();

   if (bs_nm == NULL) {
     return;
   }

-  Label skip, guard;
-  Address thread_disarmed_addr(TREG, in_bytes(bs_nm->thread_disarmed_offset()));
+  Label local_guard;
+  NMethodPatchingType patching_type = nmethod_patching_type();

-  __ lipc(SCR1, guard);
-  __ ld_w(SCR1, SCR1, 0);
+  if (slow_path == NULL) {
+    guard = &local_guard;
+  }

-  // Subsequent loads of oops must occur after load of guard value.
-  // BarrierSetNMethod::disarm sets guard with release semantics.
-  __ membar(__ LoadLoad);
-  __ ld_w(SCR2, thread_disarmed_addr);
-  __ beq(SCR1, SCR2, skip);
+  __ lipc(SCR1, *guard);
+  __ ld_wu(SCR1, SCR1, 0);
+
+  switch (patching_type) {
+    case NMethodPatchingType::conc_data_patch:
+      // Subsequent loads of oops must occur after load of guard value.
+      // BarrierSetNMethod::disarm sets guard with release semantics.
+      __ membar(__ LoadLoad); // fall through to stw_instruction_and_data_patch
+    case NMethodPatchingType::stw_instruction_and_data_patch:
+      {
+        // With STW patching, no data or instructions are updated concurrently,
+        // which means there isn't really any need for any fencing for neither
+        // data nor instruction modification happening concurrently. The
+        // instruction patching is synchronized with global icache_flush() by
+        // the write hart on riscv. So here we can do a plain conditional
+        // branch with no fencing.
+        Address thread_disarmed_addr(TREG, in_bytes(bs_nm->thread_disarmed_offset()));
+        __ ld_wu(SCR2, thread_disarmed_addr);
+        break;
+      }
+    case NMethodPatchingType::conc_instruction_and_data_patch:
+      {
+        // If we patch code we need both a code patching and a loadload
+        // fence. It's not super cheap, so we use a global epoch mechanism
+        // to hide them in a slow path.
+        // The high level idea of the global epoch mechanism is to detect
+        // when any thread has performed the required fencing, after the
+        // last nmethod was disarmed. This implies that the required
+        // fencing has been performed for all preceding nmethod disarms
+        // as well. Therefore, we do not need any further fencing.
+        __ lea(SCR2, ExternalAddress((address)&_patching_epoch));
+        // Embed an artificial data dependency to order the guard load
+        // before the epoch load.
+        __ srli_d(RA, SCR1, 32);
+        __ orr(SCR2, SCR2, RA);
+        // Read the global epoch value.
+        __ ld_wu(SCR2, SCR2);
+        // Combine the guard value (low order) with the epoch value (high order).
+        __ slli_d(SCR2, SCR2, 32);
+        __ orr(SCR1, SCR1, SCR2);
+        // Compare the global values with the thread-local values
+        Address thread_disarmed_and_epoch_addr(TREG, in_bytes(bs_nm->thread_disarmed_offset()));
+        __ ld_d(SCR2, thread_disarmed_and_epoch_addr);
+        break;
+      }
+    default:
+      ShouldNotReachHere();
+  }

-  __ call_long(StubRoutines::la::method_entry_barrier());
-  __ b(skip);
+  if (slow_path == NULL) {
+    Label skip_barrier;
+    __ beq(SCR1, SCR2, skip_barrier);

-  __ bind(guard);
-  __ emit_int32(0); // nmethod guard value. Skipped over in common case.
+    __ call_long(StubRoutines::la::method_entry_barrier());
+    __ b(skip_barrier);

-  __ bind(skip);
+    __ bind(local_guard);
+    __ emit_int32(0); // nmethod guard value. Skipped over in common case.
+    __ bind(skip_barrier);
+  } else {
+    __ bne(SCR1, SCR2, *slow_path);
+    __ bind(*continuation);
+  }
 }

 void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
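For the conc_instruction_and_data_patch case above, the barrier folds two checks into one 64-bit compare: the nmethod's 32-bit guard word sits in the low half and the global patching epoch in the high half, matched against a single thread-local word at thread_disarmed_offset(). The following standalone sketch (plain C++, not HotSpot code; names are illustrative) mirrors the slli_d/orr/ld_d sequence to show when the fast path is taken.

#include <cstdint>
#include <cstdio>

// Pack the guard (low 32 bits) and epoch (high 32 bits), as the slli_d/orr pair does.
static uint64_t pack(uint32_t guard, uint32_t epoch) {
  return (static_cast<uint64_t>(epoch) << 32) | guard;
}

static bool entry_is_disarmed(uint32_t nmethod_guard, uint32_t global_epoch,
                              uint64_t thread_disarmed_and_epoch) {
  // One 64-bit compare decides both "guard is disarmed" and
  // "this thread has already fenced for the current epoch".
  return pack(nmethod_guard, global_epoch) == thread_disarmed_and_epoch;
}

int main() {
  uint64_t thread_word = pack(/*guard*/ 0, /*epoch*/ 7);
  std::printf("%d\n", entry_is_disarmed(0, 7, thread_word)); // 1: fast path
  std::printf("%d\n", entry_is_disarmed(1, 7, thread_word)); // 0: armed guard -> slow path
  std::printf("%d\n", entry_is_disarmed(0, 8, thread_word)); // 0: stale epoch -> slow path
  return 0;
}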

src/hotspot/cpu/loongarch/gc/shared/barrierSetAssembler_loongarch.hpp

Lines changed: 12 additions & 1 deletion
@@ -34,6 +34,12 @@

 class InterpreterMacroAssembler;

+enum class NMethodPatchingType {
+  stw_instruction_and_data_patch,
+  conc_instruction_and_data_patch,
+  conc_data_patch
+};
+
 class BarrierSetAssembler: public CHeapObj<mtGC> {
 private:
   void incr_allocated_bytes(MacroAssembler* masm,
@@ -77,9 +83,14 @@ class BarrierSetAssembler: public CHeapObj<mtGC> {

   virtual void barrier_stubs_init() {}

-  virtual void nmethod_entry_barrier(MacroAssembler* masm);
+  virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::stw_instruction_and_data_patch; }
+
+  virtual void nmethod_entry_barrier(MacroAssembler* masm, Label* slow_path, Label* continuation, Label* guard);
   virtual void c2i_entry_barrier(MacroAssembler* masm);

+  static address patching_epoch_addr();
+  static void clear_patching_epoch();
+  static void increment_patching_epoch();
 };

 #endif // CPU_LOONGARCH_GC_SHARED_BARRIERSETASSEMBLER_LOONGARCH_HPP
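A concrete barrier-set assembler opts into one of these schemes by overriding the new nmethod_patching_type() virtual. The standalone sketch below uses stand-in class names (not from this commit) to show the shape of such an override: a collector that patches nmethod data concurrently would return conc_data_patch, so that nmethod_entry_barrier() emits the LoadLoad-fenced variant shown in the .cpp diff above.

#include <cstdio>

// Stand-in enum and base class with the same shape as the header above.
enum class NMethodPatchingType {
  stw_instruction_and_data_patch,
  conc_instruction_and_data_patch,
  conc_data_patch
};

class BarrierSetAssemblerLike {
public:
  virtual ~BarrierSetAssemblerLike() {}
  // Default mirrors the header: stop-the-world patching, no extra fencing.
  virtual NMethodPatchingType nmethod_patching_type() {
    return NMethodPatchingType::stw_instruction_and_data_patch;
  }
};

class ConcurrentlyPatchingGCAssembler : public BarrierSetAssemblerLike {
public:
  // Hypothetical collector that patches nmethod data concurrently.
  virtual NMethodPatchingType nmethod_patching_type() {
    return NMethodPatchingType::conc_data_patch;
  }
};

int main() {
  ConcurrentlyPatchingGCAssembler a;
  std::printf("%d\n", static_cast<int>(a.nmethod_patching_type())); // 2 == conc_data_patch
  return 0;
}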

src/hotspot/cpu/loongarch/gc/shared/barrierSetNMethod_loongarch.cpp

Lines changed: 74 additions & 27 deletions
@@ -26,6 +26,7 @@
 #include "precompiled.hpp"
 #include "code/codeCache.hpp"
 #include "code/nativeInst.hpp"
+#include "gc/shared/barrierSetAssembler.hpp"
 #include "gc/shared/barrierSetNMethod.hpp"
 #include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
@@ -36,20 +37,57 @@
 #include "utilities/align.hpp"
 #include "utilities/debug.hpp"

+static int slow_path_size(nmethod* nm) {
+  // The slow path code is out of line with C2.
+  // Leave a b to the stub in the fast path.
+  return nm->is_compiled_by_c2() ? 1 : 6;
+}
+
+static int entry_barrier_offset(nmethod* nm) {
+  BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
+  switch (bs_asm->nmethod_patching_type()) {
+    case NMethodPatchingType::stw_instruction_and_data_patch:
+      return -4 * (3 + slow_path_size(nm));
+    case NMethodPatchingType::conc_data_patch:
+      return -4 * (4 + slow_path_size(nm));
+    case NMethodPatchingType::conc_instruction_and_data_patch:
+      return -4 * (9 + slow_path_size(nm));
+  }
+  ShouldNotReachHere();
+  return 0;
+}
+
 class NativeNMethodBarrier: public NativeInstruction {
   address instruction_address() const { return addr_at(0); }

-  int *guard_addr() {
-    return reinterpret_cast<int*>(instruction_address() + 9 * 4);
+  int local_guard_offset(nmethod* nm) {
+    // It's the last instruction
+    return (-entry_barrier_offset(nm)) - 4;
+  }
+
+  int *guard_addr(nmethod* nm) {
+    if (nm->is_compiled_by_c2()) {
+      // With c2 compiled code, the guard is out-of-line in a stub
+      // We find it using the RelocIterator.
+      RelocIterator iter(nm);
+      while (iter.next()) {
+        if (iter.type() == relocInfo::entry_guard_type) {
+          entry_guard_Relocation* const reloc = iter.entry_guard_reloc();
+          return reinterpret_cast<int*>(reloc->addr());
+        }
+      }
+      ShouldNotReachHere();
+    }
+    return reinterpret_cast<int*>(instruction_address() + local_guard_offset(nm));
   }

  public:
-  int get_value() {
-    return Atomic::load_acquire(guard_addr());
+  int get_value(nmethod* nm) {
+    return Atomic::load_acquire(guard_addr(nm));
   }

-  void set_value(int value) {
-    Atomic::release_store(guard_addr(), value);
+  void set_value(nmethod* nm, int value) {
+    Atomic::release_store(guard_addr(nm), value);
   }

   void verify() const;
@@ -64,14 +102,7 @@ struct CheckInsn {

 static const struct CheckInsn barrierInsn[] = {
   { 0xfe000000, 0x18000000, "pcaddi"},
-  { 0xffc00000, 0x28800000, "ld.w"},
-  { 0xffff8000, 0x38720000, "dbar"},
-  { 0xffc00000, 0x28800000, "ld.w"},
-  { 0xfc000000, 0x58000000, "beq"},
-  { 0xfe000000, 0x14000000, "lu12i.w"},
-  { 0xfe000000, 0x16000000, "lu32i.d"},
-  { 0xfc000000, 0x4c000000, "jirl"},
-  { 0xfc000000, 0x50000000, "b"}
+  { 0xffc00000, 0x2a800000, "ld.wu"},
 };

 // The encodings must match the instructions emitted by
@@ -123,17 +154,8 @@ void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) {
     new_frame->pc = SharedRuntime::get_handle_wrong_method_stub();
 }

-// This is the offset of the entry barrier from where the frame is completed.
-// If any code changes between the end of the verified entry where the entry
-// barrier resides, and the completion of the frame, then
-// NativeNMethodCmpBarrier::verify() will immediately complain when it does
-// not find the expected native instruction at this offset, which needs updating.
-// Note that this offset is invariant of PreserveFramePointer.
-
-static const int entry_barrier_offset = -4 * 10;
-
 static NativeNMethodBarrier* native_nmethod_barrier(nmethod* nm) {
-  address barrier_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset;
+  address barrier_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm);
   NativeNMethodBarrier* barrier = reinterpret_cast<NativeNMethodBarrier*>(barrier_address);
   debug_only(barrier->verify());
   return barrier;
@@ -144,15 +166,40 @@ void BarrierSetNMethod::disarm(nmethod* nm) {
     return;
   }

+  // The patching epoch is incremented before the nmethod is disarmed. Disarming
+  // is performed with a release store. In the nmethod entry barrier, the values
+  // are read in the opposite order, such that the load of the nmethod guard
+  // acquires the patching epoch. This way, the guard is guaranteed to block
+  // entries to the nmethod, until it has safely published the requirement for
+  // further fencing by mutators, before they are allowed to enter.
+  BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
+  bs_asm->increment_patching_epoch();
+
   // Disarms the nmethod guard emitted by BarrierSetAssembler::nmethod_entry_barrier.
   // Symmetric "LD.W; DBAR" is in the nmethod barrier.
   NativeNMethodBarrier* barrier = native_nmethod_barrier(nm);

-  barrier->set_value(disarmed_value());
+  barrier->set_value(nm, disarmed_value());
 }

 void BarrierSetNMethod::arm(nmethod* nm, int arm_value) {
-  Unimplemented();
+  if (!supports_entry_barrier(nm)) {
+    return;
+  }
+
+  if (arm_value == disarmed_value()) {
+    // The patching epoch is incremented before the nmethod is disarmed. Disarming
+    // is performed with a release store. In the nmethod entry barrier, the values
+    // are read in the opposite order, such that the load of the nmethod guard
+    // acquires the patching epoch. This way, the guard is guaranteed to block
+    // entries to the nmethod, until it has safely published the requirement for
+    // further fencing by mutators, before they are allowed to enter.
+    BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
+    bs_asm->increment_patching_epoch();
+  }
+
+  NativeNMethodBarrier* barrier = native_nmethod_barrier(nm);
+  barrier->set_value(nm, arm_value);
 }

 bool BarrierSetNMethod::is_armed(nmethod* nm) {
@@ -161,5 +208,5 @@ bool BarrierSetNMethod::is_armed(nmethod* nm) {
   }

   NativeNMethodBarrier* barrier = native_nmethod_barrier(nm);
-  return barrier->get_value() != disarmed_value();
+  return barrier->get_value(nm) != disarmed_value();
 }
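In entry_barrier_offset() above, the factor 4 is the LoongArch instruction size in bytes, and the constants 3, 4 and 9 plus slow_path_size() presumably count the 4-byte slots the emitted barrier occupies ahead of the frame-complete point for each patching type (a single b to the out-of-line stub with C2, six slots of inline slow path otherwise). A standalone sketch of the same arithmetic (plain C++, not HotSpot code; the instruction-count reading is an assumption):

#include <cstdio>

enum class PatchingType { stw_instruction_and_data_patch, conc_data_patch, conc_instruction_and_data_patch };

static int slow_path_size(bool compiled_by_c2) {
  // b to the out-of-line C2 stub vs. inline slow path (per the diff above).
  return compiled_by_c2 ? 1 : 6;
}

static int entry_barrier_offset(PatchingType t, bool compiled_by_c2) {
  switch (t) {
    case PatchingType::stw_instruction_and_data_patch:  return -4 * (3 + slow_path_size(compiled_by_c2));
    case PatchingType::conc_data_patch:                 return -4 * (4 + slow_path_size(compiled_by_c2));
    case PatchingType::conc_instruction_and_data_patch: return -4 * (9 + slow_path_size(compiled_by_c2));
  }
  return 0;
}

int main() {
  // STW patching, non-C2 code: the barrier begins 36 bytes before frame completion.
  std::printf("%d\n", entry_barrier_offset(PatchingType::stw_instruction_and_data_patch, false)); // -36
  // Concurrent instruction+data patching, C2 code: 40 bytes before frame completion.
  std::printf("%d\n", entry_barrier_offset(PatchingType::conc_instruction_and_data_patch, true)); // -40
  return 0;
}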
