24 | 24 |
|
25 | 25 | #include "precompiled.hpp" |
26 | 26 | #include "gc/shared/allocTracer.hpp" |
27 | | -#include "gc/shared/threadLocalAllocBuffer.inline.hpp" |
28 | 27 | #include "jfr/jfrEvents.hpp" |
29 | 28 | #include "utilities/globalDefinitions.hpp" |
30 | 29 | #include "utilities/macros.hpp" |
31 | 30 | #if INCLUDE_JFR |
32 | 31 | #include "jfr/support/jfrAllocationTracer.hpp" |
33 | 32 | #endif |
34 | 33 |
|
35 | | -static THREAD_LOCAL int64_t _last_allocated_bytes = 0; |
36 | | - |
37 | | -inline void send_allocation_sample(const Klass* klass, int64_t allocated_bytes) { |
38 | | - assert(allocated_bytes > 0, "invariant"); |
39 | | - EventObjectAllocationSample event; |
40 | | - if (event.should_commit()) { |
41 | | - const size_t weight = allocated_bytes - _last_allocated_bytes; |
42 | | - assert(weight > 0, "invariant"); |
43 | | - event.set_objectClass(klass); |
44 | | - event.set_weight(weight); |
45 | | - event.commit(); |
46 | | - _last_allocated_bytes = allocated_bytes; |
47 | | - } |
48 | | -} |
49 | | - |
50 | | -inline bool send_allocation_sample_with_result(const Klass* klass, int64_t allocated_bytes) { |
51 | | - assert(allocated_bytes > 0, "invariant"); |
52 | | - EventObjectAllocationSample event; |
53 | | - if (event.should_commit()) { |
54 | | - const size_t weight = allocated_bytes - _last_allocated_bytes; |
55 | | - assert(weight > 0, "invariant"); |
56 | | - event.set_objectClass(klass); |
57 | | - event.set_weight(weight); |
58 | | - event.commit(); |
59 | | - _last_allocated_bytes = allocated_bytes; |
60 | | - return true; |
61 | | - } |
62 | | - return false; |
63 | | -} |
64 | | - |
65 | | -inline intptr_t estimate_tlab_size_bytes(Thread* thread) { |
66 | | - assert(thread != NULL, "invariant"); |
67 | | - const size_t desired_tlab_size_bytes = thread->tlab().desired_size() * HeapWordSize; |
68 | | - const size_t alignment_reserve_bytes = thread->tlab().alignment_reserve_in_bytes(); |
69 | | - assert(desired_tlab_size_bytes > alignment_reserve_bytes, "invariant"); |
70 | | - return static_cast<intptr_t>(desired_tlab_size_bytes - alignment_reserve_bytes); |
71 | | -} |
72 | | - |
73 | | -inline int64_t load_allocated_bytes(Thread* thread) { |
74 | | - const int64_t allocated_bytes = thread->allocated_bytes(); |
75 | | - if (allocated_bytes < _last_allocated_bytes) { |
76 | | - // A hw thread can detach and reattach to the VM, and when it does, |
77 | | - // it gets a new JavaThread representation. The thread local variable |
78 | | - // tracking _last_allocated_bytes is mapped to the existing hw thread, |
79 | | - // so it needs to be reset. |
80 | | - _last_allocated_bytes = 0; |
81 | | - } |
82 | | - return allocated_bytes == _last_allocated_bytes ? 0 : allocated_bytes; |
83 | | -} |
84 | | - |
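The reset above covers an OS thread that detaches from and later re-attaches to the VM: the fresh JavaThread's cumulative allocated-bytes counter restarts at zero, while the THREAD_LOCAL high-water mark stays bound to the OS thread, so without the reset the computed weight would go negative. A minimal standalone sketch of the same bookkeeping, using hypothetical names (allocated_bytes_counter, last_sampled) in place of the HotSpot APIs:

```c++
#include <cstdint>

// Hypothetical stand-in for the THREAD_LOCAL _last_allocated_bytes above;
// illustration only, not HotSpot code.
thread_local int64_t last_sampled = 0;

// Returns the bytes allocated on this thread since the previous call
// (the event weight) and advances the per-thread high-water mark.
int64_t sample_weight(int64_t allocated_bytes_counter) {
  if (allocated_bytes_counter < last_sampled) {
    // The OS thread re-attached as a fresh JavaThread: its cumulative
    // counter restarted, but the thread_local survived, so reset it to
    // keep the weight from going negative.
    last_sampled = 0;
  }
  const int64_t weight = allocated_bytes_counter - last_sampled;
  last_sampled = allocated_bytes_counter;
  return weight;
}
```

Note that in the removed code the high-water mark is only advanced when an event actually commits, so allocations covered by skipped samples roll into the weight of the next committed event.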
85 | | -// To avoid large objects from being undersampled compared to the regular TLAB samples, |
86 | | -// the data amount is normalized as if it was a TLAB, giving a number of TLAB sampling attempts to the large object. |
87 | | -static void normalize_as_tlab_and_send_allocation_samples(Klass* klass, intptr_t obj_alloc_size_bytes, Thread* thread) { |
88 | | - const int64_t allocated_bytes = load_allocated_bytes(thread); |
89 | | - assert(allocated_bytes > 0, "invariant"); // obj_alloc_size_bytes is already attributed to allocated_bytes at this point. |
90 | | - if (!UseTLAB) { |
91 | | - send_allocation_sample(klass, allocated_bytes); |
92 | | - return; |
93 | | - } |
94 | | - const intptr_t tlab_size_bytes = estimate_tlab_size_bytes(thread); |
95 | | - if (allocated_bytes - _last_allocated_bytes < tlab_size_bytes) { |
96 | | - return; |
97 | | - } |
98 | | - assert(obj_alloc_size_bytes > 0, "invariant"); |
99 | | - do { |
100 | | - if (send_allocation_sample_with_result(klass, allocated_bytes)) { |
101 | | - return; |
102 | | - } |
103 | | - obj_alloc_size_bytes -= tlab_size_bytes; |
104 | | - } while (obj_alloc_size_bytes > 0); |
105 | | -} |
106 | | - |
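For a sense of the normalization being removed here: an allocation much larger than a TLAB is treated as the equivalent number of TLAB refills, and each refill's worth of bytes gets one sampling attempt, so a 4 MB allocation against a 256 KB TLAB estimate gets up to 16 chances to commit one ObjectAllocationSample rather than a single chance (the loop above stops at the first committed sample). A rough sketch of that attempt budget, as a hypothetical helper rather than HotSpot code:

```c++
#include <cstddef>

// Upper bound on the number of TLAB-sized sampling attempts a large
// allocation receives under the normalization above: roughly
// ceil(alloc_size / tlab_size), never fewer than one attempt.
// Hypothetical helper for illustration only.
static size_t tlab_normalized_attempts(size_t alloc_size_bytes, size_t tlab_size_bytes) {
  if (tlab_size_bytes == 0 || alloc_size_bytes == 0) {
    return 1; // degenerate inputs; the removed code asserts both are positive
  }
  return (alloc_size_bytes + tlab_size_bytes - 1) / tlab_size_bytes;
}
```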
107 | 34 | void AllocTracer::send_allocation_outside_tlab(Klass* klass, HeapWord* obj, size_t alloc_size, Thread* thread) { |
108 | | - JFR_ONLY(JfrAllocationTracer tracer(obj, alloc_size, thread);) |
| 35 | + JFR_ONLY(JfrAllocationTracer tracer(klass, obj, alloc_size, true, thread);) |
109 | 36 | EventObjectAllocationOutsideTLAB event; |
110 | 37 | if (event.should_commit()) { |
111 | 38 | event.set_objectClass(klass); |
112 | 39 | event.set_allocationSize(alloc_size); |
113 | 40 | event.commit(); |
114 | 41 | } |
115 | | - normalize_as_tlab_and_send_allocation_samples(klass, static_cast<intptr_t>(alloc_size), thread); |
116 | 42 | } |
117 | 43 |
|
118 | 44 | void AllocTracer::send_allocation_in_new_tlab(Klass* klass, HeapWord* obj, size_t tlab_size, size_t alloc_size, Thread* thread) { |
119 | | - JFR_ONLY(JfrAllocationTracer tracer(obj, alloc_size, thread);) |
| 45 | + JFR_ONLY(JfrAllocationTracer tracer(klass, obj, alloc_size, false, thread);) |
120 | 46 | EventObjectAllocationInNewTLAB event; |
121 | 47 | if (event.should_commit()) { |
122 | 48 | event.set_objectClass(klass); |
123 | 49 | event.set_allocationSize(alloc_size); |
124 | 50 | event.set_tlabSize(tlab_size); |
125 | 51 | event.commit(); |
126 | 52 | } |
127 | | - const int64_t allocated_bytes = load_allocated_bytes(thread); |
128 | | - if (allocated_bytes == 0) { |
129 | | - return; |
130 | | - } |
131 | | - send_allocation_sample(klass, allocated_bytes); |
132 | 53 | } |
133 | 54 |
|
134 | 55 | void AllocTracer::send_allocation_requiring_gc_event(size_t size, uint gcId) { |