// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/profiler/stack_sampler_impl.h"

#include <iterator>
#include <utility>

#include "base/check.h"
#include "base/compiler_specific.h"
#include "base/memory/raw_ptr.h"
#include "base/profiler/metadata_recorder.h"
#include "base/profiler/profile_builder.h"
#include "base/profiler/sample_metadata.h"
#include "base/profiler/stack_buffer.h"
#include "base/profiler/stack_copier.h"
#include "base/profiler/suspendable_thread_delegate.h"
#include "base/profiler/unwinder.h"
#include "base/ranges/algorithm.h"
#include "build/build_config.h"

// IMPORTANT NOTE: Some functions within this implementation are invoked while
// the target thread is suspended, so they must not do any allocation from the
// heap, including indirectly via use of DCHECK/CHECK or other logging
// statements. Otherwise this code can deadlock on heap locks acquired by the
// target thread before it was suspended. These functions are commented with
// "NO HEAP ALLOCATIONS".
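//
// A minimal sketch of the deadlock this rule prevents (illustrative only,
// not code from this file):
//   1. The target thread calls malloc() and acquires the heap lock.
//   2. The profiler suspends the target thread before the lock is released.
//   3. A "NO HEAP ALLOCATIONS" function on the sampling thread then
//      allocates (e.g. via a logging statement), blocks on the same heap
//      lock, and never resumes the target thread; both threads are stuck.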

namespace base {

namespace {

// Notifies the unwinders about the stack capture, and records metadata, while
// the thread is suspended.
class StackCopierDelegate : public StackCopier::Delegate {
 public:
  StackCopierDelegate(
      const base::circular_deque<std::unique_ptr<Unwinder>>* unwinders,
      ProfileBuilder* profile_builder,
      MetadataRecorder::MetadataProvider* metadata_provider)
      : unwinders_(unwinders),
        profile_builder_(profile_builder),
        metadata_provider_(metadata_provider) {}

  StackCopierDelegate(const StackCopierDelegate&) = delete;
  StackCopierDelegate& operator=(const StackCopierDelegate&) = delete;

  // StackCopier::Delegate:
  // IMPORTANT NOTE: to avoid deadlock this function must not invoke any
  // non-reentrant code that is also invoked by the target thread. In
  // particular, it may not perform any heap allocation or deallocation,
  // including indirectly via use of DCHECK/CHECK or other logging statements.
  void OnStackCopy() override {
    for (const auto& unwinder : *unwinders_)
      unwinder->OnStackCapture();

    profile_builder_->RecordMetadata(*metadata_provider_);
  }

 private:
  raw_ptr<const base::circular_deque<std::unique_ptr<Unwinder>>> unwinders_;
  const raw_ptr<ProfileBuilder> profile_builder_;
  const raw_ptr<const MetadataRecorder::MetadataProvider> metadata_provider_;
};

}  // namespace

StackSamplerImpl::StackSamplerImpl(std::unique_ptr<StackCopier> stack_copier,
                                   UnwindersFactory core_unwinders_factory,
                                   ModuleCache* module_cache,
                                   RepeatingClosure record_sample_callback,
                                   StackSamplerTestDelegate* test_delegate)
    : stack_copier_(std::move(stack_copier)),
      unwinders_factory_(std::move(core_unwinders_factory)),
      module_cache_(module_cache),
      record_sample_callback_(std::move(record_sample_callback)),
      test_delegate_(test_delegate) {
  DCHECK(unwinders_factory_);
}

StackSamplerImpl::~StackSamplerImpl() = default;

void StackSamplerImpl::Initialize() {
  std::vector<std::unique_ptr<Unwinder>> unwinders =
      std::move(unwinders_factory_).Run();

  // |unwinders| is iterated backward since |unwinders_factory_| generates
  // unwinders in increasing priority order. |unwinders_| is stored in
  // decreasing priority order for ease of use within the class.
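  // For example, if the factory yields hypothetical unwinders {A, B}, with B
  // having the highest priority, |unwinders_| ends up storing {B, A}.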
  unwinders_.insert(unwinders_.end(),
                    std::make_move_iterator(unwinders.rbegin()),
                    std::make_move_iterator(unwinders.rend()));

  for (const auto& unwinder : unwinders_)
    unwinder->Initialize(module_cache_);

  was_initialized_ = true;
}

void StackSamplerImpl::AddAuxUnwinder(std::unique_ptr<Unwinder> unwinder) {
  // Initialize() invokes Initialize() on the unwinders present at the time it
  // runs. If it hasn't run yet it will initialize this unwinder along with
  // the rest; otherwise we initialize the unwinder here. Pushing the unwinder
  // onto the front of |unwinders_| gives it the highest priority.
  if (was_initialized_)
    unwinder->Initialize(module_cache_);

  unwinders_.push_front(std::move(unwinder));
}

void StackSamplerImpl::RecordStackFrames(StackBuffer* stack_buffer,
                                         ProfileBuilder* profile_builder) {
  DCHECK(stack_buffer);

  if (record_sample_callback_)
    record_sample_callback_.Run();

  RegisterContext thread_context;
  uintptr_t stack_top;
  TimeTicks timestamp;
  bool copy_stack_succeeded;
  {
    // Make this scope as small as possible because |metadata_provider| is
    // holding a lock.
    MetadataRecorder::MetadataProvider metadata_provider(
        GetSampleMetadataRecorder());
    StackCopierDelegate delegate(&unwinders_, profile_builder,
                                 &metadata_provider);
    copy_stack_succeeded = stack_copier_->CopyStack(
        stack_buffer, &stack_top, &timestamp, &thread_context, &delegate);
  }
  if (!copy_stack_succeeded) {
    profile_builder->OnSampleCompleted(
        {}, timestamp.is_null() ? TimeTicks::Now() : timestamp);
    return;
  }

  for (const auto& unwinder : unwinders_)
    unwinder->UpdateModules();

  if (test_delegate_)
    test_delegate_->OnPreStackWalk();

  profile_builder->OnSampleCompleted(
      WalkStack(module_cache_, &thread_context, stack_top, unwinders_),
      timestamp);
}

// static
std::vector<Frame> StackSamplerImpl::WalkStackForTesting(
    ModuleCache* module_cache,
    RegisterContext* thread_context,
    uintptr_t stack_top,
    const base::circular_deque<std::unique_ptr<Unwinder>>& unwinders) {
  return WalkStack(module_cache, thread_context, stack_top, unwinders);
}

// static
std::vector<Frame> StackSamplerImpl::WalkStack(
    ModuleCache* module_cache,
    RegisterContext* thread_context,
    uintptr_t stack_top,
    const base::circular_deque<std::unique_ptr<Unwinder>>& unwinders) {
  std::vector<Frame> stack;
  // Reserve enough memory for most stacks, to avoid repeated allocations.
  // Approximately 99.9% of recorded stacks are 128 frames or fewer.
  stack.reserve(128);

  // Record the first frame from the context values.
  stack.emplace_back(RegisterContextInstructionPointer(thread_context),
                     module_cache->GetModuleForAddress(
                         RegisterContextInstructionPointer(thread_context)));

  size_t prior_stack_size;
  UnwindResult result;
  do {
    // Choose an authoritative unwinder for the current module. Use the first
    // unwinder that thinks it can unwind from the current frame.
    auto unwinder = ranges::find_if(
        unwinders, [&stack](const std::unique_ptr<Unwinder>& unwinder) {
          return unwinder->CanUnwindFrom(stack.back());
        });
    if (unwinder == unwinders.end())
      return stack;

    prior_stack_size = stack.size();
    result = unwinder->get()->TryUnwind(thread_context, stack_top, &stack);

    // The unwinder with the lowest priority should be the only one that
    // returns COMPLETED since the stack starts in native code.
    DCHECK(result != UnwindResult::kCompleted ||
           unwinder->get() == unwinders.back().get());
  } while (result != UnwindResult::kAborted &&
           result != UnwindResult::kCompleted &&
           // Give up if the authoritative unwinder for the module was unable
           // to unwind.
           stack.size() > prior_stack_size);

  return stack;
}

}  // namespace base
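
// A minimal usage sketch (illustrative only, not part of this file): a
// sampling thread could drive StackSamplerImpl roughly as follows, assuming
// a platform-appropriate StackCopier |copier|, an UnwindersFactory |factory|,
// a ModuleCache* |module_cache|, and a ProfileBuilder* |builder|, all of
// which are hypothetical names:
//
//   base::StackSamplerImpl sampler(std::move(copier), std::move(factory),
//                                  module_cache,
//                                  base::RepeatingClosure(),  // no callback
//                                  nullptr);  // no test delegate
//   sampler.Initialize();  // creates and initializes the core unwinders
//   base::StackBuffer buffer(buffer_size);
//   sampler.RecordStackFrames(&buffer, builder);  // records one sample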