Commit 603a50e0 authored by Mike Wittman, committed by Commit Bot

[Sampling profiler] Reland "Implement signal-based stack copying in //base/profiler"

This CL extracts the core stack copying parts of
tracing::StackUnwinderAndroid into the dedicated base::StackCopierSignal
class, and adapts it for direct use by the sampling profiler stack
unwinding implementation. This drops some unneeded code; updates the code
to run on 32-bit Android, 64-bit Android, and 64-bit Linux platforms;
and converts the code to use C++ atomics.
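
For orientation, a rough sketch of the copy protocol implemented in
stack_copier_signal.cc below (heavily simplified; error handling and the
scoped helpers are omitted):

  // The profiler thread publishes the handler arguments through an atomic
  // pointer, interrupts the target thread with SIGURG, and waits on a
  // futex-backed event that the async-signal-safe handler signals when done.
  std::atomic<HandlerParams*> g_handler_params;

  void CopyStackSignalHandler(int, siginfo_t*, void* sigcontext) {
    HandlerParams* params = g_handler_params.load(std::memory_order_acquire);
    // ... copy the register context and stack into params->stack_buffer ...
    params->event->Signal();  // FUTEX_WAKE; safe inside a signal handler.
  }

  bool CopyStack(...) {
    g_handler_params.store(&params, std::memory_order_release);
    sigaction(SIGURG, &action, &original_action);       // install the handler
    syscall(SYS_tgkill, getpid(), target_tid, SIGURG);  // interrupt the thread
    return wait_event.Wait();  // FUTEX_WAIT until the handler completes.
  }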

The StackUnwinderAndroid implementation will eventually be replaced
wholesale by the sampling profiler implementation for Android. This CL
does not attempt to reuse the extracted stack copying implementation on
its own, in order to avoid introducing bugs in StackUnwinderAndroid due
to the differing implementations.

The reland enables the StackCopierSignalTest.CopyStackFromOtherThread
test on 32-bit Android only.

TBR=gab@chromium.org

Bug: 988579, 1018615
Change-Id: I96704fe4843994721a0eb37896e11bea99be3fda
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1884376
Commit-Queue: Mike Wittman <wittman@chromium.org>
Reviewed-by: Gabriel Charette <gab@chromium.org>
Reviewed-by: Charlie Andrews <charliea@chromium.org>
Cr-Commit-Position: refs/heads/master@{#710415}
parent 20df05c7
@@ -622,8 +622,6 @@ jumbo_component("base") {
    "profiler/stack_buffer.h",
    "profiler/stack_copier.cc",
    "profiler/stack_copier.h",
-   "profiler/stack_copier_signal.cc",
-   "profiler/stack_copier_signal.h",
    "profiler/stack_copier_suspend.cc",
    "profiler/stack_copier_suspend.h",
    "profiler/stack_sampler.cc",
@@ -1204,6 +1202,16 @@ jumbo_component("base") {
      "threading/thread_local_storage_posix.cc",
      "timer/hi_res_timer_manager_posix.cc",
    ]
    if (!is_nacl && !is_mac && !is_ios) {
      sources += [
        "profiler/stack_copier_signal.cc",
        "profiler/stack_copier_signal.h",
        "profiler/stack_sampler_posix.cc",
        "profiler/thread_delegate_posix.cc",
        "profiler/thread_delegate_posix.h",
      ]
    }
  }

  if (!is_nacl) {
@@ -1222,14 +1230,6 @@ jumbo_component("base") {
  if (is_posix) {
    sources += [ "base_paths_posix.h" ]
-   if (!is_mac && !is_ios) {
-     sources += [
-       "profiler/stack_sampler_posix.cc",
-       "profiler/thread_delegate_posix.cc",
-       "profiler/thread_delegate_posix.h",
-     ]
-   }
  }

  if (is_linux) {
@@ -2905,6 +2905,9 @@ test("base_unittests") {
      "posix/unix_domain_socket_unittest.cc",
      "task/thread_pool/task_tracker_posix_unittest.cc",
    ]
    if (!is_nacl && !is_mac && !is_ios) {
      sources += [ "profiler/stack_copier_signal_unittest.cc" ]
    }
  }

  # Allow more direct string conversions on platforms with native utf8
...
@@ -10,7 +10,6 @@
#define BASE_PROFILER_REGISTER_CONTEXT_H_

#include <cstdint>
-#include <type_traits>

#include "build/build_config.h"
@@ -25,13 +24,14 @@
namespace base {

// Helper function to account for the fact that platform-specific register state
-// types may be unsigned and of the same size as uintptr_t, but not of the same
-// type -- e.g. unsigned int vs. unsigned long on 32-bit Windows and unsigned
-// long vs. unsigned long long on Mac.
// types may be of the same size as uintptr_t, but not of the same type or
// signedness -- e.g. unsigned int vs. unsigned long on 32-bit Windows, unsigned
// long vs. unsigned long long on Mac, long long vs. unsigned long long on
// Linux.
template <typename T>
uintptr_t& AsUintPtr(T* value) {
-  static_assert(std::is_unsigned<T>::value && sizeof(T) == sizeof(uintptr_t),
-                "register state type must be equivalent to uintptr_t");
  static_assert(sizeof(T) == sizeof(uintptr_t),
                "register state type must be of equivalent size to uintptr_t");
  return *reinterpret_cast<uintptr_t*>(value);
}
@@ -86,12 +86,12 @@ inline uintptr_t& RegisterContextInstructionPointer(
  return AsUintPtr(&context->__rip);
}

-#elif (defined(OS_ANDROID) || defined(OS_LINUX)) && \
-    defined(ARCH_CPU_ARM_FAMILY) && \
-    defined(ARCH_CPU_32_BITS)  // #if defined(OS_WIN)
#elif defined(OS_ANDROID) || defined(OS_LINUX)  // #if defined(OS_WIN)

using RegisterContext = mcontext_t;

#if defined(ARCH_CPU_ARM_FAMILY) && defined(ARCH_CPU_32_BITS)

inline uintptr_t& RegisterContextStackPointer(mcontext_t* context) {
  return AsUintPtr(&context->arm_sp);
}
@@ -104,6 +104,55 @@ inline uintptr_t& RegisterContextInstructionPointer(mcontext_t* context) {
  return AsUintPtr(&context->arm_ip);
}
#elif defined(ARCH_CPU_ARM_FAMILY) && defined(ARCH_CPU_64_BITS)
inline uintptr_t& RegisterContextStackPointer(mcontext_t* context) {
return AsUintPtr(&context->sp);
}
inline uintptr_t& RegisterContextFramePointer(mcontext_t* context) {
// r29 is the FP register on 64-bit ARM per the Procedure Call Standard,
// section 5.1.1.
return AsUintPtr(&context->regs[29]);
}
inline uintptr_t& RegisterContextInstructionPointer(mcontext_t* context) {
return AsUintPtr(&context->pc);
}
#elif defined(ARCH_CPU_X86_64) // #if defined(ARCH_CPU_ARM_FAMILY) &&
// defined(ARCH_CPU_32_BITS)
inline uintptr_t& RegisterContextStackPointer(mcontext_t* context) {
return AsUintPtr(&context->gregs[REG_RSP]);
}
inline uintptr_t& RegisterContextFramePointer(mcontext_t* context) {
return AsUintPtr(&context->gregs[REG_RBP]);
}
inline uintptr_t& RegisterContextInstructionPointer(mcontext_t* context) {
return AsUintPtr(&context->gregs[REG_RIP]);
}
#else // #if defined(ARCH_CPU_ARM_FAMILY) && defined(ARCH_CPU_32_BITS)
// Placeholders for other POSIX platforms that just return the first
// three register slots in the context.
inline uintptr_t& RegisterContextStackPointer(mcontext_t* context) {
return *reinterpret_cast<uintptr_t*>(context);
}
inline uintptr_t& RegisterContextFramePointer(mcontext_t* context) {
return *(reinterpret_cast<uintptr_t*>(context) + 1);
}
inline uintptr_t& RegisterContextInstructionPointer(mcontext_t* context) {
return *(reinterpret_cast<uintptr_t*>(context) + 2);
}
#endif // #if defined(ARCH_CPU_ARM_FAMILY) && defined(ARCH_CPU_32_BITS)
#else  // #if defined(OS_WIN)

// Placeholders for other platforms.
...
@@ -40,14 +40,15 @@ class BASE_EXPORT StackBuffer {
           ~(kPlatformStackAlignment - 1));
  }

  // Size in bytes.
  size_t size() const { return size_; }

 private:
  // The buffer to store the stack.
  const std::unique_ptr<uint8_t[]> buffer_;

-  // The size of the requested buffer allocation. The actual allocation is
-  // larger to accommodate alignment requirements.
  // The size in bytes of the requested buffer allocation. The actual allocation
  // is larger to accommodate alignment requirements.
  const size_t size_;

  DISALLOW_COPY_AND_ASSIGN(StackBuffer);
...
@@ -4,13 +4,169 @@
#include "base/profiler/stack_copier_signal.h"

#include <linux/futex.h>
#include <signal.h>
#include <sys/ucontext.h>
#include <syscall.h>

#include <atomic>

#include "base/profiler/metadata_recorder.h"
#include "base/profiler/register_context.h"
#include "base/profiler/sample_metadata.h"
#include "base/profiler/stack_buffer.h"
#include "base/profiler/suspendable_thread_delegate.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"

namespace base {
namespace {
// Waitable event implementation with futex and without DCHECK(s), since signal
// handlers cannot allocate memory or use pthread api.
class AsyncSafeWaitableEvent {
public:
AsyncSafeWaitableEvent() { futex_.store(0, std::memory_order_release); }
~AsyncSafeWaitableEvent() {}
bool Wait() {
// futex() can wake up spuriously if this memory address was previously used
// for a pthread mutex. So, also check the condition.
while (true) {
int res =
syscall(SYS_futex, futex_int_ptr(), FUTEX_WAIT | FUTEX_PRIVATE_FLAG,
0, nullptr, nullptr, 0);
if (futex_.load(std::memory_order_acquire) != 0)
return true;
if (res != 0)
return false;
}
}
void Signal() {
futex_.store(1, std::memory_order_release);
syscall(SYS_futex, futex_int_ptr(), FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1,
nullptr, nullptr, 0);
}
private:
// Provides a pointer to the atomic's storage. std::atomic_int has standard
// layout so its address can be used for the pointer as long as it only
// contains the int.
int* futex_int_ptr() {
static_assert(sizeof(futex_) == sizeof(int),
"Expected std::atomic_int to be the same size as int");
return reinterpret_cast<int*>(&futex_);
}
std::atomic_int futex_{0};
};
// Scoped signal event that calls Signal on the AsyncSafeWaitableEvent on
// destruction.
class ScopedEventSignaller {
public:
ScopedEventSignaller(AsyncSafeWaitableEvent* event) : event_(event) {}
~ScopedEventSignaller() { event_->Signal(); }
private:
AsyncSafeWaitableEvent* event_;
};
// Struct to store the arguments to the signal handler.
struct HandlerParams {
uintptr_t stack_base_address;
// The event is signalled when signal handler is done executing.
AsyncSafeWaitableEvent* event;
// Return values:
// Successfully copied the stack segment.
bool* success;
// The thread context of the leaf function.
mcontext_t* context;
// Buffer to copy the stack segment.
StackBuffer* stack_buffer;
const uint8_t** stack_copy_bottom;
};
// Pointer to the parameters to be "passed" to the CopyStackSignalHandler() from
// the sampling thread to the sampled (stopped) thread. This value is set just
// before sending the signal to the thread and reset when the handler is done.
std::atomic<HandlerParams*> g_handler_params;
// CopyStackSignalHandler is invoked on the stopped thread and records the
// thread's stack and register context at the time the signal was received. This
// function may only call reentrant code.
void CopyStackSignalHandler(int n, siginfo_t* siginfo, void* sigcontext) {
HandlerParams* params = g_handler_params.load(std::memory_order_acquire);
ScopedEventSignaller e(params->event);
*params->success = false;
const ucontext_t* ucontext = static_cast<ucontext_t*>(sigcontext);
memcpy(params->context, &ucontext->uc_mcontext, sizeof(mcontext_t));
const uintptr_t bottom = RegisterContextStackPointer(params->context);
const uintptr_t top = params->stack_base_address;
if ((top - bottom) > params->stack_buffer->size()) {
// The stack exceeds the size of the allocated buffer. The buffer is sized
// such that this shouldn't happen under typical execution so we can safely
// punt in this situation.
return;
}
*params->stack_copy_bottom =
StackCopierSignal::CopyStackContentsAndRewritePointers(
reinterpret_cast<uint8_t*>(bottom), reinterpret_cast<uintptr_t*>(top),
StackBuffer::kPlatformStackAlignment, params->stack_buffer->buffer());
// TODO(https://crbug.com/988579): Record metadata while the thread is
// suspended.
*params->success = true;
}
// Sets the global handler params for the signal handler function.
class ScopedSetSignalHandlerParams {
public:
ScopedSetSignalHandlerParams(HandlerParams* params) {
g_handler_params.store(params, std::memory_order_release);
}
~ScopedSetSignalHandlerParams() {
g_handler_params.store(nullptr, std::memory_order_release);
}
};
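// Installs |action| as the handler for |signal| via sigaction() and restores
// the previously installed handler on destruction.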
class ScopedSigaction {
public:
ScopedSigaction(int signal,
struct sigaction* action,
struct sigaction* original_action)
: signal_(signal),
action_(action),
original_action_(original_action),
succeeded_(sigaction(signal, action, original_action) == 0) {}
bool succeeded() const { return succeeded_; }
~ScopedSigaction() {
if (!succeeded_)
return;
bool reset_succeeded = sigaction(signal_, original_action_, action_) == 0;
DCHECK(reset_succeeded);
}
private:
const int signal_;
struct sigaction* const action_;
struct sigaction* const original_action_;
const bool succeeded_;
};
} // namespace
StackCopierSignal::StackCopierSignal(
    std::unique_ptr<ThreadDelegate> thread_delegate)
    : thread_delegate_(std::move(thread_delegate)) {}

@@ -21,8 +177,57 @@ bool StackCopierSignal::CopyStack(StackBuffer* stack_buffer,
                                  uintptr_t* stack_top,
                                  ProfileBuilder* profile_builder,
                                  RegisterContext* thread_context) {
-  // TODO(wittman): Implement signal-based stack copying.
-  return false;
  AsyncSafeWaitableEvent wait_event;
  bool copied = false;
const uint8_t* stack_copy_bottom = nullptr;
const uintptr_t stack_base_address = thread_delegate_->GetStackBaseAddress();
HandlerParams params = {stack_base_address, &wait_event, &copied,
thread_context, stack_buffer, &stack_copy_bottom};
{
ScopedSetSignalHandlerParams scoped_handler_params(&params);
// Set the signal handler for the thread to the stack copy function.
struct sigaction action;
struct sigaction original_action;
memset(&action, 0, sizeof(action));
action.sa_sigaction = CopyStackSignalHandler;
action.sa_flags = SA_RESTART | SA_SIGINFO;
sigemptyset(&action.sa_mask);
TRACE_EVENT_BEGIN0(TRACE_DISABLED_BY_DEFAULT("cpu_profiler.debug"),
"StackCopierSignal copy stack");
    // SIGURG is chosen here because we observe no crashes with this signal and
    // neither Chrome nor the AOSP sets up a special handler for this signal.
ScopedSigaction scoped_sigaction(SIGURG, &action, &original_action);
if (!scoped_sigaction.succeeded())
return false;
if (syscall(SYS_tgkill, getpid(), thread_delegate_->GetThreadId(),
SIGURG) != 0) {
NOTREACHED();
return false;
}
bool finished_waiting = wait_event.Wait();
TRACE_EVENT_END0(TRACE_DISABLED_BY_DEFAULT("cpu_profiler.debug"),
"StackCopierSignal copy stack");
if (!finished_waiting) {
NOTREACHED();
return false;
}
}
const uintptr_t bottom = RegisterContextStackPointer(params.context);
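  // Rewrite pointers into the original stack that are held in the copied
  // register context (e.g. the frame pointer) so they point into the copy.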
for (uintptr_t* reg :
thread_delegate_->GetRegistersToRewrite(thread_context)) {
*reg = StackCopierSignal::RewritePointerIfInOriginalStack(
reinterpret_cast<uint8_t*>(bottom),
reinterpret_cast<uintptr_t*>(stack_base_address), stack_copy_bottom,
*reg);
}
*stack_top = reinterpret_cast<uintptr_t>(stack_copy_bottom) +
(stack_base_address - bottom);
return copied;
}

}  // namespace base
@@ -27,6 +27,8 @@ class BASE_EXPORT StackCopierSignal : public StackCopier {
                 ProfileBuilder* profile_builder,
                 RegisterContext* thread_context) override;

  using StackCopier::CopyStackContentsAndRewritePointers;

 private:
  std::unique_ptr<ThreadDelegate> thread_delegate_;
};
...
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <string.h>
#include <algorithm>
#include <utility>
#include "base/profiler/profile_builder.h"
#include "base/profiler/sampling_profiler_thread_token.h"
#include "base/profiler/stack_buffer.h"
#include "base/profiler/stack_copier_signal.h"
#include "base/profiler/thread_delegate_posix.h"
#include "base/stl_util.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/platform_thread.h"
#include "base/threading/simple_thread.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
namespace {
class TestProfileBuilder : public ProfileBuilder {
public:
TestProfileBuilder() = default;
TestProfileBuilder(const TestProfileBuilder&) = delete;
TestProfileBuilder& operator=(const TestProfileBuilder&) = delete;
// ProfileBuilder
ModuleCache* GetModuleCache() override { return nullptr; }
void RecordMetadata(
base::ProfileBuilder::MetadataProvider* metadata_provider) override {}
void OnSampleCompleted(std::vector<Frame> frames) override {}
void OnProfileCompleted(TimeDelta profile_duration,
TimeDelta sampling_period) override {}
private:
};
// Values to write to the stack and look for in the copy.
static const uint32_t kStackSentinels[] = {0xf312ecd9, 0x1fcd7f19, 0xe69e617d,
0x8245f94f};
class TargetThread : public SimpleThread {
public:
TargetThread()
: SimpleThread("target", Options()),
started_(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED),
copy_finished_(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED) {}
void Run() override {
thread_token_ = GetSamplingProfilerCurrentThreadToken();
// Copy the sentinel values onto the stack. Volatile to defeat compiler
// optimizations.
volatile uint32_t sentinels[size(kStackSentinels)];
for (size_t i = 0; i < size(kStackSentinels); ++i)
sentinels[i] = kStackSentinels[i];
started_.Signal();
copy_finished_.Wait();
}
SamplingProfilerThreadToken GetThreadToken() {
started_.Wait();
return thread_token_;
}
void NotifyCopyFinished() { copy_finished_.Signal(); }
private:
WaitableEvent started_;
WaitableEvent copy_finished_;
SamplingProfilerThreadToken thread_token_;
};
} // namespace
// ASAN moves local variables outside of the stack extents, which breaks the
// sentinels. TSAN hangs on the AsyncSafeWaitableEvent FUTEX_WAIT call.
#if defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER)
#define MAYBE_CopyStack DISABLED_CopyStack
#else
#define MAYBE_CopyStack CopyStack
#endif
TEST(StackCopierSignalTest, MAYBE_CopyStack) {
StackBuffer stack_buffer(/* buffer_size = */ 1 << 20);
memset(stack_buffer.buffer(), 0, stack_buffer.size());
uintptr_t stack_top = 0;
TestProfileBuilder profiler_builder;
RegisterContext context;
StackCopierSignal copier(std::make_unique<ThreadDelegatePosix>(
GetSamplingProfilerCurrentThreadToken()));
// Copy the sentinel values onto the stack. Volatile to defeat compiler
// optimizations.
volatile uint32_t sentinels[size(kStackSentinels)];
for (size_t i = 0; i < size(kStackSentinels); ++i)
sentinels[i] = kStackSentinels[i];
bool result =
copier.CopyStack(&stack_buffer, &stack_top, &profiler_builder, &context);
ASSERT_TRUE(result);
uint32_t* const end = reinterpret_cast<uint32_t*>(stack_top);
uint32_t* const sentinel_location = std::find_if(
reinterpret_cast<uint32_t*>(RegisterContextStackPointer(&context)), end,
[](const uint32_t& location) {
return memcmp(&location, &kStackSentinels[0],
sizeof(kStackSentinels)) == 0;
});
EXPECT_NE(end, sentinel_location);
}
// Limit to 32-bit Android, which is the platform we care about for this
// functionality. The test is broken on too many other varied platforms to try
// to selectively disable.
#if !(defined(OS_ANDROID) && defined(ARCH_CPU_32_BITS))
#define MAYBE_CopyStackFromOtherThread DISABLED_CopyStackFromOtherThread
#else
#define MAYBE_CopyStackFromOtherThread CopyStackFromOtherThread
#endif
TEST(StackCopierSignalTest, MAYBE_CopyStackFromOtherThread) {
StackBuffer stack_buffer(/* buffer_size = */ 1 << 20);
memset(stack_buffer.buffer(), 0, stack_buffer.size());
uintptr_t stack_top = 0;
TestProfileBuilder profiler_builder;
RegisterContext context{};
TargetThread target_thread;
target_thread.Start();
const SamplingProfilerThreadToken thread_token =
target_thread.GetThreadToken();
StackCopierSignal copier(std::make_unique<ThreadDelegatePosix>(thread_token));
bool result =
copier.CopyStack(&stack_buffer, &stack_top, &profiler_builder, &context);
ASSERT_TRUE(result);
target_thread.NotifyCopyFinished();
target_thread.Join();
uint32_t* const end = reinterpret_cast<uint32_t*>(stack_top);
uint32_t* const sentinel_location = std::find_if(
reinterpret_cast<uint32_t*>(RegisterContextStackPointer(&context)), end,
[](const uint32_t& location) {
return memcmp(&location, &kStackSentinels[0],
sizeof(kStackSentinels)) == 0;
});
EXPECT_NE(end, sentinel_location);
}
} // namespace base
@@ -117,7 +117,7 @@ TEST(StackCopierSuspendTest, CopyStack) {
      std::make_unique<StackBuffer>(stack.size() * sizeof(uintptr_t));
  uintptr_t stack_top = 0;
  TestProfileBuilder profile_builder;
-  RegisterContext register_context = {0};
  RegisterContext register_context{};

  stack_copier_suspend.CopyStack(stack_buffer.get(), &stack_top,
                                 &profile_builder, &register_context);
@@ -139,7 +139,7 @@ TEST(StackCopierSuspendTest, CopyStackBufferTooSmall) {
  stack_buffer->buffer()[0] = 100;
  uintptr_t stack_top = 0;
  TestProfileBuilder profile_builder;
-  RegisterContext register_context = {0};
  RegisterContext register_context{};

  stack_copier_suspend.CopyStack(stack_buffer.get(), &stack_top,
                                 &profile_builder, &register_context);
@@ -164,7 +164,7 @@ TEST(StackCopierSuspendTest, CopyStackAndRewritePointers) {
      std::make_unique<StackBuffer>(stack.size() * sizeof(uintptr_t));
  uintptr_t stack_top = 0;
  TestProfileBuilder profile_builder;
-  RegisterContext register_context = {0};
  RegisterContext register_context{};

  stack_copier_suspend.CopyStack(stack_buffer.get(), &stack_top,
                                 &profile_builder, &register_context);
@@ -180,7 +180,7 @@ TEST(StackCopierSuspendTest, CopyStackAndRewritePointers) {
TEST(StackCopierSuspendTest, RewriteRegisters) {
  std::vector<uintptr_t> stack = {0, 1, 2};
-  RegisterContext register_context = {0};
  RegisterContext register_context{};
  RegisterContextFramePointer(&register_context) =
      reinterpret_cast<uintptr_t>(&stack[1]);
  StackCopierSuspend stack_copier_suspend(
...
@@ -6,6 +6,7 @@
#include "base/process/process_handle.h"
#include "base/profiler/thread_delegate_posix.h"
#include "base/stl_util.h"
#include "build/build_config.h"

@@ -76,6 +77,30 @@ std::vector<uintptr_t*> ThreadDelegatePosix::GetRegistersToRewrite(
      // arm_lr and arm_pc do not require rewriting because they contain
      // addresses of executable code, not addresses in the stack.
  };
#elif defined(ARCH_CPU_ARM_FAMILY) && \
defined(ARCH_CPU_64_BITS) // #if defined(ARCH_CPU_ARM_FAMILY) &&
// defined(ARCH_CPU_32_BITS)
std::vector<uintptr_t*> registers;
registers.reserve(12);
// Return the set of callee-save registers per the ARM 64-bit Procedure Call
// Standard section 5.1.1, plus the stack pointer.
registers.push_back(reinterpret_cast<uintptr_t*>(&thread_context->sp));
for (size_t i = 19; i <= 29; ++i)
registers.push_back(reinterpret_cast<uintptr_t*>(&thread_context->regs[i]));
return registers;
#elif defined(ARCH_CPU_X86_64) // #if defined(ARCH_CPU_ARM_FAMILY) &&
// defined(ARCH_CPU_32_BITS)
return {
// Return the set of callee-save registers per the x86-64 System V ABI
// section 3.2.1, plus the stack pointer.
reinterpret_cast<uintptr_t*>(&thread_context->gregs[REG_RBP]),
reinterpret_cast<uintptr_t*>(&thread_context->gregs[REG_RBX]),
reinterpret_cast<uintptr_t*>(&thread_context->gregs[REG_R12]),
reinterpret_cast<uintptr_t*>(&thread_context->gregs[REG_R13]),
reinterpret_cast<uintptr_t*>(&thread_context->gregs[REG_R14]),
reinterpret_cast<uintptr_t*>(&thread_context->gregs[REG_R15]),
reinterpret_cast<uintptr_t*>(&thread_context->gregs[REG_RSP]),
};
#else  // #if defined(ARCH_CPU_ARM_FAMILY) && defined(ARCH_CPU_32_BITS)
  // Unimplemented for other architectures.
  return {};
...