Commit cbbd217b authored by Justin Cohen, committed by Commit Bot

Update Crashpad to 311a5a2fdd5b6be8cee01b66991933397094204f

6835b8e29db1 Roll buildtools/ afc5b798c..4164a3056 (9 commits)
9a31d3f8e981 Print thread state after failure to suspend
4e2a190ad6e6 [ios] Bring up first draft thread and memory snapshot
296501351816 Prepare crashpad mig stuff for -Wunreachable-code in
             chromium_code
af2be80bdb8a android: configure a native test suite
b75c5783735e linux: disable arguments test on old kernels
311a5a2fdd5b Fix chromium build

Change-Id: I031f48b23b71e68eb306918240dffe3851591184
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2119550
Commit-Queue: Mark Mentovai <mark@chromium.org>
Auto-Submit: Justin Cohen <justincohen@chromium.org>
Reviewed-by: Mark Mentovai <mark@chromium.org>
Cr-Commit-Position: refs/heads/master@{#753228}
parent 5016701f
......@@ -2,7 +2,7 @@ Name: Crashpad
Short Name: crashpad
URL: https://crashpad.chromium.org/
Version: unknown
Revision: 1d75af9bf5918fa1c365a4ac696f038e6028a30b
Revision: 311a5a2fdd5b6be8cee01b66991933397094204f
License: Apache 2.0
License File: crashpad/LICENSE
Security Critical: yes
......@@ -37,4 +37,3 @@ $ git am --3way --message-id -p4 /tmp/patchdir
Local Modifications:
- codereview.settings has been excluded.
- elf_image_reader_fuzzer is enabled only when use_fuzzing_engine is true.
- cherry-pick upstream 296501351816
......@@ -52,6 +52,30 @@ if (crashpad_is_in_chromium || crashpad_is_in_fuchsia) {
# TODO(fuchsia:46559): Fix the leaks and remove this.
deps += [ "//build/config/sanitizers:suppress-lsan.DO-NOT-USE-THIS" ]
}
if (crashpad_is_android) {
use_raw_android_executable = true
copy("crashpad_test_data") {
testonly = true
sources = [
"test/test_paths_test_data_root.txt",
"util/net/testdata/ascii_http_body.txt",
"util/net/testdata/binary_http_body.dat",
]
outputs = [
"$root_out_dir/crashpad_test_data/{{source}}",
]
}
deps += [ ":crashpad_test_data" ]
extra_dist_files = [
"$root_out_dir/crashpad_handler",
"$root_out_dir/crashpad_test_test_multiprocess_exec_test_child",
"$root_out_dir/crashpad_test_data",
]
}
}
if (crashpad_is_in_fuchsia) {
......
......@@ -25,7 +25,7 @@ vars = {
deps = {
'buildtools':
Var('chromium_git') + '/chromium/src/buildtools.git@' +
'afc5b798c72905e85f9991152be878714c579958',
'4164a305626786b1912d467003acf4c4995bec7d',
'crashpad/third_party/edo/edo': {
'url': Var('chromium_git') + '/external/github.com/google/eDistantObject.git@' +
'243fc89ae95b24717d41f3786f6a9abeeef87c92',
......
......@@ -17,6 +17,9 @@ import("../build/crashpad_fuzzer_test.gni")
if (crashpad_is_in_chromium) {
import("//build/config/compiler/compiler.gni")
# Prevent Chromium source assignment filters from being inherited.
set_sources_assignment_filter([])
}
static_library("snapshot") {
......@@ -110,15 +113,20 @@ static_library("snapshot") {
if (crashpad_is_ios) {
sources += [
"ios/memory_snapshot_ios.cc",
"ios/memory_snapshot_ios.h",
"ios/module_snapshot_ios.cc",
"ios/module_snapshot_ios.h",
"ios/process_snapshot_ios.cc",
"ios/process_snapshot_ios.h",
"ios/thread_snapshot_ios.cc",
"ios/thread_snapshot_ios.h",
"mac/cpu_context_mac.cc",
"mac/cpu_context_mac.h",
]
}
if (crashpad_is_linux || crashpad_is_android) {
set_sources_assignment_filter([])
sources += [
"linux/cpu_context_linux.cc",
"linux/cpu_context_linux.h",
......
// Copyright 2020 The Crashpad Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "snapshot/ios/memory_snapshot_ios.h"
namespace crashpad {
namespace internal {
void MemorySnapshotIOS::Initialize(vm_address_t address, vm_size_t size) {
INITIALIZATION_STATE_SET_INITIALIZING(initialized_);
address_ = address;
size_ = base::checked_cast<size_t>(size);
// TODO(justincohen): This is temporary, as MemorySnapshotIOS will likely be
// able to point directly to the deserialized data dump rather than copying
// data around.
buffer_ = std::unique_ptr<uint8_t[]>(new uint8_t[size_]);
memcpy(buffer_.get(), reinterpret_cast<void*>(address_), size_);
INITIALIZATION_STATE_SET_VALID(initialized_);
}
uint64_t MemorySnapshotIOS::Address() const {
INITIALIZATION_STATE_DCHECK_VALID(initialized_);
return address_;
}
size_t MemorySnapshotIOS::Size() const {
INITIALIZATION_STATE_DCHECK_VALID(initialized_);
return size_;
}
bool MemorySnapshotIOS::Read(Delegate* delegate) const {
INITIALIZATION_STATE_DCHECK_VALID(initialized_);
if (size_ == 0) {
return delegate->MemorySnapshotDelegateRead(nullptr, size_);
}
return delegate->MemorySnapshotDelegateRead(buffer_.get(), size_);
}
const MemorySnapshot* MemorySnapshotIOS::MergeWithOtherSnapshot(
const MemorySnapshot* other) const {
CheckedRange<uint64_t, size_t> merged(0, 0);
if (!LoggingDetermineMergedRange(this, other, &merged))
return nullptr;
auto result = std::make_unique<MemorySnapshotIOS>();
result->Initialize(merged.base(), merged.size());
return result.release();
}
} // namespace internal
} // namespace crashpad
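
MemorySnapshotIOS hands its captured bytes to a MemorySnapshot::Delegate in Read() above. As an illustration only (not part of this change), a minimal delegate that copies whatever it is given might look like the sketch below; it assumes the MemorySnapshotDelegateRead(void* data, size_t size) callback declared in snapshot/memory_snapshot.h, and the class name CopyingDelegate is purely hypothetical.

#include <stddef.h>
#include <stdint.h>

#include <vector>

#include "snapshot/memory_snapshot.h"

// Copies whatever Read() delivers. For an empty snapshot the callback is
// invoked with (nullptr, 0) and the buffer stays empty.
class CopyingDelegate : public crashpad::MemorySnapshot::Delegate {
 public:
  bool MemorySnapshotDelegateRead(void* data, size_t size) override {
    const uint8_t* bytes = static_cast<const uint8_t*>(data);
    copied_.assign(bytes, bytes + size);
    return true;
  }

  const std::vector<uint8_t>& copied() const { return copied_; }

 private:
  std::vector<uint8_t> copied_;
};

A MemorySnapshotIOS initialized with some region could then be drained with snapshot.Read(&delegate).
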
// Copyright 2020 The Crashpad Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef CRASHPAD_SNAPSHOT_IOS_MEMORY_SNAPSHOT_IOS_H_
#define CRASHPAD_SNAPSHOT_IOS_MEMORY_SNAPSHOT_IOS_H_
#include "base/macros.h"
#include "snapshot/memory_snapshot.h"
#include "util/misc/address_types.h"
#include "util/misc/initialization_state_dcheck.h"
namespace crashpad {
namespace internal {
//! \brief A MemorySnapshot of a memory region.
class MemorySnapshotIOS final : public MemorySnapshot {
public:
MemorySnapshotIOS() = default;
~MemorySnapshotIOS() = default;
//! \brief Initializes the object.
//!
//! \param[in] address The base address of the memory region to snapshot.
//! \param[in] size The size of the memory region to snapshot.
void Initialize(vm_address_t address, vm_size_t size);
// MemorySnapshot:
uint64_t Address() const override;
size_t Size() const override;
bool Read(Delegate* delegate) const override;
const MemorySnapshot* MergeWithOtherSnapshot(
const MemorySnapshot* other) const override;
private:
template <class T>
friend const MemorySnapshot* MergeWithOtherSnapshotImpl(
const T* self,
const MemorySnapshot* other);
// TODO(justincohen): This is temporary until deserialization is worked out.
std::unique_ptr<uint8_t[]> buffer_;
vm_address_t address_;
vm_size_t size_;
InitializationStateDcheck initialized_;
DISALLOW_COPY_AND_ASSIGN(MemorySnapshotIOS);
};
} // namespace internal
} // namespace crashpad
#endif // CRASHPAD_SNAPSHOT_IOS_MEMORY_SNAPSHOT_IOS_H_
......@@ -26,6 +26,7 @@ namespace crashpad {
ProcessSnapshotIOS::ProcessSnapshotIOS()
: ProcessSnapshot(),
threads_(),
modules_(),
report_id_(),
client_id_(),
......@@ -43,6 +44,7 @@ bool ProcessSnapshotIOS::Initialize() {
return false;
}
InitializeThreads();
InitializeModules();
INITIALIZATION_STATE_SET_VALID(initialized_);
......@@ -96,7 +98,11 @@ const SystemSnapshot* ProcessSnapshotIOS::System() const {
std::vector<const ThreadSnapshot*> ProcessSnapshotIOS::Threads() const {
INITIALIZATION_STATE_DCHECK_VALID(initialized_);
return std::vector<const ThreadSnapshot*>();
std::vector<const ThreadSnapshot*> threads;
for (const auto& thread : threads_) {
threads.push_back(thread.get());
}
return threads;
}
std::vector<const ModuleSnapshot*> ProcessSnapshotIOS::Modules() const {
......@@ -140,6 +146,25 @@ const ProcessMemory* ProcessSnapshotIOS::Memory() const {
return nullptr;
}
void ProcessSnapshotIOS::InitializeThreads() {
mach_msg_type_number_t thread_count = 0;
const thread_act_array_t threads =
internal::ThreadSnapshotIOS::GetThreads(&thread_count);
for (uint32_t thread_index = 0; thread_index < thread_count; ++thread_index) {
thread_t thread = threads[thread_index];
auto thread_snapshot = std::make_unique<internal::ThreadSnapshotIOS>();
if (thread_snapshot->Initialize(thread)) {
threads_.push_back(std::move(thread_snapshot));
}
mach_port_deallocate(mach_task_self(), thread);
}
// TODO(justincohen): This dealloc above and below needs to move with the
// call to task_threads inside internal::ThreadSnapshotIOS::GetThreads.
vm_deallocate(mach_task_self(),
reinterpret_cast<vm_address_t>(threads),
sizeof(thread_t) * thread_count);
}
void ProcessSnapshotIOS::InitializeModules() {
const dyld_all_image_infos* image_infos =
internal::ModuleSnapshotIOS::DyldAllImageInfo();
......
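
For orientation only (nothing below is added by this patch), a caller could exercise the new thread support through the public ProcessSnapshot interface roughly as follows; the sketch assumes ProcessSnapshotIOS::Initialize() takes no arguments, matching the hunk above.

#include <stdio.h>

#include "snapshot/ios/process_snapshot_ios.h"

void DumpThreadSummary() {
  crashpad::ProcessSnapshotIOS snapshot;
  if (!snapshot.Initialize())
    return;
  for (const crashpad::ThreadSnapshot* thread : snapshot.Threads()) {
    // Each ThreadSnapshotIOS built by InitializeThreads() is reachable through
    // the generic ThreadSnapshot interface.
    printf("thread %llu suspend_count %d priority %d\n",
           static_cast<unsigned long long>(thread->ThreadID()),
           thread->SuspendCount(),
           thread->Priority());
  }
}
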
......@@ -18,7 +18,9 @@
#include <vector>
#include "snapshot/ios/module_snapshot_ios.h"
#include "snapshot/ios/thread_snapshot_ios.h"
#include "snapshot/process_snapshot.h"
#include "snapshot/thread_snapshot.h"
#include "snapshot/unloaded_module_snapshot.h"
namespace crashpad {
......@@ -60,6 +62,10 @@ class ProcessSnapshotIOS final : public ProcessSnapshot {
// Initializes modules_ on behalf of Initialize().
void InitializeModules();
// Initializes threads_ on behalf of Initialize().
void InitializeThreads();
std::vector<std::unique_ptr<internal::ThreadSnapshotIOS>> threads_;
std::vector<std::unique_ptr<internal::ModuleSnapshotIOS>> modules_;
UUID report_id_;
UUID client_id_;
......
// Copyright 2020 The Crashpad Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "snapshot/ios/thread_snapshot_ios.h"
#include "base/mac/mach_logging.h"
#include "snapshot/mac/cpu_context_mac.h"
namespace {
#if defined(ARCH_CPU_X86_64)
const thread_state_flavor_t kThreadStateFlavor = x86_THREAD_STATE64;
const thread_state_flavor_t kFloatStateFlavor = x86_FLOAT_STATE64;
const thread_state_flavor_t kDebugStateFlavor = x86_DEBUG_STATE64;
#elif defined(ARCH_CPU_ARM64)
const thread_state_flavor_t kThreadStateFlavor = ARM_THREAD_STATE64;
const thread_state_flavor_t kFloatStateFlavor = ARM_NEON_STATE64;
#endif
kern_return_t MachVMRegionRecurseDeepest(task_t task,
vm_address_t* address,
vm_size_t* size,
natural_t* depth,
vm_prot_t* protection,
unsigned int* user_tag) {
vm_region_submap_short_info_64 submap_info;
mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
while (true) {
kern_return_t kr = vm_region_recurse_64(
task,
address,
size,
depth,
reinterpret_cast<vm_region_recurse_info_t>(&submap_info),
&count);
if (kr != KERN_SUCCESS) {
return kr;
}
if (!submap_info.is_submap) {
*protection = submap_info.protection;
*user_tag = submap_info.user_tag;
return KERN_SUCCESS;
}
++*depth;
}
}
//! \brief Adjusts the region for the red zone, if the ABI requires one.
//!
//! This method performs red zone calculation for CalculateStackRegion(). Its
//! parameters are local variables used within that method, and may be
//! modified as needed.
//!
//! Where a red zone is required, the region of memory captured for a thread’s
//! stack will be extended to include the red zone below the stack pointer,
//! provided that such memory is mapped, readable, and has the correct user
//! tag value. If these conditions cannot be met fully, as much of the red
//! zone will be captured as is possible while meeting these conditions.
//!
//! \param[in,out] start_address The base address of the region to begin
//! capturing stack memory from. On entry, \a start_address is the stack
//! pointer. On return, \a start_address may be decreased to encompass a
//! red zone.
//! \param[in,out] region_base The base address of the region that contains
//! stack memory. This is distinct from \a start_address in that \a
//! region_base will be page-aligned. On entry, \a region_base is the
//! base address of a region that contains \a start_address. On return,
//! if \a start_address is decremented and is outside of the region
//! originally described by \a region_base, \a region_base will also be
//! decremented appropriately.
//! \param[in,out] region_size The size of the region that contains stack
//! memory. This region begins at \a region_base. On return, if \a
//! region_base is decremented, \a region_size will be incremented
//! appropriately.
//! \param[in] user_tag The Mach VM system’s user tag for the region described
//! by the initial values of \a region_base and \a region_size. The red
//! zone will only be allowed to extend out of the region described by
//! these initial values if the user tag is appropriate for stack memory
//! and the expanded region has the same user tag value.
void LocateRedZone(vm_address_t* const start_address,
vm_address_t* const region_base,
vm_address_t* const region_size,
const unsigned int user_tag) {
// x86_64 has a red zone. See AMD64 ABI 0.99.8,
// https://raw.githubusercontent.com/wiki/hjl-tools/x86-psABI/x86-64-psABI-r252.pdf#page=19,
// section 3.2.2, “The Stack Frame”.
// So does ARM64,
// https://developer.apple.com/library/archive/documentation/Xcode/Conceptual/iPhoneOSABIReference/Articles/ARM64FunctionCallingConventions.html
// section "Red Zone".
constexpr vm_size_t kRedZoneSize = 128;
vm_address_t red_zone_base =
*start_address >= kRedZoneSize ? *start_address - kRedZoneSize : 0;
bool red_zone_ok = false;
if (red_zone_base >= *region_base) {
// The red zone is within the region already discovered.
red_zone_ok = true;
} else if (red_zone_base < *region_base && user_tag == VM_MEMORY_STACK) {
// Probe to see if there’s a region immediately below the one already
// discovered.
vm_address_t red_zone_region_base = red_zone_base;
vm_size_t red_zone_region_size;
natural_t red_zone_depth = 0;
vm_prot_t red_zone_protection;
unsigned int red_zone_user_tag;
kern_return_t kr = MachVMRegionRecurseDeepest(mach_task_self(),
&red_zone_region_base,
&red_zone_region_size,
&red_zone_depth,
&red_zone_protection,
&red_zone_user_tag);
if (kr != KERN_SUCCESS) {
MACH_LOG(INFO, kr) << "vm_region_recurse";
*start_address = *region_base;
} else if (red_zone_region_base + red_zone_region_size == *region_base &&
(red_zone_protection & VM_PROT_READ) != 0 &&
red_zone_user_tag == user_tag) {
// The region containing the red zone is immediately below the region
// already found, it’s readable (not the guard region), and it has the
// same user tag as the region already found, so merge them.
red_zone_ok = true;
*region_base -= red_zone_region_size;
*region_size += red_zone_region_size;
}
}
if (red_zone_ok) {
// Begin capturing from the base of the red zone (but not the entire
// region that encompasses the red zone).
*start_address = red_zone_base;
} else {
// The red zone would go lower into another region in memory, but no
// region was found. Memory can only be captured to an address as low as
// the base address of the region already found.
*start_address = *region_base;
}
}
//! \brief Calculates the base address and size of the region used as a
//! thread’s stack.
//!
//! The region returned by this method may be formed by merging multiple
//! adjacent regions in a process’ memory map if appropriate. The base address
//! of the returned region may be lower than the \a stack_pointer passed in
//! when the ABI mandates a red zone below the stack pointer.
//!
//! \param[in] stack_pointer The stack pointer, referring to the top (lowest
//! address) of a thread’s stack.
//! \param[out] stack_region_size The size of the memory region used as the
//! thread’s stack.
//!
//! \return The base address (lowest address) of the memory region used as the
//! thread’s stack.
vm_address_t CalculateStackRegion(vm_address_t stack_pointer,
vm_size_t* stack_region_size) {
// For pthreads, it may be possible to compute the stack region based on the
// internal _pthread::stackaddr and _pthread::stacksize. The _pthread struct
// for a thread can be located at TSD slot 0, or the known offsets of
// stackaddr and stacksize from the TSD area could be used.
vm_address_t region_base = stack_pointer;
vm_size_t region_size;
natural_t depth = 0;
vm_prot_t protection;
unsigned int user_tag;
kern_return_t kr = MachVMRegionRecurseDeepest(mach_task_self(),
&region_base,
&region_size,
&depth,
&protection,
&user_tag);
if (kr != KERN_SUCCESS) {
MACH_LOG(INFO, kr) << "mach_vm_region_recurse";
*stack_region_size = 0;
return 0;
}
if (region_base > stack_pointer) {
// There’s nothing mapped at the stack pointer’s address. Something may have
// trashed the stack pointer. Note that this shouldn’t happen for a normal
// stack guard region violation because the guard region is mapped but has
// VM_PROT_NONE protection.
*stack_region_size = 0;
return 0;
}
vm_address_t start_address = stack_pointer;
if ((protection & VM_PROT_READ) == 0) {
// If the region isn’t readable, the stack pointer probably points to the
// guard region. Don’t include it as part of the stack, and don’t include
// anything at any lower memory address. The code below may still possibly
// find the real stack region at a memory address higher than this region.
start_address = region_base + region_size;
} else {
// If the ABI requires a red zone, adjust the region to include it if
// possible.
LocateRedZone(&start_address, &region_base, &region_size, user_tag);
// Regardless of whether the ABI requires a red zone, capture up to
// kExtraCaptureSize additional bytes of stack, but only if present in the
// region that was already found.
constexpr vm_size_t kExtraCaptureSize = 128;
start_address = std::max(start_address >= kExtraCaptureSize
? start_address - kExtraCaptureSize
: start_address,
region_base);
// Align start_address to a 16-byte boundary, which can help readers by
// ensuring that data is aligned properly. This could page-align instead,
// but that might be wasteful.
constexpr vm_size_t kDesiredAlignment = 16;
start_address &= ~(kDesiredAlignment - 1);
DCHECK_GE(start_address, region_base);
}
region_size -= (start_address - region_base);
region_base = start_address;
vm_size_t total_region_size = region_size;
// The stack region may have gotten split up into multiple abutting regions.
// Try to coalesce them. This frequently happens for the main thread’s stack
// when setrlimit(RLIMIT_STACK, …) is called. It may also happen if a region
// is split up due to an mprotect() or vm_protect() call.
//
// Stack regions created by the kernel and the pthreads library will be marked
// with the VM_MEMORY_STACK user tag. Scanning for multiple adjacent regions
// with the same tag should find an entire stack region. Checking that the
// protection on individual regions is not VM_PROT_NONE should guarantee that
// this algorithm doesn’t collect map entries belonging to another thread’s
// stack: well-behaved stacks (such as those created by the kernel and the
// pthreads library) have VM_PROT_NONE guard regions at their low-address
// ends.
//
// Other stack regions may not be so well-behaved and thus if user_tag is not
// VM_MEMORY_STACK, the single region that was found is used as-is without
// trying to merge it with other adjacent regions.
if (user_tag == VM_MEMORY_STACK) {
vm_address_t try_address = region_base;
vm_address_t original_try_address;
while (try_address += region_size,
original_try_address = try_address,
(kr = MachVMRegionRecurseDeepest(mach_task_self(),
&try_address,
&region_size,
&depth,
&protection,
&user_tag)) == KERN_SUCCESS &&
try_address == original_try_address &&
(protection & VM_PROT_READ) != 0 &&
user_tag == VM_MEMORY_STACK) {
total_region_size += region_size;
}
if (kr != KERN_SUCCESS && kr != KERN_INVALID_ADDRESS) {
// Tolerate KERN_INVALID_ADDRESS because it will be returned when there
// are no more regions in the map at or above the specified |try_address|.
MACH_LOG(INFO, kr) << "vm_region_recurse";
}
}
*stack_region_size = total_region_size;
return region_base;
}
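// Illustrative only, not added by this patch: a worked instance of the final
// adjustment above, using hypothetical addresses. Suppose the red-zone and
// extra-capture steps left start_address at 0x16fdffdcc inside a region based
// at 0x16fd00000. Rounding down to the 16-byte kDesiredAlignment gives
// 0x16fdffdc0, which still satisfies the DCHECK_GE against region_base.
static_assert((vm_address_t{0x16fdffdcc} & ~vm_address_t{15}) ==
                  vm_address_t{0x16fdffdc0},
              "16-byte round-down example");
static_assert((vm_address_t{0x16fdffdcc} & ~vm_address_t{15}) >=
                  vm_address_t{0x16fd00000},
              "aligned example start stays within the example region");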
} // namespace
namespace crashpad {
namespace internal {
ThreadSnapshotIOS::ThreadSnapshotIOS()
: ThreadSnapshot(),
context_(),
stack_(),
thread_id_(0),
thread_specific_data_address_(0),
suspend_count_(0),
priority_(0),
initialized_() {}
ThreadSnapshotIOS::~ThreadSnapshotIOS() {}
// static
thread_act_array_t ThreadSnapshotIOS::GetThreads(
mach_msg_type_number_t* count) {
thread_act_array_t threads;
kern_return_t kr = task_threads(mach_task_self(), &threads, count);
if (kr != KERN_SUCCESS) {
MACH_LOG(WARNING, kr) << "task_threads";
}
return threads;
}
bool ThreadSnapshotIOS::Initialize(thread_t thread) {
INITIALIZATION_STATE_SET_INITIALIZING(initialized_);
// TODO(justincohen): Move the following thread_get_state, thread_get_info,
// thread_policy_get and CalculateStackRegion to the serialize-on-read
// section.
thread_basic_info basic_info;
thread_precedence_policy precedence;
vm_size_t stack_region_size;
vm_address_t stack_region_address;
#if defined(ARCH_CPU_X86_64)
x86_thread_state64_t thread_state;
x86_float_state64_t float_state;
x86_debug_state64_t debug_state;
mach_msg_type_number_t thread_state_count = x86_THREAD_STATE64_COUNT;
mach_msg_type_number_t float_state_count = x86_FLOAT_STATE64_COUNT;
mach_msg_type_number_t debug_state_count = x86_DEBUG_STATE64_COUNT;
#elif defined(ARCH_CPU_ARM64)
arm_thread_state64_t thread_state;
arm_neon_state64_t float_state;
mach_msg_type_number_t thread_state_count = ARM_THREAD_STATE64_COUNT;
mach_msg_type_number_t float_state_count = ARM_NEON_STATE64_COUNT;
#endif
kern_return_t kr =
thread_get_state(thread,
kThreadStateFlavor,
reinterpret_cast<thread_state_t>(&thread_state),
&thread_state_count);
if (kr != KERN_SUCCESS) {
MACH_LOG(ERROR, kr) << "thread_get_state(" << kThreadStateFlavor << ")";
}
kr = thread_get_state(thread,
kFloatStateFlavor,
reinterpret_cast<thread_state_t>(&float_state),
&float_state_count);
if (kr != KERN_SUCCESS) {
MACH_LOG(ERROR, kr) << "thread_get_state(" << kFloatStateFlavor << ")";
}
#if defined(ARCH_CPU_X86_64)
kr = thread_get_state(thread,
kDebugStateFlavor,
reinterpret_cast<thread_state_t>(&debug_state),
&debug_state_count);
if (kr != KERN_SUCCESS) {
MACH_LOG(ERROR, kr) << "thread_get_state(" << kDebugStateFlavor << ")";
}
#endif
mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;
kr = thread_info(thread,
THREAD_BASIC_INFO,
reinterpret_cast<thread_info_t>(&basic_info),
&count);
if (kr != KERN_SUCCESS) {
MACH_LOG(WARNING, kr) << "thread_info(THREAD_BASIC_INFO)";
}
thread_identifier_info identifier_info;
count = THREAD_IDENTIFIER_INFO_COUNT;
kr = thread_info(thread,
THREAD_IDENTIFIER_INFO,
reinterpret_cast<thread_info_t>(&identifier_info),
&count);
if (kr != KERN_SUCCESS) {
MACH_LOG(WARNING, kr) << "thread_info(THREAD_IDENTIFIER_INFO)";
}
count = THREAD_PRECEDENCE_POLICY_COUNT;
boolean_t get_default = FALSE;
kr = thread_policy_get(thread,
THREAD_PRECEDENCE_POLICY,
reinterpret_cast<thread_policy_t>(&precedence),
&count,
&get_default);
if (kr != KERN_SUCCESS) {
MACH_LOG(ERROR, kr) << "thread_policy_get";
}
#if defined(ARCH_CPU_X86_64)
vm_address_t stack_pointer = thread_state.__rsp;
#elif defined(ARCH_CPU_ARM64)
vm_address_t stack_pointer = thread_state.__sp;
#endif
stack_region_address =
CalculateStackRegion(stack_pointer, &stack_region_size);
// TODO(justincohen): Assume the following will fill in snapshot data from
// a deserialized object.
thread_id_ = identifier_info.thread_id;
suspend_count_ = basic_info.suspend_count;
priority_ = precedence.importance;
// thread_identifier_info::thread_handle contains the base of the
// thread-specific data area, which on x86 and x86_64 is the thread’s base
// address of the %gs segment. 10.9.2 xnu-2422.90.20/osfmk/kern/thread.c
// thread_info_internal() gets the value from
// machine_thread::cthread_self, which is the same value used to set the
// %gs base in xnu-2422.90.20/osfmk/i386/pcb_native.c
// act_machine_switch_pcb().
//
// On ARM64 10.15.0 xnu-6153.11.26/osfmk/kern/thread.c, it sets
// thread_identifier_info_t::thread_handle to
// thread->machine.cthread_self, which is set to tsd_base in
// osfmk/arm64/pcb.c.
thread_specific_data_address_ = identifier_info.thread_handle;
stack_.Initialize(stack_region_address, stack_region_size);
#if defined(ARCH_CPU_X86_64)
context_.architecture = kCPUArchitectureX86_64;
context_.x86_64 = &context_x86_64_;
InitializeCPUContextX86_64(&context_x86_64_,
THREAD_STATE_NONE,
nullptr,
0,
&thread_state,
&float_state,
&debug_state);
#elif defined(ARCH_CPU_ARM64)
context_.architecture = kCPUArchitectureARM64;
context_.arm64 = &context_arm64_;
InitializeCPUContextARM64(&context_arm64_, &thread_state, &float_state);
#endif
INITIALIZATION_STATE_SET_VALID(initialized_);
return true;
}
const CPUContext* ThreadSnapshotIOS::Context() const {
INITIALIZATION_STATE_DCHECK_VALID(initialized_);
return &context_;
}
const MemorySnapshot* ThreadSnapshotIOS::Stack() const {
INITIALIZATION_STATE_DCHECK_VALID(initialized_);
return &stack_;
}
uint64_t ThreadSnapshotIOS::ThreadID() const {
INITIALIZATION_STATE_DCHECK_VALID(initialized_);
return thread_id_;
}
int ThreadSnapshotIOS::SuspendCount() const {
INITIALIZATION_STATE_DCHECK_VALID(initialized_);
return suspend_count_;
}
int ThreadSnapshotIOS::Priority() const {
INITIALIZATION_STATE_DCHECK_VALID(initialized_);
return priority_;
}
uint64_t ThreadSnapshotIOS::ThreadSpecificDataAddress() const {
INITIALIZATION_STATE_DCHECK_VALID(initialized_);
return thread_specific_data_address_;
}
std::vector<const MemorySnapshot*> ThreadSnapshotIOS::ExtraMemory() const {
return std::vector<const MemorySnapshot*>();
}
} // namespace internal
} // namespace crashpad
// Copyright 2020 The Crashpad Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef CRASHPAD_SNAPSHOT_IOS_THREAD_SNAPSHOT_IOS_H_
#define CRASHPAD_SNAPSHOT_IOS_THREAD_SNAPSHOT_IOS_H_
#include "base/macros.h"
#include "build/build_config.h"
#include "snapshot/cpu_context.h"
#include "snapshot/ios/memory_snapshot_ios.h"
#include "snapshot/thread_snapshot.h"
#include "util/misc/initialization_state_dcheck.h"
namespace crashpad {
namespace internal {
//! \brief A ThreadSnapshot of a thread on an iOS system.
class ThreadSnapshotIOS final : public ThreadSnapshot {
public:
ThreadSnapshotIOS();
~ThreadSnapshotIOS() override;
//! \brief Initializes the object.
//!
//! \param[in] thread The Mach thread used to initialize this object.
bool Initialize(thread_t thread);
//! \brief Returns an array of thread_t threads.
//!
//! \param[out] count The number of threads returned.
//!
//! \return An array of size \a count threads.
static thread_act_array_t GetThreads(mach_msg_type_number_t* count);
// ThreadSnapshot:
const CPUContext* Context() const override;
const MemorySnapshot* Stack() const override;
uint64_t ThreadID() const override;
int SuspendCount() const override;
int Priority() const override;
uint64_t ThreadSpecificDataAddress() const override;
std::vector<const MemorySnapshot*> ExtraMemory() const override;
private:
#if defined(ARCH_CPU_X86_64)
CPUContextX86_64 context_x86_64_;
#elif defined(ARCH_CPU_ARM64)
CPUContextARM64 context_arm64_;
#else
#error Port.
#endif // ARCH_CPU_X86_64
CPUContext context_;
MemorySnapshotIOS stack_;
uint64_t thread_id_;
uint64_t thread_specific_data_address_;
int suspend_count_;
int priority_;
InitializationStateDcheck initialized_;
DISALLOW_COPY_AND_ASSIGN(ThreadSnapshotIOS);
};
} // namespace internal
} // namespace crashpad
#endif // CRASHPAD_SNAPSHOT_IOS_THREAD_SNAPSHOT_IOS_H_
......@@ -436,6 +436,32 @@ void InitializeCPUContextX86_64(CPUContextX86_64* context,
} // namespace internal
#elif defined(ARCH_CPU_ARM_FAMILY)
namespace internal {
void InitializeCPUContextARM64(CPUContextARM64* context,
const arm_thread_state64_t* arm_thread_state64,
const arm_neon_state64_t* arm_neon_state64) {
// For the bytes copied here, context->regs and arm_thread_state64->__x share
// the same register layout, but the two structures declare their members
// differently, so there can't be a static_assert like the one below for
// fpsimd.
memcpy(context->regs, arm_thread_state64->__x, sizeof(context->regs));
context->sp = arm_thread_state64->__sp;
context->pc = arm_thread_state64->__pc;
context->spsr =
static_cast<decltype(context->spsr)>(arm_thread_state64->__cpsr);
static_assert(sizeof(context->fpsimd) == sizeof(arm_neon_state64->__v),
"fpsimd context size mismatch");
memcpy(context->fpsimd, arm_neon_state64->__v, sizeof(arm_neon_state64->__v));
context->fpsr = arm_neon_state64->__fpsr;
context->fpcr = arm_neon_state64->__fpcr;
}
} // namespace internal
#endif
} // namespace crashpad
......@@ -108,6 +108,17 @@ void InitializeCPUContextX86_64(CPUContextX86_64* context,
const x86_float_state64_t* x86_float_state64,
const x86_debug_state64_t* x86_debug_state64);
#elif defined(ARCH_CPU_ARM_FAMILY) || DOXYGEN
//! \brief Initializes a CPUContextARM64 structure from native context
//! structures.
//!
//! \param[out] context The CPUContextARM64 structure to initialize.
//! \param[in] arm_thread_state64 The state of the thread’s integer registers.
//! \param[in] arm_neon_state64 The state of the thread’s floating-point
//! registers.
void InitializeCPUContextARM64(CPUContextARM64* context,
const arm_thread_state64_t* arm_thread_state64,
const arm_neon_state64_t* arm_neon_state64);
#endif
} // namespace internal
......
......@@ -73,6 +73,8 @@ int main(int argc, char* argv[]) {
// runner.
const bool use_chromium_test_launcher =
!crashpad::test::WinChildProcess::IsChildProcess();
#elif defined(OS_ANDROID)
constexpr bool use_chromium_test_launcher = false;
#else // OS_WIN
constexpr bool use_chromium_test_launcher = true;
#endif // OS_WIN
......
......@@ -69,10 +69,15 @@ base::FilePath TestDataRootInternal() {
// out/{Debug,Release} relative to the Crashpad root.
base::FilePath executable_path;
if (Paths::Executable(&executable_path)) {
#if defined(OS_ANDROID)
base::FilePath candidate = executable_path.DirName()
.Append("crashpad_test_data");
#else
base::FilePath candidate =
base::FilePath(executable_path.DirName()
.Append(base::FilePath::kParentDirectory)
.Append(base::FilePath::kParentDirectory));
#endif
if (IsTestDataRoot(candidate)) {
return candidate;
}
......
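
Tying the test-data pieces together: the copy() target and extra_dist_files entries added to test/BUILD.gn above land the files next to the test executable on the device, which is what the new OS_ANDROID branch resolves. A hypothetical illustration (the device path below is an assumption, not stated anywhere in the patch):

#include "base/files/file_path.h"

// If the deployed binary were /data/local/tmp/crashpad_tests/crashpad_tests...
base::FilePath exe(FILE_PATH_LITERAL(
    "/data/local/tmp/crashpad_tests/crashpad_tests"));
base::FilePath data_root = exe.DirName().Append("crashpad_test_data");
// ...data_root becomes /data/local/tmp/crashpad_tests/crashpad_test_data,
// mirroring the $root_out_dir/crashpad_test_data layout produced by the GN
// copy() rule.
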
......@@ -41,7 +41,7 @@ ScopedTaskSuspend::ScopedTaskSuspend(const zx::process& process) {
for (const auto& thread : GetThreadHandles(process)) {
// We omit the crashed thread (blocked in an exception) as it is technically
// not suspended, cf. ZX-3772.
zx_info_thread info;
zx_info_thread_t info;
if (thread.get_info(
ZX_INFO_THREAD, &info, sizeof(info), nullptr, nullptr) == ZX_OK) {
if (info.state == ZX_THREAD_STATE_BLOCKED_EXCEPTION) {
......@@ -52,8 +52,16 @@ ScopedTaskSuspend::ScopedTaskSuspend(const zx::process& process) {
zx_signals_t observed = 0u;
const zx_status_t wait_status = thread.wait_one(
ZX_THREAD_SUSPENDED, zx::deadline_after(zx::msec(50)), &observed);
ZX_LOG_IF(ERROR, wait_status != ZX_OK, wait_status)
<< "thread failed to suspend";
if (wait_status != ZX_OK) {
zx_info_thread_t info = {};
zx_status_t info_status = thread.get_info(
ZX_INFO_THREAD, &info, sizeof(info), nullptr, nullptr);
ZX_LOG(ERROR, wait_status) << "thread failed to suspend";
LOG(ERROR) << "Thread info status " << info_status;
if (info_status == ZX_OK) {
LOG(ERROR) << "Thread state " << info.state;
}
}
}
}
......
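
The failure path added above logs info.state as a bare integer. As a sketch only (not part of this change; the helper name is hypothetical), the value could be mapped to the Zircon thread-state constant names before logging, using only states known to exist in <zircon/syscalls/object.h>:

#include <stdint.h>
#include <zircon/syscalls/object.h>

#include <string>

std::string ThreadStateName(uint32_t state) {
  switch (state) {
    case ZX_THREAD_STATE_NEW:
      return "ZX_THREAD_STATE_NEW";
    case ZX_THREAD_STATE_RUNNING:
      return "ZX_THREAD_STATE_RUNNING";
    case ZX_THREAD_STATE_SUSPENDED:
      return "ZX_THREAD_STATE_SUSPENDED";
    case ZX_THREAD_STATE_BLOCKED_EXCEPTION:
      return "ZX_THREAD_STATE_BLOCKED_EXCEPTION";
    case ZX_THREAD_STATE_DYING:
      return "ZX_THREAD_STATE_DYING";
    case ZX_THREAD_STATE_DEAD:
      return "ZX_THREAD_STATE_DEAD";
    default:
      return std::to_string(state);
  }
}

The "Thread state" log line could then emit ThreadStateName(info.state) instead of the raw number.
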
......@@ -14,6 +14,7 @@
#include "util/posix/process_info.h"
#include <sys/utsname.h>
#include <time.h>
#include <algorithm>
......@@ -21,6 +22,7 @@
#include <string>
#include <vector>
#include "base/strings/string_number_conversions.h"
#include "base/strings/stringprintf.h"
#include "build/build_config.h"
#include "gtest/gtest.h"
......@@ -29,6 +31,7 @@
#include "test/multiprocess.h"
#include "util/file/file_io.h"
#include "util/misc/implicit_cast.h"
#include "util/string/split_string.h"
#if defined(OS_LINUX) || defined(OS_ANDROID)
#include "util/linux/direct_ptrace_connection.h"
......@@ -93,11 +96,39 @@ void TestProcessSelfOrClone(const ProcessInfo& process_info) {
time(&now);
EXPECT_LE(start_time.tv_sec, now);
const std::vector<std::string>& expect_argv = GetMainArguments();
#if defined(OS_ANDROID) || defined(OS_LINUX)
// Prior to Linux 4.2, the kernel only allowed reading a single page from
// /proc/<pid>/cmdline, causing any further arguments to be truncated. Disable
// testing arguments in this case.
// TODO(jperaza): The main arguments are stored on the main thread's stack
// (and so should be included in dumps automatically), and
// ProcessInfo::Arguments() might be updated to read the arguments directly,
// rather than via procfs on older kernels.
utsname uts;
ASSERT_EQ(uname(&uts), 0) << ErrnoMessage("uname");
std::vector<std::string> parts = SplitString(uts.release, '.');
ASSERT_GE(parts.size(), 2u);
int major, minor;
ASSERT_TRUE(base::StringToInt(parts[0], &major));
ASSERT_TRUE(base::StringToInt(parts[1], &minor));
size_t argv_size = 0;
for (const auto& arg : expect_argv) {
argv_size += arg.size() + 1;
}
if ((major < 4 || (major == 4 && minor < 2)) &&
argv_size > static_cast<size_t>(getpagesize())) {
return;
}
#endif // OS_ANDROID || OS_LINUX
std::vector<std::string> argv;
ASSERT_TRUE(process_info.Arguments(&argv));
const std::vector<std::string>& expect_argv = GetMainArguments();
// expect_argv always contains the initial view of the arguments at the time
// the program was invoked. argv may contain this view, or it may contain the
// current view of arguments after gtest argv processing. argv may be a subset
......
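
Read concretely (an illustrative restatement only; the helper name is hypothetical), the new guard skips the argv comparison exactly when the kernel predates 4.2 and the combined arguments, including their NUL terminators, exceed one page:

#include <stddef.h>

bool SkipArgvCheck(int major, int minor, size_t argv_size, size_t page_size) {
  const bool pre_4_2 = major < 4 || (major == 4 && minor < 2);
  return pre_4_2 && argv_size > page_size;
}

// SkipArgvCheck(3, 10, 5000, 4096) is true, so the test returns early on such
// kernels; SkipArgvCheck(4, 4, 5000, 4096) is false and the comparison runs.
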