breakpad/src/client/linux/microdump_writer/microdump_writer.cc
Primiano Tucci adca10c8ff Add statistics about free space to microdump format.
When a crash occurs as a result of an allocation failure, it is useful
to know approximately what regions of the virtual address space remain
available, so that we know whether the crash should be attributed to
memory fragmentation, or some other cause.

BUG=525938
R=primiano@chromium.org

Review URL: https://codereview.chromium.org/1796803003 .
2016-05-23 16:06:26 +01:00

// Copyright (c) 2014, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This translation unit writes microdumps to the console (logcat on Android).
// See crbug.com/410294 for more info and design docs.

#include "client/linux/microdump_writer/microdump_writer.h"

#include <assert.h>     // assert()
#include <limits.h>     // NAME_MAX
#include <stdint.h>     // uintN_t, uintptr_t
#include <string.h>     // memcpy()
#include <sys/types.h>  // pid_t
#include <sys/utsname.h>
#include <unistd.h>     // sysconf()

#include <algorithm>    // std::min(), std::max()
#include <limits>

#include "client/linux/dump_writer_common/thread_info.h"
#include "client/linux/dump_writer_common/ucontext_reader.h"
#include "client/linux/handler/exception_handler.h"
#include "client/linux/handler/microdump_extra_info.h"
#include "client/linux/log/log.h"
#include "client/linux/minidump_writer/linux_ptrace_dumper.h"
#include "common/linux/file_id.h"
#include "common/linux/linux_libc_support.h"
#include "common/memory.h"

namespace {
using google_breakpad::auto_wasteful_vector;
using google_breakpad::ExceptionHandler;
using google_breakpad::kDefaultBuildIdSize;
using google_breakpad::LinuxDumper;
using google_breakpad::LinuxPtraceDumper;
using google_breakpad::MappingInfo;
using google_breakpad::MappingList;
using google_breakpad::MicrodumpExtraInfo;
using google_breakpad::RawContextCPU;
using google_breakpad::ThreadInfo;
using google_breakpad::UContextReader;
const size_t kLineBufferSize = 2048;
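// Clamps |src| to the representable range of Dst before converting, e.g. so
// that a large size_t value degrades to the maximum uint16_t instead of
// wrapping around.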
template <typename Dst, typename Src>
Dst saturated_cast(Src src) {
if (src >= std::numeric_limits<Dst>::max())
return std::numeric_limits<Dst>::max();
if (src <= std::numeric_limits<Dst>::min())
return std::numeric_limits<Dst>::min();
return static_cast<Dst>(src);
}
int Log2Floor(uint64_t n) {
// Copied from chromium src/base/bits.h
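// Binary-search for the most significant set bit: probe shifts of 32, 16, 8,
// 4, 2 and 1 bits, keeping each shift that still leaves a non-zero value.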
if (n == 0)
return -1;
int log = 0;
uint64_t value = n;
for (int i = 5; i >= 0; --i) {
int shift = (1 << i);
uint64_t x = value >> shift;
if (x != 0) {
value = x;
log += shift;
}
}
assert(value == 1u);
return log;
}
bool MappingsAreAdjacent(const MappingInfo& a, const MappingInfo& b) {
// Because of load biasing, we can end up with a situation where two
// mappings actually overlap. So we will define adjacency to also include a
// b start address that lies within a's address range (including starting
// immediately after a).
// Because load biasing only ever moves the start address backwards, the end
// address should still increase.
return a.start_addr <= b.start_addr && a.start_addr + a.size >= b.start_addr;
}
bool MappingLessThan(const MappingInfo* a, const MappingInfo* b) {
// Return true if mapping a is before mapping b.
// For the same reason (load biasing) we compare end addresses, which - unlike
// start addresses - will not have been modified.
return a->start_addr + a->size < b->start_addr + b->size;
}
size_t NextOrderedMapping(
const google_breakpad::wasteful_vector<MappingInfo*>& mappings,
size_t curr) {
// Find the mapping that directly follows mappings[curr].
// If no such mapping exists, return |invalid| to indicate this.
const size_t invalid = std::numeric_limits<size_t>::max();
size_t best = invalid;
for (size_t next = 0; next < mappings.size(); ++next) {
if (MappingLessThan(mappings[curr], mappings[next]) &&
(best == invalid || MappingLessThan(mappings[next], mappings[best]))) {
best = next;
}
}
return best;
}
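// Writes a microdump to the system log as a series of text records bracketed
// by -----BEGIN/END BREAKPAD MICRODUMP----- markers. Each record starts with
// a one-letter type: V (product/version), O (OS and architecture),
// G (GPU fingerprint), H (free-space statistics, non-LP64 builds only),
// S (crashing thread stack), C (CPU context) and M (mappings).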
class MicrodumpWriter {
public:
MicrodumpWriter(const ExceptionHandler::CrashContext* context,
const MappingList& mappings,
const MicrodumpExtraInfo& microdump_extra_info,
LinuxDumper* dumper)
: ucontext_(context ? &context->context : NULL),
#if !defined(__ARM_EABI__) && !defined(__mips__)
float_state_(context ? &context->float_state : NULL),
#endif
dumper_(dumper),
mapping_list_(mappings),
microdump_extra_info_(microdump_extra_info),
log_line_(NULL) {
log_line_ = reinterpret_cast<char*>(Alloc(kLineBufferSize));
if (log_line_)
log_line_[0] = '\0'; // Clear out the log line buffer.
}
~MicrodumpWriter() { dumper_->ThreadsResume(); }
bool Init() {
// In the exceptional case where the system was out of memory and there
// wasn't even room to allocate the line buffer, bail out. There is nothing
// useful we can possibly achieve without the ability to Log. At least let's
// try to not crash.
if (!dumper_->Init() || !log_line_)
return false;
return dumper_->ThreadsSuspend() && dumper_->LateInit();
}
bool Dump() {
bool success;
LogLine("-----BEGIN BREAKPAD MICRODUMP-----");
DumpProductInformation();
DumpOSInformation();
DumpGPUInformation();
#if !defined(__LP64__)
DumpFreeSpace();
#endif
success = DumpCrashingThread();
if (success)
success = DumpMappings();
LogLine("-----END BREAKPAD MICRODUMP-----");
dumper_->ThreadsResume();
return success;
}
private:
// Writes one line to the system log.
void LogLine(const char* msg) {
#if defined(__ANDROID__)
logger::writeToCrashLog(msg);
#else
logger::write(msg, my_strlen(msg));
logger::write("\n", 1);
#endif
}
// Stages the given string in the current line buffer.
void LogAppend(const char* str) {
my_strlcat(log_line_, str, kLineBufferSize);
}
// As above (required to take precedence over the template overload below).
void LogAppend(char* str) {
LogAppend(const_cast<const char*>(str));
}
// Stages the hex repr. of the given int type in the current line buffer.
template<typename T>
void LogAppend(T value) {
static const char HEX[] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'A', 'B', 'C', 'D', 'E', 'F'};
// Make enough room to hex encode the largest int type + NUL.
char hexstr[sizeof(T) * 2 + 1];
for (int i = sizeof(T) * 2 - 1; i >= 0; --i, value >>= 4)
hexstr[i] = HEX[static_cast<uint8_t>(value) & 0x0F];
hexstr[sizeof(T) * 2] = '\0';
LogAppend(hexstr);
}
// Stages the buffer content hex-encoded in the current line buffer.
void LogAppend(const void* buf, size_t length) {
const uint8_t* ptr = reinterpret_cast<const uint8_t*>(buf);
for (size_t i = 0; i < length; ++i, ++ptr)
LogAppend(*ptr);
}
// Writes out the current line buffer to the system log.
void LogCommitLine() {
LogLine(log_line_);
my_strlcpy(log_line_, "", kLineBufferSize);
}
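// Emits the product record, "V " followed by the caller-provided product
// info string (conventionally "<product>:<version>", mirroring the
// "UNKNOWN:0.0.0.0" fallback below).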
void DumpProductInformation() {
LogAppend("V ");
if (microdump_extra_info_.product_info) {
LogAppend(microdump_extra_info_.product_info);
} else {
LogAppend("UNKNOWN:0.0.0.0");
}
LogCommitLine();
}
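// Emits the OS record: "O ", the OS identifier (A for Android, L for other
// Linux), the runtime architecture, the number of CPUs (hex-encoded), the
// hardware architecture from uname(), and either the caller-provided build
// fingerprint or the kernel release/version.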
void DumpOSInformation() {
const uint8_t n_cpus = static_cast<uint8_t>(sysconf(_SC_NPROCESSORS_CONF));
#if defined(__ANDROID__)
const char kOSId[] = "A";
#else
const char kOSId[] = "L";
#endif
// Dump the runtime architecture. On multiarch devices it might not match the
// hw architecture (the one returned by uname()), for instance in the case of
// a 32-bit app running on an aarch64 device.
#if defined(__aarch64__)
const char kArch[] = "arm64";
#elif defined(__ARMEL__)
const char kArch[] = "arm";
#elif defined(__x86_64__)
const char kArch[] = "x86_64";
#elif defined(__i386__)
const char kArch[] = "x86";
#elif defined(__mips__)
# if _MIPS_SIM == _ABIO32
const char kArch[] = "mips";
# elif _MIPS_SIM == _ABI64
const char kArch[] = "mips64";
# else
# error "This mips ABI is currently not supported (n32)"
# endif
#else
#error "This code has not been ported to your platform yet"
#endif
LogAppend("O ");
LogAppend(kOSId);
LogAppend(" ");
LogAppend(kArch);
LogAppend(" ");
LogAppend(n_cpus);
LogAppend(" ");
// Dump the HW architecture (e.g., armv7l, aarch64).
struct utsname uts;
const bool has_uts_info = (uname(&uts) == 0);
const char* hwArch = has_uts_info ? uts.machine : "unknown_hw_arch";
LogAppend(hwArch);
LogAppend(" ");
// If the client has attached a build fingerprint to the MinidumpDescriptor,
// use that one. Otherwise try to get some basic info from uname().
if (microdump_extra_info_.build_fingerprint) {
LogAppend(microdump_extra_info_.build_fingerprint);
} else if (has_uts_info) {
LogAppend(uts.release);
LogAppend(" ");
LogAppend(uts.version);
} else {
LogAppend("no build fingerprint available");
}
LogCommitLine();
}
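// Emits the GPU record, "G " followed by the caller-provided GPU fingerprint
// (or "UNKNOWN" if none was supplied).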
void DumpGPUInformation() {
LogAppend("G ");
if (microdump_extra_info_.gpu_fingerprint) {
LogAppend(microdump_extra_info_.gpu_fingerprint);
} else {
LogAppend("UNKNOWN");
}
LogCommitLine();
}
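// Dumps the stack of the crashing thread: first an
// "S 0 <stack pointer> <stack base> <length>" header record, then the raw
// stack contents, hex-encoded in logcat-sized "S <address> <bytes>" chunks.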
bool DumpThreadStack(uint32_t thread_id,
uintptr_t stack_pointer,
int max_stack_len,
uint8_t** stack_copy) {
*stack_copy = NULL;
const void* stack;
size_t stack_len;
if (!dumper_->GetStackInfo(&stack, &stack_len, stack_pointer)) {
// The stack pointer might not be available. In this case we don't hard
// fail, just produce an (almost useless) microdump without a stack section.
return true;
}
LogAppend("S 0 ");
LogAppend(stack_pointer);
LogAppend(" ");
LogAppend(reinterpret_cast<uintptr_t>(stack));
LogAppend(" ");
LogAppend(stack_len);
LogCommitLine();
if (max_stack_len >= 0 &&
stack_len > static_cast<unsigned int>(max_stack_len)) {
stack_len = max_stack_len;
}
*stack_copy = reinterpret_cast<uint8_t*>(Alloc(stack_len));
dumper_->CopyFromProcess(*stack_copy, thread_id, stack, stack_len);
// Dump the content of the stack, splitting it into chunks whose size is
// compatible with the max logcat line size (see LOGGER_ENTRY_MAX_PAYLOAD).
const size_t STACK_DUMP_CHUNK_SIZE = 384;
for (size_t stack_off = 0; stack_off < stack_len;
stack_off += STACK_DUMP_CHUNK_SIZE) {
LogAppend("S ");
LogAppend(reinterpret_cast<uintptr_t>(stack) + stack_off);
LogAppend(" ");
LogAppend(*stack_copy + stack_off,
std::min(STACK_DUMP_CHUNK_SIZE, stack_len - stack_off));
LogCommitLine();
}
return true;
}
// Write information about the crashing thread.
bool DumpCrashingThread() {
const unsigned num_threads = dumper_->threads().size();
for (unsigned i = 0; i < num_threads; ++i) {
MDRawThread thread;
my_memset(&thread, 0, sizeof(thread));
thread.thread_id = dumper_->threads()[i];
// Dump only the crashing thread.
if (static_cast<pid_t>(thread.thread_id) != dumper_->crash_thread())
continue;
assert(ucontext_);
assert(!dumper_->IsPostMortem());
uint8_t* stack_copy;
const uintptr_t stack_ptr = UContextReader::GetStackPointer(ucontext_);
if (!DumpThreadStack(thread.thread_id, stack_ptr, -1, &stack_copy))
return false;
RawContextCPU cpu;
my_memset(&cpu, 0, sizeof(RawContextCPU));
#if !defined(__ARM_EABI__) && !defined(__mips__)
UContextReader::FillCPUContext(&cpu, ucontext_, float_state_);
#else
UContextReader::FillCPUContext(&cpu, ucontext_);
#endif
DumpCPUState(&cpu);
}
return true;
}
void DumpCPUState(RawContextCPU* cpu) {
LogAppend("C ");
LogAppend(cpu, sizeof(*cpu));
LogCommitLine();
}
// If there is caller-provided information about this mapping
// in mapping_list_, return true. Otherwise, return false.
bool HaveMappingInfo(const MappingInfo& mapping) {
for (MappingList::const_iterator iter = mapping_list_.begin();
iter != mapping_list_.end();
++iter) {
// Ignore any mappings that are wholly contained within
// mappings already present in mapping_list_.
if (mapping.start_addr >= iter->first.start_addr &&
(mapping.start_addr + mapping.size) <=
(iter->first.start_addr + iter->first.size)) {
return true;
}
}
return false;
}
// Dump information about the provided |mapping|. If |identifier| is non-NULL,
// use it instead of calculating a file ID from the mapping.
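// Emits an "M" record: mapping start address, offset and size, the module
// identifier (with the age, always 0 on Linux, appended) and the file name.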
void DumpModule(const MappingInfo& mapping,
bool member,
unsigned int mapping_id,
const uint8_t* identifier) {
auto_wasteful_vector<uint8_t, kDefaultBuildIdSize> identifier_bytes(
dumper_->allocator());
if (identifier) {
// GUID was provided by caller.
identifier_bytes.insert(identifier_bytes.end(),
identifier,
identifier + sizeof(MDGUID));
} else {
dumper_->ElfFileIdentifierForMapping(
mapping,
member,
mapping_id,
identifier_bytes);
}
// Copy as many bytes of |identifier| as will fit into an MDGUID.
MDGUID module_identifier = {0};
memcpy(&module_identifier, &identifier_bytes[0],
std::min(sizeof(MDGUID), identifier_bytes.size()));
char file_name[NAME_MAX];
char file_path[NAME_MAX];
dumper_->GetMappingEffectiveNameAndPath(
mapping, file_path, sizeof(file_path), file_name, sizeof(file_name));
LogAppend("M ");
LogAppend(static_cast<uintptr_t>(mapping.start_addr));
LogAppend(" ");
LogAppend(mapping.offset);
LogAppend(" ");
LogAppend(mapping.size);
LogAppend(" ");
LogAppend(module_identifier.data1);
LogAppend(module_identifier.data2);
LogAppend(module_identifier.data3);
LogAppend(module_identifier.data4[0]);
LogAppend(module_identifier.data4[1]);
LogAppend(module_identifier.data4[2]);
LogAppend(module_identifier.data4[3]);
LogAppend(module_identifier.data4[4]);
LogAppend(module_identifier.data4[5]);
LogAppend(module_identifier.data4[6]);
LogAppend(module_identifier.data4[7]);
LogAppend("0 "); // Age is always 0 on Linux.
LogAppend(file_name);
LogCommitLine();
}
#if !defined(__LP64__)
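// Emits an "H" record summarizing the unmapped holes in the virtual address
// space: lowest and highest mapped addresses, hole count, largest hole and
// total hole size, followed by a "<log2(size)>:<count>" histogram of the
// non-empty hole-size buckets.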
void DumpFreeSpace() {
const google_breakpad::wasteful_vector<MappingInfo*>& mappings =
dumper_->mappings();
if (mappings.size() == 0) return;
// This is complicated by the fact that mappings is not in order. It should
// be mostly in order; however, the mapping that contains the entry point for
// the process is always at the front of the vector.
static const int HBITS = sizeof(size_t) * 8;
size_t hole_histogram[HBITS];
my_memset(hole_histogram, 0, sizeof(hole_histogram));
// Find the lowest address mapping.
size_t curr = 0;
for (size_t i = 1; i < mappings.size(); ++i) {
if (mappings[i]->start_addr < mappings[curr]->start_addr) curr = i;
}
uintptr_t lo_addr = mappings[curr]->start_addr;
size_t hole_cnt = 0;
size_t hole_max = 0;
size_t hole_sum = 0;
while (true) {
// Skip to the end of an adjacent run of mappings. This is an optimization
// that exploits the fact that mappings is mostly sorted.
while (curr != mappings.size() - 1 &&
MappingsAreAdjacent(*mappings[curr], *mappings[curr + 1])) {
++curr;
}
size_t next = NextOrderedMapping(mappings, curr);
if (next == std::numeric_limits<size_t>::max())
break;
uintptr_t hole_lo = mappings[curr]->start_addr + mappings[curr]->size;
uintptr_t hole_hi = mappings[next]->start_addr;
if (hole_hi > hole_lo) {
size_t hole_sz = hole_hi - hole_lo;
int log2_hole_sz = Log2Floor(hole_sz);
hole_sum += hole_sz;
hole_max = std::max(hole_sz, hole_max);
++hole_cnt;
++hole_histogram[log2_hole_sz];
}
curr = next;
}
uintptr_t hi_addr = mappings[curr]->start_addr + mappings[curr]->size;
LogAppend("H ");
LogAppend(lo_addr);
LogAppend(" ");
LogAppend(hi_addr);
LogAppend(" ");
LogAppend(saturated_cast<uint16_t>(hole_cnt));
LogAppend(" ");
LogAppend(hole_max);
LogAppend(" ");
LogAppend(hole_sum);
for (unsigned int i = 0; i < HBITS; ++i) {
if (!hole_histogram[i]) continue;
LogAppend(" ");
LogAppend(saturated_cast<uint8_t>(i));
LogAppend(":");
LogAppend(saturated_cast<uint8_t>(hole_histogram[i]));
}
LogCommitLine();
}
#endif
// Write information about the mappings in effect.
bool DumpMappings() {
// First write all the mappings from the dumper
for (unsigned i = 0; i < dumper_->mappings().size(); ++i) {
const MappingInfo& mapping = *dumper_->mappings()[i];
if (mapping.name[0] == 0 || // only want modules with filenames.
!mapping.exec || // only want executable mappings.
mapping.size < 4096 || // too small to get a signature for.
HaveMappingInfo(mapping)) {
continue;
}
DumpModule(mapping, true, i, NULL);
}
// Next write all the mappings provided by the caller
for (MappingList::const_iterator iter = mapping_list_.begin();
iter != mapping_list_.end();
++iter) {
DumpModule(iter->first, false, 0, iter->second);
}
return true;
}
void* Alloc(unsigned bytes) { return dumper_->allocator()->Alloc(bytes); }
const struct ucontext* const ucontext_;
#if !defined(__ARM_EABI__) && !defined(__mips__)
const google_breakpad::fpstate_t* const float_state_;
#endif
LinuxDumper* dumper_;
const MappingList& mapping_list_;
const MicrodumpExtraInfo microdump_extra_info_;
char* log_line_;
};
} // namespace
namespace google_breakpad {
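// Writes a microdump for |crashing_process| to the system log. |blob| should
// point to the ExceptionHandler::CrashContext captured at crash time; if
// |blob_size| does not match sizeof(CrashContext), the dump is aborted.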
bool WriteMicrodump(pid_t crashing_process,
const void* blob,
size_t blob_size,
const MappingList& mappings,
const MicrodumpExtraInfo& microdump_extra_info) {
LinuxPtraceDumper dumper(crashing_process);
const ExceptionHandler::CrashContext* context = NULL;
if (blob) {
if (blob_size != sizeof(ExceptionHandler::CrashContext))
return false;
context = reinterpret_cast<const ExceptionHandler::CrashContext*>(blob);
dumper.set_crash_address(
reinterpret_cast<uintptr_t>(context->siginfo.si_addr));
dumper.set_crash_signal(context->siginfo.si_signo);
dumper.set_crash_thread(context->tid);
}
MicrodumpWriter writer(context, mappings, microdump_extra_info, &dumper);
if (!writer.Init())
return false;
return writer.Dump();
}
} // namespace google_breakpad