author     Joshua Peraza <jperaza@chromium.org>  2019-06-11 11:48:14 -0700
committer  Joshua Peraza <jperaza@chromium.org>  2019-06-11 19:04:02 +0000
commit     c2467077b93523c9ab2b8a7548145201dc891328 (patch)
tree       190c20f1a8628593db1aac0065852189828a32bf /src
parent     linux: fix broken tests (diff)
download   breakpad-c2467077b93523c9ab2b8a7548145201dc891328.tar.xz
Enable truncation of module ranges
ELF modules are loaded in memory in several, possibly discontiguous, segments. If the holes between segments are large enough, other things, possibly other ELF modules, may be mapped in that space. Crashpad records a module's range as running from the base address of its lowest mapped segment to the high address of its highest mapped segment. This means that when one module is mapped into a hole in another, it appears to the Breakpad processor as overlapping modules. Module ranges matter to the Breakpad processor during stackwalking for identifying which module a particular program counter belongs to (i.e. mapping the address to a module's text segment).

This patch addresses the issue of overlapping modules by truncating the range of the module with the lower base address. A typical module's text segment is its first loaded segment, which leaves the text segment's range unaffected. Module producers can restrict the size of holes in their ELF modules with the linker flag "-Wl,-z,max-page-size=4096", preventing other modules from being mapped in their address range. Properly representing ELF module address ranges would require extensions to the minidump format to encode any holes.

crbug.com/crashpad/298

This patch also renames the concept of "shrinking down" (which truncated the upper of two overlapping ranges) to "truncate upper".

Change-Id: I4599201f1e43918db036c390961f8b39e3af1849
Reviewed-on: https://chromium-review.googlesource.com/c/breakpad/breakpad/+/1646932
Reviewed-by: Mark Mentovai <mark@chromium.org>
Diffstat (limited to 'src')
-rw-r--r--  src/google_breakpad/processor/code_modules.h         |   3
-rw-r--r--  src/google_breakpad/processor/minidump.h             |   8
-rw-r--r--  src/processor/basic_code_modules.cc                  |  11
-rw-r--r--  src/processor/basic_code_modules.h                   |   3
-rw-r--r--  src/processor/microdump.cc                           |   3
-rw-r--r--  src/processor/minidump.cc                            |  34
-rw-r--r--  src/processor/processor.gyp                          |   3
-rw-r--r--  src/processor/range_map-inl.h                        | 109
-rw-r--r--  src/processor/range_map.h                            |  30
-rw-r--r--  src/processor/range_map_truncate_lower_unittest.cc   | 348
-rw-r--r--  src/processor/range_map_truncate_upper_unittest.cc (renamed from src/processor/range_map_shrink_down_unittest.cc) |  35
-rw-r--r--  src/processor/stackwalker_unittest_utils.h           |   5
12 files changed, 484 insertions, 108 deletions
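
Before the patch body, a minimal C++ sketch (not part of this change) of the new kTruncateLower behavior described above. RangeMap, MergeRangeStrategy, SetMergeStrategy, and StoreRange are taken from the diff below; the module addresses are invented:

  #include <stdint.h>

  #include "processor/range_map-inl.h"

  using google_breakpad::MergeRangeStrategy;
  using google_breakpad::RangeMap;

  void TruncateLowerExample() {
    RangeMap<uint64_t, int> modules;
    modules.SetMergeStrategy(MergeRangeStrategy::kTruncateLower);

    // Module 1 spans [0x1000, 0x5000); module 2 is mapped into a hole
    // inside it at [0x3000, 0x4000).
    modules.StoreRange(0x1000, 0x4000, 1);  // base, size, entry
    modules.StoreRange(0x3000, 0x1000, 2);

    // Module 1 has the lower base address, so its range is truncated to
    // [0x1000, 0x3000): a program counter in [0x3000, 0x4000) now resolves
    // to module 2, and [0x4000, 0x5000) is left unmapped.
  }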
diff --git a/src/google_breakpad/processor/code_modules.h b/src/google_breakpad/processor/code_modules.h
index 509137cb..74f113c1 100644
--- a/src/google_breakpad/processor/code_modules.h
+++ b/src/google_breakpad/processor/code_modules.h
@@ -101,9 +101,6 @@ class CodeModules {
// down due to address range conflicts with other modules.
virtual std::vector<linked_ptr<const CodeModule> >
GetShrunkRangeModules() const = 0;
-
- // Returns true, if module address range shrink is enabled.
- virtual bool IsModuleShrinkEnabled() const = 0;
};
} // namespace google_breakpad
diff --git a/src/google_breakpad/processor/minidump.h b/src/google_breakpad/processor/minidump.h
index febdaeb7..d712cb66 100644
--- a/src/google_breakpad/processor/minidump.h
+++ b/src/google_breakpad/processor/minidump.h
@@ -523,9 +523,6 @@ class MinidumpModuleList : public MinidumpStream,
// down due to address range conflicts with other modules.
virtual vector<linked_ptr<const CodeModule> > GetShrunkRangeModules() const;
- // Returns true, if module address range shrink is enabled.
- virtual bool IsModuleShrinkEnabled() const;
-
// Print a human-readable representation of the object to stdout.
void Print();
@@ -847,7 +844,6 @@ class MinidumpUnloadedModuleList : public MinidumpStream,
GetModuleAtIndex(unsigned int index) const override;
const CodeModules* Copy() const override;
vector<linked_ptr<const CodeModule>> GetShrunkRangeModules() const override;
- bool IsModuleShrinkEnabled() const override;
protected:
explicit MinidumpUnloadedModuleList(Minidump* minidump_);
@@ -1268,6 +1264,10 @@ class Minidump {
// Is the OS Android.
bool IsAndroid();
+ // Determines the platform where the minidump was produced. |platform| is
+ // valid iff this method returns true.
+ bool GetPlatform(MDOSPlatform* platform);
+
// Get current hexdump display settings.
unsigned int HexdumpMode() const { return hexdump_ ? hexdump_width_ : 0; }
diff --git a/src/processor/basic_code_modules.cc b/src/processor/basic_code_modules.cc
index 48d97167..f71aeb74 100644
--- a/src/processor/basic_code_modules.cc
+++ b/src/processor/basic_code_modules.cc
@@ -49,13 +49,14 @@ namespace google_breakpad {
using std::vector;
-BasicCodeModules::BasicCodeModules(const CodeModules *that)
+BasicCodeModules::BasicCodeModules(const CodeModules* that,
+ MergeRangeStrategy strategy)
: main_address_(0), map_() {
BPLOG_IF(ERROR, !that) << "BasicCodeModules::BasicCodeModules requires "
"|that|";
assert(that);
- map_.SetEnableShrinkDown(that->IsModuleShrinkEnabled());
+ map_.SetMergeStrategy(strategy);
const CodeModule *main_module = that->GetMainModule();
if (main_module)
@@ -140,7 +141,7 @@ const CodeModule* BasicCodeModules::GetModuleAtIndex(
}
const CodeModules* BasicCodeModules::Copy() const {
- return new BasicCodeModules(this);
+ return new BasicCodeModules(this, map_.GetMergeStrategy());
}
vector<linked_ptr<const CodeModule> >
@@ -148,8 +149,4 @@ BasicCodeModules::GetShrunkRangeModules() const {
return shrunk_range_modules_;
}
-bool BasicCodeModules::IsModuleShrinkEnabled() const {
- return map_.IsShrinkDownEnabled();
-}
-
} // namespace google_breakpad
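
Since the merge strategy is now a constructor argument, Copy() forwards the source map's strategy and other callers must choose one explicitly. A hedged sketch of such a caller (Snapshot is a hypothetical helper, not part of this patch):

  #include "processor/basic_code_modules.h"

  using google_breakpad::BasicCodeModules;
  using google_breakpad::CodeModules;
  using google_breakpad::MergeRangeStrategy;

  // Copies |source| into a BasicCodeModules whose range map resolves
  // overlaps by truncating the lower of two conflicting ranges.
  const CodeModules* Snapshot(const CodeModules* source) {
    return new BasicCodeModules(source, MergeRangeStrategy::kTruncateLower);
  }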
diff --git a/src/processor/basic_code_modules.h b/src/processor/basic_code_modules.h
index 50f8a03d..45ebc53b 100644
--- a/src/processor/basic_code_modules.h
+++ b/src/processor/basic_code_modules.h
@@ -58,7 +58,7 @@ class BasicCodeModules : public CodeModules {
// the CodeModules and CodeModule interfaces without requiring all of the
// resources that other implementations may require. A copy will be
// made of each contained CodeModule using CodeModule::Copy.
- explicit BasicCodeModules(const CodeModules *that);
+ BasicCodeModules(const CodeModules *that, MergeRangeStrategy strategy);
virtual ~BasicCodeModules();
@@ -71,7 +71,6 @@ class BasicCodeModules : public CodeModules {
virtual const CodeModules* Copy() const;
virtual std::vector<linked_ptr<const CodeModule> >
GetShrunkRangeModules() const;
- virtual bool IsModuleShrinkEnabled() const;
protected:
BasicCodeModules();
diff --git a/src/processor/microdump.cc b/src/processor/microdump.cc
index 13e261b4..d8141a2a 100644
--- a/src/processor/microdump.cc
+++ b/src/processor/microdump.cc
@@ -113,7 +113,8 @@ void MicrodumpModules::Add(const CodeModule* module) {
}
void MicrodumpModules::SetEnableModuleShrink(bool is_enabled) {
- map_.SetEnableShrinkDown(is_enabled);
+ map_.SetMergeStrategy(is_enabled ? MergeRangeStrategy::kTruncateUpper
+ : MergeRangeStrategy::kExclusiveRanges);
}
//
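
SetEnableModuleShrink keeps its boolean signature for existing microdump callers; only the mapping onto the new strategies changes. A small sketch of the equivalence (assuming a default-constructed MicrodumpModules is valid here, as in Microdump itself):

  #include "google_breakpad/processor/microdump.h"

  void ConfigureMicrodumpModules() {
    google_breakpad::MicrodumpModules modules;
    // true selects MergeRangeStrategy::kTruncateUpper (the old "shrink
    // down" behavior); false selects kExclusiveRanges, the default.
    modules.SetEnableModuleShrink(true);
  }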
diff --git a/src/processor/minidump.cc b/src/processor/minidump.cc
index afc5f038..aebed0e3 100644
--- a/src/processor/minidump.cc
+++ b/src/processor/minidump.cc
@@ -2667,7 +2667,14 @@ MinidumpModuleList::MinidumpModuleList(Minidump* minidump)
range_map_(new RangeMap<uint64_t, unsigned int>()),
modules_(NULL),
module_count_(0) {
- range_map_->SetEnableShrinkDown(minidump_->IsAndroid());
+ MDOSPlatform platform;
+ if (minidump_->GetPlatform(&platform)) {
+ if (platform == MD_OS_ANDROID) {
+ range_map_->SetMergeStrategy(MergeRangeStrategy::kTruncateUpper);
+ } else if (platform == MD_OS_LINUX) {
+ range_map_->SetMergeStrategy(MergeRangeStrategy::kTruncateLower);
+ }
+ }
}
@@ -2931,7 +2938,7 @@ const MinidumpModule* MinidumpModuleList::GetModuleAtIndex(
const CodeModules* MinidumpModuleList::Copy() const {
- return new BasicCodeModules(this);
+ return new BasicCodeModules(this, range_map_->GetMergeStrategy());
}
vector<linked_ptr<const CodeModule> >
@@ -2939,10 +2946,6 @@ MinidumpModuleList::GetShrunkRangeModules() const {
return vector<linked_ptr<const CodeModule> >();
}
-bool MinidumpModuleList::IsModuleShrinkEnabled() const {
- return range_map_->IsShrinkDownEnabled();
-}
-
void MinidumpModuleList::Print() {
if (!valid_) {
BPLOG(ERROR) << "MinidumpModuleList cannot print invalid data";
@@ -3870,7 +3873,7 @@ MinidumpUnloadedModuleList::MinidumpUnloadedModuleList(Minidump* minidump)
range_map_(new RangeMap<uint64_t, unsigned int>()),
unloaded_modules_(NULL),
module_count_(0) {
- range_map_->SetEnableShrinkDown(true);
+ range_map_->SetMergeStrategy(MergeRangeStrategy::kTruncateLower);
}
MinidumpUnloadedModuleList::~MinidumpUnloadedModuleList() {
@@ -4048,7 +4051,7 @@ MinidumpUnloadedModuleList::GetModuleAtIndex(
}
const CodeModules* MinidumpUnloadedModuleList::Copy() const {
- return new BasicCodeModules(this);
+ return new BasicCodeModules(this, range_map_->GetMergeStrategy());
}
vector<linked_ptr<const CodeModule>>
@@ -4056,10 +4059,6 @@ MinidumpUnloadedModuleList::GetShrunkRangeModules() const {
return vector<linked_ptr<const CodeModule> >();
}
-bool MinidumpUnloadedModuleList::IsModuleShrinkEnabled() const {
- return range_map_->IsShrinkDownEnabled();
-}
-
//
// MinidumpMiscInfo
@@ -5386,6 +5385,11 @@ MinidumpLinuxMapsList *Minidump::GetLinuxMapsList() {
}
bool Minidump::IsAndroid() {
+ MDOSPlatform platform;
+ return GetPlatform(&platform) && platform == MD_OS_ANDROID;
+}
+
+bool Minidump::GetPlatform(MDOSPlatform* platform) {
// Save the current stream position
off_t saved_position = Tell();
if (saved_position == -1) {
@@ -5400,7 +5404,11 @@ bool Minidump::IsAndroid() {
return false;
}
- return system_info && system_info->platform_id == MD_OS_ANDROID;
+ if (!system_info) {
+ return false;
+ }
+ *platform = static_cast<MDOSPlatform>(system_info->platform_id);
+ return true;
}
MinidumpCrashpadInfo* Minidump::GetCrashpadInfo() {
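
GetPlatform generalizes IsAndroid so MinidumpModuleList can pick a merge strategy per platform; |platform| is meaningful only when the call returns true. A usage sketch (the dump filename is hypothetical):

  #include "google_breakpad/processor/minidump.h"

  void InspectPlatform() {
    google_breakpad::Minidump dump("example.dmp");  // hypothetical file
    MDOSPlatform platform;
    if (dump.Read() && dump.GetPlatform(&platform) &&
        platform == MD_OS_LINUX) {
      // Overlapping loaded-module ranges in this dump will be resolved
      // with MergeRangeStrategy::kTruncateLower.
    }
  }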
diff --git a/src/processor/processor.gyp b/src/processor/processor.gyp
index feec7443..93896c0e 100644
--- a/src/processor/processor.gyp
+++ b/src/processor/processor.gyp
@@ -156,7 +156,8 @@
'minidump_unittest.cc',
'pathname_stripper_unittest.cc',
'postfix_evaluator_unittest.cc',
- 'range_map_shrink_down_unittest.cc',
+ 'range_map_truncate_lower_unittest.cc',
+ 'range_map_truncate_upper_unittest.cc',
'range_map_unittest.cc',
'stackwalker_address_list_unittest.cc',
'stackwalker_amd64_unittest.cc',
diff --git a/src/processor/range_map-inl.h b/src/processor/range_map-inl.h
index f7126098..4d3b0eb9 100644
--- a/src/processor/range_map-inl.h
+++ b/src/processor/range_map-inl.h
@@ -47,17 +47,6 @@
namespace google_breakpad {
template<typename AddressType, typename EntryType>
-void RangeMap<AddressType, EntryType>::SetEnableShrinkDown(
- bool enable_shrink_down) {
- enable_shrink_down_ = enable_shrink_down;
-}
-
-template<typename AddressType, typename EntryType>
-bool RangeMap<AddressType, EntryType>::IsShrinkDownEnabled() const {
- return enable_shrink_down_;
-}
-
-template<typename AddressType, typename EntryType>
bool RangeMap<AddressType, EntryType>::StoreRange(const AddressType &base,
const AddressType &size,
const EntryType &entry) {
@@ -88,11 +77,28 @@ bool RangeMap<AddressType, EntryType>::StoreRangeInternal(
MapConstIterator iterator_high = map_.lower_bound(high);
if (iterator_base != iterator_high) {
- // Some other range begins in the space used by this range. It may be
+ // Some other range ends in the space used by this range. It may be
// contained within the space used by this range, or it may extend lower.
- // If enable_shrink_down_ is true, shrink the current range down, otherwise
- // this is an error.
- if (enable_shrink_down_) {
+ if (merge_strategy_ == MergeRangeStrategy::kTruncateLower) {
+ // Truncate the range with the lower base address.
+ AddressType other_base = iterator_base->second.base();
+ if (base < other_base) {
+ return StoreRangeInternal(base, delta, other_base - base, entry);
+ } else if (other_base < base) {
+ EntryType other_entry;
+ AddressType other_high, other_size, other_delta;
+ other_high = iterator_base->first;
+ RetrieveRange(other_high, &other_entry, &other_base, &other_delta,
+ &other_size);
+ map_.erase(iterator_base);
+ map_.insert(
+ MapValue(base - 1, Range(other_base, other_delta, other_entry)));
+ return StoreRangeInternal(base, delta, size, entry);
+ } else {
+ return false;
+ }
+ } else if (merge_strategy_ == MergeRangeStrategy::kTruncateUpper) {
+ // Truncate the lower portion of this range.
AddressType additional_delta = iterator_base->first - base + 1;
return StoreRangeInternal(base + additional_delta,
delta + additional_delta,
@@ -112,44 +118,57 @@ bool RangeMap<AddressType, EntryType>::StoreRangeInternal(
}
}
- if (iterator_high != map_.end()) {
- if (iterator_high->second.base() <= high) {
- // The range above this one overlaps with this one. It may fully
- // contain this range, or it may begin within this range and extend
- // higher. If enable_shrink_down_ is true, shrink the other range down,
- // otherwise this is an error.
- if (enable_shrink_down_ && iterator_high->first > high) {
- // Shrink the other range down.
- AddressType other_high = iterator_high->first;
- AddressType additional_delta =
- high - iterator_high->second.base() + 1;
+ if (iterator_high != map_.end() && iterator_high->second.base() <= high) {
+ // The range above this one overlaps with this one. It may fully
+ // contain this range, or it may begin within this range and extend
+ // higher.
+ if (merge_strategy_ == MergeRangeStrategy::kTruncateLower) {
+ AddressType other_base = iterator_high->second.base();
+ if (base < other_base) {
+ return StoreRangeInternal(base, delta, other_base - base, entry);
+ } else if (other_base < base) {
EntryType other_entry;
- AddressType other_base = AddressType();
- AddressType other_size = AddressType();
- AddressType other_delta = AddressType();
+ AddressType other_high, other_size, other_delta;
+ other_high = iterator_high->first;
RetrieveRange(other_high, &other_entry, &other_base, &other_delta,
&other_size);
map_.erase(iterator_high);
- map_.insert(MapValue(other_high,
- Range(other_base + additional_delta,
- other_delta + additional_delta,
- other_entry)));
- // Retry to store this range.
+ map_.insert(
+ MapValue(base - 1, Range(other_base, other_delta, other_entry)));
return StoreRangeInternal(base, delta, size, entry);
} else {
- // The processor hits this case too frequently with common symbol files.
- // This is most appropriate for a DEBUG channel, but since none exists
- // now simply comment out this logging.
- //
- // AddressType other_base = iterator_high->second.base();
- // AddressType other_size = iterator_high->first - other_base + 1;
- // BPLOG(INFO) << "StoreRangeInternal failed, an existing range "
- // << "contains or extends higher than the new range: new "
- // << HexString(base) << "+" << HexString(size)
- // << ", existing " << HexString(other_base) << "+"
- // << HexString(other_size);
return false;
}
+ } else if (merge_strategy_ == MergeRangeStrategy::kTruncateUpper &&
+ iterator_high->first > high) {
+ // Shrink the other range down.
+ AddressType other_high = iterator_high->first;
+ AddressType additional_delta = high - iterator_high->second.base() + 1;
+ EntryType other_entry;
+ AddressType other_base = AddressType();
+ AddressType other_size = AddressType();
+ AddressType other_delta = AddressType();
+ RetrieveRange(other_high, &other_entry, &other_base, &other_delta,
+ &other_size);
+ map_.erase(iterator_high);
+ map_.insert(MapValue(other_high,
+ Range(other_base + additional_delta,
+ other_delta + additional_delta, other_entry)));
+ // Retry to store this range.
+ return StoreRangeInternal(base, delta, size, entry);
+ } else {
+ // The processor hits this case too frequently with common symbol files.
+ // This is most appropriate for a DEBUG channel, but since none exists
+ // now simply comment out this logging.
+ //
+ // AddressType other_base = iterator_high->second.base();
+ // AddressType other_size = iterator_high->first - other_base + 1;
+ // BPLOG(INFO) << "StoreRangeInternal failed, an existing range "
+ // << "contains or extends higher than the new range: new "
+ // << HexString(base) << "+" << HexString(size)
+ // << ", existing " << HexString(other_base) << "+"
+ // << HexString(other_size);
+ return false;
}
}
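
For contrast with the kTruncateLower sketch earlier, the preserved kTruncateUpper path handles the same two stores by keeping the later range intact and moving the earlier range's base up past the overlap. A sketch with the same invented addresses:

  #include <stdint.h>

  #include "processor/range_map-inl.h"

  using google_breakpad::MergeRangeStrategy;
  using google_breakpad::RangeMap;

  void TruncateUpperExample() {
    RangeMap<uint64_t, int> map;
    map.SetMergeStrategy(MergeRangeStrategy::kTruncateUpper);

    map.StoreRange(0x1000, 0x4000, 1);  // [0x1000, 0x5000)
    map.StoreRange(0x3000, 0x1000, 2);  // [0x3000, 0x4000)

    // Range 1 has the greater high address, so its base is moved up past
    // the overlap: it becomes [0x4000, 0x5000), range 2 keeps
    // [0x3000, 0x4000), and [0x1000, 0x3000) is left unmapped.
  }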
diff --git a/src/processor/range_map.h b/src/processor/range_map.h
index d90a6732..33f32973 100644
--- a/src/processor/range_map.h
+++ b/src/processor/range_map.h
@@ -49,18 +49,29 @@ namespace google_breakpad {
// Forward declarations (for later friend declarations of specialized template).
template<class, class> class RangeMapSerializer;
+// Determines what happens when two ranges overlap.
+enum class MergeRangeStrategy {
+ // When two ranges overlap, the new range fails to be inserted. The default
+ // strategy.
+ kExclusiveRanges,
+
+ // The range with the lower base address will be truncated such that its
+ // high address is one less than the base of the range above it.
+ kTruncateLower,
+
+ // The range with the greater high address has its range truncated such
+ // that its base address is one greater than the high address of the range
+ // below it.
+ kTruncateUpper
+};
+
template<typename AddressType, typename EntryType>
class RangeMap {
public:
- RangeMap() : enable_shrink_down_(false), map_() {}
+ RangeMap() : merge_strategy_(MergeRangeStrategy::kExclusiveRanges), map_() {}
+
+ void SetMergeStrategy(MergeRangeStrategy strat) { merge_strategy_ = strat; }
- // |enable_shrink_down| tells whether overlapping ranges can be shrunk down.
- // If true, then adding a new range that overlaps with an existing one can
- // be a successful operation. The range which ends at the higher address
- // will be shrunk down by moving its start position to a higher address so
- // that it does not overlap anymore.
- void SetEnableShrinkDown(bool enable_shrink_down);
- bool IsShrinkDownEnabled() const;
+ MergeRangeStrategy GetMergeStrategy() const { return merge_strategy_; }
// Inserts a range into the map. Returns false for a parameter error,
// or if the location of the range would conflict with a range already
@@ -147,8 +158,7 @@ class RangeMap {
typedef typename AddressToRangeMap::const_iterator MapConstIterator;
typedef typename AddressToRangeMap::value_type MapValue;
- // Whether overlapping ranges can be shrunk down.
- bool enable_shrink_down_;
+ MergeRangeStrategy merge_strategy_;
// Maps the high address of each range to a EntryType.
AddressToRangeMap map_;
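
The constructor change above keeps kExclusiveRanges as the default, so untouched callers still see overlapping stores rejected. A sketch of that default:

  #include <stdint.h>

  #include "processor/range_map-inl.h"

  void ExclusiveRangesExample() {
    google_breakpad::RangeMap<uint64_t, int> map;  // kExclusiveRanges
    bool stored = map.StoreRange(0, 100, 1);  // true: no conflict
    stored = map.StoreRange(50, 100, 2);      // false: overlap rejected,
                                              // the map is unchanged
  }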
diff --git a/src/processor/range_map_truncate_lower_unittest.cc b/src/processor/range_map_truncate_lower_unittest.cc
new file mode 100644
index 00000000..0499103c
--- /dev/null
+++ b/src/processor/range_map_truncate_lower_unittest.cc
@@ -0,0 +1,348 @@
+// Copyright (c) 2019, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <limits.h>
+#include <stdio.h>
+
+#include "processor/range_map-inl.h"
+
+#include "breakpad_googletest_includes.h"
+#include "common/scoped_ptr.h"
+#include "processor/linked_ptr.h"
+#include "processor/logging.h"
+
+namespace {
+
+using google_breakpad::linked_ptr;
+using google_breakpad::MergeRangeStrategy;
+using google_breakpad::RangeMap;
+using google_breakpad::scoped_ptr;
+
+// A CountedObject holds an int. A global (not thread safe!) count of
+// allocated CountedObjects is maintained to help test memory management.
+class CountedObject {
+ public:
+ explicit CountedObject(int id) : id_(id) { ++count_; }
+ ~CountedObject() { --count_; }
+
+ static int count() { return count_; }
+ int id() const { return id_; }
+
+ private:
+ static int count_;
+ int id_;
+};
+
+int CountedObject::count_;
+
+typedef int AddressType;
+typedef RangeMap<AddressType, linked_ptr<CountedObject>> TestMap;
+
+// Same range cannot be stored twice.
+TEST(RangeMapTruncateLower, SameRange) {
+ TestMap range_map;
+ range_map.SetMergeStrategy(MergeRangeStrategy::kTruncateLower);
+ linked_ptr<CountedObject> object_1(new CountedObject(1));
+ EXPECT_TRUE(
+ range_map.StoreRange(0 /* base address */, 100 /* size */, object_1));
+
+ // Same range cannot be stored twice.
+ linked_ptr<CountedObject> object_2(new CountedObject(2));
+ EXPECT_FALSE(
+ range_map.StoreRange(0 /* base address */, 100 /* size */, object_2));
+}
+
+// If a range is completely contained by another range, then the larger range
+// should be truncated.
+TEST(RangeMapTruncateLower, CompletelyContained) {
+ TestMap range_map;
+ range_map.SetMergeStrategy(MergeRangeStrategy::kTruncateLower);
+ // Larger range is added first.
+ linked_ptr<CountedObject> object_1(new CountedObject(1));
+ EXPECT_TRUE(
+ range_map.StoreRange(0 /* base address */, 100 /* size */, object_1));
+ // Smaller (contained) range is added second.
+ linked_ptr<CountedObject> object_2(new CountedObject(2));
+ EXPECT_TRUE(
+ range_map.StoreRange(10 /* base address */, 80 /* size */, object_2));
+ linked_ptr<CountedObject> object;
+ AddressType retrieved_base = AddressType();
+ AddressType retrieved_delta = AddressType();
+ AddressType retrieved_size = AddressType();
+ // The first range contains the second, so the first range should have been
+ // shrunk to [0, 10]. Range [90, 99] should be free.
+ EXPECT_FALSE(range_map.RetrieveRange(90, &object, &retrieved_base,
+ &retrieved_delta, &retrieved_size));
+ EXPECT_FALSE(range_map.RetrieveRange(99, &object, &retrieved_base,
+ &retrieved_delta, &retrieved_size));
+ EXPECT_TRUE(range_map.RetrieveRange(9, &object, &retrieved_base,
+ &retrieved_delta, &retrieved_size));
+ EXPECT_EQ(1, object->id());
+ EXPECT_EQ(0, retrieved_base);
+ EXPECT_EQ(0, retrieved_delta);
+ EXPECT_EQ(10, retrieved_size);
+ // Validate the properties of the smaller range (should be untouched).
+ EXPECT_TRUE(range_map.RetrieveRange(10, &object, &retrieved_base,
+ &retrieved_delta, &retrieved_size));
+ EXPECT_EQ(2, object->id());
+ EXPECT_EQ(10, retrieved_base);
+ EXPECT_EQ(0, retrieved_delta);
+ EXPECT_EQ(80, retrieved_size);
+}
+
+// Same as the previous test, however the larger range is added second.
+TEST(RangeMapTruncateLower, CompletelyContained_LargerAddedSecond) {
+ TestMap range_map;
+ range_map.SetMergeStrategy(MergeRangeStrategy::kTruncateLower);
+ // Smaller (contained) range is added first.
+ linked_ptr<CountedObject> object_1(new CountedObject(1));
+ EXPECT_TRUE(
+ range_map.StoreRange(10 /* base address */, 80 /* size */, object_1));
+ // Larger range is added second.
+ linked_ptr<CountedObject> object_2(new CountedObject(2));
+ EXPECT_TRUE(
+ range_map.StoreRange(0 /* base address */, 100 /* size */, object_2));
+ linked_ptr<CountedObject> object;
+ AddressType retrieved_base = AddressType();
+ AddressType retrieved_delta = AddressType();
+ AddressType retrieved_size = AddressType();
+ // The second range contains the first, so the second range should have been
+ // truncated to [0, 9]. Range [90, 99] should be free.
+ EXPECT_FALSE(range_map.RetrieveRange(90, &object, &retrieved_base,
+ &retrieved_delta, &retrieved_size));
+ EXPECT_FALSE(range_map.RetrieveRange(99, &object, &retrieved_base,
+ &retrieved_delta, &retrieved_size));
+ EXPECT_TRUE(range_map.RetrieveRange(9, &object, &retrieved_base,
+ &retrieved_delta, &retrieved_size));
+ EXPECT_EQ(2, object->id());
+ EXPECT_EQ(0, retrieved_base);
+ EXPECT_EQ(0, retrieved_delta);
+ EXPECT_EQ(10, retrieved_size);
+ // Validate the properties of the smaller range (should be untouched).
+ EXPECT_TRUE(range_map.RetrieveRange(10, &object, &retrieved_base,
+ &retrieved_delta, &retrieved_size));
+ EXPECT_EQ(1, object->id());
+ EXPECT_EQ(10, retrieved_base);
+ EXPECT_EQ(0, retrieved_delta);
+ EXPECT_EQ(80, retrieved_size);
+}
+
+TEST(RangeMapTruncateLower, PartialOverlap_AtBeginning) {
+ TestMap range_map;
+ range_map.SetMergeStrategy(MergeRangeStrategy::kTruncateLower);
+ linked_ptr<CountedObject> object_1(new CountedObject(1));
+ EXPECT_TRUE(
+ range_map.StoreRange(0 /* base address */, 100 /* size */, object_1));
+
+ // Partial overlap at the beginning of the new range.
+ linked_ptr<CountedObject> object_2(new CountedObject(2));
+ EXPECT_TRUE(
+ range_map.StoreRange(90 /* base address */, 110 /* size */, object_2));
+
+ linked_ptr<CountedObject> object;
+ AddressType retrieved_base = AddressType();
+ AddressType retrieved_delta = AddressType();
+ AddressType retrieved_size = AddressType();
+ // The first range should be truncated, so 99 should address the second range.
+ EXPECT_TRUE(range_map.RetrieveRange(99, &object, &retrieved_base,
+ &retrieved_delta, &retrieved_size));
+ EXPECT_EQ(2, object->id());
+ EXPECT_EQ(90, retrieved_base);
+ EXPECT_EQ(0, retrieved_delta);
+ EXPECT_EQ(110, retrieved_size);
+ // Validate the properties of the truncated range.
+ EXPECT_TRUE(range_map.RetrieveRange(89, &object, &retrieved_base,
+ &retrieved_delta, &retrieved_size));
+ EXPECT_EQ(1, object->id());
+ EXPECT_EQ(0, retrieved_base);
+ EXPECT_EQ(0, retrieved_delta);
+ EXPECT_EQ(90, retrieved_size);
+}
+
+TEST(RangeMapTruncateLower, PartialOverlap_AtEnd) {
+ TestMap range_map;
+ range_map.SetMergeStrategy(MergeRangeStrategy::kTruncateLower);
+ linked_ptr<CountedObject> object_1(new CountedObject(1));
+ EXPECT_TRUE(
+ range_map.StoreRange(50 /* base address */, 50 /* size */, object_1));
+
+ // Partial overlap at the end of the new range.
+ linked_ptr<CountedObject> object_2(new CountedObject(2));
+ EXPECT_TRUE(
+ range_map.StoreRange(0 /* base address */, 70 /* size */, object_2));
+
+ linked_ptr<CountedObject> object;
+ AddressType retrieved_base = AddressType();
+ AddressType retrieved_delta = AddressType();
+ AddressType retrieved_size = AddressType();
+ // The second range should be truncated so 69 addresses the first range.
+ EXPECT_TRUE(range_map.RetrieveRange(69, &object, &retrieved_base,
+ &retrieved_delta, &retrieved_size));
+ EXPECT_EQ(1, object->id());
+ EXPECT_EQ(50, retrieved_base);
+ EXPECT_EQ(0, retrieved_delta);
+ EXPECT_EQ(50, retrieved_size);
+ // Validate the properties of the truncated range.
+ EXPECT_TRUE(range_map.RetrieveRange(49, &object, &retrieved_base,
+ &retrieved_delta, &retrieved_size));
+ EXPECT_EQ(2, object->id());
+ EXPECT_EQ(0, retrieved_base);
+ EXPECT_EQ(0, retrieved_delta);
+ EXPECT_EQ(50, retrieved_size);
+}
+
+// A new range is overlapped at both ends. The new range and the range
+// that overlaps at the beginning should be truncated. The range that overlaps
+// at the end should be left untouched.
+TEST(RangeMapTruncateLower, OverlapAtBothEnds) {
+ TestMap range_map;
+ range_map.SetMergeStrategy(MergeRangeStrategy::kTruncateLower);
+ // This should overlap object_3 at the beginning.
+ linked_ptr<CountedObject> object_1(new CountedObject(1));
+ EXPECT_TRUE(
+ range_map.StoreRange(0 /* base address */, 100 /* size */, object_1));
+
+ // This should overlap object_3 at the end.
+ linked_ptr<CountedObject> object_2(new CountedObject(2));
+ EXPECT_TRUE(
+ range_map.StoreRange(100 /* base address */, 100 /* size */, object_2));
+
+ // This should be overlapped on both ends by object_1 and object_2.
+ linked_ptr<CountedObject> object_3(new CountedObject(3));
+ EXPECT_TRUE(
+ range_map.StoreRange(50 /* base address */, 100 /* size */, object_3));
+
+ linked_ptr<CountedObject> object;
+ AddressType retrieved_base = AddressType();
+ AddressType retrieved_delta = AddressType();
+ AddressType retrieved_size = AddressType();
+ // The first range should be truncated.
+ EXPECT_TRUE(range_map.RetrieveRange(0, &object, &retrieved_base,
+ &retrieved_delta, &retrieved_size));
+ EXPECT_EQ(1, object->id());
+ EXPECT_EQ(0, retrieved_base);
+ EXPECT_EQ(0, retrieved_delta);
+ EXPECT_EQ(50, retrieved_size);
+ // The second range should be intact.
+ EXPECT_TRUE(range_map.RetrieveRange(150, &object, &retrieved_base,
+ &retrieved_delta, &retrieved_size));
+ EXPECT_EQ(2, object->id());
+ EXPECT_EQ(100, retrieved_base);
+ EXPECT_EQ(0, retrieved_delta);
+ EXPECT_EQ(100, retrieved_size);
+ // The third range (in the middle) should be truncated.
+ EXPECT_TRUE(range_map.RetrieveRange(99, &object, &retrieved_base,
+ &retrieved_delta, &retrieved_size));
+ EXPECT_EQ(3, object->id());
+ EXPECT_EQ(50, retrieved_base);
+ EXPECT_EQ(0, retrieved_delta);
+ EXPECT_EQ(50, retrieved_size);
+}
+
+TEST(RangeMapTruncateLower, MultipleConflicts) {
+ TestMap range_map;
+ range_map.SetMergeStrategy(MergeRangeStrategy::kTruncateLower);
+ // This should overlap with object_3.
+ linked_ptr<CountedObject> object_1(new CountedObject(1));
+ EXPECT_TRUE(
+ range_map.StoreRange(10 /* base address */, 90 /* size */, object_1));
+
+ // This should also overlap with object_3 but after object_1.
+ linked_ptr<CountedObject> object_2(new CountedObject(2));
+ EXPECT_TRUE(
+ range_map.StoreRange(100 /* base address */, 100 /* size */, object_2));
+
+ // This should overlap both object_1 and object_2.
+ linked_ptr<CountedObject> object_3(new CountedObject(3));
+ EXPECT_TRUE(
+ range_map.StoreRange(0 /* base address */, 300 /* size */, object_3));
+
+ linked_ptr<CountedObject> object;
+ AddressType retrieved_base = AddressType();
+ AddressType retrieved_delta = AddressType();
+ AddressType retrieved_size = AddressType();
+ // The first range should be intact.
+ EXPECT_TRUE(range_map.RetrieveRange(99, &object, &retrieved_base,
+ &retrieved_delta, &retrieved_size));
+ EXPECT_EQ(1, object->id());
+ EXPECT_EQ(10, retrieved_base);
+ EXPECT_EQ(0, retrieved_delta);
+ EXPECT_EQ(90, retrieved_size);
+ // The second range should be intact.
+ EXPECT_TRUE(range_map.RetrieveRange(199, &object, &retrieved_base,
+ &retrieved_delta, &retrieved_size));
+ EXPECT_EQ(2, object->id());
+ EXPECT_EQ(100, retrieved_base);
+ EXPECT_EQ(0, retrieved_delta);
+ EXPECT_EQ(100, retrieved_size);
+ // The third range should be truncated.
+ EXPECT_TRUE(range_map.RetrieveRange(9, &object, &retrieved_base,
+ &retrieved_delta, &retrieved_size));
+ EXPECT_EQ(3, object->id());
+ EXPECT_EQ(0, retrieved_base);
+ EXPECT_EQ(0, retrieved_delta);
+ EXPECT_EQ(10, retrieved_size);
+}
+
+// Adding two ranges without overlap should succeed and the ranges should
+// be left intact.
+TEST(RangeMapTruncateLower, NoConflicts) {
+ TestMap range_map;
+ range_map.SetMergeStrategy(MergeRangeStrategy::kTruncateLower);
+ // Adding range 1.
+ linked_ptr<CountedObject> object_1(new CountedObject(1));
+ EXPECT_TRUE(
+ range_map.StoreRange(10 /* base address */, 90 /* size */, object_1));
+
+ // Adding range 2 - no overlap with range 1.
+ linked_ptr<CountedObject> object_2(new CountedObject(2));
+ EXPECT_TRUE(
+ range_map.StoreRange(110 /* base address */, 90 /* size */, object_2));
+
+ linked_ptr<CountedObject> object;
+ AddressType retrieved_base = AddressType();
+ AddressType retrieved_delta = AddressType();
+ AddressType retrieved_size = AddressType();
+ // The first range should be intact.
+ EXPECT_TRUE(range_map.RetrieveRange(99, &object, &retrieved_base,
+ &retrieved_delta, &retrieved_size));
+ EXPECT_EQ(1, object->id());
+ EXPECT_EQ(10, retrieved_base);
+ EXPECT_EQ(0, retrieved_delta);
+ EXPECT_EQ(90, retrieved_size);
+ // The second range should be intact.
+ EXPECT_TRUE(range_map.RetrieveRange(199, &object, &retrieved_base,
+ &retrieved_delta, &retrieved_size));
+ EXPECT_EQ(2, object->id());
+ EXPECT_EQ(110, retrieved_base);
+ EXPECT_EQ(0, retrieved_delta);
+ EXPECT_EQ(90, retrieved_size);
+}
+
+} // namespace
diff --git a/src/processor/range_map_shrink_down_unittest.cc b/src/processor/range_map_truncate_upper_unittest.cc
index 8dd0e709..e5248d63 100644
--- a/src/processor/range_map_shrink_down_unittest.cc
+++ b/src/processor/range_map_truncate_upper_unittest.cc
@@ -45,8 +45,9 @@
namespace {
using google_breakpad::linked_ptr;
-using google_breakpad::scoped_ptr;
+using google_breakpad::MergeRangeStrategy;
using google_breakpad::RangeMap;
+using google_breakpad::scoped_ptr;
// A CountedObject holds an int. A global (not thread safe!) count of
// allocated CountedObjects is maintained to help test memory management.
@@ -69,9 +70,9 @@ typedef int AddressType;
typedef RangeMap<AddressType, linked_ptr<CountedObject>> TestMap;
// Same range cannot be stored wice.
-TEST(RangeMap, TestShinkDown_SameRange) {
+TEST(RangeMapTruncateUpper, SameRange) {
TestMap range_map;
- range_map.SetEnableShrinkDown(true);
+ range_map.SetMergeStrategy(MergeRangeStrategy::kTruncateUpper);
linked_ptr<CountedObject> object_1(new CountedObject(1));
EXPECT_TRUE(range_map.StoreRange(0 /* base address */, 100 /* size */,
object_1));
@@ -84,9 +85,9 @@ TEST(RangeMap, TestShinkDown_SameRange) {
// If a range is completely contained by another range, then the larger range
// should be shrinked down.
-TEST(RangeMap, TestShinkDown_CompletelyContained) {
+TEST(RangeMapTruncateUpper, CompletelyContained) {
TestMap range_map;
- range_map.SetEnableShrinkDown(true);
+ range_map.SetMergeStrategy(MergeRangeStrategy::kTruncateUpper);
// Larger range is added first.
linked_ptr<CountedObject> object_1(new CountedObject(1));
EXPECT_TRUE(range_map.StoreRange(0 /* base address */, 100 /* size */,
@@ -121,9 +122,9 @@ TEST(RangeMap, TestShinkDown_CompletelyContained) {
}
// Same as the previous test, however the larger range is added second.
-TEST(RangeMap, TestShinkDown_CompletelyContained_LargerAddedSecond) {
+TEST(RangeMapTruncateUpper, CompletelyContained_LargerAddedSecond) {
TestMap range_map;
- range_map.SetEnableShrinkDown(true);
+ range_map.SetMergeStrategy(MergeRangeStrategy::kTruncateUpper);
// Smaller (contained) range is added first.
linked_ptr<CountedObject> object_1(new CountedObject(1));
EXPECT_TRUE(range_map.StoreRange(10 /* base address */, 80 /* size */,
@@ -157,9 +158,9 @@ TEST(RangeMap, TestShinkDown_CompletelyContained_LargerAddedSecond) {
EXPECT_EQ(80, retrieved_size);
}
-TEST(RangeMap, TestShinkDown_PartialOverlap_AtBeginning) {
+TEST(RangeMapTruncateUpper, PartialOverlap_AtBeginning) {
TestMap range_map;
- range_map.SetEnableShrinkDown(true);
+ range_map.SetMergeStrategy(MergeRangeStrategy::kTruncateUpper);
linked_ptr<CountedObject> object_1(new CountedObject(1));
EXPECT_TRUE(range_map.StoreRange(0 /* base address */, 100 /* size */,
object_1));
@@ -190,9 +191,9 @@ TEST(RangeMap, TestShinkDown_PartialOverlap_AtBeginning) {
EXPECT_EQ(100, retrieved_size);
}
-TEST(RangeMap, TestShinkDown_PartialOverlap_AtEnd) {
+TEST(RangeMapTruncateUpper, PartialOverlap_AtEnd) {
TestMap range_map;
- range_map.SetEnableShrinkDown(true);
+ range_map.SetMergeStrategy(MergeRangeStrategy::kTruncateUpper);
linked_ptr<CountedObject> object_1(new CountedObject(1));
EXPECT_TRUE(range_map.StoreRange(50 /* base address */, 50 /* size */,
object_1));
@@ -226,9 +227,9 @@ TEST(RangeMap, TestShinkDown_PartialOverlap_AtEnd) {
// A new range is overlapped at both ends. The new range and the range
// that overlaps at the end should be shrink. The range that overlaps at the
// beginning should be left untouched.
-TEST(RangeMap, TestShinkDown_OverlapAtBothEnds) {
+TEST(RangeMapTruncateUpper, OverlapAtBothEnds) {
TestMap range_map;
- range_map.SetEnableShrinkDown(true);
+ range_map.SetMergeStrategy(MergeRangeStrategy::kTruncateUpper);
// This should overlap object_3 at the beginning.
linked_ptr<CountedObject> object_1(new CountedObject(1));
EXPECT_TRUE(range_map.StoreRange(0 /* base address */, 100 /* size */,
@@ -271,9 +272,9 @@ TEST(RangeMap, TestShinkDown_OverlapAtBothEnds) {
EXPECT_EQ(50, retrieved_size);
}
-TEST(RangeMap, TestShinkDown_MultipleConflicts) {
+TEST(RangeMapTruncateUpper, MultipleConflicts) {
TestMap range_map;
- range_map.SetEnableShrinkDown(true);
+ range_map.SetMergeStrategy(MergeRangeStrategy::kTruncateUpper);
// This should overlap with object_3.
linked_ptr<CountedObject> object_1(new CountedObject(1));
EXPECT_TRUE(range_map.StoreRange(10 /* base address */, 90 /* size */,
@@ -319,9 +320,9 @@ TEST(RangeMap, TestShinkDown_MultipleConflicts) {
// Adding two ranges without overlap should succeed and the ranges should
// be left intact.
-TEST(RangeMap, TestShinkDown_NoConflicts) {
+TEST(RangeMapTruncateUpper, NoConflicts) {
TestMap range_map;
- range_map.SetEnableShrinkDown(true);
+ range_map.SetMergeStrategy(MergeRangeStrategy::kTruncateUpper);
// Adding range 1.
linked_ptr<CountedObject> object_1(new CountedObject(1));
EXPECT_TRUE(range_map.StoreRange(10 /* base address */, 90 /* size */,
diff --git a/src/processor/stackwalker_unittest_utils.h b/src/processor/stackwalker_unittest_utils.h
index d7f34755..3a92a5ea 100644
--- a/src/processor/stackwalker_unittest_utils.h
+++ b/src/processor/stackwalker_unittest_utils.h
@@ -168,11 +168,6 @@ class MockCodeModules: public google_breakpad::CodeModules {
return std::vector<google_breakpad::linked_ptr<const CodeModule> >();
}
- // Returns true, if module address range shrink is enabled.
- bool IsModuleShrinkEnabled() const {
- return false;
- }
-
private:
typedef std::vector<const MockCodeModule *> ModuleVector;
ModuleVector modules_;