diff --git a/groups/bsl/bslma/bslma_testallocator.cpp b/groups/bsl/bslma/bslma_testallocator.cpp index 7fa26d2a7f..6b87464d73 100644 --- a/groups/bsl/bslma/bslma_testallocator.cpp +++ b/groups/bsl/bslma/bslma_testallocator.cpp @@ -382,33 +382,29 @@ TestAllocator::~TestAllocator() // MANIPULATORS void *TestAllocator::allocate(size_type size) { - BlockHeader *header_p = 0; - if (size > 0) { - // Perform this allocation before acquiring a mutex, so as to not hold - // onto the mutex over a potentially expensive operation. - const std::size_t totalBytes = - bsls::AlignmentUtil::roundUpToMaximalAlignment( - sizeof(BlockHeader) + size + k_SENTINEL_SIZE); - header_p = - static_cast(d_allocator_p->allocate(totalBytes)); - if (!header_p) { - // We could not satisfy this request. Throw 'std::bad_alloc'. - bsls::BslExceptionUtil::throwBadAlloc(); - } - } - + // All updates are protected by a mutex lock, so as to not interleave the + // action of multiple threads. Note that the lock is needed even for + // atomic variables, as concurrent writes to different statistics could put + // the variables in an inconsistent state. bsls::BslLockGuard guard(&d_lock); - // Increment the total number of allocations from this allocator. Use the - // result to generate the unique ID for this allocation, which is the - // number of allocations prior to this one. - bsls::Types::Int64 allocationId = d_numAllocations.addRelaxed(1) - 1; - + // The 'd_numAllocations', 'd_lastAllocatedNumBytes', and + // 'd_lastAllocatedAddress_p' stats are updated before attempting any + // allocations. These updates have caused confusion in cases where the + // allocation subsequently fails by means of an exception, but many + // existing tests work around the strange behavior; such work-arounds would + // cause test failures if the behavior were to change (i.e., if the + // "bug" were fixed). 
Confusing or not, therefore, this behavior is here + // to stay and is now documented in the interface for this component. + d_numAllocations.addRelaxed(1); d_lastAllocatedNumBytes.storeRelaxed( static_cast(size)); + d_lastAllocatedAddress_p.storeRelaxed(0); #ifdef BDE_BUILD_TARGET_EXC if (0 <= allocationLimit()) { + // An exception-test allocation limit has been set. Decrement the + // limit and throw a special exception if it goes negative. if (0 > d_allocationLimit.addRelaxed(-1)) { throw TestAllocatorException(static_cast(size)); } @@ -416,16 +412,36 @@ void *TestAllocator::allocate(size_type size) #endif if (0 == size) { - d_lastAllocatedAddress_p.storeRelaxed(0); return 0; // RETURN } + const std::size_t totalBytes = + bsls::AlignmentUtil::roundUpToMaximalAlignment( + sizeof(BlockHeader) + size + k_SENTINEL_SIZE); + + // Allocate a block from the upstream allocator. While it is not ideal to + // hold a mutex over a potentially expensive operation, there is no + // guarantee that the upstream allocator is thread safe, so certain uses of + // 'TestAllocator' might depend on this allocation taking place with the + // mutex lock held. + BlockHeader *header_p = + static_cast(d_allocator_p->allocate(totalBytes)); + if (!header_p) { + // We could not satisfy this request. Throw 'std::bad_alloc'. + bsls::BslExceptionUtil::throwBadAlloc(); + } + // Ensure 'allocate' returned maximally aligned memory. BSLS_ASSERT(isAligned(header_p, k_MAX_ALIGNMENT)); + // Use the number of allocations prior to this one as a unique ID for this + // allocation. + bsls::Types::Int64 allocationId = d_numAllocations.loadRelaxed() - 1; + + // Set the fields in the header block header_p->d_magicNumber = k_ALLOCATED_MEMORY_MAGIC_NUMBER; - // Insert into linked list of allocated blocks + // Insert into linked list of allocated blocks. 
header_p->d_next_p = 0; if (d_blockListHead_p) { header_p->d_prev_p = d_blockListTail_p; @@ -443,14 +459,6 @@ void *TestAllocator::allocate(size_type size) void *address = header_p + 1; - // Fill sentinel bytes before and after user segment with known values. - // Note that we don't initialize the user portion of the segment because - // that would undermine Purify's 'UMR: uninitialized memory read' checking. - std::memset(static_cast(address) - k_SENTINEL_SIZE, - k_SENTINEL_BYTE, k_SENTINEL_SIZE); - std::memset(static_cast(address) + size, - k_SENTINEL_BYTE, k_SENTINEL_SIZE); - // Update stats. The stats are updated as a group under a mutex lock, but // are atomic so that accessors can retrieve individual stats without // acquiring the mutex. @@ -468,6 +476,14 @@ void *TestAllocator::allocate(size_type size) d_lastAllocatedAddress_p.storeRelaxed(reinterpret_cast(address)); + // Fill sentinel bytes before and after user segment with known values. + // Note that we don't initialize the user portion of the segment because + // that would undermine Purify's 'UMR: uninitialized memory read' checking. + std::memset(static_cast(address) - k_SENTINEL_SIZE, + k_SENTINEL_BYTE, k_SENTINEL_SIZE); + std::memset(static_cast(address) + size, + k_SENTINEL_BYTE, k_SENTINEL_SIZE); + if (isVerbose()) { // In verbose mode, print a message to 'stdout' -- e.g., diff --git a/groups/bsl/bslma/bslma_testallocator.h b/groups/bsl/bslma/bslma_testallocator.h index e5deb235b6..c6a48404ee 100644 --- a/groups/bsl/bslma/bslma_testallocator.h +++ b/groups/bsl/bslma/bslma_testallocator.h @@ -484,7 +484,10 @@ class TestAllocator : public Allocator { // invoke the 'allocate' method of the allocator supplied at // construction, increment the number of currently (and cumulatively) // allocated blocks, and increase the number of currently allocated - // bytes by 'size'. Update all other fields accordingly. + // bytes by 'size'. 
Update all other fields accordingly; if the + // allocation fails via an exception, 'numAllocations()' is + // incremented, 'lastAllocatedNumBytes()' is set to 'size', and + // 'lastAllocatedAddress()' is set to 0. void deallocate(void *address); // Return the memory block at the specified 'address' back to this diff --git a/groups/bsl/bslma/bslma_testallocator.t.cpp b/groups/bsl/bslma/bslma_testallocator.t.cpp index 606138f4d2..f6b20ee2fa 100644 --- a/groups/bsl/bslma/bslma_testallocator.t.cpp +++ b/groups/bsl/bslma/bslma_testallocator.t.cpp @@ -1066,6 +1066,8 @@ int main(int argc, char *argv[]) ASSERTV(upstream.numBlocksTotal(), 0 < upstream.numBlocksTotal()); ASSERTV(iterations, 1 < iterations); +#else + if (verbose) printf("\nNo testing. Exceptions are not enabled.\n"); #endif // BDE_BUILD_TARGET_EXC } break; @@ -2136,6 +2138,8 @@ int main(int argc, char *argv[]) // for every allocation request that occurs after the number of // requests exceeds the current allocation limit. Also verify that // exception is never thrown for negative allocation limits. + // Finally, verify that nothing is allocated when an exception is + // thrown. 
// // Testing: // CONCERN: exception is thrown after allocation limit is exceeded @@ -2152,7 +2156,11 @@ const int LIMIT[] = { 0, 1, 4, 5, -1, -100 }; const int NUM_TEST = sizeof LIMIT / sizeof *LIMIT; - Obj mX(veryVeryVerbose); + Obj upstream("Upstream"); + Obj mX(veryVeryVerbose, &upstream); + + const bsls::Types::Int64 k_INITIAL_UPSTREAM_BLOCKS = + upstream.numBlocksInUse(); for (int ti = 0; ti < NUM_TEST; ++ti) { mX.setAllocationLimit(LIMIT[ti]); @@ -2161,18 +2169,37 @@ const bslma::Allocator::size_type SIZE = ai + 1; // alloc size varies for each test if (veryVerbose) { P_(ti); P_(ai); P_(SIZE); P(LIMIT[ti]); } + + const bsls::Types::Int64 NUM_ALLOCS = mX.numAllocations(); + try { void *p = mX.allocate(SIZE); + ASSERTV(ti, ai, NUM_ALLOCS + 1 == mX.numAllocations()); + ASSERTV(ti, ai, SIZE == mX.lastAllocatedNumBytes()); + ASSERTV(ti, ai, p == mX.lastAllocatedAddress()); mX.deallocate(p); + ASSERTV(ti, ai, LIMIT[ti] != ai); } catch (bslma::TestAllocatorException& e) { bslma::Allocator::size_type numBytes = e.numBytes(); if (veryVerbose) { printf("Caught: "); P(numBytes); } - LOOP2_ASSERT(ti, ai, LIMIT[ti] == ai); - LOOP2_ASSERT(ti, ai, SIZE == numBytes); - continue; + + ASSERTV(ti, ai, SIZE == numBytes); + ASSERTV(ti, ai, k_INITIAL_UPSTREAM_BLOCKS == + upstream.numBlocksInUse()); + ASSERTV(ti, ai, LIMIT[ti] == ai); + + // An allocation will increment 'numAllocations()' and set + // 'lastAllocatedNumBytes()' and + // 'lastAllocatedAddress()', even if the allocation fails + // by means of an exception. This behavior could be + // considered a bug, but there exist long-standing + // workarounds in test drivers, so the behavior is now + // enshrined. 
+ ASSERTV(ti, ai, NUM_ALLOCS + 1 == mX.numAllocations()); + ASSERTV(ti, ai, SIZE == mX.lastAllocatedNumBytes()); + ASSERTV(ti, ai, 0 == mX.lastAllocatedAddress()); } - LOOP2_ASSERT(ti, ai, LIMIT[ti] != ai); } } #else diff --git a/groups/bsl/bslstl/bslstl_deque.0.t.cpp b/groups/bsl/bslstl/bslstl_deque.0.t.cpp index d1cb1663fd..1dab013860 100644 --- a/groups/bsl/bslstl/bslstl_deque.0.t.cpp +++ b/groups/bsl/bslstl/bslstl_deque.0.t.cpp @@ -261,14 +261,16 @@ namespace { int testStatus = 0; -#ifndef BSLSTL_LIST_0T_AS_INCLUDE -void aSsErT(bool condition, const char *message, int line) BSLA_UNUSED; -#endif +// Special 'aSsErT' expands file name before calling aSsErTF function (below). +// This macro ensures that the correct file name is reported on an 'ASSERT' +// failure within a subordinate test file (e.g., 'bslstl_vector.1.t.cpp'). +#define aSsErT(COND, MSG, LINE) aSsErTF(COND, MSG, __FILE__, __LINE__) -void aSsErT(bool condition, const char *message, int line) +// Special version of 'aSsErT' taking a filename argument. +void aSsErTF(bool condition, const char *message, const char *file, int line) { if (condition) { - printf("Error " __FILE__ "(%d): %s (failed)\n", line, message); + printf("Error %s(%d): %s (failed)\n", file, line, message); if (0 <= testStatus && testStatus <= 100) { ++testStatus; diff --git a/groups/bsl/bslstl/bslstl_list.0.t.cpp b/groups/bsl/bslstl/bslstl_list.0.t.cpp index 079d424010..bc50ace5c2 100644 --- a/groups/bsl/bslstl/bslstl_list.0.t.cpp +++ b/groups/bsl/bslstl/bslstl_list.0.t.cpp @@ -265,14 +265,16 @@ namespace { int testStatus = 0; -#ifndef BSLSTL_LIST_0T_AS_INCLUDE -void aSsErT(bool condition, const char *message, int line) BSLA_UNUSED; -#endif +// Special 'aSsErT' expands file name before calling aSsErTF function (below). +// This macro ensures that the correct file name is reported on an 'ASSERT' +// failure within a subordinate test file (e.g., 'bslstl_vector.1.t.cpp'). 
+#define aSsErT(COND, MSG, LINE) aSsErTF(COND, MSG, __FILE__, __LINE__) -void aSsErT(bool condition, const char *message, int line) +// Special version of 'aSsErT' taking a filename argument. +void aSsErTF(bool condition, const char *message, const char *file, int line) { if (condition) { - printf("Error " __FILE__ "(%d): %s (failed)\n", line, message); + printf("Error %s(%d): %s (failed)\n", file, line, message); if (0 <= testStatus && testStatus <= 100) { ++testStatus; diff --git a/groups/bsl/bslstl/bslstl_vector.0.t.cpp b/groups/bsl/bslstl/bslstl_vector.0.t.cpp index 50ea69225e..9680be7d4c 100644 --- a/groups/bsl/bslstl/bslstl_vector.0.t.cpp +++ b/groups/bsl/bslstl/bslstl_vector.0.t.cpp @@ -258,14 +258,16 @@ namespace { int testStatus = 0; -#ifndef BSLSTL_LIST_0T_AS_INCLUDE -void aSsErT(bool condition, const char *message, int line) BSLA_UNUSED; -#endif +// Special 'aSsErT' expands file name before calling aSsErTF function (below). +// This macro ensures that the correct file name is reported on an 'ASSERT' +// failure within a subordinate test file (e.g., 'bslstl_vector.1.t.cpp'). +#define aSsErT(COND, MSG, LINE) aSsErTF(COND, MSG, __FILE__, __LINE__) -void aSsErT(bool condition, const char *message, int line) +// Special version of 'aSsErT' taking a filename argument. +void aSsErTF(bool condition, const char *message, const char *file, int line) { if (condition) { - printf("Error " __FILE__ "(%d): %s (failed)\n", line, message); + printf("Error %s(%d): %s (failed)\n", file, line, message); if (0 <= testStatus && testStatus <= 100) { ++testStatus;