// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// A bunch of threads repeatedly hash an array of ints protected by a
// spinlock.  If the spinlock is working properly, all elements of the
// array should be equal at the end of the test.

#include <cstdint>
#include <limits>
#include <random>
#include <thread>  // NOLINT(build/c++11)
#include <vector>

#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/internal/low_level_scheduling.h"
#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/internal/spinlock.h"
#include "absl/base/internal/sysinfo.h"
#include "absl/base/macros.h"
#include "absl/synchronization/blocking_counter.h"
#include "absl/synchronization/notification.h"

constexpr int32_t kNumThreads = 10;
constexpr int32_t kIters = 1000;

namespace absl {
namespace base_internal {

// This is defined outside of anonymous namespace so that it can be
// a friend of SpinLock to access protected methods for testing.
struct SpinLockTest {
  static uint32_t EncodeWaitCycles(int64_t wait_start_time,
                                   int64_t wait_end_time) {
    return SpinLock::EncodeWaitCycles(wait_start_time, wait_end_time);
  }
  static uint64_t DecodeWaitCycles(uint32_t lock_value) {
    return SpinLock::DecodeWaitCycles(lock_value);
  }
};

namespace {

static constexpr int kArrayLength = 10;
static uint32_t values[kArrayLength];

static SpinLock static_spinlock(base_internal::kLinkerInitialized);
static SpinLock static_cooperative_spinlock(
    base_internal::kLinkerInitialized,
    base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
static SpinLock static_noncooperative_spinlock(
    base_internal::kLinkerInitialized, base_internal::SCHEDULE_KERNEL_ONLY);

// Simple integer hash function based on the public domain lookup2 hash.
// http://burtleburtle.net/bob/c/lookup2.c
static uint32_t Hash32(uint32_t a, uint32_t c) {
  uint32_t b = 0x9e3779b9UL;  // The golden ratio; an arbitrary value.
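  // This is the mix() step from lookup2: nine subtract/XOR-shift lines that
  // avalanche a, b, and c together, so distinct (value, salt) pairs diverge
  // quickly and any lost update under the lock shows up as a mismatch.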
  a -= b; a -= c; a ^= (c >> 13);
  b -= c; b -= a; b ^= (a << 8);
  c -= a; c -= b; c ^= (b >> 13);
  a -= b; a -= c; a ^= (c >> 12);
  b -= c; b -= a; b ^= (a << 16);
  c -= a; c -= b; c ^= (b >> 5);
  a -= b; a -= c; a ^= (c >> 3);
  b -= c; b -= a; b ^= (a << 10);
  c -= a; c -= b; c ^= (b >> 15);
  return c;
}

static void TestFunction(int thread_salt, SpinLock* spinlock) {
  for (int i = 0; i < kIters; i++) {
    SpinLockHolder h(spinlock);
    for (int j = 0; j < kArrayLength; j++) {
      const int index = (j + thread_salt) % kArrayLength;
      values[index] = Hash32(values[index], thread_salt);
      std::this_thread::yield();
    }
  }
}

static void ThreadedTest(SpinLock* spinlock) {
  std::vector<std::thread> threads;
  for (int i = 0; i < kNumThreads; ++i) {
    threads.push_back(std::thread(TestFunction, i, spinlock));
  }
  for (auto& thread : threads) {
    thread.join();
  }

  SpinLockHolder h(spinlock);
  for (int i = 1; i < kArrayLength; i++) {
    EXPECT_EQ(values[0], values[i]);
  }
}

TEST(SpinLock, StackNonCooperativeDisablesScheduling) {
  SpinLock spinlock(base_internal::SCHEDULE_KERNEL_ONLY);
  spinlock.Lock();
  EXPECT_FALSE(base_internal::SchedulingGuard::ReschedulingIsAllowed());
  spinlock.Unlock();
}

TEST(SpinLock, StaticNonCooperativeDisablesScheduling) {
  static_noncooperative_spinlock.Lock();
  EXPECT_FALSE(base_internal::SchedulingGuard::ReschedulingIsAllowed());
  static_noncooperative_spinlock.Unlock();
}

TEST(SpinLock, WaitCyclesEncoding) {
  // These are implementation details not exported by SpinLock.
  const int kProfileTimestampShift = 7;
  const int kLockwordReservedShift = 3;
  const uint32_t kSpinLockSleeper = 8;

  // We should be able to encode up to (2^kMaxCyclesShift - 1) without
  // clamping, but the lower kProfileTimestampShift bits will be dropped.
  const int kMaxCyclesShift =
      32 - kLockwordReservedShift + kProfileTimestampShift;
  const uint64_t kMaxCycles = (int64_t{1} << kMaxCyclesShift) - 1;

  // These bits should be zero after encoding.
  const uint32_t kLockwordReservedMask = (1 << kLockwordReservedShift) - 1;

  // These bits are dropped when wait cycles are encoded.
  const uint64_t kProfileTimestampMask = (1 << kProfileTimestampShift) - 1;

  // Test a bunch of random values.
  std::default_random_engine generator;
  // Shift to avoid overflow below.
  std::uniform_int_distribution<uint64_t> time_distribution(
      0, std::numeric_limits<uint64_t>::max() >> 4);
  std::uniform_int_distribution<uint64_t> cycle_distribution(0, kMaxCycles);

  for (int i = 0; i < 100; i++) {
    int64_t start_time = time_distribution(generator);
    int64_t cycles = cycle_distribution(generator);
    int64_t end_time = start_time + cycles;
    uint32_t lock_value = SpinLockTest::EncodeWaitCycles(start_time, end_time);
    EXPECT_EQ(0, lock_value & kLockwordReservedMask);
    uint64_t decoded = SpinLockTest::DecodeWaitCycles(lock_value);
    EXPECT_EQ(0, decoded & kProfileTimestampMask);
    EXPECT_EQ(cycles & ~kProfileTimestampMask, decoded);
  }

  // Test corner cases.
  int64_t start_time = time_distribution(generator);
  EXPECT_EQ(kSpinLockSleeper,
            SpinLockTest::EncodeWaitCycles(start_time, start_time));
  EXPECT_EQ(0, SpinLockTest::DecodeWaitCycles(0));
  EXPECT_EQ(0, SpinLockTest::DecodeWaitCycles(kLockwordReservedMask));
  EXPECT_EQ(kMaxCycles & ~kProfileTimestampMask,
            SpinLockTest::DecodeWaitCycles(~kLockwordReservedMask));

  // Check that we cannot produce kSpinLockSleeper during encoding.
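  // A zero-length wait encodes to kSpinLockSleeper (verified above), so that
  // lock-word value is reserved to mean "wake waiters, no measurable
  // contention".  A real wait whose scaled duration would also map to
  // kSpinLockSleeper must therefore encode to something else; presumably the
  // encoder bumps such values to the next representable duration.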
  int64_t sleeper_cycles =
      kSpinLockSleeper << (kProfileTimestampShift - kLockwordReservedShift);
  uint32_t sleeper_value =
      SpinLockTest::EncodeWaitCycles(start_time, start_time + sleeper_cycles);
  EXPECT_NE(sleeper_value, kSpinLockSleeper);

  // Test clamping.
  uint32_t max_value =
      SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles);
  uint64_t max_value_decoded = SpinLockTest::DecodeWaitCycles(max_value);
  uint64_t expected_max_value_decoded = kMaxCycles & ~kProfileTimestampMask;
  EXPECT_EQ(expected_max_value_decoded, max_value_decoded);

  const int64_t step = (1 << kProfileTimestampShift);
  uint32_t after_max_value = SpinLockTest::EncodeWaitCycles(
      start_time, start_time + kMaxCycles + step);
  uint64_t after_max_value_decoded =
      SpinLockTest::DecodeWaitCycles(after_max_value);
  EXPECT_EQ(expected_max_value_decoded, after_max_value_decoded);

  uint32_t before_max_value = SpinLockTest::EncodeWaitCycles(
      start_time, start_time + kMaxCycles - step);
  uint64_t before_max_value_decoded =
      SpinLockTest::DecodeWaitCycles(before_max_value);
  EXPECT_GT(expected_max_value_decoded, before_max_value_decoded);
}

TEST(SpinLockWithThreads, StaticSpinLock) {
  ThreadedTest(&static_spinlock);
}

TEST(SpinLockWithThreads, StackSpinLock) {
  SpinLock spinlock;
  ThreadedTest(&spinlock);
}

TEST(SpinLockWithThreads, StackCooperativeSpinLock) {
  SpinLock spinlock(base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
  ThreadedTest(&spinlock);
}

TEST(SpinLockWithThreads, StackNonCooperativeSpinLock) {
  SpinLock spinlock(base_internal::SCHEDULE_KERNEL_ONLY);
  ThreadedTest(&spinlock);
}

TEST(SpinLockWithThreads, StaticCooperativeSpinLock) {
  ThreadedTest(&static_cooperative_spinlock);
}

TEST(SpinLockWithThreads, StaticNonCooperativeSpinLock) {
  ThreadedTest(&static_noncooperative_spinlock);
}

TEST(SpinLockWithThreads, DoesNotDeadlock) {
  struct Helper {
    static void NotifyThenLock(Notification* locked, SpinLock* spinlock,
                               BlockingCounter* b) {
      locked->WaitForNotification();  // Wait for LockThenWait() to hold
                                      // "spinlock".
      b->DecrementCount();
      SpinLockHolder l(spinlock);
    }

    static void LockThenWait(Notification* locked, SpinLock* spinlock,
                             BlockingCounter* b) {
      SpinLockHolder l(spinlock);
      locked->Notify();
      b->Wait();
    }

    static void DeadlockTest(SpinLock* spinlock, int num_spinners) {
      Notification locked;
      BlockingCounter counter(num_spinners);
      std::vector<std::thread> threads;
      threads.push_back(
          std::thread(Helper::LockThenWait, &locked, spinlock, &counter));
      for (int i = 0; i < num_spinners; ++i) {
        threads.push_back(
            std::thread(Helper::NotifyThenLock, &locked, spinlock, &counter));
      }
      for (auto& thread : threads) {
        thread.join();
      }
    }
  };

  SpinLock stack_cooperative_spinlock(
      base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
  SpinLock stack_noncooperative_spinlock(base_internal::SCHEDULE_KERNEL_ONLY);
  Helper::DeadlockTest(&stack_cooperative_spinlock,
                       base_internal::NumCPUs() * 2);
  Helper::DeadlockTest(&stack_noncooperative_spinlock,
                       base_internal::NumCPUs() * 2);
  Helper::DeadlockTest(&static_cooperative_spinlock,
                       base_internal::NumCPUs() * 2);
  Helper::DeadlockTest(&static_noncooperative_spinlock,
                       base_internal::NumCPUs() * 2);
}

}  // namespace
}  // namespace base_internal
}  // namespace absl