stack_lockfree.cc (4.3 KB)
  1. /*
  2. *
  3. * Copyright 2015 gRPC authors.
  4. *
  5. * Licensed under the Apache License, Version 2.0 (the "License");
  6. * you may not use this file except in compliance with the License.
  7. * You may obtain a copy of the License at
  8. *
  9. * http://www.apache.org/licenses/LICENSE-2.0
  10. *
  11. * Unless required by applicable law or agreed to in writing, software
  12. * distributed under the License is distributed on an "AS IS" BASIS,
  13. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  14. * See the License for the specific language governing permissions and
  15. * limitations under the License.
  16. *
  17. */
  18. #include "src/core/lib/support/stack_lockfree.h"
  19. #include <stdlib.h>
  20. #include <string.h>
  21. #include <grpc/support/alloc.h>
  22. #include <grpc/support/atm.h>
  23. #include <grpc/support/log.h>
  24. #include <grpc/support/port_platform.h>
/* The lockfree node structure is a single architecture-level
   word that allows for an atomic CAS to set it up. */
struct lockfree_node_contents {
  /* next thing to look at. Actual index for head, next index otherwise */
  uint16_t index;
#ifdef GPR_ARCH_64
  /* unused filler; keeps aba_ctr at a fixed offset within the 64-bit word */
  uint16_t pad;
  /* bumped on every push of this entry so a recycled index cannot be
     mistaken for the old one (classic ABA defense for CAS-based stacks) */
  uint32_t aba_ctr;
#else
#ifdef GPR_ARCH_32
  /* 16-bit ABA counter: the whole node must fit a 32-bit atomic word */
  uint16_t aba_ctr;
#else
#error Unsupported bit width architecture
#endif
#endif
};

/* Use a union to make sure that these are in the same bits as an atm word */
typedef union lockfree_node {
  gpr_atm atm;                            /* whole-node view for atomic ops */
  struct lockfree_node_contents contents; /* field-wise view of the same bits */
} lockfree_node;
/* make sure that entries aligned to 8-bytes */
#define ENTRY_ALIGNMENT_BITS 3
/* reserve this entry as invalid */
#define INVALID_ENTRY_INDEX ((1 << 16) - 1)

/* A lock-free stack of integer entry numbers in [0, INVALID_ENTRY_INDEX).
   entries[i] holds the "next" link (and ABA counter) for entry i. */
struct gpr_stack_lockfree {
  lockfree_node* entries; /* one node per pushable entry number */
  lockfree_node head;     /* An atomic entry describing curr head */
};
  54. gpr_stack_lockfree* gpr_stack_lockfree_create(size_t entries) {
  55. gpr_stack_lockfree* stack;
  56. stack = (gpr_stack_lockfree*)gpr_malloc(sizeof(*stack));
  57. /* Since we only allocate 16 bits to represent an entry number,
  58. * make sure that we are within the desired range */
  59. /* Reserve the highest entry number as a dummy */
  60. GPR_ASSERT(entries < INVALID_ENTRY_INDEX);
  61. stack->entries = (lockfree_node*)gpr_malloc_aligned(
  62. entries * sizeof(stack->entries[0]), ENTRY_ALIGNMENT_BITS);
  63. /* Clear out all entries */
  64. memset(stack->entries, 0, entries * sizeof(stack->entries[0]));
  65. memset(&stack->head, 0, sizeof(stack->head));
  66. GPR_ASSERT(sizeof(stack->entries->atm) == sizeof(stack->entries->contents));
  67. /* Point the head at reserved dummy entry */
  68. stack->head.contents.index = INVALID_ENTRY_INDEX;
  69. /* Fill in the pad and aba_ctr to avoid confusing memcheck tools */
  70. #ifdef GPR_ARCH_64
  71. stack->head.contents.pad = 0;
  72. #endif
  73. stack->head.contents.aba_ctr = 0;
  74. return stack;
  75. }
  76. void gpr_stack_lockfree_destroy(gpr_stack_lockfree* stack) {
  77. gpr_free_aligned(stack->entries);
  78. gpr_free(stack);
  79. }
  80. int gpr_stack_lockfree_push(gpr_stack_lockfree* stack, int entry) {
  81. lockfree_node head;
  82. lockfree_node newhead;
  83. lockfree_node curent;
  84. lockfree_node newent;
  85. /* First fill in the entry's index and aba ctr for new head */
  86. newhead.contents.index = (uint16_t)entry;
  87. #ifdef GPR_ARCH_64
  88. /* Fill in the pad to avoid confusing memcheck tools */
  89. newhead.contents.pad = 0;
  90. #endif
  91. /* Also post-increment the aba_ctr */
  92. curent.atm = gpr_atm_no_barrier_load(&stack->entries[entry].atm);
  93. newhead.contents.aba_ctr = ++curent.contents.aba_ctr;
  94. gpr_atm_no_barrier_store(&stack->entries[entry].atm, curent.atm);
  95. do {
  96. /* Atomically get the existing head value for use */
  97. head.atm = gpr_atm_no_barrier_load(&(stack->head.atm));
  98. /* Point to it */
  99. newent.atm = gpr_atm_no_barrier_load(&stack->entries[entry].atm);
  100. newent.contents.index = head.contents.index;
  101. gpr_atm_no_barrier_store(&stack->entries[entry].atm, newent.atm);
  102. } while (!gpr_atm_rel_cas(&(stack->head.atm), head.atm, newhead.atm));
  103. /* Use rel_cas above to make sure that entry index is set properly */
  104. return head.contents.index == INVALID_ENTRY_INDEX;
  105. }
  106. int gpr_stack_lockfree_pop(gpr_stack_lockfree* stack) {
  107. lockfree_node head;
  108. lockfree_node newhead;
  109. do {
  110. head.atm = gpr_atm_acq_load(&(stack->head.atm));
  111. if (head.contents.index == INVALID_ENTRY_INDEX) {
  112. return -1;
  113. }
  114. newhead.atm =
  115. gpr_atm_no_barrier_load(&(stack->entries[head.contents.index].atm));
  116. } while (!gpr_atm_no_barrier_cas(&(stack->head.atm), head.atm, newhead.atm));
  117. return head.contents.index;
  118. }