@@ -0,0 +1,130 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/support/stack_lockfree.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <grpc/support/port_platform.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/atm.h>
+#include <grpc/support/log.h>
+
+/* The lockfree node structure is a single architecture-level
+   word that allows for an atomic CAS to set it up. */
+struct lockfree_node_contents {
+  /* next thing to look at. Actual index for head, next index otherwise */
+  gpr_uint16 index;
+#ifdef GPR_ARCH_64
+  gpr_uint16 pad;
+  gpr_uint32 aba_ctr;
+#else
+#ifdef GPR_ARCH_32
+  gpr_uint16 aba_ctr;
+#else
+#error Unsupported bit width architecture
+#endif
+#endif
+};
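+/* The aba_ctr is advanced on every push of an entry, so a thread holding a
+   stale head value (same index but an older counter) fails its CAS if that
+   entry was popped and pushed again in the meantime (the ABA hazard). */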
+
+/* Use a union to make sure that these are in the same bits as an atm word */
+typedef union lockfree_node {
+  gpr_atm atm;
+  struct lockfree_node_contents contents;
+} lockfree_node;
+
+#define ENTRY_ALIGNMENT_BITS 3 /* make sure that entries are aligned to 8 bytes */
+#define INVALID_ENTRY_INDEX ((1<<16)-1) /* reserve this entry as invalid */
+
+struct gpr_stack_lockfree {
+  lockfree_node *entries;
+  lockfree_node head; /* An atomic entry describing curr head */
+};
+
+gpr_stack_lockfree *gpr_stack_lockfree_create(int entries) {
+  gpr_stack_lockfree *stack;
+  stack = gpr_malloc(sizeof(*stack));
+  /* Since we only allocate 16 bits to represent an entry number,
+   * make sure that we are within the desired range */
+  /* Reserve the highest entry number as a dummy */
+  GPR_ASSERT(entries < INVALID_ENTRY_INDEX);
+  stack->entries = gpr_malloc_aligned(entries * sizeof(stack->entries[0]),
+                                      ENTRY_ALIGNMENT_BITS);
+  /* Clear out all entries */
+  memset(stack->entries, 0, entries * sizeof(stack->entries[0]));
+
+  /* Point the head at reserved dummy entry */
+  stack->head.contents.index = INVALID_ENTRY_INDEX;
+  return stack;
+}
+
+void gpr_stack_lockfree_destroy(gpr_stack_lockfree *stack) {
+  gpr_free_aligned(stack->entries);
+  gpr_free(stack);
+}
+
+void gpr_stack_lockfree_push(gpr_stack_lockfree *stack, int entry) {
+  lockfree_node head;
+  lockfree_node newhead;
+
+  /* First fill in the entry's index and aba ctr for new head */
+  newhead.contents.index = (gpr_uint16)entry;
+  /* Also post-increment the aba_ctr */
+  newhead.contents.aba_ctr = stack->entries[entry].contents.aba_ctr++;
+
+  do {
+    /* Atomically get the existing head value for use */
+    head.atm = gpr_atm_no_barrier_load(&(stack->head.atm));
+    /* Point to it */
+    stack->entries[entry].contents.index = head.contents.index;
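+    /* If another push or pop changed the head between the load above and
+       the CAS below, the CAS fails and we retry with the fresh head value */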
+  } while (!gpr_atm_rel_cas(&(stack->head.atm),
+                            head.atm, newhead.atm));
+  /* Use rel_cas above so that the entry's next-index write is visible to
+     any thread that later pops this entry */
+}
+
+int gpr_stack_lockfree_pop(gpr_stack_lockfree *stack) {
+  lockfree_node head;
+  lockfree_node newhead;
+  do {
+    head.atm = gpr_atm_acq_load(&(stack->head.atm));
+    if (head.contents.index == INVALID_ENTRY_INDEX) {
+      return -1;
+    }
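+    /* The acq_load above pairs with the rel_cas in push, so the next index
+       written into this entry by the pushing thread is visible here */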
+    newhead.contents.index = stack->entries[head.contents.index].contents.index;
+
+  } while (!gpr_atm_no_barrier_cas(&(stack->head.atm),
+                                   head.atm,
+                                   newhead.atm));
+  return newhead.contents.index;
+}
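A minimal usage sketch (not part of the change above, using only the functions
defined in this file): the stack stores small caller-chosen integer indices,
typically indices into a caller-owned array of work items, and pop returns -1
when the stack is empty.

  #include "src/core/support/stack_lockfree.h"

  #include <grpc/support/log.h>

  int main(void) {
    /* Room for entry indices 0..99; index 65535 stays reserved as invalid */
    gpr_stack_lockfree *stack = gpr_stack_lockfree_create(100);

    /* Push two indices; the caller maps them to its own data elsewhere */
    gpr_stack_lockfree_push(stack, 3);
    gpr_stack_lockfree_push(stack, 7);

    GPR_ASSERT(gpr_stack_lockfree_pop(stack) == 7);  /* LIFO order */
    GPR_ASSERT(gpr_stack_lockfree_pop(stack) == 3);
    GPR_ASSERT(gpr_stack_lockfree_pop(stack) == -1); /* empty */

    gpr_stack_lockfree_destroy(stack);
    return 0;
  }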