
Merge github.com:grpc/grpc into esan

Craig Tiller 9 years ago
parent
commit
5f17aaea1b
40 changed files with 1821 additions and 508 deletions
  1. include/grpc/census.h (+33 -49)
  2. package.xml (+19 -4)
  3. src/core/census/context.c (+64 -85)
  4. src/csharp/Grpc.Core/Internal/AsyncCallServer.cs (+0 -10)
  5. src/csharp/Grpc.IntegrationTesting/InteropClientServerTest.cs (+1 -3)
  6. src/csharp/Grpc.Tools.nuspec (+14 -3)
  7. src/csharp/build_packages.bat (+10 -7)
  8. src/php/README.md (+0 -36)
  9. src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi (+5 -4)
  10. src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi (+8 -4)
  11. src/python/grpcio/grpc/_cython/_cygrpc/server.pxd.pxi (+2 -1)
  12. src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi (+13 -10)
  13. src/python/grpcio/grpc/_cython/imports.generated.h (+2 -2)
  14. src/ruby/ext/grpc/rb_grpc_imports.generated.h (+2 -2)
  15. templates/README.md (+69 -42)
  16. templates/package.xml.template (+19 -4)
  17. templates/src/csharp/build_packages.bat.template (+10 -7)
  18. test/core/census/context_test.c (+71 -66)
  19. test/core/iomgr/tcp_client_posix_test.c (+0 -93)
  20. test/cpp/end2end/end2end_test.cc (+9 -4)
  21. test/cpp/interop/metrics_client.cc (+28 -15)
  22. test/cpp/util/metrics_server.cc (+1 -1)
  23. test/cpp/util/test_credentials_provider.cc (+56 -26)
  24. test/cpp/util/test_credentials_provider.h (+10 -9)
  25. test/distrib/csharp/DistribTest.sln (+6 -0)
  26. test/distrib/csharp/DistribTest/DistribTest.csproj (+20 -0)
  27. test/distrib/csharp/run_distrib_test.bat (+49 -0)
  28. test/distrib/csharp/run_distrib_test.sh (+1 -3)
  29. test/distrib/csharp/update_version.sh (+9 -1)
  30. tools/dockerfile/grpc_interop_stress_cxx/Dockerfile (+5 -0)
  31. tools/dockerfile/grpc_interop_stress_cxx/build_interop_stress.sh (+1 -1)
  32. tools/gcp/stress_test/run_client.py (+187 -0)
  33. tools/gcp/stress_test/run_server.py (+120 -0)
  34. tools/gcp/stress_test/stress_test_utils.py (+197 -0)
  35. tools/gcp/utils/big_query_utils.py (+140 -0)
  36. tools/gcp/utils/kubernetes_api.py (+68 -15)
  37. tools/jenkins/build_interop_stress_image.sh (+3 -0)
  38. tools/run_tests/distribtest_targets.py (+11 -0)
  39. tools/run_tests/jobset.py (+2 -1)
  40. tools/run_tests/stress_test/run_stress_tests_on_gke.py (+556 -0)

+ 33 - 49
include/grpc/census.h

@@ -80,18 +80,18 @@ CENSUSAPI int census_enabled(void);
   metrics will be recorded. Keys are unique within a context. */
 typedef struct census_context census_context;
 
-/* A tag is a key:value pair. The key is a non-empty, printable (UTF-8
-   encoded), nil-terminated string. The value is a binary string, that may be
-   printable. There are limits on the sizes of both keys and values (see
-   CENSUS_MAX_TAG_KB_LEN definition below), and the number of tags that can be
-   propagated (CENSUS_MAX_PROPAGATED_TAGS). Users should also remember that
-   some systems may have limits on, e.g., the number of bytes that can be
-   transmitted as metadata, and that larger tags means more memory consumed
-   and time in processing. */
+/* A tag is a key:value pair. Both keys and values are nil-terminated strings,
+   containing printable ASCII characters (decimal 32-126). Keys must be at
+   least one character in length. Both keys and values can have at most
+   CENSUS_MAX_TAG_KV_LEN characters (including the terminating nil). The
+   maximum number of tags that can be propagated is
+   CENSUS_MAX_PROPAGATED_TAGS. Users should also remember that some systems
+   may have limits on, e.g., the number of bytes that can be transmitted as
+   metadata, and that larger tags mean more memory consumed and time in
+   processing. */
 typedef struct {
   const char *key;
   const char *value;
-  size_t value_len;
   uint8_t flags;
 } census_tag;
 
@@ -103,28 +103,25 @@ typedef struct {
 /* Tag flags. */
 #define CENSUS_TAG_PROPAGATE 1 /* Tag should be propagated over RPC */
 #define CENSUS_TAG_STATS 2     /* Tag will be used for statistics aggregation */
-#define CENSUS_TAG_BINARY 4    /* Tag value is not printable */
-#define CENSUS_TAG_RESERVED 8  /* Reserved for internal use. */
-/* Flag values 8,16,32,64,128 are reserved for future/internal use. Clients
+#define CENSUS_TAG_RESERVED 4  /* Reserved for internal use. */
+/* Flag values 4,8,16,32,64,128 are reserved for future/internal use. Clients
    should not use or rely on their values. */
 
 #define CENSUS_TAG_IS_PROPAGATED(flags) (flags & CENSUS_TAG_PROPAGATE)
 #define CENSUS_TAG_IS_STATS(flags) (flags & CENSUS_TAG_STATS)
-#define CENSUS_TAG_IS_BINARY(flags) (flags & CENSUS_TAG_BINARY)
 
 /* An instance of this structure is kept by every context, and records the
    basic information associated with the creation of that context. */
 typedef struct {
-  int n_propagated_tags;        /* number of propagated printable tags */
-  int n_propagated_binary_tags; /* number of propagated binary tags */
-  int n_local_tags;             /* number of non-propagated (local) tags */
-  int n_deleted_tags;           /* number of tags that were deleted */
-  int n_added_tags;             /* number of tags that were added */
-  int n_modified_tags;          /* number of tags that were modified */
-  int n_invalid_tags;           /* number of tags with bad keys or values (e.g.
-                                   longer than CENSUS_MAX_TAG_KV_LEN) */
-  int n_ignored_tags;           /* number of tags ignored because of
-                                   CENSUS_MAX_PROPAGATED_TAGS limit. */
+  int n_propagated_tags; /* number of propagated tags */
+  int n_local_tags;      /* number of non-propagated (local) tags */
+  int n_deleted_tags;    /* number of tags that were deleted */
+  int n_added_tags;      /* number of tags that were added */
+  int n_modified_tags;   /* number of tags that were modified */
+  int n_invalid_tags;    /* number of tags with bad keys or values (e.g.
+                            longer than CENSUS_MAX_TAG_KV_LEN) */
+  int n_ignored_tags;    /* number of tags ignored because of
+                            CENSUS_MAX_PROPAGATED_TAGS limit. */
 } census_context_status;
 
 /* Create a new context, adding and removing tags from an existing context.
@@ -132,10 +129,10 @@ typedef struct {
    to add as many tags in a single operation as is practical for the client.
    @param base Base context to build upon. Can be NULL.
    @param tags A set of tags to be added/changed/deleted. Tags with keys that
-   are in 'tags', but not 'base', are added to the tag set. Keys that are in
+   are in 'tags', but not 'base', are added to the context. Keys that are in
    both 'tags' and 'base' will have their value/flags modified. Tags with keys
-   in both, but with NULL or zero-length values, will be deleted from the tag
-   set. Tags with invalid (too long or short) keys or values will be ignored.
+   in both, but with NULL values, will be deleted from the context. Tags with
+   invalid (too long or short) keys or values will be ignored.
    If adding a tag will result in more than CENSUS_MAX_PROPAGATED_TAGS in either
    binary or non-binary tags, they will be ignored, as will deletions of
    tags that don't exist.
@@ -185,32 +182,19 @@ CENSUSAPI int census_context_get_tag(const census_context *context,
    for use by RPC systems only, for purposes of transmitting/receiving contexts.
    */
 
-/* Encode a context into a buffer. The propagated tags are encoded into the
-   buffer in two regions: one for printable tags, and one for binary tags.
+/* Encode a context into a buffer.
    @param context context to be encoded
-   @param buffer pointer to buffer. This address will be used to encode the
-                 printable tags.
+   @param buffer buffer into which the context will be encoded.
    @param buf_size number of available bytes in buffer.
-   @param print_buf_size Will be set to the number of bytes consumed by
-                         printable tags.
-   @param bin_buf_size Will be set to the number of bytes used to encode the
-                       binary tags.
-   @return A pointer to the binary tag's encoded, or NULL if the buffer was
-           insufficiently large to hold the encoded tags. Thus, if successful,
-           printable tags are encoded into
-           [buffer, buffer + *print_buf_size) and binary tags into
-           [returned-ptr, returned-ptr + *bin_buf_size) (and the returned
-           pointer should be buffer + *print_buf_size) */
-CENSUSAPI char *census_context_encode(const census_context *context,
-                                      char *buffer, size_t buf_size,
-                                      size_t *print_buf_size,
-                                      size_t *bin_buf_size);
-
-/* Decode context buffers encoded with census_context_encode(). Returns NULL
+   @return The number of buffer bytes consumed for the encoded context, or
+           zero if the buffer was of insufficient size. */
+CENSUSAPI size_t census_context_encode(const census_context *context,
+                                       char *buffer, size_t buf_size);
+
+/* Decode a context buffer encoded with census_context_encode(). Returns NULL
    if there is an error in parsing either buffer. */
-CENSUSAPI census_context *census_context_decode(const char *buffer, size_t size,
-                                                const char *bin_buffer,
-                                                size_t bin_size);
+CENSUSAPI census_context *census_context_decode(const char *buffer,
+                                                size_t size);
 
 /* Distributed traces can have a number of options. */
 enum census_trace_mask_values {
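
The net effect of these header changes: the printable/binary tag split is gone, tag values are plain nil-terminated strings with no separate `value_len`, and encode/decode operate on a single buffer. A minimal usage sketch of the revised API (the tag keys/values and buffer size are illustrative, error handling elided):

```c
#include <stddef.h>
#include <grpc/census.h>

void census_roundtrip_example(void) {
  /* Keys and values are printable, nil-terminated strings; there is no
     value_len field any more. */
  census_tag tags[] = {
      {"service", "math", CENSUS_TAG_PROPAGATE},
      {"caller", "test-client", CENSUS_TAG_PROPAGATE | CENSUS_TAG_STATS}};
  census_context *ctx = census_context_create(NULL, tags, 2, NULL);

  /* census_context_encode now returns the number of bytes consumed, or 0
     if the buffer is too small; there is no separate binary region. */
  char buf[256];
  size_t used = census_context_encode(ctx, buf, sizeof(buf));
  if (used != 0) {
    census_context *decoded = census_context_decode(buf, used);
    census_context_destroy(decoded);
  }
  census_context_destroy(ctx);
}
```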

+ 19 - 4
package.xml

@@ -10,11 +10,11 @@
   <email>grpc-packages@google.com</email>
   <active>yes</active>
  </lead>
- <date>2016-02-24</date>
+ <date>2016-03-01</date>
  <time>16:06:07</time>
  <version>
-  <release>0.8.0</release>
-  <api>0.8.0</api>
+  <release>0.14.0</release>
+  <api>0.14.0</api>
  </version>
  <stability>
   <release>beta</release>
@@ -22,7 +22,7 @@
  </stability>
  <license>BSD</license>
  <notes>
-- Simplify gRPC PHP installation #4517
+- Increase unit test code coverage #5225
  </notes>
  <contents>
   <dir baseinstalldir="/" name="/">
@@ -969,5 +969,20 @@ Update to wrap gRPC C Core version 0.10.0
 - Simplify gRPC PHP installation #4517
    </notes>
   </release>
+  <release>
+   <version>
+    <release>0.14.0</release>
+    <api>0.14.0</api>
+   </version>
+   <stability>
+    <release>beta</release>
+    <api>beta</api>
+   </stability>
+   <date>2016-03-01</date>
+   <license>BSD</license>
+   <notes>
+- Increase unit test code coverage #5225
+   </notes>
+  </release>
  </changelog>
 </package>

+ 64 - 85
src/core/census/context.c

@@ -60,10 +60,10 @@
 //   limit of 255 for both CENSUS_MAX_TAG_KV_LEN and CENSUS_MAX_PROPAGATED_TAGS.
 // * Keep all tag information (keys/values/flags) in a single memory buffer,
 //   that can be directly copied to the wire.
-// * Binary tags share the same structure as, but are encoded separately from,
-//   non-binary tags. This is primarily because non-binary tags are far more
-//   likely to be repeated across multiple RPC calls, so are more efficiently
-//   cached and compressed in any metadata schemes.
+
+// min and max valid chars in tag keys and values. All printable ASCII is OK.
+#define MIN_VALID_TAG_CHAR 32   // ' '
+#define MAX_VALID_TAG_CHAR 126  // '~'
 
 // Structure representing a set of tags. Essentially a count of number of tags
 // present, and pointer to a chunk of memory that contains the per-tag details.
@@ -77,7 +77,7 @@ struct tag_set {
   char *kvm;        // key/value memory. Consists of repeated entries of:
   //   Offset  Size  Description
   //     0      1    Key length, including trailing 0. (K)
-  //     1      1    Value length. (V)
+  //     1      1    Value length, including trailing 0. (V)
   //     2      1    Flags
   //     3      K    Key bytes
   //     3 + K  V    Value bytes
@@ -108,19 +108,36 @@ struct raw_tag {
 #define CENSUS_TAG_DELETED CENSUS_TAG_RESERVED
 #define CENSUS_TAG_IS_DELETED(flags) (flags & CENSUS_TAG_DELETED)
 
-// Primary (external) representation of a context. Composed of 3 underlying
-// tag_set structs, one for each of the binary/printable propagated tags, and
-// one for everything else. This is to efficiently support tag
-// encoding/decoding.
+// Primary representation of a context. Composed of 2 underlying tag_set
+// structs, one each for propagated and local (non-propagated) tags. This is
+// to efficiently support tag encoding/decoding.
+// TODO(aveitch): need to add tracing id's/structure.
 struct census_context {
-  struct tag_set tags[3];
+  struct tag_set tags[2];
   census_context_status status;
 };
 
 // Indices into the tags member of census_context
 #define PROPAGATED_TAGS 0
-#define PROPAGATED_BINARY_TAGS 1
-#define LOCAL_TAGS 2
+#define LOCAL_TAGS 1
+
+// Validate (check all characters are in range and size is less than limit) a
+// key or value string. Returns 0 if the string is invalid, or the length
+// (including terminator) if valid.
+static size_t validate_tag(const char *kv) {
+  size_t len = 1;
+  char ch;
+  while ((ch = *kv++) != 0) {
+    if (ch < MIN_VALID_TAG_CHAR || ch > MAX_VALID_TAG_CHAR) {
+      return 0;
+    }
+    len++;
+  }
+  if (len > CENSUS_MAX_TAG_KV_LEN) {
+    return 0;
+  }
+  return len;
+}
 
 // Extract a raw tag given a pointer (raw) to the tag header. Allow for some
 // extra bytes in the tag header (see encode/decode functions for usage: this
@@ -166,9 +183,7 @@ static bool context_delete_tag(census_context *context, const census_tag *tag,
                                size_t key_len) {
   return (
       tag_set_delete_tag(&context->tags[LOCAL_TAGS], tag->key, key_len) ||
-      tag_set_delete_tag(&context->tags[PROPAGATED_TAGS], tag->key, key_len) ||
-      tag_set_delete_tag(&context->tags[PROPAGATED_BINARY_TAGS], tag->key,
-                         key_len));
+      tag_set_delete_tag(&context->tags[PROPAGATED_TAGS], tag->key, key_len));
 }
 
 // Add a tag to a tag_set. Return true on success, false if the tag could
@@ -176,11 +191,11 @@ static bool context_delete_tag(census_context *context, const census_tag *tag,
 // not be called if the tag may already exist (in a non-deleted state) in
 // the tag_set, as that would result in two tags with the same key.
 static bool tag_set_add_tag(struct tag_set *tags, const census_tag *tag,
-                            size_t key_len) {
+                            size_t key_len, size_t value_len) {
   if (tags->ntags == CENSUS_MAX_PROPAGATED_TAGS) {
     return false;
   }
-  const size_t tag_size = key_len + tag->value_len + TAG_HEADER_SIZE;
+  const size_t tag_size = key_len + value_len + TAG_HEADER_SIZE;
   if (tags->kvm_used + tag_size > tags->kvm_size) {
     // allocate new memory if needed
     tags->kvm_size += 2 * CENSUS_MAX_TAG_KV_LEN + TAG_HEADER_SIZE;
@@ -191,13 +206,12 @@ static bool tag_set_add_tag(struct tag_set *tags, const census_tag *tag,
   }
   char *kvp = tags->kvm + tags->kvm_used;
   *kvp++ = (char)key_len;
-  *kvp++ = (char)tag->value_len;
+  *kvp++ = (char)value_len;
   // ensure reserved flags are not used.
-  *kvp++ = (char)(tag->flags & (CENSUS_TAG_PROPAGATE | CENSUS_TAG_STATS |
-                                CENSUS_TAG_BINARY));
+  *kvp++ = (char)(tag->flags & (CENSUS_TAG_PROPAGATE | CENSUS_TAG_STATS));
   memcpy(kvp, tag->key, key_len);
   kvp += key_len;
-  memcpy(kvp, tag->value, tag->value_len);
+  memcpy(kvp, tag->value, value_len);
   tags->kvm_used += tag_size;
   tags->ntags++;
   tags->ntags_alloc++;
@@ -207,30 +221,20 @@ static bool tag_set_add_tag(struct tag_set *tags, const census_tag *tag,
 // Add/modify/delete a tag to/in a context. Caller must validate that tag key
 // etc. are valid.
 static void context_modify_tag(census_context *context, const census_tag *tag,
-                               size_t key_len) {
+                               size_t key_len, size_t value_len) {
   // First delete the tag if it is already present.
   bool deleted = context_delete_tag(context, tag, key_len);
-  // Determine if we need to add it back.
-  bool call_add = tag->value != NULL && tag->value_len != 0;
   bool added = false;
-  if (call_add) {
-    if (CENSUS_TAG_IS_PROPAGATED(tag->flags)) {
-      if (CENSUS_TAG_IS_BINARY(tag->flags)) {
-        added = tag_set_add_tag(&context->tags[PROPAGATED_BINARY_TAGS], tag,
-                                key_len);
-      } else {
-        added = tag_set_add_tag(&context->tags[PROPAGATED_TAGS], tag, key_len);
-      }
-    } else {
-      added = tag_set_add_tag(&context->tags[LOCAL_TAGS], tag, key_len);
-    }
+  if (CENSUS_TAG_IS_PROPAGATED(tag->flags)) {
+    added = tag_set_add_tag(&context->tags[PROPAGATED_TAGS], tag, key_len,
+                            value_len);
+  } else {
+    added =
+        tag_set_add_tag(&context->tags[LOCAL_TAGS], tag, key_len, value_len);
   }
+
   if (deleted) {
-    if (call_add) {
-      context->status.n_modified_tags++;
-    } else {
-      context->status.n_deleted_tags++;
-    }
+    context->status.n_modified_tags++;
   } else {
     if (added) {
       context->status.n_added_tags++;
@@ -292,8 +296,6 @@ census_context *census_context_create(const census_context *base,
     memset(context, 0, sizeof(census_context));
   } else {
     tag_set_copy(&context->tags[PROPAGATED_TAGS], &base->tags[PROPAGATED_TAGS]);
-    tag_set_copy(&context->tags[PROPAGATED_BINARY_TAGS],
-                 &base->tags[PROPAGATED_BINARY_TAGS]);
     tag_set_copy(&context->tags[LOCAL_TAGS], &base->tags[LOCAL_TAGS]);
     memset(&context->status, 0, sizeof(context->status));
   }
@@ -301,22 +303,29 @@ census_context *census_context_create(const census_context *base,
   // the context to add/replace/delete as required.
   for (int i = 0; i < ntags; i++) {
     const census_tag *tag = &tags[i];
-    size_t key_len = strlen(tag->key) + 1;
-    // ignore the tag if it is too long/short.
-    if (key_len != 1 && key_len <= CENSUS_MAX_TAG_KV_LEN &&
-        tag->value_len <= CENSUS_MAX_TAG_KV_LEN) {
-      context_modify_tag(context, tag, key_len);
-    } else {
+    size_t key_len = validate_tag(tag->key);
+    // ignore the tag if it is invalid or too short.
+    if (key_len <= 1) {
       context->status.n_invalid_tags++;
+    } else {
+      if (tag->value != NULL) {
+        size_t value_len = validate_tag(tag->value);
+        if (value_len != 0) {
+          context_modify_tag(context, tag, key_len, value_len);
+        } else {
+          context->status.n_invalid_tags++;
+        }
+      } else {
+        if (context_delete_tag(context, tag, key_len)) {
+          context->status.n_deleted_tags++;
+        }
+      }
     }
   }
   // Remove any deleted tags, update status if needed, and return.
   tag_set_flatten(&context->tags[PROPAGATED_TAGS]);
-  tag_set_flatten(&context->tags[PROPAGATED_BINARY_TAGS]);
   tag_set_flatten(&context->tags[LOCAL_TAGS]);
   context->status.n_propagated_tags = context->tags[PROPAGATED_TAGS].ntags;
-  context->status.n_propagated_binary_tags =
-      context->tags[PROPAGATED_BINARY_TAGS].ntags;
   context->status.n_local_tags = context->tags[LOCAL_TAGS].ntags;
   if (status) {
     *status = &context->status;
@@ -331,7 +340,6 @@ const census_context_status *census_context_get_status(
 
 void census_context_destroy(census_context *context) {
   gpr_free(context->tags[PROPAGATED_TAGS].kvm);
-  gpr_free(context->tags[PROPAGATED_BINARY_TAGS].kvm);
   gpr_free(context->tags[LOCAL_TAGS].kvm);
   gpr_free(context);
 }
@@ -343,9 +351,6 @@ void census_context_initialize_iterator(const census_context *context,
   if (context->tags[PROPAGATED_TAGS].ntags != 0) {
     iterator->base = PROPAGATED_TAGS;
     iterator->kvm = context->tags[PROPAGATED_TAGS].kvm;
-  } else if (context->tags[PROPAGATED_BINARY_TAGS].ntags != 0) {
-    iterator->base = PROPAGATED_BINARY_TAGS;
-    iterator->kvm = context->tags[PROPAGATED_BINARY_TAGS].kvm;
   } else if (context->tags[LOCAL_TAGS].ntags != 0) {
     iterator->base = LOCAL_TAGS;
     iterator->kvm = context->tags[LOCAL_TAGS].kvm;
@@ -363,7 +368,6 @@ int census_context_next_tag(census_context_iterator *iterator,
   iterator->kvm = decode_tag(&raw, iterator->kvm, 0);
   tag->key = raw.key;
   tag->value = raw.value;
-  tag->value_len = raw.value_len;
   tag->flags = raw.flags;
   if (++iterator->index == iterator->context->tags[iterator->base].ntags) {
     do {
@@ -388,7 +392,6 @@ static bool tag_set_get_tag(const struct tag_set *tags, const char *key,
     if (key_len == raw.key_len && memcmp(raw.key, key, key_len) == 0) {
       tag->key = raw.key;
       tag->value = raw.value;
-      tag->value_len = raw.value_len;
       tag->flags = raw.flags;
       return true;
     }
@@ -403,8 +406,6 @@ int census_context_get_tag(const census_context *context, const char *key,
     return 0;
   }
   if (tag_set_get_tag(&context->tags[PROPAGATED_TAGS], key, key_len, tag) ||
-      tag_set_get_tag(&context->tags[PROPAGATED_BINARY_TAGS], key, key_len,
-                      tag) ||
       tag_set_get_tag(&context->tags[LOCAL_TAGS], key, key_len, tag)) {
     return 1;
   }
@@ -447,21 +448,9 @@ static size_t tag_set_encode(const struct tag_set *tags, char *buffer,
   return ENCODED_HEADER_SIZE + tags->kvm_used;
 }
 
-char *census_context_encode(const census_context *context, char *buffer,
-                            size_t buf_size, size_t *print_buf_size,
-                            size_t *bin_buf_size) {
-  *print_buf_size =
-      tag_set_encode(&context->tags[PROPAGATED_TAGS], buffer, buf_size);
-  if (*print_buf_size == 0) {
-    return NULL;
-  }
-  char *b_buffer = buffer + *print_buf_size;
-  *bin_buf_size = tag_set_encode(&context->tags[PROPAGATED_BINARY_TAGS],
-                                 b_buffer, buf_size - *print_buf_size);
-  if (*bin_buf_size == 0) {
-    return NULL;
-  }
-  return b_buffer;
+size_t census_context_encode(const census_context *context, char *buffer,
+                             size_t buf_size) {
+  return tag_set_encode(&context->tags[PROPAGATED_TAGS], buffer, buf_size);
 }
 
 // Decode a tag set.
@@ -506,8 +495,7 @@ static void tag_set_decode(struct tag_set *tags, const char *buffer,
   }
 }
 
-census_context *census_context_decode(const char *buffer, size_t size,
-                                      const char *bin_buffer, size_t bin_size) {
+census_context *census_context_decode(const char *buffer, size_t size) {
   census_context *context = gpr_malloc(sizeof(census_context));
   memset(&context->tags[LOCAL_TAGS], 0, sizeof(struct tag_set));
   if (buffer == NULL) {
@@ -515,16 +503,7 @@ census_context *census_context_decode(const char *buffer, size_t size,
   } else {
     tag_set_decode(&context->tags[PROPAGATED_TAGS], buffer, size);
   }
-  if (bin_buffer == NULL) {
-    memset(&context->tags[PROPAGATED_BINARY_TAGS], 0, sizeof(struct tag_set));
-  } else {
-    tag_set_decode(&context->tags[PROPAGATED_BINARY_TAGS], bin_buffer,
-                   bin_size);
-  }
   memset(&context->status, 0, sizeof(context->status));
   context->status.n_propagated_tags = context->tags[PROPAGATED_TAGS].ntags;
-  context->status.n_propagated_binary_tags =
-      context->tags[PROPAGATED_BINARY_TAGS].ntags;
-  // TODO(aveitch): check that BINARY flag is correct for each type.
   return context;
 }
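
One behavioral consequence of the rewritten create path above: deletion is now requested with a NULL value, while an empty string "" is a legal zero-length value (previously a zero-length value also deleted). A hedged sketch (the "service" key and helper name are illustrative):

```c
#include <grpc/census.h>

void census_delete_example(census_context *base) {
  /* NULL value => delete; "" would instead store an empty value. */
  census_tag del = {"service", NULL, 0};
  const census_context_status *status;
  census_context *ctx = census_context_create(base, &del, 1, &status);
  if (status->n_deleted_tags == 1) {
    /* "service" existed in base and was removed; deleting a
       non-existent tag is silently ignored. */
  }
  census_context_destroy(ctx);
}
```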

+ 0 - 10
src/csharp/Grpc.Core/Internal/AsyncCallServer.cs

@@ -193,16 +193,6 @@ namespace Grpc.Core.Internal
             lock (myLock)
             {
                 finished = true;
-
-                if (cancelled)
-                {
-                    // Once we cancel, we don't have to care that much 
-                    // about reads and writes.
-
-                    // TODO(jtattermusch): is this still necessary?
-                    Cancel();
-                }
-
                 ReleaseResourcesIfPossible();
             }
             // TODO(jtattermusch): handle error

+ 1 - 3
src/csharp/Grpc.IntegrationTesting/InteropClientServerTest.cs

@@ -1,6 +1,6 @@
 #region Copyright notice and license
 
-// Copyright 2015, Google Inc.
+// Copyright 2015-2016, Google Inc.
 // All rights reserved.
 //
 // Redistribution and use in source and binary forms, with or without
@@ -140,14 +140,12 @@ namespace Grpc.IntegrationTesting
         }
 
         [Test]
-        [Ignore("TODO: see #4427")]
         public async Task StatusCodeAndMessage()
         {
             await InteropClient.RunStatusCodeAndMessageAsync(client);
         }
 
         [Test]
-        [Ignore("TODO: see #4427")]
         public void UnimplementedMethod()
         {
             InteropClient.RunUnimplementedMethod(UnimplementedService.NewClient(channel));

+ 14 - 3
src/csharp/Grpc.Tools.nuspec

@@ -4,18 +4,29 @@
     <id>Grpc.Tools</id>
     <title>gRPC C# Tools</title>
     <summary>Tools for C# implementation of gRPC - an RPC library and framework</summary>
-    <description>Precompiled Windows binary for generating gRPC client/server code</description>
+    <description>Precompiled protobuf compiler and gRPC protobuf compiler plugin for generating gRPC client/server C# code. Binaries are available for Windows, Linux and MacOS.</description>
     <version>$version$</version>
     <authors>Google Inc.</authors>
     <owners>grpc-packages</owners>
     <licenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</licenseUrl>
     <projectUrl>https://github.com/grpc/grpc</projectUrl>
     <requireLicenseAcceptance>false</requireLicenseAcceptance>
-    <releaseNotes>grpc_csharp_plugin.exe - gRPC C# protoc plugin version $version$</releaseNotes>
+    <releaseNotes>Release $version$</releaseNotes>
     <copyright>Copyright 2015, Google Inc.</copyright>
     <tags>gRPC RPC Protocol HTTP/2</tags>
   </metadata>
   <files>
-    <file src="..\..\vsprojects\Release\grpc_csharp_plugin.exe" target="tools" />
+    <file src="protoc_plugins\windows_x86\protoc.exe" target="tools\windows_x86\protoc.exe" />
+    <file src="protoc_plugins\windows_x86\grpc_csharp_plugin.exe" target="tools\windows_x86\grpc_csharp_plugin.exe" />
+    <file src="protoc_plugins\windows_x64\protoc.exe" target="tools\windows_x64\protoc.exe" />
+    <file src="protoc_plugins\windows_x64\grpc_csharp_plugin.exe" target="tools\windows_x64\grpc_csharp_plugin.exe" />
+    <file src="protoc_plugins\linux_x86\protoc" target="tools\linux_x86\protoc" />
+    <file src="protoc_plugins\linux_x86\grpc_csharp_plugin" target="tools\linux_x86\grpc_csharp_plugin" />
+    <file src="protoc_plugins\linux_x64\protoc" target="tools\linux_x64\protoc" />
+    <file src="protoc_plugins\linux_x64\grpc_csharp_plugin" target="tools\linux_x64\grpc_csharp_plugin" />
+    <file src="protoc_plugins\macosx_x86\protoc" target="tools\macosx_x86\protoc" />
+    <file src="protoc_plugins\macosx_x86\grpc_csharp_plugin" target="tools\macosx_x86\grpc_csharp_plugin" />
+    <file src="protoc_plugins\macosx_x64\protoc" target="tools\macosx_x64\protoc" />
+    <file src="protoc_plugins\macosx_x64\grpc_csharp_plugin" target="tools\macosx_x64\grpc_csharp_plugin" />
   </files>
 </package>

+ 10 - 7
src/csharp/build_packages.bat

@@ -19,6 +19,14 @@ xcopy /Y /I ..\..\architecture=x64,language=csharp,platform=linux\artifacts\* gr
 xcopy /Y /I ..\..\architecture=x86,language=csharp,platform=macos\artifacts\* grpc.native.csharp\macosx_x86\
 xcopy /Y /I ..\..\architecture=x64,language=csharp,platform=macos\artifacts\* grpc.native.csharp\macosx_x64\
 
+@rem Collect protoc artifacts built by the previous build step
+xcopy /Y /I ..\..\architecture=x86,language=protoc,platform=windows\artifacts\* protoc_plugins\windows_x86\
+xcopy /Y /I ..\..\architecture=x64,language=protoc,platform=windows\artifacts\* protoc_plugins\windows_x64\
+xcopy /Y /I ..\..\architecture=x86,language=protoc,platform=linux\artifacts\* protoc_plugins\linux_x86\
+xcopy /Y /I ..\..\architecture=x64,language=protoc,platform=linux\artifacts\* protoc_plugins\linux_x64\
+xcopy /Y /I ..\..\architecture=x86,language=protoc,platform=macos\artifacts\* protoc_plugins\macosx_x86\
+xcopy /Y /I ..\..\architecture=x64,language=protoc,platform=macos\artifacts\* protoc_plugins\macosx_x64\
+
 @rem Fetch all dependencies
 %NUGET% restore ..\..\vsprojects\grpc_csharp_ext.sln || goto :error
 %NUGET% restore Grpc.sln || goto :error
@@ -27,24 +35,19 @@ setlocal
 
 @call "%VS120COMNTOOLS%\..\..\vc\vcvarsall.bat" x86
 
-@rem We won't use the native libraries from this step, but without this Grpc.sln will fail.  
+@rem We won't use the native libraries from this step, but without this Grpc.sln will fail.
 msbuild ..\..\vsprojects\grpc_csharp_ext.sln /p:Configuration=Release /p:PlatformToolset=v120 || goto :error
 
 msbuild Grpc.sln /p:Configuration=ReleaseSigned || goto :error
 
 endlocal
 
-@rem TODO(jtattermusch): re-enable protoc plugin building
-@rem @call ..\..\vsprojects\build_plugins.bat || goto :error
-
 %NUGET% pack grpc.native.csharp\grpc.native.csharp.nuspec -Version %VERSION% || goto :error
 %NUGET% pack Grpc.Auth\Grpc.Auth.nuspec -Symbols -Version %VERSION% || goto :error
 %NUGET% pack Grpc.Core\Grpc.Core.nuspec -Symbols -Version %VERSION% || goto :error
 %NUGET% pack Grpc.HealthCheck\Grpc.HealthCheck.nuspec -Symbols -Version %VERSION_WITH_BETA% -Properties ProtobufVersion=%PROTOBUF_VERSION% || goto :error
 %NUGET% pack Grpc.nuspec -Version %VERSION% || goto :error
-
-@rem TODO(jtattermusch): re-enable building Grpc.Tools package
-@rem %NUGET% pack Grpc.Tools.nuspec -Version %VERSION% || goto :error
+%NUGET% pack Grpc.Tools.nuspec -Version %VERSION% || goto :error
 
 @rem copy resulting nuget packages to artifacts directory
 xcopy /Y /I *.nupkg ..\..\artifacts\

+ 0 - 36
src/php/README.md

@@ -33,45 +33,12 @@ $ sudo mv phpunit.phar /usr/local/bin/phpunit
 
 ## Quick Install
 
-**Linux (Debian):**
-
-Add [Debian jessie-backports][] to your `sources.list` file. Example:
-
-```sh
-echo "deb http://http.debian.net/debian jessie-backports main" | \
-sudo tee -a /etc/apt/sources.list
-```
-
-Install the gRPC Debian package
-
-```sh
-sudo apt-get update
-sudo apt-get install libgrpc-dev
-```
-
 Install the gRPC PHP extension
 
 ```sh
 sudo pecl install grpc-beta
 ```
 
-**Mac OS X:**
-
-Install [homebrew][]. Example:
-
-```sh
-ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
-```
-
-Install the gRPC core library and the PHP extension in one step
-
-```sh
-$ curl -fsSL https://goo.gl/getgrpc | bash -s php
-```
-
-This will download and run the [gRPC install script][] and compile the gRPC PHP extension.
-
-
 ## Build from Source
 
 Clone this repository
@@ -297,7 +264,4 @@ Connect to `localhost/math_client.php` in your browser, or run this from command
 $ curl localhost/math_client.php
 ```
 
-[homebrew]:http://brew.sh
-[gRPC install script]:https://raw.githubusercontent.com/grpc/homebrew-grpc/master/scripts/install
 [Node]:https://github.com/grpc/grpc/tree/master/src/node/examples
-[Debian jessie-backports]:http://backports.debian.org/Instructions/

+ 5 - 4
src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi

@@ -1,4 +1,4 @@
-# Copyright 2015, Google Inc.
+# Copyright 2015-2016, Google Inc.
 # All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
@@ -89,12 +89,13 @@ cdef class Channel:
 
   def check_connectivity_state(self, bint try_to_connect):
     return grpc_channel_check_connectivity_state(self.c_channel,
-                                                      try_to_connect)
+                                                 try_to_connect)
 
   def watch_connectivity_state(
-      self, last_observed_state, Timespec deadline not None,
-      CompletionQueue queue not None, tag):
+      self, grpc_connectivity_state last_observed_state,
+      Timespec deadline not None, CompletionQueue queue not None, tag):
     cdef OperationTag operation_tag = OperationTag(tag)
+    operation_tag.references = [self, queue]
     cpython.Py_INCREF(operation_tag)
     grpc_channel_watch_connectivity_state(
         self.c_channel, last_observed_state, deadline.c_time,

+ 8 - 4
src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi

@@ -1,4 +1,4 @@
-# Copyright 2015, Google Inc.
+# Copyright 2015-2016, Google Inc.
 # All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
@@ -137,10 +137,14 @@ cdef class CompletionQueue:
       pass
 
   def __dealloc__(self):
+    cdef gpr_timespec c_deadline = gpr_inf_future(GPR_CLOCK_REALTIME)
     if self.c_completion_queue != NULL:
-      # Ensure shutdown, pump the queue
+      # Ensure shutdown
       if not self.is_shutting_down:
-        self.shutdown()
+        grpc_completion_queue_shutdown(self.c_completion_queue)
+      # Pump the queue
       while not self.is_shutdown:
-        self.poll()
+        event = grpc_completion_queue_next(
+            self.c_completion_queue, c_deadline, NULL)
+        self._interpret_event(event)
       grpc_completion_queue_destroy(self.c_completion_queue)
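
The rewritten `__dealloc__` drives the C-core shutdown sequence directly instead of going through the Python-level `shutdown()`/`poll()` wrappers, which may touch interpreter state that is no longer safe during deallocation. For reference, a hedged C sketch of the underlying pattern it mirrors (the helper name is illustrative):

```c
#include <grpc/grpc.h>
#include <grpc/support/time.h>

/* Shut a completion queue down, drain remaining events, then destroy it. */
static void drain_and_destroy(grpc_completion_queue *cq) {
  grpc_completion_queue_shutdown(cq);
  grpc_event ev;
  do {
    ev = grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
                                    NULL);
  } while (ev.type != GRPC_QUEUE_SHUTDOWN);
  grpc_completion_queue_destroy(cq);
}
```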

+ 2 - 1
src/python/grpcio/grpc/_cython/_cygrpc/server.pxd.pxi

@@ -1,4 +1,4 @@
-# Copyright 2015, Google Inc.
+# Copyright 2015-2016, Google Inc.
 # All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
@@ -39,4 +39,5 @@ cdef class Server:
   cdef list references
   cdef list registered_completion_queues
 
+  cdef _c_shutdown(self, CompletionQueue queue, tag)
   cdef notify_shutdown_complete(self)

+ 13 - 10
src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi

@@ -1,4 +1,4 @@
-# Copyright 2015, Google Inc.
+# Copyright 2015-2016, Google Inc.
 # All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
@@ -102,6 +102,16 @@ cdef class Server:
     else:
       return grpc_server_add_insecure_http2_port(self.c_server, address)
 
+  cdef _c_shutdown(self, CompletionQueue queue, tag):
+    self.is_shutting_down = True
+    operation_tag = OperationTag(tag)
+    operation_tag.shutting_down_server = self
+    operation_tag.references.extend([self, queue])
+    cpython.Py_INCREF(operation_tag)
+    grpc_server_shutdown_and_notify(
+        self.c_server, queue.c_completion_queue,
+        <cpython.PyObject *>operation_tag)
+
   def shutdown(self, CompletionQueue queue not None, tag):
     cdef OperationTag operation_tag
     if queue.is_shutting_down:
@@ -113,14 +123,7 @@ cdef class Server:
     elif queue not in self.registered_completion_queues:
       raise ValueError("expected registered completion queue")
     else:
-      self.is_shutting_down = True
-      operation_tag = OperationTag(tag)
-      operation_tag.shutting_down_server = self
-      operation_tag.references.extend([self, queue])
-      cpython.Py_INCREF(operation_tag)
-      grpc_server_shutdown_and_notify(
-          self.c_server, queue.c_completion_queue,
-          <cpython.PyObject *>operation_tag)
+      self._c_shutdown(queue, tag)
 
   cdef notify_shutdown_complete(self):
     # called only by a completion queue on receiving our shutdown operation tag
@@ -142,7 +145,7 @@ cdef class Server:
         pass
       elif not self.is_shutting_down:
         # the user didn't call shutdown - use our backup queue
-        self.shutdown(self.backup_shutdown_queue, None)
+        self._c_shutdown(self.backup_shutdown_queue, None)
         # and now we wait
         while not self.is_shutdown:
           self.backup_shutdown_queue.poll()
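
The extracted `_c_shutdown` lets `__dealloc__` bypass the Python-level argument checks in `shutdown()`. In plain C, the sequence it wraps looks roughly like this (the tag value and helper name are illustrative assumptions):

```c
#include <grpc/grpc.h>
#include <grpc/support/time.h>

static void server_shutdown_example(grpc_server *server,
                                    grpc_completion_queue *cq) {
  void *tag = (void *)1; /* any value unique on this queue */
  grpc_server_shutdown_and_notify(server, cq, tag);
  /* Wait for the shutdown notification before destroying the server. */
  grpc_event ev;
  do {
    ev = grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
                                    NULL);
  } while (!(ev.type == GRPC_OP_COMPLETE && ev.tag == tag));
  grpc_server_destroy(server);
}
```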

+ 2 - 2
src/python/grpcio/grpc/_cython/imports.generated.h

@@ -91,10 +91,10 @@ extern census_context_next_tag_type census_context_next_tag_import;
 typedef int(*census_context_get_tag_type)(const census_context *context, const char *key, census_tag *tag);
 extern census_context_get_tag_type census_context_get_tag_import;
 #define census_context_get_tag census_context_get_tag_import
-typedef char *(*census_context_encode_type)(const census_context *context, char *buffer, size_t buf_size, size_t *print_buf_size, size_t *bin_buf_size);
+typedef size_t(*census_context_encode_type)(const census_context *context, char *buffer, size_t buf_size);
 extern census_context_encode_type census_context_encode_import;
 #define census_context_encode census_context_encode_import
-typedef census_context *(*census_context_decode_type)(const char *buffer, size_t size, const char *bin_buffer, size_t bin_size);
+typedef census_context *(*census_context_decode_type)(const char *buffer, size_t size);
 extern census_context_decode_type census_context_decode_import;
 #define census_context_decode census_context_decode_import
 typedef int(*census_trace_mask_type)(const census_context *context);

+ 2 - 2
src/ruby/ext/grpc/rb_grpc_imports.generated.h

@@ -91,10 +91,10 @@ extern census_context_next_tag_type census_context_next_tag_import;
 typedef int(*census_context_get_tag_type)(const census_context *context, const char *key, census_tag *tag);
 extern census_context_get_tag_type census_context_get_tag_import;
 #define census_context_get_tag census_context_get_tag_import
-typedef char *(*census_context_encode_type)(const census_context *context, char *buffer, size_t buf_size, size_t *print_buf_size, size_t *bin_buf_size);
+typedef size_t(*census_context_encode_type)(const census_context *context, char *buffer, size_t buf_size);
 extern census_context_encode_type census_context_encode_import;
 #define census_context_encode census_context_encode_import
-typedef census_context *(*census_context_decode_type)(const char *buffer, size_t size, const char *bin_buffer, size_t bin_size);
+typedef census_context *(*census_context_decode_type)(const char *buffer, size_t size);
 extern census_context_decode_type census_context_decode_import;
 #define census_context_decode census_context_decode_import
 typedef int(*census_trace_mask_type)(const census_context *context);
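
The Python and Ruby generated headers change in lockstep because both follow the same import-shim pattern: a typedef for the function signature, an extern function pointer, and a #define that redirects call sites to the pointer, so only the signatures needed updating here. A sketch of how such a pointer gets populated (the loader name and the POSIX `dlsym` call are assumptions; the real generated loaders are platform-specific):

```c
#include <dlfcn.h>
#include <stddef.h>

typedef struct census_context census_context;
typedef size_t (*census_context_encode_type)(const census_context *context,
                                             char *buffer, size_t buf_size);
census_context_encode_type census_context_encode_import;

void load_census_encode(void *grpc_library_handle) {
  /* The #define in the generated header makes ordinary calls to
     census_context_encode go through this pointer. */
  census_context_encode_import = (census_context_encode_type)dlsym(
      grpc_library_handle, "census_context_encode");
}
```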

+ 69 - 42
templates/README.md

@@ -6,78 +6,92 @@ was going to single handedly cover all of our usage cases.
 
 So instead we decided to work the following way:
 
-* A build.json file at the root is the source of truth for listing all of the
-target and files needed to build grpc and its tests, as well as basic system
-dependencies description.
+* A `build.yaml` file at the root is the source of truth for listing all the
+targets and files needed to build grpc and its tests, as well as a basic system
+for dependency description.
 
 * Each project file (Makefile, Visual Studio project files, Bazel's BUILD) is
-a plain-text template that uses the build.json file to generate the final
-output file.
+a plain-text template that uses the [YAML](http://yaml.org) `build.yaml` file
+to generate the final output file.
 
 This way we can maintain as many project system as we see fit, without having
 to manually maintain them when we add or remove new code to the repository.
 Only the structure of the project file is relevant to the template. The actual
 list of source code and targets isn't.
 
-We currently have template files for GNU Make, Visual Studio 2010 to 2015,
-and Bazel. In the future, we would like to expand to generating gyp or cmake
-project files (or potentially both), XCode project files, and an Android.mk
-file to be able to compile gRPC using Android's NDK.
+We currently have template files for GNU Make, Visual Studio 2013,
+[Bazel](http://bazel.io) and [gyp](https://gyp.gsrc.io/) (albeit only for
+Node.js). In the future, we
+would like to expand to also generate [cmake](https://cmake.org)
+project files, XCode project files, and an Android.mk file allowing gRPC to be
+compiled using Android's NDK.
 
 We'll gladly accept contribution that'd create additional project files
 using that system.
 
-# Structure of build.json
+# Structure of `build.yaml`
 
-The build.json file has the following structure:
+The `build.yaml` file has the following structure:
 
 ```
-{
-  "settings": { ... },   # global settings, such as version number
-  "filegroups": [ ... ], # groups of file that is automatically expanded
-  "libs": [ ... ],       # list of libraries to build
-  "targets": [ ... ],    # list of targets to build
-}
+settings:  # global settings, such as version number
+  ...
+filegroups:  # groups of files that are automatically expanded
+  ...
+libs:  # list of libraries to build
+  ...
+targets:  # list of targets to build
+  ...
 ```
 
 The `filegroups` are helpful to re-use a subset of files in multiple targets.
 One `filegroups` entry has the following structure:
 
 ```
-{
-  "name": "arbitrary string", # the name of the filegroup
-  "public_headers": [ ... ],  # list of public headers defined in that filegroup
-  "headers": [ ... ],         # list of headers defined in that filegroup
-  "src": [ ... ],             # list of source files defined in that filegroup
-}
+- name: "arbitrary string", # the name of the filegroup
+  public_headers: # list of public headers defined in that filegroup
+  - ...
+  headers: # list of headers defined in that filegroup
+  - ...
+  src: # list of source files defined in that filegroup
+  - ...
 ```
 
-The `libs` array contains the list of all the libraries we describe. Some may be
+The `libs` collection contains the list of all the libraries we describe. Some may be
 helper libraries for the tests. Some may be installable libraries. Some may be
 helper libraries for installable binaries.
 
 The `targets` array contains the list of all the binary targets we describe. Some may
 be installable binaries.
 
-One `libs` or `targets` entry has the following structure:
+One `libs` or `targets` entry has the following structure (see below for
+details):
 
 ```
-{
-  "name": "arbitrary string", # the name of the library
-  "build": "build type",      # in which situation we want that library to be
-                              # built and potentially installed
-  "language": "...",          # the language tag; "c" or "c++"
-  "public_headers": [ ... ],  # list of public headers to install
-  "headers": [ ... ],         # list of headers used by that target
-  "src": [ ... ],             # list of files to compile
-  "secure": "...",            # "yes", "no" or "check"
-  "baselib": boolean,         # this is a low level library that has system
-                              # dependencies
-  "vs_project_guid: "...",    # Visual Studio's unique guid for that project
-  "filegroups": [ ... ],      # list of filegroups to merge to that project
-                              # note that this will be expanded automatically
-  "deps": [ ... ],            # list of libraries this target depends on
-}
+name: "arbitrary string", # the name of the library
+build: "build type",      # in which situation we want that library to be
+                          # built and potentially installed (see below).
+language: "...",          # the language tag; "c" or "c++"
+public_headers:           # list of public headers to install
+headers:                  # list of headers used by that target
+src:                      # list of files to compile
+secure: boolean,          # see below
+baselib: boolean,         # this is a low level library that has system
+                          # dependencies
+vs_project_guid: '{...}', # Visual Studio's unique guid for that project
+filegroups:               # list of filegroups to merge to that project
+                          # note that this will be expanded automatically
+deps:                     # list of libraries this target depends on
+deps_linkage: "..."       # "static" or "dynamic". Used by the Makefile only to
+                          # determine the way dependencies are linked. Defaults
+                          # to "dynamic".
+dll: "..."                # see below.
+dll_def: "..."            # Visual Studio's dll definition file.
+vs_props:                 # List of property sheets to attach to that project.
+vs_config_type: "..."     # DynamicLibrary/StaticLibrary. Used only when
+                          # creating a library. Specifies if we're building a
+                          # static library or a dll. Use in conjunction with `dll_def`.
+vs_packages:              # List of nuget packages this project depends on.
 ```
 
 ## The `"build"` tag
@@ -86,8 +100,9 @@ Currently, the "`build`" tag have these meanings:
 
 * `"all"`: library to build on `"make all"`, and install on the system.
 * `"protoc"`: a protoc plugin to build on `"make all"` and install on the system.
-* `"priviate"`: a library to only build for tests.
+* `"private"`: a library to only build for tests.
 * `"test"`: a test binary to run on `"make test"`.
+* `"tool"`: a binary to be built upon `"make tools"`.
 
 All of the targets should always be present in the generated project file, if
 possible and applicable. But the build tag is what should group the targets
@@ -111,6 +126,18 @@ should merge OpenSSL, protobuf or zlib inside that library. That effect depends
 on the `"language"` tag. OpenSSL and zlib are for `"c"` libraries, while
 protobuf is for `"c++"` ones.
 
+## The `"dll"` tag
+
+Used only by Visual Studio's project files. "true" means the project will be
+built with both static and dynamic runtimes. "false" means it'll only be built
+with static runtime. "only" means it'll only be built with the dll runtime.
+
+## The `"dll_def"` tag
+
+Specifies Visual Studio's dll definition file. When creating a DLL, you
+sometimes (not always) need a def file (see grpc.def).
+
+
 # The template system
 
 We're currently using the [mako templates](http://www.makotemplates.org/)

+ 19 - 4
templates/package.xml.template

@@ -12,11 +12,11 @@
     <email>grpc-packages@google.com</email>
     <active>yes</active>
    </lead>
-   <date>2016-02-24</date>
+   <date>2016-03-01</date>
    <time>16:06:07</time>
    <version>
-    <release>0.8.0</release>
-    <api>0.8.0</api>
+    <release>0.14.0</release>
+    <api>0.14.0</api>
    </version>
    <stability>
     <release>beta</release>
@@ -24,7 +24,7 @@
    </stability>
    <license>BSD</license>
    <notes>
-  - Simplify gRPC PHP installation #4517
+  - Increase unit test code coverage #5225
    </notes>
    <contents>
     <dir baseinstalldir="/" name="/">
@@ -155,5 +155,20 @@
   - Simplify gRPC PHP installation #4517
      </notes>
     </release>
+    <release>
+     <version>
+      <release>0.14.0</release>
+      <api>0.14.0</api>
+     </version>
+     <stability>
+      <release>beta</release>
+      <api>beta</api>
+     </stability>
+     <date>2016-03-01</date>
+     <license>BSD</license>
+     <notes>
+  - Increase unit test code coverage #5225
+     </notes>
+    </release>
    </changelog>
   </package>

+ 10 - 7
templates/src/csharp/build_packages.bat.template

@@ -21,6 +21,14 @@
   xcopy /Y /I ..\..\architecture=x86,language=csharp,platform=macos\artifacts\* grpc.native.csharp\macosx_x86${"\\"}
   xcopy /Y /I ..\..\architecture=x64,language=csharp,platform=macos\artifacts\* grpc.native.csharp\macosx_x64${"\\"}
   
+  @rem Collect protoc artifacts built by the previous build step
+  xcopy /Y /I ..\..\architecture=x86,language=protoc,platform=windows\artifacts\* protoc_plugins\windows_x86${"\\"}
+  xcopy /Y /I ..\..\architecture=x64,language=protoc,platform=windows\artifacts\* protoc_plugins\windows_x64${"\\"}
+  xcopy /Y /I ..\..\architecture=x86,language=protoc,platform=linux\artifacts\* protoc_plugins\linux_x86${"\\"}
+  xcopy /Y /I ..\..\architecture=x64,language=protoc,platform=linux\artifacts\* protoc_plugins\linux_x64${"\\"}
+  xcopy /Y /I ..\..\architecture=x86,language=protoc,platform=macos\artifacts\* protoc_plugins\macosx_x86${"\\"}
+  xcopy /Y /I ..\..\architecture=x64,language=protoc,platform=macos\artifacts\* protoc_plugins\macosx_x64${"\\"}
+  
   @rem Fetch all dependencies
   %%NUGET% restore ..\..\vsprojects\grpc_csharp_ext.sln || goto :error
   %%NUGET% restore Grpc.sln || goto :error
@@ -29,24 +37,19 @@
   
   @call "%VS120COMNTOOLS%\..\..\vc\vcvarsall.bat" x86
   
-  @rem We won't use the native libraries from this step, but without this Grpc.sln will fail.  
+  @rem We won't use the native libraries from this step, but without this Grpc.sln will fail.
   msbuild ..\..\vsprojects\grpc_csharp_ext.sln /p:Configuration=Release /p:PlatformToolset=v120 || goto :error
   
   msbuild Grpc.sln /p:Configuration=ReleaseSigned || goto :error
   
   endlocal
   
-  @rem TODO(jtattermusch): re-enable protoc plugin building
-  @rem @call ..\..\vsprojects\build_plugins.bat || goto :error
-  
   %%NUGET% pack grpc.native.csharp\grpc.native.csharp.nuspec -Version %VERSION% || goto :error
   %%NUGET% pack Grpc.Auth\Grpc.Auth.nuspec -Symbols -Version %VERSION% || goto :error
   %%NUGET% pack Grpc.Core\Grpc.Core.nuspec -Symbols -Version %VERSION% || goto :error
   %%NUGET% pack Grpc.HealthCheck\Grpc.HealthCheck.nuspec -Symbols -Version %VERSION_WITH_BETA% -Properties ProtobufVersion=%PROTOBUF_VERSION% || goto :error
   %%NUGET% pack Grpc.nuspec -Version %VERSION% || goto :error
-  
-  @rem TODO(jtattermusch): re-enable building Grpc.Tools package
-  @rem %NUGET% pack Grpc.Tools.nuspec -Version %VERSION% || goto :error
+  %%NUGET% pack Grpc.Tools.nuspec -Version %VERSION% || goto :error
   
   @rem copy resulting nuget packages to artifacts directory
   xcopy /Y /I *.nupkg ..\..\artifacts${"\\"}

+ 71 - 66
test/core/census/context_test.c

@@ -42,60 +42,48 @@
 #include <string.h>
 #include "test/core/util/test_config.h"
 
-static uint8_t one_byte_val = 7;
-static uint32_t four_byte_val = 0x12345678;
-static uint64_t eight_byte_val = 0x1234567890abcdef;
-
-// A set of tags Used to create a basic context for testing. Each tag has a
-// unique set of flags. Note that replace_add_delete_test() relies on specific
-// offsets into this array - if you add or delete entries, you will also need
-// to change the test.
+// A set of tags used to create a basic context for testing. Note that
+// replace_add_delete_test() relies on specific offsets into this array - if
+// you add or delete entries, you will also need to change the test.
 #define BASIC_TAG_COUNT 8
 static census_tag basic_tags[BASIC_TAG_COUNT] = {
-    /* 0 */ {"key0", "printable", 10, 0},
-    /* 1 */ {"k1", "a", 2, CENSUS_TAG_PROPAGATE},
-    /* 2 */ {"k2", "longer printable string", 24, CENSUS_TAG_STATS},
-    /* 3 */ {"key_three", (char *)&one_byte_val, 1, CENSUS_TAG_BINARY},
-    /* 4 */ {"really_long_key_4", "random", 7,
+    /* 0 */ {"key0", "tag value", 0},
+    /* 1 */ {"k1", "a", CENSUS_TAG_PROPAGATE},
+    /* 2 */ {"k2", "a longer tag value supercalifragilisticexpialiadocious",
+             CENSUS_TAG_STATS},
+    /* 3 */ {"key_three", "", 0},
+    /* 4 */ {"a_really_really_really_really_long_key_4", "random",
              CENSUS_TAG_PROPAGATE | CENSUS_TAG_STATS},
-    /* 5 */ {"k5", (char *)&four_byte_val, 4,
-             CENSUS_TAG_PROPAGATE | CENSUS_TAG_BINARY},
-    /* 6 */ {"k6", (char *)&eight_byte_val, 8,
-             CENSUS_TAG_STATS | CENSUS_TAG_BINARY},
-    /* 7 */ {"k7", (char *)&four_byte_val, 4,
-             CENSUS_TAG_PROPAGATE | CENSUS_TAG_STATS | CENSUS_TAG_BINARY}};
+    /* 5 */ {"k5", "v5", CENSUS_TAG_PROPAGATE},
+    /* 6 */ {"k6", "v6", CENSUS_TAG_STATS},
+    /* 7 */ {"k7", "v7", CENSUS_TAG_PROPAGATE | CENSUS_TAG_STATS}};
 
 // Set of tags used to modify the basic context. Note that
 // replace_add_delete_test() relies on specific offsets into this array - if
 // you add or delete entries, you will also need to change the test. Other
 // tests that rely on specific instances have XXX_XXX_OFFSET definitions (also
 // change the defines below if you add/delete entires).
-#define MODIFY_TAG_COUNT 11
+#define MODIFY_TAG_COUNT 10
 static census_tag modify_tags[MODIFY_TAG_COUNT] = {
 #define REPLACE_VALUE_OFFSET 0
-    /* 0 */ {"key0", "replace printable", 18, 0},  // replaces tag value only
+    /* 0 */ {"key0", "replace key0", 0},  // replaces tag value only
 #define ADD_TAG_OFFSET 1
-    /* 1 */ {"new_key", "xyzzy", 6, CENSUS_TAG_STATS},  // new tag
+    /* 1 */ {"new_key", "xyzzy", CENSUS_TAG_STATS},  // new tag
 #define DELETE_TAG_OFFSET 2
-    /* 2 */ {"k5", NULL, 5,
-             0},  // should delete tag, despite bogus value length
-    /* 3 */ {"k6", "foo", 0, 0},  // should delete tag, despite bogus value
-    /* 4 */ {"k6", "foo", 0, 0},  // try deleting already-deleted tag
-    /* 5 */ {"non-existent", NULL, 0, 0},  // another non-existent tag
-#define REPLACE_FLAG_OFFSET 6
-    /* 6 */ {"k1", "a", 2, 0},                   // change flags only
-    /* 7 */ {"k7", "bar", 4, CENSUS_TAG_STATS},  // change flags and value
-    /* 8 */ {"k2", (char *)&eight_byte_val, 8,
-             CENSUS_TAG_BINARY | CENSUS_TAG_PROPAGATE},  // more flags change
-                                                         // non-binary -> binary
-    /* 9 */ {"k6", "bar", 4, 0},  // add back tag, with different value
-    /* 10 */ {"foo", "bar", 4, CENSUS_TAG_PROPAGATE},  // another new tag
+    /* 2 */ {"k5", NULL, 0},            // should delete tag
+    /* 3 */ {"k5", NULL, 0},            // try deleting already-deleted tag
+    /* 4 */ {"non-existent", NULL, 0},  // delete non-existent tag
+#define REPLACE_FLAG_OFFSET 5
+    /* 5 */ {"k1", "a", 0},                    // change flags only
+    /* 6 */ {"k7", "bar", CENSUS_TAG_STATS},   // change flags and value
+    /* 7 */ {"k2", "", CENSUS_TAG_PROPAGATE},  // more value and flags change
+    /* 8 */ {"k5", "bar", 0},  // add back tag, with different value
+    /* 9 */ {"foo", "bar", CENSUS_TAG_PROPAGATE},  // another new tag
 };
 
 // Utility function to compare tags. Returns true if all fields match.
 static bool compare_tag(const census_tag *t1, const census_tag *t2) {
-  return (strcmp(t1->key, t2->key) == 0 && t1->value_len == t2->value_len &&
-          memcmp(t1->value, t2->value, t1->value_len) == 0 &&
+  return (strcmp(t1->key, t2->key) == 0 && strcmp(t1->value, t2->value) == 0 &&
           t1->flags == t2->flags);
 }
 
@@ -111,7 +99,7 @@ static void empty_test(void) {
   struct census_context *context = census_context_create(NULL, NULL, 0, NULL);
   GPR_ASSERT(context != NULL);
   const census_context_status *status = census_context_get_status(context);
-  census_context_status expected = {0, 0, 0, 0, 0, 0, 0, 0};
+  census_context_status expected = {0, 0, 0, 0, 0, 0, 0};
   GPR_ASSERT(memcmp(status, &expected, sizeof(expected)) == 0);
   census_context_destroy(context);
 }
@@ -121,7 +109,7 @@ static void basic_test(void) {
   const census_context_status *status;
   struct census_context *context =
       census_context_create(NULL, basic_tags, BASIC_TAG_COUNT, &status);
-  census_context_status expected = {2, 2, 4, 0, 8, 0, 0, 0};
+  census_context_status expected = {4, 4, 0, 8, 0, 0, 0};
   GPR_ASSERT(memcmp(status, &expected, sizeof(expected)) == 0);
   census_context_iterator it;
   census_context_initialize_iterator(context, &it);
@@ -161,15 +149,18 @@ static void invalid_test(void) {
   memset(key, 'k', 299);
   key[299] = 0;
   char value[300];
-  memset(value, 'v', 300);
-  census_tag tag = {key, value, 3, CENSUS_TAG_BINARY};
+  memset(value, 'v', 299);
+  value[299] = 0;
+  census_tag tag = {key, value, 0};
   // long keys, short value. Key lengths (including terminator) should be
   // <= 255 (CENSUS_MAX_TAG_KV_LEN)
+  value[3] = 0;
+  GPR_ASSERT(strlen(value) == 3);
   GPR_ASSERT(strlen(key) == 299);
   const census_context_status *status;
   struct census_context *context =
       census_context_create(NULL, &tag, 1, &status);
-  census_context_status expected = {0, 0, 0, 0, 0, 0, 1, 0};
+  census_context_status expected = {0, 0, 0, 0, 0, 1, 0};
   GPR_ASSERT(memcmp(status, &expected, sizeof(expected)) == 0);
   census_context_destroy(context);
   key[CENSUS_MAX_TAG_KV_LEN] = 0;
@@ -180,24 +171,44 @@ static void invalid_test(void) {
   key[CENSUS_MAX_TAG_KV_LEN - 1] = 0;
   GPR_ASSERT(strlen(key) == CENSUS_MAX_TAG_KV_LEN - 1);
   context = census_context_create(NULL, &tag, 1, &status);
-  census_context_status expected2 = {0, 0, 1, 0, 1, 0, 0, 0};
+  census_context_status expected2 = {0, 1, 0, 1, 0, 0, 0};
   GPR_ASSERT(memcmp(status, &expected2, sizeof(expected2)) == 0);
   census_context_destroy(context);
   // now try with long values
-  tag.value_len = 300;
+  value[3] = 'v';
+  GPR_ASSERT(strlen(value) == 299);
   context = census_context_create(NULL, &tag, 1, &status);
   GPR_ASSERT(memcmp(status, &expected, sizeof(expected)) == 0);
   census_context_destroy(context);
-  tag.value_len = CENSUS_MAX_TAG_KV_LEN + 1;
+  value[CENSUS_MAX_TAG_KV_LEN] = 0;
+  GPR_ASSERT(strlen(value) == CENSUS_MAX_TAG_KV_LEN);
   context = census_context_create(NULL, &tag, 1, &status);
   GPR_ASSERT(memcmp(status, &expected, sizeof(expected)) == 0);
   census_context_destroy(context);
-  tag.value_len = CENSUS_MAX_TAG_KV_LEN;
+  value[CENSUS_MAX_TAG_KV_LEN - 1] = 0;
+  GPR_ASSERT(strlen(value) == CENSUS_MAX_TAG_KV_LEN - 1);
   context = census_context_create(NULL, &tag, 1, &status);
   GPR_ASSERT(memcmp(status, &expected2, sizeof(expected2)) == 0);
   census_context_destroy(context);
   // 0 length key.
   key[0] = 0;
+  GPR_ASSERT(strlen(key) == 0);
+  context = census_context_create(NULL, &tag, 1, &status);
+  GPR_ASSERT(memcmp(status, &expected, sizeof(expected)) == 0);
+  census_context_destroy(context);
+  // invalid key character
+  key[0] = 31;  // 32 (' ') is the first valid character value
+  key[1] = 0;
+  GPR_ASSERT(strlen(key) == 1);
+  context = census_context_create(NULL, &tag, 1, &status);
+  GPR_ASSERT(memcmp(status, &expected, sizeof(expected)) == 0);
+  census_context_destroy(context);
+  // invalid value character
+  key[0] = ' ';
+  value[5] = 127;  // 127 (DEL) is ('~' + 1)
+  value[8] = 0;
+  GPR_ASSERT(strlen(key) == 1);
+  GPR_ASSERT(strlen(value) == 8);
   context = census_context_create(NULL, &tag, 1, &status);
   GPR_ASSERT(memcmp(status, &expected, sizeof(expected)) == 0);
   census_context_destroy(context);
@@ -210,7 +221,7 @@ static void copy_test(void) {
   const census_context_status *status;
   struct census_context *context2 =
       census_context_create(context, NULL, 0, &status);
-  census_context_status expected = {2, 2, 4, 0, 0, 0, 0, 0};
+  census_context_status expected = {4, 4, 0, 0, 0, 0, 0};
   GPR_ASSERT(memcmp(status, &expected, sizeof(expected)) == 0);
   for (int i = 0; i < BASIC_TAG_COUNT; i++) {
     census_tag tag;
@@ -228,7 +239,7 @@ static void replace_value_test(void) {
   const census_context_status *status;
   struct census_context *context2 = census_context_create(
       context, modify_tags + REPLACE_VALUE_OFFSET, 1, &status);
-  census_context_status expected = {2, 2, 4, 0, 0, 1, 0, 0};
+  census_context_status expected = {4, 4, 0, 0, 1, 0, 0};
   GPR_ASSERT(memcmp(status, &expected, sizeof(expected)) == 0);
   census_tag tag;
   GPR_ASSERT(census_context_get_tag(
@@ -245,7 +256,7 @@ static void replace_flags_test(void) {
   const census_context_status *status;
   struct census_context *context2 = census_context_create(
       context, modify_tags + REPLACE_FLAG_OFFSET, 1, &status);
-  census_context_status expected = {1, 2, 5, 0, 0, 1, 0, 0};
+  census_context_status expected = {3, 5, 0, 0, 1, 0, 0};
   GPR_ASSERT(memcmp(status, &expected, sizeof(expected)) == 0);
   census_tag tag;
   GPR_ASSERT(census_context_get_tag(
@@ -262,7 +273,7 @@ static void delete_tag_test(void) {
   const census_context_status *status;
   struct census_context *context2 = census_context_create(
       context, modify_tags + DELETE_TAG_OFFSET, 1, &status);
-  census_context_status expected = {2, 1, 4, 1, 0, 0, 0, 0};
+  census_context_status expected = {3, 4, 1, 0, 0, 0, 0};
   GPR_ASSERT(memcmp(status, &expected, sizeof(expected)) == 0);
   census_tag tag;
   GPR_ASSERT(census_context_get_tag(
@@ -278,7 +289,7 @@ static void add_tag_test(void) {
   const census_context_status *status;
   struct census_context *context2 =
       census_context_create(context, modify_tags + ADD_TAG_OFFSET, 1, &status);
-  census_context_status expected = {2, 2, 5, 0, 1, 0, 0, 0};
+  census_context_status expected = {4, 5, 0, 1, 0, 0, 0};
   GPR_ASSERT(memcmp(status, &expected, sizeof(expected)) == 0);
   census_tag tag;
   GPR_ASSERT(census_context_get_tag(context2, modify_tags[ADD_TAG_OFFSET].key,
@@ -295,24 +306,24 @@ static void replace_add_delete_test(void) {
   const census_context_status *status;
   struct census_context *context2 =
       census_context_create(context, modify_tags, MODIFY_TAG_COUNT, &status);
-  census_context_status expected = {2, 1, 6, 2, 3, 4, 0, 2};
+  census_context_status expected = {3, 7, 1, 3, 4, 0, 0};
   GPR_ASSERT(memcmp(status, &expected, sizeof(expected)) == 0);
   // validate context contents. Use specific indices into the two arrays
   // holding tag values.
   GPR_ASSERT(validate_tag(context2, &basic_tags[3]));
   GPR_ASSERT(validate_tag(context2, &basic_tags[4]));
+  GPR_ASSERT(validate_tag(context2, &basic_tags[6]));
   GPR_ASSERT(validate_tag(context2, &modify_tags[0]));
   GPR_ASSERT(validate_tag(context2, &modify_tags[1]));
+  GPR_ASSERT(validate_tag(context2, &modify_tags[5]));
   GPR_ASSERT(validate_tag(context2, &modify_tags[6]));
   GPR_ASSERT(validate_tag(context2, &modify_tags[7]));
   GPR_ASSERT(validate_tag(context2, &modify_tags[8]));
   GPR_ASSERT(validate_tag(context2, &modify_tags[9]));
-  GPR_ASSERT(validate_tag(context2, &modify_tags[10]));
   GPR_ASSERT(!validate_tag(context2, &basic_tags[0]));
   GPR_ASSERT(!validate_tag(context2, &basic_tags[1]));
   GPR_ASSERT(!validate_tag(context2, &basic_tags[2]));
   GPR_ASSERT(!validate_tag(context2, &basic_tags[5]));
-  GPR_ASSERT(!validate_tag(context2, &basic_tags[6]));
   GPR_ASSERT(!validate_tag(context2, &basic_tags[7]));
   census_context_destroy(context);
   census_context_destroy(context2);
@@ -325,21 +336,15 @@ static void encode_decode_test(void) {
   char buffer[BUF_SIZE];
   struct census_context *context =
       census_context_create(NULL, basic_tags, BASIC_TAG_COUNT, NULL);
-  size_t print_bsize;
-  size_t bin_bsize;
   // Test with too small a buffer
-  GPR_ASSERT(census_context_encode(context, buffer, 2, &print_bsize,
-                                   &bin_bsize) == NULL);
-  char *b_buffer = census_context_encode(context, buffer, BUF_SIZE,
-                                         &print_bsize, &bin_bsize);
-  GPR_ASSERT(b_buffer != NULL && print_bsize > 0 && bin_bsize > 0 &&
-             print_bsize + bin_bsize <= BUF_SIZE &&
-             b_buffer == buffer + print_bsize);
-  census_context *context2 =
-      census_context_decode(buffer, print_bsize, b_buffer, bin_bsize);
+  GPR_ASSERT(census_context_encode(context, buffer, 2) == 0);
+  // Test with sufficient buffer
+  size_t buf_used = census_context_encode(context, buffer, BUF_SIZE);
+  GPR_ASSERT(buf_used != 0);
+  census_context *context2 = census_context_decode(buffer, buf_used);
   GPR_ASSERT(context2 != NULL);
   const census_context_status *status = census_context_get_status(context2);
-  census_context_status expected = {2, 2, 0, 0, 0, 0, 0, 0};
+  census_context_status expected = {4, 0, 0, 0, 0, 0, 0};
   GPR_ASSERT(memcmp(status, &expected, sizeof(expected)) == 0);
   for (int i = 0; i < BASIC_TAG_COUNT; i++) {
     census_tag tag;

+ 0 - 93
test/core/iomgr/tcp_client_posix_test.c

@@ -178,98 +178,6 @@ void test_fails(void) {
   grpc_exec_ctx_finish(&exec_ctx);
 }
 
-void test_times_out(void) {
-  struct sockaddr_in addr;
-  socklen_t addr_len = sizeof(addr);
-  int svr_fd;
-#define NUM_CLIENT_CONNECTS 100
-  int client_fd[NUM_CLIENT_CONNECTS];
-  int i;
-  int r;
-  int connections_complete_before;
-  gpr_timespec connect_deadline;
-  grpc_closure done;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-
-  gpr_log(GPR_DEBUG, "test_times_out");
-
-  memset(&addr, 0, sizeof(addr));
-  addr.sin_family = AF_INET;
-
-  /* create a dummy server */
-  svr_fd = socket(AF_INET, SOCK_STREAM, 0);
-  GPR_ASSERT(svr_fd >= 0);
-  GPR_ASSERT(0 == bind(svr_fd, (struct sockaddr *)&addr, addr_len));
-  GPR_ASSERT(0 == listen(svr_fd, 1));
-  /* Get its address */
-  GPR_ASSERT(getsockname(svr_fd, (struct sockaddr *)&addr, &addr_len) == 0);
-
-  /* tie up the listen buffer, which is somewhat arbitrarily sized. */
-  for (i = 0; i < NUM_CLIENT_CONNECTS; ++i) {
-    client_fd[i] = socket(AF_INET, SOCK_STREAM, 0);
-    grpc_set_socket_nonblocking(client_fd[i], 1);
-    do {
-      r = connect(client_fd[i], (struct sockaddr *)&addr, addr_len);
-    } while (r == -1 && errno == EINTR);
-    GPR_ASSERT(r < 0);
-    GPR_ASSERT(errno == EWOULDBLOCK || errno == EINPROGRESS);
-  }
-
-  /* connect to dummy server address */
-
-  connect_deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1);
-
-  gpr_mu_lock(g_mu);
-  connections_complete_before = g_connections_complete;
-  gpr_mu_unlock(g_mu);
-
-  grpc_closure_init(&done, must_fail, NULL);
-  grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, g_pollset_set,
-                          (struct sockaddr *)&addr, addr_len, connect_deadline);
-
-  /* Make sure the event doesn't trigger early */
-  gpr_mu_lock(g_mu);
-  for (;;) {
-    grpc_pollset_worker *worker = NULL;
-    gpr_timespec now = gpr_now(connect_deadline.clock_type);
-    gpr_timespec continue_verifying_time =
-        gpr_time_from_seconds(5, GPR_TIMESPAN);
-    gpr_timespec grace_time = gpr_time_from_seconds(3, GPR_TIMESPAN);
-    gpr_timespec finish_time =
-        gpr_time_add(connect_deadline, continue_verifying_time);
-    gpr_timespec restart_verifying_time =
-        gpr_time_add(connect_deadline, grace_time);
-    int is_after_deadline = gpr_time_cmp(now, connect_deadline) > 0;
-    if (gpr_time_cmp(now, finish_time) > 0) {
-      break;
-    }
-    gpr_log(GPR_DEBUG, "now=%lld.%09d connect_deadline=%lld.%09d",
-            (long long)now.tv_sec, (int)now.tv_nsec,
-            (long long)connect_deadline.tv_sec, (int)connect_deadline.tv_nsec);
-    if (is_after_deadline && gpr_time_cmp(now, restart_verifying_time) <= 0) {
-      /* allow some slack before insisting that things be done */
-    } else {
-      GPR_ASSERT(g_connections_complete ==
-                 connections_complete_before + is_after_deadline);
-    }
-    gpr_timespec polling_deadline = GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10);
-    if (!grpc_timer_check(&exec_ctx, now, &polling_deadline)) {
-      grpc_pollset_work(&exec_ctx, g_pollset, &worker, now, polling_deadline);
-    }
-    gpr_mu_unlock(g_mu);
-    grpc_exec_ctx_flush(&exec_ctx);
-    gpr_mu_lock(g_mu);
-  }
-  gpr_mu_unlock(g_mu);
-
-  grpc_exec_ctx_finish(&exec_ctx);
-
-  close(svr_fd);
-  for (i = 0; i < NUM_CLIENT_CONNECTS; ++i) {
-    close(client_fd[i]);
-  }
-}
-
 static void destroy_pollset(grpc_exec_ctx *exec_ctx, void *p, bool success) {
   grpc_pollset_destroy(p);
 }
@@ -287,7 +195,6 @@ int main(int argc, char **argv) {
   test_succeeds();
   gpr_log(GPR_ERROR, "End of first test");
   test_fails();
-  test_times_out();
   grpc_pollset_set_destroy(g_pollset_set);
   grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
   grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);

+ 9 - 4
test/cpp/end2end/end2end_test.cc

@@ -437,9 +437,10 @@ class End2endServerTryCancelTest : public End2endTest {
         break;
 
       case CANCEL_AFTER_PROCESSING:
-        // Server cancelled after writing all messages. Client must have read
-        // all messages
-        EXPECT_EQ(num_msgs_read, kNumResponseStreamsMsgs);
+        // Even though the server cancelled after writing all messages, the RPC
+        // may be cancelled before the client gets a chance to read all the
+        // messages.
+        EXPECT_LE(num_msgs_read, kNumResponseStreamsMsgs);
         break;
 
       default: {
@@ -519,7 +520,11 @@ class End2endServerTryCancelTest : public End2endTest {
 
       case CANCEL_AFTER_PROCESSING:
         EXPECT_EQ(num_msgs_sent, num_messages);
-        EXPECT_EQ(num_msgs_read, num_msgs_sent);
+
+        // The server cancelled after reading the last message and after writing
+        // the message to the client. However, the RPC cancellation might have
+        // taken effect before the client actually read the response.
+        EXPECT_LE(num_msgs_read, num_msgs_sent);
         break;
 
       default:

+ 28 - 15
test/cpp/interop/metrics_client.cc

@@ -1,6 +1,6 @@
 /*
  *
- * Copyright 2015, Google Inc.
+ * Copyright 2015-2016, Google Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -37,39 +37,45 @@
 #include <gflags/gflags.h>
 #include <grpc++/grpc++.h>
 
-#include "test/cpp/util/metrics_server.h"
-#include "test/cpp/util/test_config.h"
 #include "src/proto/grpc/testing/metrics.grpc.pb.h"
 #include "src/proto/grpc/testing/metrics.pb.h"
+#include "test/cpp/util/metrics_server.h"
+#include "test/cpp/util/test_config.h"
 
 DEFINE_string(metrics_server_address, "",
               "The metrics server addresses in the fomrat <hostname>:<port>");
+DEFINE_bool(total_only, false,
+            "If true, this prints only the total value of all gauges");
+
+const int kDeadlineSecs = 10;
 
 using grpc::testing::EmptyMessage;
 using grpc::testing::GaugeResponse;
 using grpc::testing::MetricsService;
 using grpc::testing::MetricsServiceImpl;
 
-void PrintMetrics(const grpc::string& server_address) {
-  gpr_log(GPR_INFO, "creating a channel to %s", server_address.c_str());
-  std::shared_ptr<grpc::Channel> channel(
-      grpc::CreateChannel(server_address, grpc::InsecureChannelCredentials()));
-
-  std::unique_ptr<MetricsService::Stub> stub(MetricsService::NewStub(channel));
-
+// Prints the values of all Gauges (unless total_only is set to 'true' in which
+// case this only prints the sum of all gauge values).
+bool PrintMetrics(std::unique_ptr<MetricsService::Stub> stub, bool total_only) {
   grpc::ClientContext context;
   EmptyMessage message;
 
+  std::chrono::system_clock::time_point deadline =
+      std::chrono::system_clock::now() + std::chrono::seconds(kDeadlineSecs);
+
+  context.set_deadline(deadline);
+
   std::unique_ptr<grpc::ClientReader<GaugeResponse>> reader(
       stub->GetAllGauges(&context, message));
 
   GaugeResponse gauge_response;
   long overall_qps = 0;
-  int idx = 0;
   while (reader->Read(&gauge_response)) {
     if (gauge_response.value_case() == GaugeResponse::kLongValue) {
-      gpr_log(GPR_INFO, "Gauge: %d (%s: %ld)", ++idx,
-              gauge_response.name().c_str(), gauge_response.long_value());
+      if (!total_only) {
+        gpr_log(GPR_INFO, "%s: %ld", gauge_response.name().c_str(),
+                gauge_response.long_value());
+      }
       overall_qps += gauge_response.long_value();
     } else {
       gpr_log(GPR_INFO, "Gauge %s is not a long value",
@@ -77,12 +83,14 @@ void PrintMetrics(const grpc::string& server_address) {
     }
   }
 
-  gpr_log(GPR_INFO, "OVERALL: %ld", overall_qps);
+  gpr_log(GPR_INFO, "%ld", overall_qps);
 
   const grpc::Status status = reader->Finish();
   if (!status.ok()) {
     gpr_log(GPR_ERROR, "Error in getting metrics from the client");
   }
+
+  return status.ok();
 }
 
 int main(int argc, char** argv) {
@@ -97,7 +105,12 @@ int main(int argc, char** argv) {
     return 1;
   }
 
-  PrintMetrics(FLAGS_metrics_server_address);
+  std::shared_ptr<grpc::Channel> channel(grpc::CreateChannel(
+      FLAGS_metrics_server_address, grpc::InsecureChannelCredentials()));
+
+  if (!PrintMetrics(MetricsService::NewStub(channel), FLAGS_total_only)) {
+    return 1;
+  }
 
   return 0;
 }

+ 1 - 1
test/cpp/util/metrics_server.cc

@@ -57,7 +57,7 @@ long Gauge::Get() {
 grpc::Status MetricsServiceImpl::GetAllGauges(
     ServerContext* context, const EmptyMessage* request,
     ServerWriter<GaugeResponse>* writer) {
-  gpr_log(GPR_INFO, "GetAllGauges called");
+  gpr_log(GPR_DEBUG, "GetAllGauges called");
 
   std::lock_guard<std::mutex> lock(mu_);
   for (auto it = gauges_.begin(); it != gauges_.end(); it++) {

+ 56 - 26
test/cpp/util/test_credentials_provider.cc

@@ -34,6 +34,8 @@
 
 #include "test/cpp/util/test_credentials_provider.h"
 
+#include <unordered_map>
+
 #include <grpc/support/sync.h>
 #include <grpc++/impl/sync.h>
 
@@ -48,12 +50,36 @@ using grpc::InsecureServerCredentials;
 using grpc::ServerCredentials;
 using grpc::SslCredentialsOptions;
 using grpc::SslServerCredentialsOptions;
-using grpc::testing::CredentialsProvider;
+using grpc::testing::CredentialTypeProvider;
+
+// Provide test credentials. Thread-safe.
+class CredentialsProvider {
+ public:
+  virtual ~CredentialsProvider() {}
+
+  virtual void AddSecureType(
+      const grpc::string& type,
+      std::unique_ptr<CredentialTypeProvider> type_provider) = 0;
+  virtual std::shared_ptr<ChannelCredentials> GetChannelCredentials(
+      const grpc::string& type, ChannelArguments* args) = 0;
+  virtual std::shared_ptr<ServerCredentials> GetServerCredentials(
+      const grpc::string& type) = 0;
+  virtual std::vector<grpc::string> GetSecureCredentialsTypeList() = 0;
+};
 
 class DefaultCredentialsProvider : public CredentialsProvider {
  public:
   ~DefaultCredentialsProvider() override {}
 
+  void AddSecureType(
+      const grpc::string& type,
+      std::unique_ptr<CredentialTypeProvider> type_provider) override {
+    // This clobbers any existing entry for type, except the defaults, which
+    // can't be clobbered.
+    grpc::unique_lock<grpc::mutex> lock(mu_);
+    added_secure_types_[type] = std::move(type_provider);
+  }
+
   std::shared_ptr<ChannelCredentials> GetChannelCredentials(
       const grpc::string& type, ChannelArguments* args) override {
     if (type == grpc::testing::kInsecureCredentialsType) {
@@ -63,9 +89,14 @@ class DefaultCredentialsProvider : public CredentialsProvider {
       args->SetSslTargetNameOverride("foo.test.google.fr");
       return SslCredentials(ssl_opts);
     } else {
-      gpr_log(GPR_ERROR, "Unsupported credentials type %s.", type.c_str());
+      grpc::unique_lock<grpc::mutex> lock(mu_);
+      auto it(added_secure_types_.find(type));
+      if (it == added_secure_types_.end()) {
+        gpr_log(GPR_ERROR, "Unsupported credentials type %s.", type.c_str());
+        return nullptr;
+      }
+      return it->second->GetChannelCredentials(args);
     }
-    return nullptr;
   }
 
   std::shared_ptr<ServerCredentials> GetServerCredentials(
@@ -80,33 +111,38 @@ class DefaultCredentialsProvider : public CredentialsProvider {
       ssl_opts.pem_key_cert_pairs.push_back(pkcp);
       return SslServerCredentials(ssl_opts);
     } else {
-      gpr_log(GPR_ERROR, "Unsupported credentials type %s.", type.c_str());
+      grpc::unique_lock<grpc::mutex> lock(mu_);
+      auto it(added_secure_types_.find(type));
+      if (it == added_secure_types_.end()) {
+        gpr_log(GPR_ERROR, "Unsupported credentials type %s.", type.c_str());
+        return nullptr;
+      }
+      return it->second->GetServerCredentials();
     }
-    return nullptr;
   }
   std::vector<grpc::string> GetSecureCredentialsTypeList() override {
     std::vector<grpc::string> types;
     types.push_back(grpc::testing::kTlsCredentialsType);
+    grpc::unique_lock<grpc::mutex> lock(mu_);
+    for (const auto& type_pair : added_secure_types_) {
+      types.push_back(type_pair.first);
+    }
     return types;
   }
+
+ private:
+  grpc::mutex mu_;
+  std::unordered_map<grpc::string, std::unique_ptr<CredentialTypeProvider> >
+      added_secure_types_;
 };
 
-gpr_once g_once_init_provider_mu = GPR_ONCE_INIT;
-grpc::mutex* g_provider_mu = nullptr;
+gpr_once g_once_init_provider = GPR_ONCE_INIT;
 CredentialsProvider* g_provider = nullptr;
 
-void InitProviderMu() { g_provider_mu = new grpc::mutex; }
-
-grpc::mutex& GetMu() {
-  gpr_once_init(&g_once_init_provider_mu, &InitProviderMu);
-  return *g_provider_mu;
-}
+void CreateDefaultProvider() { g_provider = new DefaultCredentialsProvider; }
 
 CredentialsProvider* GetProvider() {
-  grpc::unique_lock<grpc::mutex> lock(GetMu());
-  if (g_provider == nullptr) {
-    g_provider = new DefaultCredentialsProvider;
-  }
+  gpr_once_init(&g_once_init_provider, &CreateDefaultProvider);
   return g_provider;
 }
 
@@ -115,15 +151,9 @@ CredentialsProvider* GetProvider() {
 namespace grpc {
 namespace testing {
 
-// Note that it is not thread-safe to set a provider while concurrently using
-// the previously set provider, as this deletes and replaces it. nullptr may be
-// given to reset to the default.
-void SetTestCredentialsProvider(std::unique_ptr<CredentialsProvider> provider) {
-  grpc::unique_lock<grpc::mutex> lock(GetMu());
-  if (g_provider != nullptr) {
-    delete g_provider;
-  }
-  g_provider = provider.release();
+void AddSecureType(const grpc::string& type,
+                   std::unique_ptr<CredentialTypeProvider> type_provider) {
+  GetProvider()->AddSecureType(type, std::move(type_provider));
 }
 
 std::shared_ptr<ChannelCredentials> GetChannelCredentials(

+ 10 - 9
test/cpp/util/test_credentials_provider.h

@@ -46,20 +46,21 @@ namespace testing {
 const char kInsecureCredentialsType[] = "INSECURE_CREDENTIALS";
 const char kTlsCredentialsType[] = "TLS_CREDENTIALS";
 
-class CredentialsProvider {
+// Provide test credentials of a particular type.
+class CredentialTypeProvider {
  public:
-  virtual ~CredentialsProvider() {}
+  virtual ~CredentialTypeProvider() {}
 
   virtual std::shared_ptr<ChannelCredentials> GetChannelCredentials(
-      const grpc::string& type, ChannelArguments* args) = 0;
-  virtual std::shared_ptr<ServerCredentials> GetServerCredentials(
-      const grpc::string& type) = 0;
-  virtual std::vector<grpc::string> GetSecureCredentialsTypeList() = 0;
+      ChannelArguments* args) = 0;
+  virtual std::shared_ptr<ServerCredentials> GetServerCredentials() = 0;
 };
 
-// Set the CredentialsProvider used by the other functions in this file. If this
-// is not set, a default provider will be used.
-void SetTestCredentialsProvider(std::unique_ptr<CredentialsProvider> provider);
+// Add a secure type in addition to the defaults above
+// (kInsecureCredentialsType, kTlsCredentialsType) that can be returned from the
+// functions below.
+void AddSecureType(const grpc::string& type,
+                   std::unique_ptr<CredentialTypeProvider> type_provider);
 
 // Provide channel credentials according to the given type. Alter the channel
 // arguments if needed.

+ 6 - 0
test/distrib/csharp/DistribTest.sln

@@ -8,13 +8,19 @@ EndProject
 Global
 	GlobalSection(SolutionConfigurationPlatforms) = preSolution
 		Debug|Any CPU = Debug|Any CPU
+		Debug|x64 = Debug|x64
 		Release|Any CPU = Release|Any CPU
+		Release|x64 = Release|x64
 	EndGlobalSection
 	GlobalSection(ProjectConfigurationPlatforms) = postSolution
 		{A3E61CC3-3710-49A3-A830-A0066EDBCE2F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
 		{A3E61CC3-3710-49A3-A830-A0066EDBCE2F}.Debug|Any CPU.Build.0 = Debug|Any CPU
+		{A3E61CC3-3710-49A3-A830-A0066EDBCE2F}.Debug|x64.ActiveCfg = Debug|x64
+		{A3E61CC3-3710-49A3-A830-A0066EDBCE2F}.Debug|x64.Build.0 = Debug|x64
 		{A3E61CC3-3710-49A3-A830-A0066EDBCE2F}.Release|Any CPU.ActiveCfg = Release|Any CPU
 		{A3E61CC3-3710-49A3-A830-A0066EDBCE2F}.Release|Any CPU.Build.0 = Release|Any CPU
+		{A3E61CC3-3710-49A3-A830-A0066EDBCE2F}.Release|x64.ActiveCfg = Release|x64
+		{A3E61CC3-3710-49A3-A830-A0066EDBCE2F}.Release|x64.Build.0 = Release|x64
 	EndGlobalSection
 	GlobalSection(SolutionProperties) = preSolution
 		HideSolutionNode = FALSE

+ 20 - 0
test/distrib/csharp/DistribTest/DistribTest.csproj

@@ -32,6 +32,26 @@
     <ErrorReport>prompt</ErrorReport>
     <WarningLevel>4</WarningLevel>
   </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)' == 'Debug|x64'">
+    <DebugSymbols>true</DebugSymbols>
+    <OutputPath>bin\x64\Debug\</OutputPath>
+    <DefineConstants>DEBUG;TRACE</DefineConstants>
+    <DebugType>full</DebugType>
+    <PlatformTarget>x64</PlatformTarget>
+    <ErrorReport>prompt</ErrorReport>
+    <CodeAnalysisRuleSet>MinimumRecommendedRules.ruleset</CodeAnalysisRuleSet>
+    <Prefer32Bit>true</Prefer32Bit>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)' == 'Release|x64'">
+    <OutputPath>bin\x64\Release\</OutputPath>
+    <DefineConstants>TRACE</DefineConstants>
+    <Optimize>true</Optimize>
+    <DebugType>pdbonly</DebugType>
+    <PlatformTarget>x64</PlatformTarget>
+    <ErrorReport>prompt</ErrorReport>
+    <CodeAnalysisRuleSet>MinimumRecommendedRules.ruleset</CodeAnalysisRuleSet>
+    <Prefer32Bit>true</Prefer32Bit>
+  </PropertyGroup>
   <ItemGroup>
     <Reference Include="BouncyCastle.Crypto">
       <HintPath>..\packages\BouncyCastle.1.7.0\lib\Net40-Client\BouncyCastle.Crypto.dll</HintPath>

+ 49 - 0
test/distrib/csharp/run_distrib_test.bat

@@ -0,0 +1,49 @@
+@rem Copyright 2016, Google Inc.
+@rem All rights reserved.
+@rem
+@rem Redistribution and use in source and binary forms, with or without
+@rem modification, are permitted provided that the following conditions are
+@rem met:
+@rem
+@rem     * Redistributions of source code must retain the above copyright
+@rem notice, this list of conditions and the following disclaimer.
+@rem     * Redistributions in binary form must reproduce the above
+@rem copyright notice, this list of conditions and the following disclaimer
+@rem in the documentation and/or other materials provided with the
+@rem distribution.
+@rem     * Neither the name of Google Inc. nor the names of its
+@rem contributors may be used to endorse or promote products derived from
+@rem this software without specific prior written permission.
+@rem
+@rem THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+@rem "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+@rem LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+@rem A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+@rem OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+@rem SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+@rem LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+@rem DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+@rem THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+@rem (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+@rem OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+@rem enter this directory
+cd /d %~dp0
+
+@rem extract input artifacts
+powershell -Command "Add-Type -Assembly 'System.IO.Compression.FileSystem'; [System.IO.Compression.ZipFile]::ExtractToDirectory('../../../input_artifacts/csharp_nugets.zip', 'TestNugetFeed');"
+
+update_version.sh auto
+
+set NUGET=C:\nuget\nuget.exe
+%NUGET% restore || goto :error
+
+@call build_vs2015.bat DistribTest.sln %MSBUILD_EXTRA_ARGS% || goto :error
+
+%DISTRIBTEST_OUTPATH%\DistribTest.exe || goto :error
+
+goto :EOF
+
+:error
+echo Failed!
+exit /b %errorlevel%

+ 1 - 3
test/distrib/csharp/run_distrib_test.sh

@@ -34,9 +34,7 @@ cd $(dirname $0)
 
 unzip -o "$EXTERNAL_GIT_ROOT/input_artifacts/csharp_nugets.zip" -d TestNugetFeed
 
-# Extract the version number from Grpc nuget package name.
-CSHARP_VERSION=$(ls TestNugetFeed | grep '^Grpc\.[0-9].*\.nupkg$' | sed s/^Grpc\.// | sed s/\.nupkg$//)
-./update_version.sh $CSHARP_VERSION
+./update_version.sh auto
 
 nuget restore
 

+ 9 - 1
test/distrib/csharp/update_version.sh

@@ -32,5 +32,13 @@ set -e
 
 cd $(dirname $0)
 
+CSHARP_VERSION="$1"
+if [ "$CSHARP_VERSION" == "auto" ]
+then
+  # autodetect C# version
+  CSHARP_VERSION=$(ls TestNugetFeed | grep '^Grpc\.[0-9].*\.nupkg$' | sed s/^Grpc\.// | sed s/\.nupkg$//)
+  echo "Autodetected nuget ${CSHARP_VERSION}"
+fi
+
 # Replaces version placeholder with value provided as first argument.
-sed -ibak "s/__GRPC_NUGET_VERSION__/$1/g" DistribTest/packages.config DistribTest/DistribTest.csproj
+sed -ibak "s/__GRPC_NUGET_VERSION__/${CSHARP_VERSION}/g" DistribTest/packages.config DistribTest/DistribTest.csproj
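For clarity, here is the same autodetection sketched in Python; the helper name is hypothetical, the shell pipeline above is the actual implementation, and a TestNugetFeed directory containing a Grpc.<version>.nupkg package is assumed:

    import os
    import re

    def autodetect_csharp_version(feed_dir='TestNugetFeed'):
      # Mirror the shell pipeline: find Grpc.<version>.nupkg and strip the
      # 'Grpc.' prefix and '.nupkg' suffix to recover the version string.
      for name in os.listdir(feed_dir):
        m = re.match(r'^Grpc\.(\d.*)\.nupkg$', name)
        if m:
          return m.group(1)
      return None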

+ 5 - 0
tools/dockerfile/grpc_interop_stress_cxx/Dockerfile

@@ -59,6 +59,8 @@ RUN apt-get update && apt-get install -y \
   wget \
   zip && apt-get clean
 
+RUN easy_install -U pip
+
 # Prepare ccache
 RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
 RUN ln -s /usr/bin/ccache /usr/local/bin/g++
@@ -71,5 +73,8 @@ RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
 # C++ dependencies
 RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang
 
+# Google Cloud Platform API libraries (for BigQuery)
+RUN pip install --upgrade google-api-python-client
+
 # Define the default command.
 CMD ["bash"]

+ 1 - 1
tools/dockerfile/grpc_interop_stress_cxx/build_interop_stress.sh

@@ -42,4 +42,4 @@ cd /var/local/git/grpc
 make install-certs
 
 # build C++ interop stress client, interop client and server
-make stress_test interop_client interop_server
+make stress_test metrics_client interop_client interop_server

+ 187 - 0
tools/gcp/stress_test/run_client.py

@@ -0,0 +1,187 @@
+#!/usr/bin/env python2.7
+# Copyright 2015-2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import datetime
+import os
+import re
+import select
+import subprocess
+import sys
+import time
+
+from stress_test_utils import EventType
+from stress_test_utils import BigQueryHelper
+
+
+# TODO (sree): Write a python grpc client to directly query the metrics instead
+# of calling metrics_client
+def _get_qps(metrics_cmd):
+  qps = 0
+  try:
+    # Note: gpr_log() writes even non-error messages to the stderr stream, so
+    # it is important that we set stderr=subprocess.STDOUT
+    p = subprocess.Popen(args=metrics_cmd,
+                         stdout=subprocess.PIPE,
+                         stderr=subprocess.STDOUT)
+    # communicate() drains the pipe before waiting, which avoids a deadlock
+    # if the child fills the stdout pipe buffer.
+    (out_str, err_str) = p.communicate()
+    retcode = p.returncode
+    if retcode != 0:
+      print 'Error in reading metrics information'
+      print 'Output: ', out_str
+    else:
+      # The overall qps is printed at the end of the line
+      m = re.search('\d+$', out_str)
+      qps = int(m.group()) if m else 0
+  except Exception as ex:
+    print 'Exception while reading metrics information: ' + str(ex)
+  return qps
+
+
+def run_client():
+  """This is a wrapper around the stress test client and performs the following:
+      1) Create the following two tables in Big Query:
+         (i) Summary table: To record events like the test started, completed
+                            successfully or failed
+        (ii) Qps table: To periodically record the QPS sent by this client
+      2) Start the stress test client and add a row in the Big Query summary
+         table
+      3) Once every few seconds (as specified by the poll_interval_secs) poll
+         the status of the stress test client process and perform the
+         following:
+          3.1) If the process is still running, get the current qps by invoking
+               the metrics client program and add a row in the Big Query
+               Qps table. Sleep for a duration specified by poll_interval_secs
+          3.2) If the process exited successfully, add a row in the Big Query
+               Summary table and exit
+          3.3) If the process failed, add a row in Big Query summary table and
+               wait forever.
+               NOTE: This script typically runs inside a GKE pod which means
+               that the pod gets destroyed when the script exits. However, in
+               case the stress test client fails, we would not want the pod to
+               be destroyed (since we might want to connect to the pod for
+               examining logs). This is the reason why the script waits forever
+               in case of failures
+  """
+  env = dict(os.environ)
+  image_type = env['STRESS_TEST_IMAGE_TYPE']
+  image_name = env['STRESS_TEST_IMAGE']
+  args_str = env['STRESS_TEST_ARGS_STR']
+  metrics_client_image = env['METRICS_CLIENT_IMAGE']
+  metrics_client_args_str = env['METRICS_CLIENT_ARGS_STR']
+  run_id = env['RUN_ID']
+  pod_name = env['POD_NAME']
+  logfile_name = env.get('LOGFILE_NAME')
+  poll_interval_secs = float(env['POLL_INTERVAL_SECS'])
+  project_id = env['GCP_PROJECT_ID']
+  dataset_id = env['DATASET_ID']
+  summary_table_id = env['SUMMARY_TABLE_ID']
+  qps_table_id = env['QPS_TABLE_ID']
+
+  bq_helper = BigQueryHelper(run_id, image_type, pod_name, project_id,
+                             dataset_id, summary_table_id, qps_table_id)
+  bq_helper.initialize()
+
+  # Create BigQuery Dataset and Tables: Summary Table and Metrics Table
+  if not bq_helper.setup_tables():
+    print 'Error in creating BigQuery tables'
+    return
+
+  start_time = datetime.datetime.now()
+
+  logfile = None
+  details = 'Logging to stdout'
+  if logfile_name is not None:
+    print 'Opening logfile: %s ...' % logfile_name
+    details = 'Logfile: %s' % logfile_name
+    logfile = open(logfile_name, 'w')
+
+  # Update status that the test is starting (in the status table)
+  bq_helper.insert_summary_row(EventType.STARTING, details)
+
+  metrics_cmd = [metrics_client_image] + metrics_client_args_str.split()
+  stress_cmd = [image_name] + args_str.split()
+
+  print 'Launching process %s ...' % stress_cmd
+  stress_p = subprocess.Popen(args=stress_cmd,
+                              stdout=logfile,
+                              stderr=subprocess.STDOUT)
+
+  qps_history = [1, 1, 1]  # Maintain the last 3 qps readings
+  qps_history_idx = 0  # Index into the qps_history list
+
+  is_error = False
+  while True:
+    # Check whether stress_client is still running. If it has exited, record
+    # the outcome in the BigQuery summary table and stop polling.
+    if stress_p.poll() is not None:
+      end_time = datetime.datetime.now().isoformat()
+      event_type = EventType.SUCCESS
+      details = 'End time: %s' % end_time
+      if stress_p.returncode != 0:
+        event_type = EventType.FAILURE
+        details = 'Return code = %d. End time: %s' % (stress_p.returncode,
+                                                      end_time)
+        is_error = True
+      bq_helper.insert_summary_row(event_type, details)
+      print details
+      break
+
+    # Stress client still running. Get metrics
+    qps = _get_qps(metrics_cmd)
+    qps_recorded_at = datetime.datetime.now().isoformat()
+    print 'qps: %d at %s' % (qps, qps_recorded_at)
+
+    # If QPS has been zero for the last 3 iterations, flag it as an error and exit
+    qps_history[qps_history_idx] = qps
+    qps_history_idx = (qps_history_idx + 1) % len(qps_history)
+    if sum(qps_history) == 0:
+      details = 'QPS has been zero for the last %d seconds (as of %s)' % (
+          poll_interval_secs * 3, qps_recorded_at)
+      is_error = True
+      bq_helper.insert_summary_row(EventType.FAILURE, details)
+      print details
+      break
+
+    # Upload qps metrics to BigQuery
+    bq_helper.insert_qps_row(qps, qps_recorded_at)
+
+    time.sleep(poll_interval_secs)
+
+  if is_error:
+    print 'Waiting indefinitely...'
+    select.select([], [], [])
+
+  print 'Completed'
+  return
+
+
+if __name__ == '__main__':
+  run_client()
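
_get_qps() above relies on the metrics client printing the overall QPS as the last integer of its merged stdout/stderr output (hence the --total_only flag added earlier in this commit). A minimal sketch of that parsing contract, with a hypothetical gpr_log-style sample line:

    import re

    def parse_total_qps(metrics_output):
      # The overall QPS is the trailing integer of the merged stdout/stderr
      # output produced by the metrics client.
      m = re.search(r'\d+$', metrics_output.strip())
      return int(m.group()) if m else 0

    # Hypothetical sample line; the exact gpr_log prefix may differ.
    sample = 'I0216 12:00:00.000000 metrics_client.cc:86] 2500'
    assert parse_total_qps(sample) == 2500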

+ 120 - 0
tools/gcp/stress_test/run_server.py

@@ -0,0 +1,120 @@
+#!/usr/bin/env python2.7
+# Copyright 2015-2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import datetime
+import os
+import select
+import subprocess
+import sys
+import time
+
+from stress_test_utils import BigQueryHelper
+from stress_test_utils import EventType
+
+
+def run_server():
+  """This is a wrapper around the interop server and performs the following:
+      1) Create a 'Summary table' in Big Query to record events like the server
+         started, completed successfully or failed. NOTE: This also creates
+         another table called the QPS table which is currently NOT needed on the
+         server (it is needed on the stress test clients)
+      2) Start the server process and add a row in Big Query summary table
+      3) Wait for the server process to terminate. The server process does not
+         terminate unless there is an error.
+         If the server process terminated with a failure, add a row in Big Query
+         and wait forever.
+         NOTE: This script typically runs inside a GKE pod which means that the
+         pod gets destroyed when the script exits. However, in case the server
+         process fails, we would not want the pod to be destroyed (since we
+         might want to connect to the pod for examining logs). This is the
+         reason why the script waits forever in case of failures.
+  """
+
+  # Read the parameters from environment variables
+  env = dict(os.environ)
+
+  run_id = env['RUN_ID']  # The unique run id for this test
+  image_type = env['STRESS_TEST_IMAGE_TYPE']
+  image_name = env['STRESS_TEST_IMAGE']
+  args_str = env['STRESS_TEST_ARGS_STR']
+  pod_name = env['POD_NAME']
+  project_id = env['GCP_PROJECT_ID']
+  dataset_id = env['DATASET_ID']
+  summary_table_id = env['SUMMARY_TABLE_ID']
+  qps_table_id = env['QPS_TABLE_ID']
+
+  logfile_name = env.get('LOGFILE_NAME')
+
+  print('pod_name: %s, project_id: %s, run_id: %s, dataset_id: %s, '
+        'summary_table_id: %s, qps_table_id: %s' % (
+            pod_name, project_id, run_id, dataset_id, summary_table_id,
+            qps_table_id))
+
+  bq_helper = BigQueryHelper(run_id, image_type, pod_name, project_id,
+                             dataset_id, summary_table_id, qps_table_id)
+  bq_helper.initialize()
+
+  # Create BigQuery Dataset and Tables: Summary Table and Metrics Table
+  if not bq_helper.setup_tables():
+    print 'Error in creating BigQuery tables'
+    return
+
+  start_time = datetime.datetime.now()
+
+  logfile = None
+  details = 'Logging to stdout'
+  if logfile_name is not None:
+    print 'Opening log file: ', logfile_name
+    logfile = open(logfile_name, 'w')
+    details = 'Logfile: %s' % logfile_name
+
+  # Update status that the test is starting (in the status table)
+  bq_helper.insert_summary_row(EventType.STARTING, details)
+
+  stress_cmd = [image_name] + args_str.split()
+
+  print 'Launching process %s ...' % stress_cmd
+  stress_p = subprocess.Popen(args=stress_cmd,
+                              stdout=logfile,
+                              stderr=subprocess.STDOUT)
+
+  returncode = stress_p.wait()
+  if returncode != 0:
+    end_time = datetime.datetime.now().isoformat()
+    event_type = EventType.FAILURE
+    details = 'Returncode: %d; End time: %s' % (returncode, end_time)
+    bq_helper.insert_summary_row(event_type, details)
+    print 'Waiting indefinitely...'
+    select.select([], [], [])
+  return returncode
+
+
+if __name__ == '__main__':
+  run_server()
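
Both wrappers keep a failed pod alive, so its logs stay reachable, by blocking on select() with empty descriptor lists. A minimal sketch of the pattern; Linux behavior is assumed, since on some other platforms select() rejects empty fd sets:

    import select

    def wait_forever():
      # On Linux, select() with three empty fd lists and no timeout blocks
      # indefinitely, keeping the process (and hence the GKE pod) alive so
      # that its logs remain reachable.
      select.select([], [], [])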

+ 197 - 0
tools/gcp/stress_test/stress_test_utils.py

@@ -0,0 +1,197 @@
+#!/usr/bin/env python2.7
+# Copyright 2015-2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import datetime
+import json
+import os
+import re
+import select
+import subprocess
+import sys
+import time
+
+# Import big_query_utils module
+bq_utils_dir = os.path.abspath(os.path.join(
+    os.path.dirname(__file__), '../utils'))
+sys.path.append(bq_utils_dir)
+import big_query_utils as bq_utils
+
+
+class EventType:
+  STARTING = 'STARTING'
+  SUCCESS = 'SUCCESS'
+  FAILURE = 'FAILURE'
+
+
+class BigQueryHelper:
+  """Helper class for the stress test wrappers to interact with BigQuery.
+  """
+
+  def __init__(self, run_id, image_type, pod_name, project_id, dataset_id,
+               summary_table_id, qps_table_id):
+    self.run_id = run_id
+    self.image_type = image_type
+    self.pod_name = pod_name
+    self.project_id = project_id
+    self.dataset_id = dataset_id
+    self.summary_table_id = summary_table_id
+    self.qps_table_id = qps_table_id
+
+  def initialize(self):
+    self.bq = bq_utils.create_big_query()
+
+  def setup_tables(self):
+    return bq_utils.create_dataset(self.bq, self.project_id, self.dataset_id) \
+        and self.__create_summary_table() \
+        and self.__create_qps_table()
+
+  def insert_summary_row(self, event_type, details):
+    row_values_dict = {
+        'run_id': self.run_id,
+        'image_type': self.image_type,
+        'pod_name': self.pod_name,
+        'event_date': datetime.datetime.now().isoformat(),
+        'event_type': event_type,
+        'details': details
+    }
+    # row_unique_id is something that uniquely identifies the row (BigQuery uses
+    # it for duplicate detection).
+    row_unique_id = '%s_%s_%s' % (self.run_id, self.pod_name, event_type)
+    row = bq_utils.make_row(row_unique_id, row_values_dict)
+    return bq_utils.insert_rows(self.bq, self.project_id, self.dataset_id,
+                                self.summary_table_id, [row])
+
+  def insert_qps_row(self, qps, recorded_at):
+    row_values_dict = {
+        'run_id': self.run_id,
+        'pod_name': self.pod_name,
+        'recorded_at': recorded_at,
+        'qps': qps
+    }
+
+    # row_unique_id is something that uniquely identifies the row (BigQuery uses
+    # it for duplicate detection).
+    row_unique_id = '%s_%s_%s' % (self.run_id, self.pod_name, recorded_at)
+    row = bq_utils.make_row(row_unique_id, row_values_dict)
+    return bq_utils.insert_rows(self.bq, self.project_id, self.dataset_id,
+                                self.qps_table_id, [row])
+
+  def check_if_any_tests_failed(self, num_query_retries=3):
+    query = ('SELECT event_type FROM %s.%s WHERE run_id = \'%s\' AND '
+             'event_type="%s"') % (self.dataset_id, self.summary_table_id,
+                                   self.run_id, EventType.FAILURE)
+    query_job = bq_utils.sync_query_job(self.bq, self.project_id, query)
+    page = self.bq.jobs().getQueryResults(**query_job['jobReference']).execute(
+        num_retries=num_query_retries)
+    num_failures = int(page['totalRows'])
+    print 'num failures: ', num_failures
+    return num_failures > 0
+
+  def print_summary_records(self, num_query_retries=3):
+    line = '-' * 120
+    print line
+    print 'Summary records'
+    print 'Run Id: ', self.run_id
+    print 'Dataset Id: ', self.dataset_id
+    print line
+    query = ('SELECT pod_name, image_type, event_type, event_date, details'
+             ' FROM %s.%s WHERE run_id = \'%s\' ORDER by event_date;') % (
+                 self.dataset_id, self.summary_table_id, self.run_id)
+    query_job = bq_utils.sync_query_job(self.bq, self.project_id, query)
+
+    print '{:<25} {:<12} {:<12} {:<30} {}'.format(
+        'Pod name', 'Image type', 'Event type', 'Date', 'Details')
+    print line
+    page_token = None
+    while True:
+      page = self.bq.jobs().getQueryResults(
+          pageToken=page_token,
+          **query_job['jobReference']).execute(num_retries=num_query_retries)
+      rows = page.get('rows', [])
+      for row in rows:
+        print '{:<25} {:<12} {:<12} {:<30} {}'.format(
+            row['f'][0]['v'], row['f'][1]['v'], row['f'][2]['v'],
+            row['f'][3]['v'], row['f'][4]['v'])
+      page_token = page.get('pageToken')
+      if not page_token:
+        break
+
+  def print_qps_records(self, num_query_retries=3):
+    line = '-' * 80
+    print line
+    print 'QPS Summary'
+    print 'Run Id: ', self.run_id
+    print 'Dataset Id: ', self.dataset_id
+    print line
+    query = (
+        'SELECT pod_name, recorded_at, qps FROM %s.%s WHERE run_id = \'%s\' '
+        'ORDER by recorded_at;') % (self.dataset_id, self.qps_table_id,
+                                    self.run_id)
+    query_job = bq_utils.sync_query_job(self.bq, self.project_id, query)
+    print '{:<25} {:30} {}'.format('Pod name', 'Recorded at', 'Qps')
+    print line
+    page_token = None
+    while True:
+      page = self.bq.jobs().getQueryResults(
+          pageToken=page_token,
+          **query_job['jobReference']).execute(num_retries=num_query_retries)
+      rows = page.get('rows', [])
+      for row in rows:
+        print '{:<25} {:30} {}'.format(row['f'][0]['v'], row['f'][1]['v'],
+                                       row['f'][2]['v'])
+      page_token = page.get('pageToken')
+      if not page_token:
+        break
+
+  def __create_summary_table(self):
+    summary_table_schema = [
+        ('run_id', 'STRING', 'Test run id'),
+        ('image_type', 'STRING', 'Client or Server?'),
+        ('pod_name', 'STRING', 'GKE pod hosting this image'),
+        ('event_date', 'STRING', 'The date of this event'),
+        ('event_type', 'STRING', 'STARTING/SUCCESS/FAILURE'),
+        ('details', 'STRING', 'Any other relevant details')
+    ]
+    desc = ('The table that contains STARTING/SUCCESS/FAILURE events for '
+            'the stress test clients and servers')
+    return bq_utils.create_table(self.bq, self.project_id, self.dataset_id,
+                                 self.summary_table_id, summary_table_schema,
+                                 desc)
+
+  def __create_qps_table(self):
+    qps_table_schema = [
+        ('run_id', 'STRING', 'Test run id'),
+        ('pod_name', 'STRING', 'GKE pod hosting this image'),
+        ('recorded_at', 'STRING', 'Metrics recorded at time'),
+        ('qps', 'INTEGER', 'Queries per second')
+    ]
+    desc = 'The table that contains the qps recorded at various intervals'
+    return bq_utils.create_table(self.bq, self.project_id, self.dataset_id,
+                                 self.qps_table_id, qps_table_schema, desc)
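
A minimal usage sketch of BigQueryHelper; the run id, pod, project, dataset and table names are hypothetical stand-ins for the values the wrappers read from the pod environment:

    from stress_test_utils import BigQueryHelper, EventType

    bq_helper = BigQueryHelper('run_id_1', 'client', 'stress-client-pod-1',
                               'my-gcp-project', 'stress_dataset',
                               'summary_table', 'qps_table')
    bq_helper.initialize()        # builds the BigQuery service object
    if bq_helper.setup_tables():  # creates the dataset and both tables
      bq_helper.insert_summary_row(EventType.STARTING, 'Logging to stdout')
      bq_helper.insert_qps_row(qps=1200, recorded_at='2016-02-16T12:00:00')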

+ 140 - 0
tools/gcp/utils/big_query_utils.py

@@ -0,0 +1,140 @@
+#!/usr/bin/env python2.7
+# Copyright 2015-2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import argparse
+import json
+import uuid
+import httplib2
+
+from apiclient import discovery
+from apiclient.errors import HttpError
+from oauth2client.client import GoogleCredentials
+
+NUM_RETRIES = 3
+
+
+def create_big_query():
+  """Authenticates with cloud platform and gets a BiqQuery service object
+  """
+  creds = GoogleCredentials.get_application_default()
+  return discovery.build('bigquery', 'v2', credentials=creds)
+
+
+def create_dataset(big_query, project_id, dataset_id):
+  is_success = True
+  body = {
+      'datasetReference': {
+          'projectId': project_id,
+          'datasetId': dataset_id
+      }
+  }
+
+  try:
+    dataset_req = big_query.datasets().insert(projectId=project_id, body=body)
+    dataset_req.execute(num_retries=NUM_RETRIES)
+  except HttpError as http_error:
+    if http_error.resp.status == 409:
+      print 'Warning: The dataset %s already exists' % dataset_id
+    else:
+      # Note: For more debugging info, print "http_error.content"
+      print 'Error in creating dataset: %s. Err: %s' % (dataset_id, http_error)
+      is_success = False
+  return is_success
+
+
+def create_table(big_query, project_id, dataset_id, table_id, table_schema,
+                 description):
+  # table_schema: a list of (column name, column type, description) tuples.
+  is_success = True
+
+  body = {
+      'description': description,
+      'schema': {
+          'fields': [{
+              'name': field_name,
+              'type': field_type,
+              'description': field_description
+          } for (field_name, field_type, field_description) in table_schema]
+      },
+      'tableReference': {
+          'datasetId': dataset_id,
+          'projectId': project_id,
+          'tableId': table_id
+      }
+  }
+
+  try:
+    table_req = big_query.tables().insert(projectId=project_id,
+                                          datasetId=dataset_id,
+                                          body=body)
+    res = table_req.execute(num_retries=NUM_RETRIES)
+    print 'Successfully created %s "%s"' % (res['kind'], res['id'])
+  except HttpError as http_error:
+    if http_error.resp.status == 409:
+      print 'Warning: Table %s already exists' % table_id
+    else:
+      print 'Error in creating table: %s. Err: %s' % (table_id, http_error)
+      is_success = False
+  return is_success
+
+
+def insert_rows(big_query, project_id, dataset_id, table_id, rows_list):
+  is_success = True
+  body = {'rows': rows_list}
+  try:
+    insert_req = big_query.tabledata().insertAll(projectId=project_id,
+                                                 datasetId=dataset_id,
+                                                 tableId=table_id,
+                                                 body=body)
+    print body
+    res = insert_req.execute(num_retries=NUM_RETRIES)
+    print res
+  except HttpError as http_error:
+    print 'Error in inserting rows in the table %s' % table_id
+    is_success = False
+  return is_success
+
+
+def sync_query_job(big_query, project_id, query, timeout=5000):
+  query_data = {'query': query, 'timeoutMs': timeout}
+  query_job = None
+  try:
+    query_job = big_query.jobs().query(
+        projectId=project_id,
+        body=query_data).execute(num_retries=NUM_RETRIES)
+  except HttpError as http_error:
+    print 'Query execute job failed with error: %s' % http_error
+    print http_error.content
+  return query_job
+
+def make_row(unique_row_id, row_values_dict):
+  """row_values_dict is a dictionary of column name and column value.
+  """
+  return {'insertId': unique_row_id, 'json': row_values_dict}
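
A minimal end-to-end sketch of the helpers above; the project and table identifiers are hypothetical, and application-default credentials are assumed to be available:

    import big_query_utils as bq_utils

    bq = bq_utils.create_big_query()
    project = 'my-gcp-project'  # hypothetical project id
    if bq_utils.create_dataset(bq, project, 'demo_dataset'):
      schema = [('run_id', 'STRING', 'Test run id'),
                ('qps', 'INTEGER', 'Queries per second')]
      bq_utils.create_table(bq, project, 'demo_dataset', 'demo_table', schema,
                            'Example table')
      row = bq_utils.make_row('unique-row-1', {'run_id': 'r1', 'qps': 1000})
      bq_utils.insert_rows(bq, project, 'demo_dataset', 'demo_table', [row])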

+ 68 - 15
tools/gke/kubernetes_api.py → tools/gcp/utils/kubernetes_api.py

@@ -1,5 +1,5 @@
 #!/usr/bin/env python2.7
-# Copyright 2015, Google Inc.
+# Copyright 2015-2016, Google Inc.
 # All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
@@ -33,8 +33,9 @@ import json
 
 _REQUEST_TIMEOUT_SECS = 10
 
+
 def _make_pod_config(pod_name, image_name, container_port_list, cmd_list,
-                    arg_list):
+                     arg_list, env_dict):
   """Creates a string containing the Pod defintion as required by the Kubernetes API"""
   body = {
       'kind': 'Pod',
@@ -48,20 +49,23 @@ def _make_pod_config(pod_name, image_name, container_port_list, cmd_list,
               {
                   'name': pod_name,
                   'image': image_name,
-                  'ports': []
+                  'ports': [{'containerPort': port,
+                             'protocol': 'TCP'}
+                            for port in container_port_list],
+                  'imagePullPolicy': 'Always'
               }
           ]
       }
   }
-  # Populate the 'ports' list
-  for port in container_port_list:
-    port_entry = {'containerPort': port, 'protocol': 'TCP'}
-    body['spec']['containers'][0]['ports'].append(port_entry)
+
+  env_list = [{'name': k, 'value': v} for (k, v) in env_dict.iteritems()]
+  if len(env_list) > 0:
+    body['spec']['containers'][0]['env'] = env_list
 
   # Add the 'Command' and 'Args' attributes if they are passed.
   # Note:
   #  - 'Command' overrides the ENTRYPOINT in the Docker Image
-  #  - 'Args' override the COMMAND in Docker image (yes, it is confusing!)
+  #  - 'Args' override the CMD in Docker image (yes, it is confusing!)
   if len(cmd_list) > 0:
     body['spec']['containers'][0]['command'] = cmd_list
   if len(arg_list) > 0:
@@ -70,7 +74,7 @@ def _make_pod_config(pod_name, image_name, container_port_list, cmd_list,
 
 
 def _make_service_config(service_name, pod_name, service_port_list,
-                        container_port_list, is_headless):
+                         container_port_list, is_headless):
   """Creates a string containing the Service definition as required by the Kubernetes API.
 
   NOTE:
@@ -124,6 +128,7 @@ def _print_connection_error(msg):
   print('ERROR: Connection failed. Did you remember to run the Kubernetes '
         'proxy on localhost (i.e. kubectl proxy --port=<proxy_port>)? '
         'Error: %s' % msg)
 
+
 def _do_post(post_url, api_name, request_body):
   """Helper to do HTTP POST.
 
@@ -135,7 +140,9 @@ def _do_post(post_url, api_name, request_body):
   """
   is_success = True
   try:
-    r = requests.post(post_url, data=request_body, timeout=_REQUEST_TIMEOUT_SECS)
+    r = requests.post(post_url,
+                      data=request_body,
+                      timeout=_REQUEST_TIMEOUT_SECS)
     if r.status_code == requests.codes.conflict:
       print('WARN: Looks like the resource already exists. Api: %s, url: %s' %
             (api_name, post_url))
@@ -143,7 +150,8 @@ def _do_post(post_url, api_name, request_body):
       print('ERROR: %s API returned error. HTTP response: (%d) %s' %
             (api_name, r.status_code, r.text))
       is_success = False
-  except(requests.exceptions.Timeout, requests.exceptions.ConnectionError) as e:
+  except (requests.exceptions.Timeout,
+          requests.exceptions.ConnectionError) as e:
     is_success = False
     _print_connection_error(str(e))
   return is_success
@@ -165,7 +173,8 @@ def _do_delete(del_url, api_name):
       print('ERROR: %s API returned error. HTTP response: %s' %
             (api_name, r.text))
       is_success = False
-  except(requests.exceptions.Timeout, requests.exceptions.ConnectionError) as e:
+  except (requests.exceptions.Timeout,
+          requests.exceptions.ConnectionError) as e:
     is_success = False
     _print_connection_error(str(e))
   return is_success
@@ -179,12 +188,12 @@ def create_service(kube_host, kube_port, namespace, service_name, pod_name,
   post_url = 'http://%s:%d/api/v1/namespaces/%s/services' % (
       kube_host, kube_port, namespace)
   request_body = _make_service_config(service_name, pod_name, service_port_list,
-                                     container_port_list, is_headless)
+                                      container_port_list, is_headless)
   return _do_post(post_url, 'Create Service', request_body)
 
 
 def create_pod(kube_host, kube_port, namespace, pod_name, image_name,
-               container_port_list, cmd_list, arg_list):
+               container_port_list, cmd_list, arg_list, env_dict):
   """Creates a Kubernetes Pod.
 
   Note that it is generally NOT considered a good practice to directly create
@@ -200,7 +209,7 @@ def create_pod(kube_host, kube_port, namespace, pod_name, image_name,
   post_url = 'http://%s:%d/api/v1/namespaces/%s/pods' % (kube_host, kube_port,
                                                          namespace)
   request_body = _make_pod_config(pod_name, image_name, container_port_list,
-                                 cmd_list, arg_list)
+                                  cmd_list, arg_list, env_dict)
   return _do_post(post_url, 'Create Pod', request_body)
 
 
@@ -214,3 +223,47 @@ def delete_pod(kube_host, kube_port, namespace, pod_name):
   del_url = 'http://%s:%d/api/v1/namespaces/%s/pods/%s' % (kube_host, kube_port,
                                                            namespace, pod_name)
   return _do_delete(del_url, 'Delete Pod')
+
+
+def create_pod_and_service(kube_host, kube_port, namespace, pod_name,
+                           image_name, container_port_list, cmd_list, arg_list,
+                           env_dict, is_headless_service):
+  """A helper function that creates a pod and a service (if pod creation was successful)."""
+  is_success = create_pod(kube_host, kube_port, namespace, pod_name, image_name,
+                          container_port_list, cmd_list, arg_list, env_dict)
+  if not is_success:
+    print 'Error in creating Pod'
+    return False
+
+  is_success = create_service(
+      kube_host,
+      kube_port,
+      namespace,
+      pod_name,  # Use pod_name for service
+      pod_name,
+      container_port_list,  # Service port list same as container port list
+      container_port_list,
+      is_headless_service)
+  if not is_success:
+    print 'Error in creating Service'
+    return False
+
+  print 'Successfully created the pod/service %s' % pod_name
+  return True
+
+
+def delete_pod_and_service(kube_host, kube_port, namespace, pod_name):
+  """ A helper function that calls delete_pod and delete_service """
+  is_success = delete_pod(kube_host, kube_port, namespace, pod_name)
+  if not is_success:
+    print 'Error in deleting pod %s' % pod_name
+    return False
+
+  # Note: the service name is assumed to be the same as the pod name
+  is_success = delete_service(kube_host, kube_port, namespace, pod_name)
+  if not is_success:
+    print 'Error in deleting service %s' % pod_name
+    return False
+
+  print 'Successfully deleted the Pod/Service: %s' % pod_name
+  return True
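
With these two helpers, the full lifecycle of a stress-test pod against a locally proxied cluster is a pair of calls. A minimal usage sketch, assuming 'kubectl proxy --port=8001' is running; the image tag, pod name, port, and env values are placeholders:

import kubernetes_api

ok = kubernetes_api.create_pod_and_service(
    'localhost', 8001, 'default',
    'stress-server',                        # pod name, reused as service name
    'gcr.io/my-project/grpc_stress_test',   # placeholder image tag
    [8080],                                 # service ports == container ports
    ['/var/local/git/grpc/tools/gcp/stress_test/run_server.py'],
    [],                                     # no args; config is passed via env
    {'STRESS_TEST_IMAGE_TYPE': 'SERVER'},   # env_dict, new in this change
    True)                                   # headless, so DNS records are made
# ... run the test, then tear everything down:
if ok:
  kubernetes_api.delete_pod_and_service('localhost', 8001, 'default',
                                        'stress-server')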

+ 3 - 0
tools/jenkins/build_interop_stress_image.sh

@@ -35,6 +35,8 @@ set -x
 
 # Params:
#  INTEROP_IMAGE - name of the tag of the final interop image
+#  INTEROP_IMAGE_REPOSITORY_TAG - Optional. If set, the created image will be
+#    tagged using the command: 'docker tag $INTEROP_IMAGE $INTEROP_IMAGE_REPOSITORY_TAG'
 #  BASE_NAME - base name used to locate the base Dockerfile and build script
 #  TTY_FLAG - optional -t flag to make docker allocate tty
 #  BUILD_INTEROP_DOCKER_EXTRA_ARGS - optional args to be passed to the
@@ -77,6 +79,7 @@ CONTAINER_NAME="build_${BASE_NAME}_$(uuidgen)"
   $BASE_IMAGE \
   bash -l /var/local/jenkins/grpc/tools/dockerfile/$BASE_NAME/build_interop_stress.sh \
   && docker commit $CONTAINER_NAME $INTEROP_IMAGE \
+  && ( if [ -n "$INTEROP_IMAGE_REPOSITORY_TAG" ]; then docker tag -f $INTEROP_IMAGE $INTEROP_IMAGE_REPOSITORY_TAG ; fi ) \
   && echo "Successfully built image $INTEROP_IMAGE")
 EXITCODE=$?
 

+ 11 - 0
tools/run_tests/distribtest_targets.py

@@ -96,6 +96,15 @@ class CSharpDistribTest(object):
       return create_jobspec(self.name,
           ['test/distrib/csharp/run_distrib_test.sh'],
           environ={'EXTERNAL_GIT_ROOT': '../../..'})
+    elif self.platform == 'windows':
+      if self.arch == 'x64':
+        environ={'MSBUILD_EXTRA_ARGS': '/p:Platform=x64',
+                 'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\x64\\Debug'}
+      else:
+        environ={'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\Debug'}
+      return create_jobspec(self.name,
+          ['test\\distrib\\csharp\\run_distrib_test.bat'],
+          environ=environ)
     else:
       raise Exception("Not supported yet.")
 
@@ -240,6 +249,8 @@ def targets():
           CSharpDistribTest('linux', 'x64', 'ubuntu1510'),
           CSharpDistribTest('linux', 'x64', 'ubuntu1604'),
           CSharpDistribTest('macos', 'x86'),
+          CSharpDistribTest('windows', 'x86'),
+          CSharpDistribTest('windows', 'x64'),
           PythonDistribTest('linux', 'x64', 'wheezy'),
           PythonDistribTest('linux', 'x64', 'jessie'),
           PythonDistribTest('linux', 'x86', 'jessie'),

+ 2 - 1
tools/run_tests/jobset.py

@@ -384,7 +384,8 @@ class Jobset(object):
                 self._travis,
                 self._add_env)
       self._running.add(job)
-      self.resultset[job.GetSpec().shortname] = []
+      if job.GetSpec().shortname not in self.resultset:
+        self.resultset[job.GetSpec().shortname] = []
     return True
 
   def reap(self):
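
The added guard keeps a retried job from clobbering results already recorded under the same shortname. Python's dict.setdefault expresses the same check-and-initialize in one call; a minimal equivalent sketch:

self.resultset.setdefault(job.GetSpec().shortname, [])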

+ 556 - 0
tools/run_tests/stress_test/run_stress_tests_on_gke.py

@@ -0,0 +1,556 @@
+#!/usr/bin/env python2.7
+# Copyright 2015-2016, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import argparse
+import datetime
+import os
+import subprocess
+import sys
+import time
+
+stress_test_utils_dir = os.path.abspath(os.path.join(
+    os.path.dirname(__file__), '../../gcp/stress_test'))
+sys.path.append(stress_test_utils_dir)
+from stress_test_utils import BigQueryHelper
+
+kubernetes_api_dir = os.path.abspath(os.path.join(
+    os.path.dirname(__file__), '../../gcp/utils'))
+sys.path.append(kubernetes_api_dir)
+
+import kubernetes_api
+
+_GRPC_ROOT = os.path.abspath(os.path.join(
+    os.path.dirname(sys.argv[0]), '../../..'))
+os.chdir(_GRPC_ROOT)
+
+# Number of seconds to wait for the GKE image to start and warm up
+_GKE_IMAGE_WARMUP_WAIT_SECS = 60
+
+_SERVER_POD_NAME = 'stress-server'
+_CLIENT_POD_NAME_PREFIX = 'stress-client'
+_DATASET_ID_PREFIX = 'stress_test'
+_SUMMARY_TABLE_ID = 'summary'
+_QPS_TABLE_ID = 'qps'
+
+_DEFAULT_DOCKER_IMAGE_NAME = 'grpc_stress_test'
+
+# The default localhost port on which the kubernetes proxy server is started
+# (i.e. kubectl proxy --port=<port>)
+_DEFAULT_KUBERNETES_PROXY_PORT = 8001
+
+# How frequently the stress client wrapper script (running inside a GKE
+# container) should poll the health of the stress client (also running inside
+# the GKE container) and upload metrics to BigQuery
+
+# The default setting for stress test server and client
+_DEFAULT_STRESS_SERVER_PORT = 8080
+_DEFAULT_METRICS_PORT = 8081
+_DEFAULT_TEST_CASES_STR = 'empty_unary:1,large_unary:1,client_streaming:1,server_streaming:1,empty_stream:1'
+_DEFAULT_NUM_CHANNELS_PER_SERVER = 5
+_DEFAULT_NUM_STUBS_PER_CHANNEL = 10
+_DEFAULT_METRICS_COLLECTION_INTERVAL_SECS = 30
+
+# Number of stress client instances to launch
+_DEFAULT_NUM_CLIENTS = 3
+
+# How frequently this test should monitor the health of the stress clients
+# and servers running in GKE
+_DEFAULT_TEST_POLL_INTERVAL_SECS = 60
+
+# Default run time for this test (2 hours)
+_DEFAULT_TEST_DURATION_SECS = 7200
+
+# The number of seconds it would take a GKE pod to warm up (i.e. get to the
+# 'Running' state from the time of creation). Ideally this is something the
+# test should determine automatically by polling the pod status via the
+# Kubernetes API.
+
+
+class KubernetesProxy:
+  """ Class to start a proxy on localhost to the Kubernetes API server """
+
+  def __init__(self, api_port):
+    self.port = api_port
+    self.p = None
+    self.started = False
+
+  def start(self):
+    cmd = ['kubectl', 'proxy', '--port=%d' % self.port]
+    self.p = subprocess.Popen(args=cmd)
+    self.started = True
+    time.sleep(2)
+    print '..Started'
+
+  def get_port(self):
+    return self.port
+
+  def is_started(self):
+    return self.started
+
+  def __del__(self):
+    if self.p is not None:
+      print 'Shutting down Kubernetes proxy..'
+      self.p.kill()
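+
+
+# A minimal sketch (not wired into the flow below): relying on __del__ to
+# reap the kubectl subprocess is fragile, so a context manager that kills the
+# child process in a finally block gives a safer lifecycle. 'contextlib' is
+# part of the standard library.
+import contextlib
+
+
+@contextlib.contextmanager
+def kubernetes_proxy_running(api_port):
+  proxy = KubernetesProxy(api_port)
+  proxy.start()
+  try:
+    yield proxy
+  finally:
+    if proxy.p is not None:
+      proxy.p.kill()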
+
+
+class TestSettings:
+
+  def __init__(self, build_docker_image, test_poll_interval_secs,
+               test_duration_secs, kubernetes_proxy_port):
+    self.build_docker_image = build_docker_image
+    self.test_poll_interval_secs = test_poll_interval_secs
+    self.test_duration_secs = test_duration_secs
+    self.kubernetes_proxy_port = kubernetes_proxy_port
+
+
+class GkeSettings:
+
+  def __init__(self, project_id, docker_image_name):
+    self.project_id = project_id
+    self.docker_image_name = docker_image_name
+    self.tag_name = 'gcr.io/%s/%s' % (project_id, docker_image_name)
+
+
+class BigQuerySettings:
+
+  def __init__(self, run_id, dataset_id, summary_table_id, qps_table_id):
+    self.run_id = run_id
+    self.dataset_id = dataset_id
+    self.summary_table_id = summary_table_id
+    self.qps_table_id = qps_table_id
+
+
+class StressServerSettings:
+
+  def __init__(self, server_pod_name, server_port):
+    self.server_pod_name = server_pod_name
+    self.server_port = server_port
+
+
+class StressClientSettings:
+
+  def __init__(self, num_clients, client_pod_name_prefix, server_pod_name,
+               server_port, metrics_port, metrics_collection_interval_secs,
+               stress_client_poll_interval_secs, num_channels_per_server,
+               num_stubs_per_channel, test_cases_str):
+    self.num_clients = num_clients
+    self.client_pod_name_prefix = client_pod_name_prefix
+    self.server_pod_name = server_pod_name
+    self.server_port = server_port
+    self.metrics_port = metrics_port
+    self.metrics_collection_interval_secs = metrics_collection_interval_secs
+    self.stress_client_poll_interval_secs = stress_client_poll_interval_secs
+    self.num_channels_per_server = num_channels_per_server
+    self.num_stubs_per_channel = num_stubs_per_channel
+    self.test_cases_str = test_cases_str
+
+    # == Derived properties ==
+    # Note: Client can accept a list of server addresses (a comma separated list
+    # of 'server_name:server_port'). In this case, we only have one server
+    # address to pass
+    self.server_addresses = '%s.default.svc.cluster.local:%d' % (
+        server_pod_name, server_port)
+    self.client_pod_names_list = ['%s-%d' % (client_pod_name_prefix, i)
+                                  for i in range(1, num_clients + 1)]
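+
+    # For example (a sketch with placeholder values): given
+    # client_pod_name_prefix='stress-client', num_clients=2,
+    # server_pod_name='stress-server' and server_port=8080:
+    #   server_addresses      == 'stress-server.default.svc.cluster.local:8080'
+    #   client_pod_names_list == ['stress-client-1', 'stress-client-2']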
+
+
+def _build_docker_image(image_name, tag_name):
+  """ Build the docker image and add tag it to the GKE repository """
+  print 'Building docker image: %s' % image_name
+  os.environ['INTEROP_IMAGE'] = image_name
+  os.environ['INTEROP_IMAGE_REPOSITORY_TAG'] = tag_name
+  # Note that 'BASE_NAME' HAS to be 'grpc_interop_stress_cxx' since the script
+  # build_interop_stress_image.sh invokes the following script:
+  #   tools/dockerfile/$BASE_NAME/build_interop_stress.sh
+  os.environ['BASE_NAME'] = 'grpc_interop_stress_cxx'
+  cmd = ['tools/jenkins/build_interop_stress_image.sh']
+  retcode = subprocess.call(args=cmd)
+  if retcode != 0:
+    print 'Error in building docker image'
+    return False
+  return True
+
+
+def _push_docker_image_to_gke_registry(docker_tag_name):
+  """Executes 'gcloud docker push <docker_tag_name>' to push the image to GKE registry"""
+  cmd = ['gcloud', 'docker', 'push', docker_tag_name]
+  print 'Pushing %s to GKE registry..' % docker_tag_name
+  retcode = subprocess.call(args=cmd)
+  if retcode != 0:
+    print 'Error in pushing docker image %s to the GKE registry' % docker_tag_name
+    return False
+  return True
+
+
+def _launch_server(gke_settings, stress_server_settings, bq_settings,
+                   kubernetes_proxy):
+  """ Launches a stress test server instance in GKE cluster """
+  if not kubernetes_proxy.is_started:
+    print 'Kubernetes proxy must be started before calling this function'
+    return False
+
+  # This is the wrapper script that is run in the container. This script runs
+  # the actual stress test server
+  server_cmd_list = ['/var/local/git/grpc/tools/gcp/stress_test/run_server.py']
+
+  # run_server.py does not take any args from the command line. The args are
+  # instead passed via environment variables (see server_env below)
+  server_arg_list = []
+
+  # The parameters to the script run_server.py are injected into the container
+  # via environment variables
+  server_env = {
+      'STRESS_TEST_IMAGE_TYPE': 'SERVER',
+      'STRESS_TEST_IMAGE': '/var/local/git/grpc/bins/opt/interop_server',
+      'STRESS_TEST_ARGS_STR': '--port=%s' % stress_server_settings.server_port,
+      'RUN_ID': bq_settings.run_id,
+      'POD_NAME': stress_server_settings.server_pod_name,
+      'GCP_PROJECT_ID': gke_settings.project_id,
+      'DATASET_ID': bq_settings.dataset_id,
+      'SUMMARY_TABLE_ID': bq_settings.summary_table_id,
+      'QPS_TABLE_ID': bq_settings.qps_table_id
+  }
+
+  # Launch Server
+  is_success = kubernetes_api.create_pod_and_service(
+      'localhost',
+      kubernetes_proxy.get_port(),
+      'default',  # Use 'default' namespace
+      stress_server_settings.server_pod_name,
+      gke_settings.tag_name,
+      [stress_server_settings.server_port],  # Port that should be exposed
+      server_cmd_list,
+      server_arg_list,
+      server_env,
+      True  # Headless = True for the server, since we want GKE to create DNS records
+  )
+
+  return is_success
+
+
+def _launch_client(gke_settings, stress_server_settings, stress_client_settings,
+                   bq_settings, kubernetes_proxy):
+  """ Launches a configurable number of stress test clients on GKE cluster """
+  if not kubernetes_proxy.is_started:
+    print 'Kubernetes proxy must be started before calling this function'
+    return False
+
+  stress_client_arg_list = [
+      '--server_addresses=%s' % stress_client_settings.server_addresses,
+      '--test_cases=%s' % stress_client_settings.test_cases_str,
+      '--num_stubs_per_channel=%d' %
+      stress_client_settings.num_stubs_per_channel
+  ]
+
+  # This is the wrapper script that is run in the container. This script runs
+  # the actual stress client
+  client_cmd_list = ['/var/local/git/grpc/tools/gcp/stress_test/run_client.py']
+
+  # run_client.py takes no args. All args are passed as env variables (see
+  # client_env)
+  client_arg_list = []
+
+  metrics_server_address = 'localhost:%d' % stress_client_settings.metrics_port
+  metrics_client_arg_list = [
+      '--metrics_server_address=%s' % metrics_server_address,
+      '--total_only=true'
+  ]
+
+  # The parameters to the script run_client.py are injected into the container
+  # via environment variables
+  client_env = {
+      'STRESS_TEST_IMAGE_TYPE': 'CLIENT',
+      'STRESS_TEST_IMAGE': '/var/local/git/grpc/bins/opt/stress_test',
+      'STRESS_TEST_ARGS_STR': ' '.join(stress_client_arg_list),
+      'METRICS_CLIENT_IMAGE': '/var/local/git/grpc/bins/opt/metrics_client',
+      'METRICS_CLIENT_ARGS_STR': ' '.join(metrics_client_arg_list),
+      'RUN_ID': bq_settings.run_id,
+      'POLL_INTERVAL_SECS':
+          str(stress_client_settings.stress_client_poll_interval_secs),
+      'GCP_PROJECT_ID': gke_settings.project_id,
+      'DATASET_ID': bq_settings.dataset_id,
+      'SUMMARY_TABLE_ID': bq_settings.summary_table_id,
+      'QPS_TABLE_ID': bq_settings.qps_table_id
+  }
+
+  for pod_name in stress_client_settings.client_pod_names_list:
+    client_env['POD_NAME'] = pod_name
+    is_success = kubernetes_api.create_pod_and_service(
+        'localhost',  # Since proxy is running on localhost
+        kubernetes_proxy.get_port(),
+        'default',  # default namespace
+        pod_name,
+        gke_settings.tag_name,
+        [stress_client_settings.metrics_port],  # Client pods expose the metrics port
+        client_cmd_list,
+        client_arg_list,
+        client_env,
+        False  # Client is not a headless service
+    )
+    if not is_success:
+      print 'Error in launching client %s' % pod_name
+      return False
+
+  return True
+
+
+def _launch_server_and_client(gke_settings, stress_server_settings,
+                              stress_client_settings, bq_settings,
+                              kubernetes_proxy_port):
+  # Start kubernetes proxy
+  print 'Starting Kubernetes proxy..'
+  kubernetes_proxy = KubernetesProxy(kubernetes_proxy_port)
+  kubernetes_proxy.start()
+
+  print 'Launching server..'
+  is_success = _launch_server(gke_settings, stress_server_settings, bq_settings,
+                              kubernetes_proxy)
+  if not is_success:
+    print 'Error in launching server'
+    return False
+
+  # Server takes a while to start.
+  # TODO(sree): Use the Kubernetes API to query the status of the server
+  # instead of sleeping (a sketch of such a poll follows this function)
+  print 'Waiting for %s seconds for the server to start...' % _GKE_IMAGE_WARMUP_WAIT_SECS
+  time.sleep(_GKE_IMAGE_WARMUP_WAIT_SECS)
+
+  # Launch client(s)
+  is_success = _launch_client(gke_settings, stress_server_settings,
+                              stress_client_settings, bq_settings,
+                              kubernetes_proxy)
+
+  if not is_success:
+    print 'Error in launching client(s)'
+    return False
+
+  print 'Waiting for %s seconds for the client images to start...' % _GKE_IMAGE_WARMUP_WAIT_SECS
+  time.sleep(_GKE_IMAGE_WARMUP_WAIT_SECS)
+  return True
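+
+
+def _wait_for_pod_running(kube_port, namespace, pod_name, timeout_secs):
+  """A minimal sketch of the polling suggested by the TODO above (not wired
+  into the flow). Assumes the standard Kubernetes REST endpoint exposed by
+  'kubectl proxy': GET /api/v1/namespaces/<ns>/pods/<name> returns a pod
+  object whose status.phase becomes 'Running' once the pod is up."""
+  import requests  # third-party; already a dependency of kubernetes_api.py
+  url = 'http://localhost:%d/api/v1/namespaces/%s/pods/%s' % (
+      kube_port, namespace, pod_name)
+  deadline = time.time() + timeout_secs
+  while time.time() < deadline:
+    try:
+      pod = requests.get(url, timeout=10).json()
+      if pod.get('status', {}).get('phase') == 'Running':
+        return True
+    except (requests.exceptions.RequestException, ValueError):
+      pass  # proxy or pod not ready yet; retry until the deadline
+    time.sleep(5)
+  return False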
+
+
+def _delete_server_and_client(stress_server_settings, stress_client_settings,
+                              kubernetes_proxy_port):
+  kubernetes_proxy = KubernetesProxy(kubernetes_proxy_port)
+  kubernetes_proxy.start()
+
+  # Delete clients first
+  is_success = True
+  for pod_name in stress_client_settings.client_pod_names_list:
+    is_success = kubernetes_api.delete_pod_and_service(
+        'localhost', kubernetes_proxy_port, 'default', pod_name)
+    if not is_success:
+      return False
+
+  # Delete server
+  is_success = kubernetes_api.delete_pod_and_service(
+      'localhost', kubernetes_proxy_port, 'default',
+      stress_server_settings.server_pod_name)
+  return is_success
+
+
+def run_test_main(test_settings, gke_settings, stress_server_settings,
+                  stress_client_settings):
+  is_success = True
+
+  if test_settings.build_docker_image:
+    is_success = _build_docker_image(gke_settings.docker_image_name,
+                                     gke_settings.tag_name)
+    if not is_success:
+      return False
+
+    is_success = _push_docker_image_to_gke_registry(gke_settings.tag_name)
+    if not is_success:
+      return False
+
+  # Create a unique id for this run (Note: Using timestamp instead of UUID to
+  # make it easier to deduce the date/time of the run just by looking at the
+  # run id. This is useful in debugging when looking at records in BigQuery)
+  run_id = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
+  dataset_id = '%s_%s' % (_DATASET_ID_PREFIX, run_id)
+
+  # Big Query settings (common for both Stress Server and Client)
+  bq_settings = BigQuerySettings(run_id, dataset_id, _SUMMARY_TABLE_ID,
+                                 _QPS_TABLE_ID)
+
+  bq_helper = BigQueryHelper(run_id, '', '', gke_settings.project_id,
+                             dataset_id, _SUMMARY_TABLE_ID, _QPS_TABLE_ID)
+                             _SUMMARY_TABLE_ID, _QPS_TABLE_ID)
+  bq_helper.initialize()
+
+  try:
+    is_success = _launch_server_and_client(gke_settings, stress_server_settings,
+                                           stress_client_settings, bq_settings,
+                                           test_settings.kubernetes_proxy_port)
+    if not is_success:
+      return False
+
+    start_time = datetime.datetime.now()
+    end_time = start_time + datetime.timedelta(
+        seconds=test_settings.test_duration_secs)
+    print 'Running the test until %s' % end_time.isoformat()
+
+    while True:
+      if datetime.datetime.now() > end_time:
+        print 'Test was run for %d seconds' % test_settings.test_duration_secs
+        break
+
+      # Check if either stress server or clients have failed
+      if bq_helper.check_if_any_tests_failed():
+        is_success = False
+        print 'Some tests failed.'
+        break
+
+      # Things seem to be running fine. Wait until next poll time to check the
+      # status
+      print 'Sleeping for %d seconds..' % test_settings.test_poll_interval_secs
+      time.sleep(test_settings.test_poll_interval_secs)
+
+    # Print the BigQuery tables
+    bq_helper.print_summary_records()
+    bq_helper.print_qps_records()
+
+  finally:
+    # If is_success is False at this point, it means the stress tests were
+    # started successfully but failed while running. In that case we should
+    # not delete the pods (since they contain all the failure information)
+    if is_success:
+      _delete_server_and_client(stress_server_settings, stress_client_settings,
+                                test_settings.kubernetes_proxy_port)
+
+  return is_success
+
+
+argp = argparse.ArgumentParser(
+    description='Launch stress tests in GKE',
+    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+argp.add_argument('--project_id',
+                  required=True,
+                  help='The Google Cloud Platform Project Id')
+argp.add_argument('--num_clients',
+                  default=_DEFAULT_NUM_CLIENTS,
+                  type=int,
+                  help='Number of client instances to start')
+argp.add_argument('--docker_image_name',
+                  default=_DEFAULT_DOCKER_IMAGE_NAME,
+                  help='The name of the docker image containing stress client '
+                  'and stress servers')
+argp.add_argument('--build_docker_image',
+                  dest='build_docker_image',
+                  action='store_true',
+                  help='Build a docker image and push to Google Container '
+                  'Registry')
+argp.add_argument('--do_not_build_docker_image',
+                  dest='build_docker_image',
+                  action='store_false',
+                  help='Do not build and push docker image to Google Container '
+                  'Registry')
+argp.set_defaults(build_docker_image=True)
+
+argp.add_argument('--test_poll_interval_secs',
+                  default=_DEFAULT_TEST_POLL_INTERVAL_SECS,
+                  type=int,
+                  help='How frequently this script should monitor the '
+                  'health of stress clients and servers running in the GKE '
+                  'cluster')
+argp.add_argument('--test_duration_secs',
+                  default=_DEFAULT_TEST_DURATION_SECS,
+                  type=int,
+                  help='How long this test should run')
+argp.add_argument('--kubernetes_proxy_port',
+                  default=_DEFAULT_KUBERNETES_PROXY_PORT,
+                  type=int,
+                  help='The port on which the kubernetes proxy (on localhost)'
+                  ' is started')
+argp.add_argument('--stress_server_port',
+                  default=_DEFAULT_STRESS_SERVER_PORT,
+                  type=int,
+                  help='The port on which the stress server (in GKE '
+                  'containers) listens')
+argp.add_argument('--stress_client_metrics_port',
+                  default=_DEFAULT_METRICS_PORT,
+                  type=int,
+                  help='The port on which the stress clients (in GKE '
+                  'containers) expose metrics')
+argp.add_argument('--stress_client_poll_interval_secs',
+                  default=_DEFAULT_STRESS_CLIENT_POLL_INTERVAL_SECS,
+                  type=int,
+                  help='How frequently the stress client wrapper script '
+                  '(running inside GKE) should monitor the health of the '
+                  'actual stress client process and upload metrics to BigQuery')
+argp.add_argument('--stress_client_metrics_collection_interval_secs',
+                  default=_DEFAULT_METRICS_COLLECTION_INTERVAL_SECS,
+                  type=int,
+                  help='How frequently metrics should be collected in-memory on'
+                  ' the stress clients (running inside GKE containers). Note '
+                  'that this is NOT the same as the upload-to-BigQuery '
+                  'frequency. The metrics upload frequency is controlled by the'
+                  ' --stress_client_poll_interval_secs flag')
+argp.add_argument('--stress_client_num_channels_per_server',
+                  default=_DEFAULT_NUM_CHANNELS_PER_SERVER,
+                  type=int,
+                  help='The number of channels created to each server from a '
+                  'stress client')
+argp.add_argument('--stress_client_num_stubs_per_channel',
+                  default=_DEFAULT_NUM_STUBS_PER_CHANNEL,
+                  type=int,
+                  help='The number of stubs created per channel. This number '
+                  'indicates the max number of RPCs that can be made in '
+                  'parallel on each channel at any given time')
+argp.add_argument('--stress_client_test_cases',
+                  default=_DEFAULT_TEST_CASES_STR,
+                  help='List of test cases (with weights) to be executed by the'
+                  ' stress test client. The list is in the following format:\n'
+                  '  <testcase_1:w_1>,<testcase_2:w_2>...<testcase_n:w_n>\n'
+                  ' (Note: The weights do not have to add up to 100)')
+
+if __name__ == '__main__':
+  args = argp.parse_args()
+
+  test_settings = TestSettings(
+      args.build_docker_image, args.test_poll_interval_secs,
+      args.test_duration_secs, args.kubernetes_proxy_port)
+
+  gke_settings = GkeSettings(args.project_id, args.docker_image_name)
+
+  stress_server_settings = StressServerSettings(_SERVER_POD_NAME,
+                                                args.stress_server_port)
+  stress_client_settings = StressClientSettings(
+      args.num_clients, _CLIENT_POD_NAME_PREFIX, _SERVER_POD_NAME,
+      args.stress_server_port, args.stress_client_metrics_port,
+      args.stress_client_metrics_collection_interval_secs,
+      args.stress_client_poll_interval_secs,
+      args.stress_client_num_channels_per_server,
+      args.stress_client_num_stubs_per_channel, args.stress_client_test_cases)
+
+  run_test_main(test_settings, gke_settings, stress_server_settings,
+                stress_client_settings)
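
For reference, a typical invocation of this driver is 'tools/run_tests/stress_test/run_stress_tests_on_gke.py --project_id=<gcp-project-id>' (the project id is a placeholder); every other flag falls back to the defaults defined above, so a default run builds and pushes the stress-test image, launches the server and client pods on GKE, and polls BigQuery for failures until the two-hour test duration elapses.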