/*
 *
 * Copyright 2015, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <cinttypes>
#include <memory>
#include <thread>

#include <grpc++/channel.h>
#include <grpc++/client_context.h>
#include <grpc++/create_channel.h>
#include <grpc++/server.h>
#include <grpc++/server_builder.h>
#include <grpc++/server_context.h>
#include <grpc/grpc.h>
#include <grpc/support/thd.h>
#include <grpc/support/time.h>
#include <grpc/support/tls.h>
#include <gtest/gtest.h>

#include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
#include "test/cpp/util/string_ref_helper.h"
#include "test/cpp/util/test_credentials_provider.h"

#ifdef GPR_POSIX_SOCKET
#include "src/core/lib/iomgr/ev_posix.h"
#endif

using grpc::testing::EchoRequest;
using grpc::testing::EchoResponse;
using grpc::testing::kTlsCredentialsType;
using std::chrono::system_clock;

GPR_TLS_DECL(g_is_async_end2end_test);

namespace grpc {
namespace testing {

namespace {

void* tag(int i) { return (void*)(intptr_t)i; }
int detag(void* p) { return static_cast<int>(reinterpret_cast<intptr_t>(p)); }

#ifdef GPR_POSIX_SOCKET
static int maybe_assert_non_blocking_poll(struct pollfd* pfds, nfds_t nfds,
                                          int timeout) {
  if (gpr_tls_get(&g_is_async_end2end_test)) {
    GPR_ASSERT(timeout == 0);
  }
  return poll(pfds, nfds, timeout);
}

class PollOverride {
 public:
  PollOverride(grpc_poll_function_type f) {
    prev_ = grpc_poll_function;
    grpc_poll_function = f;
  }

  ~PollOverride() { grpc_poll_function = prev_; }

 private:
  grpc_poll_function_type prev_;
};

class PollingOverrider : public PollOverride {
 public:
  explicit PollingOverrider(bool allow_blocking)
      : PollOverride(allow_blocking ? poll : maybe_assert_non_blocking_poll) {}
};
#else
class PollingOverrider {
 public:
  explicit PollingOverrider(bool allow_blocking) {}
};
#endif

class Verifier {
 public:
  explicit Verifier(bool spin) : spin_(spin) {}
  // Expect sets the expected ok value for a specific tag
  Verifier& Expect(int i, bool expect_ok) {
    expectations_[tag(i)] = expect_ok;
    return *this;
  }

  // Next waits for 1 async tag to complete, checks its
  // expectations, and returns the tag
  int Next(CompletionQueue* cq, bool ignore_ok) {
    bool ok;
    void* got_tag;
    if (spin_) {
      for (;;) {
        auto r = cq->AsyncNext(&got_tag, &ok, gpr_time_0(GPR_CLOCK_REALTIME));
        if (r == CompletionQueue::TIMEOUT) continue;
        if (r == CompletionQueue::GOT_EVENT) break;
        gpr_log(GPR_ERROR, "unexpected result from AsyncNext");
        abort();
      }
    } else {
      EXPECT_TRUE(cq->Next(&got_tag, &ok));
    }
    auto it = expectations_.find(got_tag);
    EXPECT_TRUE(it != expectations_.end());
    if (!ignore_ok) {
      EXPECT_EQ(it->second, ok);
    }
    expectations_.erase(it);
    return detag(got_tag);
  }

  // Verify keeps calling Next until all currently set
  // expected tags are complete
  void Verify(CompletionQueue* cq) { Verify(cq, false); }

  // This version of Verify allows optionally ignoring the
  // outcome of the expectation
  void Verify(CompletionQueue* cq, bool ignore_ok) {
    GPR_ASSERT(!expectations_.empty());
    while (!expectations_.empty()) {
      Next(cq, ignore_ok);
    }
  }

  // This version of Verify stops after a certain deadline
  void Verify(CompletionQueue* cq,
              std::chrono::system_clock::time_point deadline) {
    if (expectations_.empty()) {
      bool ok;
      void* got_tag;
      if (spin_) {
        while (std::chrono::system_clock::now() < deadline) {
          EXPECT_EQ(
              cq->AsyncNext(&got_tag, &ok, gpr_time_0(GPR_CLOCK_REALTIME)),
              CompletionQueue::TIMEOUT);
        }
      } else {
        EXPECT_EQ(cq->AsyncNext(&got_tag, &ok, deadline),
                  CompletionQueue::TIMEOUT);
      }
    } else {
      while (!expectations_.empty()) {
        bool ok;
        void* got_tag;
        if (spin_) {
          for (;;) {
            GPR_ASSERT(std::chrono::system_clock::now() < deadline);
            auto r =
                cq->AsyncNext(&got_tag, &ok, gpr_time_0(GPR_CLOCK_REALTIME));
            if (r == CompletionQueue::TIMEOUT) continue;
            if (r == CompletionQueue::GOT_EVENT) break;
            gpr_log(GPR_ERROR, "unexpected result from AsyncNext");
            abort();
          }
        } else {
          EXPECT_EQ(cq->AsyncNext(&got_tag, &ok, deadline),
                    CompletionQueue::GOT_EVENT);
        }
        auto it = expectations_.find(got_tag);
        EXPECT_TRUE(it != expectations_.end());
        EXPECT_EQ(it->second, ok);
        expectations_.erase(it);
      }
    }
  }

 private:
  std::map<void*, bool> expectations_;
  bool spin_;
};

// This class disables the server builder plugins that may add sync services to
// the server. If there are sync services, the UnimplementedRpc test will
// trigger the sync unknown-rpc routine on the server side, rather than the
// async one that needs to be tested here.
class ServerBuilderSyncPluginDisabler : public ::grpc::ServerBuilderOption {
 public:
  void UpdateArguments(ChannelArguments* arg) GRPC_OVERRIDE {}

  void UpdatePlugins(std::vector<std::unique_ptr<ServerBuilderPlugin>>* plugins)
      GRPC_OVERRIDE {
    auto plugin = plugins->begin();
    while (plugin != plugins->end()) {
      if ((*plugin)->has_sync_methods()) {
        // erase() invalidates the iterator; use its return value to advance.
        plugin = plugins->erase(plugin);
      } else {
        plugin++;
      }
    }
  }
};

class TestScenario {
 public:
  TestScenario(bool non_block, const grpc::string& creds_type,
               const grpc::string& content)
      : disable_blocking(non_block),
        credentials_type(creds_type),
        message_content(content) {}
  void Log() const {
    gpr_log(
        GPR_INFO,
        "Scenario: disable_blocking %d, credentials %s, message size %" PRIuPTR,
        disable_blocking, credentials_type.c_str(), message_content.size());
  }
  bool disable_blocking;
  // Although the below grpc::string's are logically const, we can't declare
  // them const because of a limitation in the way old compilers (e.g., gcc-4.4)
  // manage vector insertion using a copy constructor
  grpc::string credentials_type;
  grpc::string message_content;
};

class AsyncEnd2endTest : public ::testing::TestWithParam<TestScenario> {
 protected:
  AsyncEnd2endTest() { GetParam().Log(); }

  void SetUp() GRPC_OVERRIDE {
    poll_overrider_.reset(new PollingOverrider(!GetParam().disable_blocking));

    port_ = grpc_pick_unused_port_or_die();
    server_address_ << "localhost:" << port_;

    // Setup server
    ServerBuilder builder;
    auto server_creds = GetServerCredentials(GetParam().credentials_type);
    builder.AddListeningPort(server_address_.str(), server_creds);
    builder.RegisterService(&service_);
    cq_ = builder.AddCompletionQueue();

    // TODO(zyc): make a test option to choose whether sync plugins should be
    // deleted
    std::unique_ptr<ServerBuilderOption> sync_plugin_disabler(
        new ServerBuilderSyncPluginDisabler());
    builder.SetOption(move(sync_plugin_disabler));
    server_ = builder.BuildAndStart();

    gpr_tls_set(&g_is_async_end2end_test, 1);
  }

  void TearDown() GRPC_OVERRIDE {
    server_->Shutdown();
    void* ignored_tag;
    bool ignored_ok;
    cq_->Shutdown();
    while (cq_->Next(&ignored_tag, &ignored_ok))
      ;
    poll_overrider_.reset();
    gpr_tls_set(&g_is_async_end2end_test, 0);
    grpc_recycle_unused_port(port_);
  }

  void ResetStub() {
    ChannelArguments args;
    auto channel_creds =
        GetChannelCredentials(GetParam().credentials_type, &args);
    std::shared_ptr<Channel> channel =
        CreateCustomChannel(server_address_.str(), channel_creds, args);
    stub_ = grpc::testing::EchoTestService::NewStub(channel);
  }

  void SendRpc(int num_rpcs) {
    for (int i = 0; i < num_rpcs; i++) {
      EchoRequest send_request;
      EchoRequest recv_request;
      EchoResponse send_response;
      EchoResponse recv_response;
      Status recv_status;

      ClientContext cli_ctx;
      ServerContext srv_ctx;
      grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);

      send_request.set_message(GetParam().message_content);
      std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
          stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));

      service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
                           cq_.get(), tag(2));

      Verifier(GetParam().disable_blocking).Expect(2, true).Verify(cq_.get());
      EXPECT_EQ(send_request.message(), recv_request.message());

      send_response.set_message(recv_request.message());
      response_writer.Finish(send_response, Status::OK, tag(3));
      response_reader->Finish(&recv_response, &recv_status, tag(4));
      Verifier(GetParam().disable_blocking)
          .Expect(3, true)
          .Expect(4, true)
          .Verify(cq_.get());

      EXPECT_EQ(send_response.message(), recv_response.message());
      EXPECT_TRUE(recv_status.ok());
    }
  }

  std::unique_ptr<ServerCompletionQueue> cq_;
  std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
  std::unique_ptr<Server> server_;
  grpc::testing::EchoTestService::AsyncService service_;
  std::ostringstream server_address_;
  int port_;

  std::unique_ptr<PollingOverrider> poll_overrider_;
};

TEST_P(AsyncEnd2endTest, SimpleRpc) {
  ResetStub();
  SendRpc(1);
}

TEST_P(AsyncEnd2endTest, SequentialRpcs) {
  ResetStub();
  SendRpc(10);
}

// We do not need to protect notify because the use is synchronized.
void ServerWait(Server* server, int* notify) {
  server->Wait();
  *notify = 1;
}
TEST_P(AsyncEnd2endTest, WaitAndShutdownTest) {
  int notify = 0;
  std::thread* wait_thread =
      new std::thread(&ServerWait, server_.get(), &notify);
  ResetStub();
  SendRpc(1);
  EXPECT_EQ(0, notify);
  server_->Shutdown();
  wait_thread->join();
  EXPECT_EQ(1, notify);
  delete wait_thread;
}

TEST_P(AsyncEnd2endTest, ShutdownThenWait) {
  ResetStub();
  SendRpc(1);
  server_->Shutdown();
  server_->Wait();
}

// Test a simple RPC using the async version of Next
TEST_P(AsyncEnd2endTest, AsyncNextRpc) {
  ResetStub();

  EchoRequest send_request;
  EchoRequest recv_request;
  EchoResponse send_response;
  EchoResponse recv_response;
  Status recv_status;

  ClientContext cli_ctx;
  ServerContext srv_ctx;
  grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);

  send_request.set_message(GetParam().message_content);
  std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
      stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));

  std::chrono::system_clock::time_point time_now(
      std::chrono::system_clock::now());
  std::chrono::system_clock::time_point time_limit(
      std::chrono::system_clock::now() + std::chrono::seconds(10));
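  // With no expectations registered and an already-expired deadline, these two
  // Verify calls simply check that nothing is pending on the completion queue
  // before the server has asked for an RPC.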
  Verifier(GetParam().disable_blocking).Verify(cq_.get(), time_now);
  Verifier(GetParam().disable_blocking).Verify(cq_.get(), time_now);

  service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
                       cq_.get(), tag(2));

  Verifier(GetParam().disable_blocking)
      .Expect(2, true)
      .Verify(cq_.get(), time_limit);
  EXPECT_EQ(send_request.message(), recv_request.message());

  send_response.set_message(recv_request.message());
  response_writer.Finish(send_response, Status::OK, tag(3));
  response_reader->Finish(&recv_response, &recv_status, tag(4));
  Verifier(GetParam().disable_blocking)
      .Expect(3, true)
      .Expect(4, true)
      .Verify(cq_.get(), std::chrono::system_clock::time_point::max());

  EXPECT_EQ(send_response.message(), recv_response.message());
  EXPECT_TRUE(recv_status.ok());
}

// Two pings and a final pong.
TEST_P(AsyncEnd2endTest, SimpleClientStreaming) {
  ResetStub();

  EchoRequest send_request;
  EchoRequest recv_request;
  EchoResponse send_response;
  EchoResponse recv_response;
  Status recv_status;
  ClientContext cli_ctx;
  ServerContext srv_ctx;
  ServerAsyncReader<EchoResponse, EchoRequest> srv_stream(&srv_ctx);

  send_request.set_message(GetParam().message_content);
  std::unique_ptr<ClientAsyncWriter<EchoRequest>> cli_stream(
      stub_->AsyncRequestStream(&cli_ctx, &recv_response, cq_.get(), tag(1)));

  service_.RequestRequestStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
                                tag(2));

  Verifier(GetParam().disable_blocking)
      .Expect(2, true)
      .Expect(1, true)
      .Verify(cq_.get());

  cli_stream->Write(send_request, tag(3));
  srv_stream.Read(&recv_request, tag(4));
  Verifier(GetParam().disable_blocking)
      .Expect(3, true)
      .Expect(4, true)
      .Verify(cq_.get());
  EXPECT_EQ(send_request.message(), recv_request.message());

  cli_stream->Write(send_request, tag(5));
  srv_stream.Read(&recv_request, tag(6));
  Verifier(GetParam().disable_blocking)
      .Expect(5, true)
      .Expect(6, true)
      .Verify(cq_.get());
  EXPECT_EQ(send_request.message(), recv_request.message());

  cli_stream->WritesDone(tag(7));
  srv_stream.Read(&recv_request, tag(8));
  Verifier(GetParam().disable_blocking)
      .Expect(7, true)
      .Expect(8, false)
      .Verify(cq_.get());

  send_response.set_message(recv_request.message());
  srv_stream.Finish(send_response, Status::OK, tag(9));
  cli_stream->Finish(&recv_status, tag(10));
  Verifier(GetParam().disable_blocking)
      .Expect(9, true)
      .Expect(10, true)
      .Verify(cq_.get());

  EXPECT_EQ(send_response.message(), recv_response.message());
  EXPECT_TRUE(recv_status.ok());
}

// One ping, two pongs.
TEST_P(AsyncEnd2endTest, SimpleServerStreaming) {
  ResetStub();

  EchoRequest send_request;
  EchoRequest recv_request;
  EchoResponse send_response;
  EchoResponse recv_response;
  Status recv_status;
  ClientContext cli_ctx;
  ServerContext srv_ctx;
  ServerAsyncWriter<EchoResponse> srv_stream(&srv_ctx);

  send_request.set_message(GetParam().message_content);
  std::unique_ptr<ClientAsyncReader<EchoResponse>> cli_stream(
      stub_->AsyncResponseStream(&cli_ctx, send_request, cq_.get(), tag(1)));

  service_.RequestResponseStream(&srv_ctx, &recv_request, &srv_stream,
                                 cq_.get(), cq_.get(), tag(2));

  Verifier(GetParam().disable_blocking)
      .Expect(1, true)
      .Expect(2, true)
      .Verify(cq_.get());
  EXPECT_EQ(send_request.message(), recv_request.message());

  send_response.set_message(recv_request.message());
  srv_stream.Write(send_response, tag(3));
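  // Each server Write is paired with a client Read; the completions for each
  // pair (tags 3/4, then 5/6) are verified together below.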
  cli_stream->Read(&recv_response, tag(4));
  Verifier(GetParam().disable_blocking)
      .Expect(3, true)
      .Expect(4, true)
      .Verify(cq_.get());
  EXPECT_EQ(send_response.message(), recv_response.message());

  srv_stream.Write(send_response, tag(5));
  cli_stream->Read(&recv_response, tag(6));
  Verifier(GetParam().disable_blocking)
      .Expect(5, true)
      .Expect(6, true)
      .Verify(cq_.get());
  EXPECT_EQ(send_response.message(), recv_response.message());

  srv_stream.Finish(Status::OK, tag(7));
  cli_stream->Read(&recv_response, tag(8));
  Verifier(GetParam().disable_blocking)
      .Expect(7, true)
      .Expect(8, false)
      .Verify(cq_.get());

  cli_stream->Finish(&recv_status, tag(9));
  Verifier(GetParam().disable_blocking).Expect(9, true).Verify(cq_.get());

  EXPECT_TRUE(recv_status.ok());
}

// One ping, one pong.
TEST_P(AsyncEnd2endTest, SimpleBidiStreaming) {
  ResetStub();

  EchoRequest send_request;
  EchoRequest recv_request;
  EchoResponse send_response;
  EchoResponse recv_response;
  Status recv_status;
  ClientContext cli_ctx;
  ServerContext srv_ctx;
  ServerAsyncReaderWriter<EchoResponse, EchoRequest> srv_stream(&srv_ctx);

  send_request.set_message(GetParam().message_content);
  std::unique_ptr<ClientAsyncReaderWriter<EchoRequest, EchoResponse>>
      cli_stream(stub_->AsyncBidiStream(&cli_ctx, cq_.get(), tag(1)));

  service_.RequestBidiStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
                             tag(2));

  Verifier(GetParam().disable_blocking)
      .Expect(1, true)
      .Expect(2, true)
      .Verify(cq_.get());

  cli_stream->Write(send_request, tag(3));
  srv_stream.Read(&recv_request, tag(4));
  Verifier(GetParam().disable_blocking)
      .Expect(3, true)
      .Expect(4, true)
      .Verify(cq_.get());
  EXPECT_EQ(send_request.message(), recv_request.message());

  send_response.set_message(recv_request.message());
  srv_stream.Write(send_response, tag(5));
  cli_stream->Read(&recv_response, tag(6));
  Verifier(GetParam().disable_blocking)
      .Expect(5, true)
      .Expect(6, true)
      .Verify(cq_.get());
  EXPECT_EQ(send_response.message(), recv_response.message());

  cli_stream->WritesDone(tag(7));
  srv_stream.Read(&recv_request, tag(8));
  Verifier(GetParam().disable_blocking)
      .Expect(7, true)
      .Expect(8, false)
      .Verify(cq_.get());

  srv_stream.Finish(Status::OK, tag(9));
  cli_stream->Finish(&recv_status, tag(10));
  Verifier(GetParam().disable_blocking)
      .Expect(9, true)
      .Expect(10, true)
      .Verify(cq_.get());

  EXPECT_TRUE(recv_status.ok());
}

// Metadata tests
TEST_P(AsyncEnd2endTest, ClientInitialMetadataRpc) {
  ResetStub();

  EchoRequest send_request;
  EchoRequest recv_request;
  EchoResponse send_response;
  EchoResponse recv_response;
  Status recv_status;

  ClientContext cli_ctx;
  ServerContext srv_ctx;
  grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);

  send_request.set_message(GetParam().message_content);
  std::pair<grpc::string, grpc::string> meta1("key1", "val1");
  std::pair<grpc::string, grpc::string> meta2("key2", "val2");
  std::pair<grpc::string, grpc::string> meta3("g.r.d-bin", "xyz");
  cli_ctx.AddMetadata(meta1.first, meta1.second);
  cli_ctx.AddMetadata(meta2.first, meta2.second);
  cli_ctx.AddMetadata(meta3.first, meta3.second);

  std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
      stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));

  service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
                       cq_.get(), tag(2));
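  // Once the server has picked up the call (tag 2), the three metadata entries
  // added by the client should be visible through srv_ctx.client_metadata().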
  Verifier(GetParam().disable_blocking).Expect(2, true).Verify(cq_.get());
  EXPECT_EQ(send_request.message(), recv_request.message());
  auto client_initial_metadata = srv_ctx.client_metadata();
  EXPECT_EQ(meta1.second,
            ToString(client_initial_metadata.find(meta1.first)->second));
  EXPECT_EQ(meta2.second,
            ToString(client_initial_metadata.find(meta2.first)->second));
  EXPECT_EQ(meta3.second,
            ToString(client_initial_metadata.find(meta3.first)->second));
  EXPECT_GE(client_initial_metadata.size(), static_cast<size_t>(2));

  send_response.set_message(recv_request.message());
  response_writer.Finish(send_response, Status::OK, tag(3));
  response_reader->Finish(&recv_response, &recv_status, tag(4));
  Verifier(GetParam().disable_blocking)
      .Expect(3, true)
      .Expect(4, true)
      .Verify(cq_.get());

  EXPECT_EQ(send_response.message(), recv_response.message());
  EXPECT_TRUE(recv_status.ok());
}

TEST_P(AsyncEnd2endTest, ServerInitialMetadataRpc) {
  ResetStub();

  EchoRequest send_request;
  EchoRequest recv_request;
  EchoResponse send_response;
  EchoResponse recv_response;
  Status recv_status;

  ClientContext cli_ctx;
  ServerContext srv_ctx;
  grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);

  send_request.set_message(GetParam().message_content);
  std::pair<grpc::string, grpc::string> meta1("key1", "val1");
  std::pair<grpc::string, grpc::string> meta2("key2", "val2");

  std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
      stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));

  service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
                       cq_.get(), tag(2));
  Verifier(GetParam().disable_blocking).Expect(2, true).Verify(cq_.get());
  EXPECT_EQ(send_request.message(), recv_request.message());
  srv_ctx.AddInitialMetadata(meta1.first, meta1.second);
  srv_ctx.AddInitialMetadata(meta2.first, meta2.second);
  response_writer.SendInitialMetadata(tag(3));
  Verifier(GetParam().disable_blocking).Expect(3, true).Verify(cq_.get());

  response_reader->ReadInitialMetadata(tag(4));
  Verifier(GetParam().disable_blocking).Expect(4, true).Verify(cq_.get());
  auto server_initial_metadata = cli_ctx.GetServerInitialMetadata();
  EXPECT_EQ(meta1.second,
            ToString(server_initial_metadata.find(meta1.first)->second));
  EXPECT_EQ(meta2.second,
            ToString(server_initial_metadata.find(meta2.first)->second));
  EXPECT_EQ(static_cast<size_t>(2), server_initial_metadata.size());

  send_response.set_message(recv_request.message());
  response_writer.Finish(send_response, Status::OK, tag(5));
  response_reader->Finish(&recv_response, &recv_status, tag(6));
  Verifier(GetParam().disable_blocking)
      .Expect(5, true)
      .Expect(6, true)
      .Verify(cq_.get());

  EXPECT_EQ(send_response.message(), recv_response.message());
  EXPECT_TRUE(recv_status.ok());
}

TEST_P(AsyncEnd2endTest, ServerTrailingMetadataRpc) {
  ResetStub();

  EchoRequest send_request;
  EchoRequest recv_request;
  EchoResponse send_response;
  EchoResponse recv_response;
  Status recv_status;

  ClientContext cli_ctx;
  ServerContext srv_ctx;
  grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);

  send_request.set_message(GetParam().message_content);
  std::pair<grpc::string, grpc::string> meta1("key1", "val1");
  std::pair<grpc::string, grpc::string> meta2("key2", "val2");

  std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
      stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));

  service_.RequestEcho(&srv_ctx,
                       &recv_request, &response_writer, cq_.get(),
                       cq_.get(), tag(2));
  Verifier(GetParam().disable_blocking).Expect(2, true).Verify(cq_.get());
  EXPECT_EQ(send_request.message(), recv_request.message());
  response_writer.SendInitialMetadata(tag(3));
  Verifier(GetParam().disable_blocking).Expect(3, true).Verify(cq_.get());

  send_response.set_message(recv_request.message());
  srv_ctx.AddTrailingMetadata(meta1.first, meta1.second);
  srv_ctx.AddTrailingMetadata(meta2.first, meta2.second);
  response_writer.Finish(send_response, Status::OK, tag(4));
  response_reader->Finish(&recv_response, &recv_status, tag(5));
  Verifier(GetParam().disable_blocking)
      .Expect(4, true)
      .Expect(5, true)
      .Verify(cq_.get());

  EXPECT_EQ(send_response.message(), recv_response.message());
  EXPECT_TRUE(recv_status.ok());
  auto server_trailing_metadata = cli_ctx.GetServerTrailingMetadata();
  EXPECT_EQ(meta1.second,
            ToString(server_trailing_metadata.find(meta1.first)->second));
  EXPECT_EQ(meta2.second,
            ToString(server_trailing_metadata.find(meta2.first)->second));
  EXPECT_EQ(static_cast<size_t>(2), server_trailing_metadata.size());
}

TEST_P(AsyncEnd2endTest, MetadataRpc) {
  ResetStub();

  EchoRequest send_request;
  EchoRequest recv_request;
  EchoResponse send_response;
  EchoResponse recv_response;
  Status recv_status;

  ClientContext cli_ctx;
  ServerContext srv_ctx;
  grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);

  send_request.set_message(GetParam().message_content);
  std::pair<grpc::string, grpc::string> meta1("key1", "val1");
  std::pair<grpc::string, grpc::string> meta2(
      "key2-bin",
      grpc::string("\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc", 13));
  std::pair<grpc::string, grpc::string> meta3("key3", "val3");
  std::pair<grpc::string, grpc::string> meta6(
      "key4-bin",
      grpc::string("\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d",
                   14));
  std::pair<grpc::string, grpc::string> meta5("key5", "val5");
  std::pair<grpc::string, grpc::string> meta4(
      "key6-bin",
      grpc::string(
          "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee", 15));

  cli_ctx.AddMetadata(meta1.first, meta1.second);
  cli_ctx.AddMetadata(meta2.first, meta2.second);

  std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
      stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));

  service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
                       cq_.get(), tag(2));
  Verifier(GetParam().disable_blocking).Expect(2, true).Verify(cq_.get());
  EXPECT_EQ(send_request.message(), recv_request.message());
  auto client_initial_metadata = srv_ctx.client_metadata();
  EXPECT_EQ(meta1.second,
            ToString(client_initial_metadata.find(meta1.first)->second));
  EXPECT_EQ(meta2.second,
            ToString(client_initial_metadata.find(meta2.first)->second));
  EXPECT_GE(client_initial_metadata.size(), static_cast<size_t>(2));

  srv_ctx.AddInitialMetadata(meta3.first, meta3.second);
  srv_ctx.AddInitialMetadata(meta4.first, meta4.second);
  response_writer.SendInitialMetadata(tag(3));
  Verifier(GetParam().disable_blocking).Expect(3, true).Verify(cq_.get());
  response_reader->ReadInitialMetadata(tag(4));
  Verifier(GetParam().disable_blocking).Expect(4, true).Verify(cq_.get());
  auto server_initial_metadata = cli_ctx.GetServerInitialMetadata();
  EXPECT_EQ(meta3.second,
            ToString(server_initial_metadata.find(meta3.first)->second));
  EXPECT_EQ(meta4.second,
            ToString(server_initial_metadata.find(meta4.first)->second));
  EXPECT_GE(server_initial_metadata.size(), static_cast<size_t>(2));

  send_response.set_message(recv_request.message());
  srv_ctx.AddTrailingMetadata(meta5.first, meta5.second);
  srv_ctx.AddTrailingMetadata(meta6.first, meta6.second);
  response_writer.Finish(send_response, Status::OK, tag(5));
  response_reader->Finish(&recv_response, &recv_status, tag(6));

  Verifier(GetParam().disable_blocking)
      .Expect(5, true)
      .Expect(6, true)
      .Verify(cq_.get());

  EXPECT_EQ(send_response.message(), recv_response.message());
  EXPECT_TRUE(recv_status.ok());
  auto server_trailing_metadata = cli_ctx.GetServerTrailingMetadata();
  EXPECT_EQ(meta5.second,
            ToString(server_trailing_metadata.find(meta5.first)->second));
  EXPECT_EQ(meta6.second,
            ToString(server_trailing_metadata.find(meta6.first)->second));
  EXPECT_GE(server_trailing_metadata.size(), static_cast<size_t>(2));
}

// Server uses AsyncNotifyWhenDone API to check for cancellation
TEST_P(AsyncEnd2endTest, ServerCheckCancellation) {
  ResetStub();

  EchoRequest send_request;
  EchoRequest recv_request;
  EchoResponse send_response;
  EchoResponse recv_response;
  Status recv_status;

  ClientContext cli_ctx;
  ServerContext srv_ctx;
  grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);

  send_request.set_message(GetParam().message_content);
  std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
      stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));

  srv_ctx.AsyncNotifyWhenDone(tag(5));
  service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
                       cq_.get(), tag(2));
  Verifier(GetParam().disable_blocking).Expect(2, true).Verify(cq_.get());
  EXPECT_EQ(send_request.message(), recv_request.message());

  cli_ctx.TryCancel();
  Verifier(GetParam().disable_blocking).Expect(5, true).Verify(cq_.get());
  EXPECT_TRUE(srv_ctx.IsCancelled());

  response_reader->Finish(&recv_response, &recv_status, tag(4));
  Verifier(GetParam().disable_blocking).Expect(4, true).Verify(cq_.get());

  EXPECT_EQ(StatusCode::CANCELLED, recv_status.error_code());
}

// Server uses AsyncNotifyWhenDone API to check for normal finish
TEST_P(AsyncEnd2endTest, ServerCheckDone) {
  ResetStub();

  EchoRequest send_request;
  EchoRequest recv_request;
  EchoResponse send_response;
  EchoResponse recv_response;
  Status recv_status;

  ClientContext cli_ctx;
  ServerContext srv_ctx;
  grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);

  send_request.set_message(GetParam().message_content);
  std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
      stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));

  srv_ctx.AsyncNotifyWhenDone(tag(5));
  service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
                       cq_.get(), tag(2));
  Verifier(GetParam().disable_blocking).Expect(2, true).Verify(cq_.get());
  EXPECT_EQ(send_request.message(), recv_request.message());

  send_response.set_message(recv_request.message());
  response_writer.Finish(send_response, Status::OK, tag(3));
  response_reader->Finish(&recv_response, &recv_status, tag(4));
  Verifier(GetParam().disable_blocking)
      .Expect(3, true)
      .Expect(4, true)
      .Expect(5, true)
      .Verify(cq_.get());
  EXPECT_FALSE(srv_ctx.IsCancelled());

  EXPECT_EQ(send_response.message(), recv_response.message());
  EXPECT_TRUE(recv_status.ok());
}

TEST_P(AsyncEnd2endTest, UnimplementedRpc) {
  ChannelArguments args;
  auto channel_creds =
      GetChannelCredentials(GetParam().credentials_type, &args);
  std::shared_ptr<Channel> channel =
      CreateCustomChannel(server_address_.str(), channel_creds, args);
  std::unique_ptr<grpc::testing::UnimplementedEchoService::Stub> stub;
  stub = grpc::testing::UnimplementedEchoService::NewStub(channel);
  EchoRequest send_request;
  EchoResponse recv_response;
  Status recv_status;

  ClientContext cli_ctx;
  send_request.set_message(GetParam().message_content);
  std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
      stub->AsyncUnimplemented(&cli_ctx, send_request, cq_.get()));

  response_reader->Finish(&recv_response, &recv_status, tag(4));
  Verifier(GetParam().disable_blocking).Expect(4, true).Verify(cq_.get());

  EXPECT_EQ(StatusCode::UNIMPLEMENTED, recv_status.error_code());
  EXPECT_EQ("", recv_status.error_message());
}

// This class is for testing scenarios where RPCs are cancelled on the server
// by calling ServerContext::TryCancel(). The server uses the
// AsyncNotifyWhenDone API to check for cancellation.
class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest {
 protected:
  typedef enum {
    DO_NOT_CANCEL = 0,
    CANCEL_BEFORE_PROCESSING,
    CANCEL_DURING_PROCESSING,
    CANCEL_AFTER_PROCESSING
  } ServerTryCancelRequestPhase;

  // Helper for testing client-streaming RPCs which are cancelled on the
  // server. Depending on the value of the server_try_cancel parameter, this
  // will test one of the following three scenarios:
  //   CANCEL_BEFORE_PROCESSING: Rpc is cancelled by the server before reading
  //   any messages from the client
  //
  //   CANCEL_DURING_PROCESSING: Rpc is cancelled by the server while reading
  //   messages from the client
  //
  //   CANCEL_AFTER_PROCESSING: Rpc is cancelled by the server after reading
  //   all messages from the client (but before sending any status back to the
  //   client)
  void TestClientStreamingServerCancel(
      ServerTryCancelRequestPhase server_try_cancel) {
    ResetStub();

    EchoRequest send_request;
    EchoRequest recv_request;
    EchoResponse send_response;
    EchoResponse recv_response;
    Status recv_status;

    ClientContext cli_ctx;
    ServerContext srv_ctx;
    ServerAsyncReader<EchoResponse, EchoRequest> srv_stream(&srv_ctx);

    // Initiate the 'RequestStream' call on the client
    std::unique_ptr<ClientAsyncWriter<EchoRequest>> cli_stream(
        stub_->AsyncRequestStream(&cli_ctx, &recv_response, cq_.get(), tag(1)));
    Verifier(GetParam().disable_blocking).Expect(1, true).Verify(cq_.get());

    // On the server, request to be notified of 'RequestStream' calls
    // and receive the 'RequestStream' call just made by the client
    srv_ctx.AsyncNotifyWhenDone(tag(11));
    service_.RequestRequestStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
                                  tag(2));
    Verifier(GetParam().disable_blocking).Expect(2, true).Verify(cq_.get());

    // Client sends 3 messages (tags 3, 4 and 5)
    for (int tag_idx = 3; tag_idx <= 5; tag_idx++) {
      send_request.set_message("Ping " + grpc::to_string(tag_idx));
      cli_stream->Write(send_request, tag(tag_idx));
      Verifier(GetParam().disable_blocking)
          .Expect(tag_idx, true)
          .Verify(cq_.get());
    }
    cli_stream->WritesDone(tag(6));
    Verifier(GetParam().disable_blocking).Expect(6, true).Verify(cq_.get());

    bool expected_server_cq_result = true;
    bool ignore_cq_result = false;
    bool want_done_tag = false;

    if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
      srv_ctx.TryCancel();
      Verifier(GetParam().disable_blocking).Expect(11, true).Verify(cq_.get());
      EXPECT_TRUE(srv_ctx.IsCancelled());

      // Since cancellation is done before the server reads any requests, we
      // know for sure that all cq results will return false from this point
      // forward
      expected_server_cq_result = false;
    }

    std::thread* server_try_cancel_thd = NULL;

    auto verif = Verifier(GetParam().disable_blocking);

    if (server_try_cancel == CANCEL_DURING_PROCESSING) {
      server_try_cancel_thd =
          new std::thread(&ServerContext::TryCancel, &srv_ctx);
      // The server will cancel the RPC in a parallel thread while reading the
      // requests from the client. Since the cancellation can happen at any
      // time, some of the cq results (i.e. those until cancellation) might be
      // true, but that is non-deterministic, so it is better to ignore the cq
      // results.
      ignore_cq_result = true;
      // Expect that we might possibly see the done tag that
      // indicates cancellation completion in this case
      want_done_tag = true;
      verif.Expect(11, true);
    }

    // Server reads 3 messages (tags 6, 7 and 8)
    // But if want_done_tag is true, we might also see tag 11
    for (int tag_idx = 6; tag_idx <= 8; tag_idx++) {
      srv_stream.Read(&recv_request, tag(tag_idx));
      // Note that we'll add something to the verifier and verify that
      // something was seen, but it might be tag 11 and not what we
      // just added
      int got_tag = verif.Expect(tag_idx, expected_server_cq_result)
                        .Next(cq_.get(), ignore_cq_result);
      GPR_ASSERT((got_tag == tag_idx) || (got_tag == 11 && want_done_tag));
      if (got_tag == 11) {
        EXPECT_TRUE(srv_ctx.IsCancelled());
        want_done_tag = false;
        // Now get the other entry that we were waiting on
        EXPECT_EQ(verif.Next(cq_.get(), ignore_cq_result), tag_idx);
      }
    }

    if (server_try_cancel_thd != NULL) {
      server_try_cancel_thd->join();
      delete server_try_cancel_thd;
    }

    if (server_try_cancel == CANCEL_AFTER_PROCESSING) {
      srv_ctx.TryCancel();
      want_done_tag = true;
      verif.Expect(11, true);
    }

    if (want_done_tag) {
      verif.Verify(cq_.get());
      EXPECT_TRUE(srv_ctx.IsCancelled());
      want_done_tag = false;
    }

    // The RPC has been cancelled at this point for sure (i.e. irrespective of
    // the value of `server_try_cancel`). So, from this point forward, we know
    // that cq results are supposed to return false on the server.

    // Server sends the final message and cancelled status (but the RPC is
    // already cancelled at this point, so we expect the operation to fail)
    srv_stream.Finish(send_response, Status::CANCELLED, tag(9));
    Verifier(GetParam().disable_blocking).Expect(9, false).Verify(cq_.get());

    // Client will see the cancellation
    cli_stream->Finish(&recv_status, tag(10));
    Verifier(GetParam().disable_blocking).Expect(10, true).Verify(cq_.get());
    EXPECT_FALSE(recv_status.ok());
    EXPECT_EQ(::grpc::StatusCode::CANCELLED, recv_status.error_code());
  }

  // Helper for testing server-streaming RPCs which are cancelled on the
  // server.
  // Depending on the value of the server_try_cancel parameter, this will test
  // one of the following three scenarios:
  //   CANCEL_BEFORE_PROCESSING: Rpc is cancelled by the server before sending
  //   any messages to the client
  //
  //   CANCEL_DURING_PROCESSING: Rpc is cancelled by the server while sending
  //   messages to the client
  //
  //   CANCEL_AFTER_PROCESSING: Rpc is cancelled by the server after sending
  //   all messages to the client (but before sending any status back to the
  //   client)
  void TestServerStreamingServerCancel(
      ServerTryCancelRequestPhase server_try_cancel) {
    ResetStub();

    EchoRequest send_request;
    EchoRequest recv_request;
    EchoResponse send_response;
    EchoResponse recv_response;
    Status recv_status;
    ClientContext cli_ctx;
    ServerContext srv_ctx;
    ServerAsyncWriter<EchoResponse> srv_stream(&srv_ctx);

    send_request.set_message("Ping");
    // Initiate the 'ResponseStream' call on the client
    std::unique_ptr<ClientAsyncReader<EchoResponse>> cli_stream(
        stub_->AsyncResponseStream(&cli_ctx, send_request, cq_.get(), tag(1)));
    Verifier(GetParam().disable_blocking).Expect(1, true).Verify(cq_.get());
    // On the server, request to be notified of 'ResponseStream' calls and
    // receive the call just made by the client
    srv_ctx.AsyncNotifyWhenDone(tag(11));
    service_.RequestResponseStream(&srv_ctx, &recv_request, &srv_stream,
                                   cq_.get(), cq_.get(), tag(2));
    Verifier(GetParam().disable_blocking).Expect(2, true).Verify(cq_.get());
    EXPECT_EQ(send_request.message(), recv_request.message());

    bool expected_cq_result = true;
    bool ignore_cq_result = false;
    bool want_done_tag = false;

    if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
      srv_ctx.TryCancel();
      Verifier(GetParam().disable_blocking).Expect(11, true).Verify(cq_.get());
      EXPECT_TRUE(srv_ctx.IsCancelled());

      // We know for sure that all cq results will be false from this point
      // since the server cancelled the RPC
      expected_cq_result = false;
    }

    std::thread* server_try_cancel_thd = NULL;

    auto verif = Verifier(GetParam().disable_blocking);

    if (server_try_cancel == CANCEL_DURING_PROCESSING) {
      server_try_cancel_thd =
          new std::thread(&ServerContext::TryCancel, &srv_ctx);

      // The server will cancel the RPC in a parallel thread while writing
      // responses to the client. Since the cancellation can happen at any
      // time, some of the cq results (i.e. those until cancellation) might be
      // true, but that is non-deterministic,
      // so it is better to ignore the cq results.
      ignore_cq_result = true;
      // Expect that we might possibly see the done tag that
      // indicates cancellation completion in this case
      want_done_tag = true;
      verif.Expect(11, true);
    }

    // Server sends three messages (tags 3, 4 and 5)
    // But if want_done_tag is true, we might also see tag 11
    for (int tag_idx = 3; tag_idx <= 5; tag_idx++) {
      send_response.set_message("Pong " + grpc::to_string(tag_idx));
      srv_stream.Write(send_response, tag(tag_idx));
      // Note that we'll add something to the verifier and verify that
      // something was seen, but it might be tag 11 and not what we
      // just added
      int got_tag = verif.Expect(tag_idx, expected_cq_result)
                        .Next(cq_.get(), ignore_cq_result);
      GPR_ASSERT((got_tag == tag_idx) || (got_tag == 11 && want_done_tag));
      if (got_tag == 11) {
        EXPECT_TRUE(srv_ctx.IsCancelled());
        want_done_tag = false;
        // Now get the other entry that we were waiting on
        EXPECT_EQ(verif.Next(cq_.get(), ignore_cq_result), tag_idx);
      }
    }

    if (server_try_cancel_thd != NULL) {
      server_try_cancel_thd->join();
      delete server_try_cancel_thd;
    }

    if (server_try_cancel == CANCEL_AFTER_PROCESSING) {
      srv_ctx.TryCancel();
      want_done_tag = true;
      verif.Expect(11, true);

      // Client reads may fail because it is notified that the stream is
      // cancelled.
      ignore_cq_result = true;
    }

    if (want_done_tag) {
      verif.Verify(cq_.get());
      EXPECT_TRUE(srv_ctx.IsCancelled());
      want_done_tag = false;
    }

    // Client attempts to read the three messages from the server
    for (int tag_idx = 6; tag_idx <= 8; tag_idx++) {
      cli_stream->Read(&recv_response, tag(tag_idx));
      Verifier(GetParam().disable_blocking)
          .Expect(tag_idx, expected_cq_result)
          .Verify(cq_.get(), ignore_cq_result);
    }

    // The RPC has been cancelled at this point for sure (i.e. irrespective of
    // the value of `server_try_cancel`). So, from this point forward, we know
    // that cq results are supposed to return false on the server.

    // Server finishes the stream (but the RPC is already cancelled)
    srv_stream.Finish(Status::CANCELLED, tag(9));
    Verifier(GetParam().disable_blocking).Expect(9, false).Verify(cq_.get());

    // Client will see the cancellation
    cli_stream->Finish(&recv_status, tag(10));
    Verifier(GetParam().disable_blocking).Expect(10, true).Verify(cq_.get());
    EXPECT_FALSE(recv_status.ok());
    EXPECT_EQ(::grpc::StatusCode::CANCELLED, recv_status.error_code());
  }

  // Helper for testing bidirectional-streaming RPCs which are cancelled on the
  // server.
  //
  // Depending on the value of the server_try_cancel parameter, this will
  // test one of the following three scenarios:
  //   CANCEL_BEFORE_PROCESSING: Rpc is cancelled by the server before reading/
  //   writing any messages from/to the client
  //
  //   CANCEL_DURING_PROCESSING: Rpc is cancelled by the server while reading
  //   messages from the client
  //
  //   CANCEL_AFTER_PROCESSING: Rpc is cancelled by the server after reading
  //   all messages from the client (but before sending any status back to the
  //   client)
  void TestBidiStreamingServerCancel(
      ServerTryCancelRequestPhase server_try_cancel) {
    ResetStub();

    EchoRequest send_request;
    EchoRequest recv_request;
    EchoResponse send_response;
    EchoResponse recv_response;
    Status recv_status;
    ClientContext cli_ctx;
    ServerContext srv_ctx;
    ServerAsyncReaderWriter<EchoResponse, EchoRequest> srv_stream(&srv_ctx);

    // Initiate the call from the client side
    std::unique_ptr<ClientAsyncReaderWriter<EchoRequest, EchoResponse>>
        cli_stream(stub_->AsyncBidiStream(&cli_ctx, cq_.get(), tag(1)));
    Verifier(GetParam().disable_blocking).Expect(1, true).Verify(cq_.get());

    // On the server, request to be notified of the 'BidiStream' call and
    // receive the call just made by the client
    srv_ctx.AsyncNotifyWhenDone(tag(11));
    service_.RequestBidiStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
                               tag(2));
    Verifier(GetParam().disable_blocking).Expect(2, true).Verify(cq_.get());

    // Client sends the first and the only message
    send_request.set_message("Ping");
    cli_stream->Write(send_request, tag(3));
    Verifier(GetParam().disable_blocking).Expect(3, true).Verify(cq_.get());

    bool expected_cq_result = true;
    bool ignore_cq_result = false;
    bool want_done_tag = false;

    if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
      srv_ctx.TryCancel();
      Verifier(GetParam().disable_blocking).Expect(11, true).Verify(cq_.get());
      EXPECT_TRUE(srv_ctx.IsCancelled());

      // We know for sure that all cq results will be false from this point
      // since the server cancelled the RPC
      expected_cq_result = false;
    }

    std::thread* server_try_cancel_thd = NULL;

    auto verif = Verifier(GetParam().disable_blocking);

    if (server_try_cancel == CANCEL_DURING_PROCESSING) {
      server_try_cancel_thd =
          new std::thread(&ServerContext::TryCancel, &srv_ctx);

      // Since the server is going to cancel the RPC in a parallel thread, some
      // of the cq results (i.e. those until the cancellation) might be true.
      // Since that number is non-deterministic, it is better to ignore the cq
      // results.
      ignore_cq_result = true;
      // Expect that we might possibly see the done tag that
      // indicates cancellation completion in this case
      want_done_tag = true;
      verif.Expect(11, true);
    }

    int got_tag;
    srv_stream.Read(&recv_request, tag(4));
    verif.Expect(4, expected_cq_result);
    got_tag = verif.Next(cq_.get(), ignore_cq_result);
    GPR_ASSERT((got_tag == 4) || (got_tag == 11 && want_done_tag));
    if (got_tag == 11) {
      EXPECT_TRUE(srv_ctx.IsCancelled());
      want_done_tag = false;
      // Now get the other entry that we were waiting on
      EXPECT_EQ(verif.Next(cq_.get(), ignore_cq_result), 4);
    }

    send_response.set_message("Pong");
    srv_stream.Write(send_response, tag(5));
    verif.Expect(5, expected_cq_result);
    got_tag = verif.Next(cq_.get(), ignore_cq_result);
    GPR_ASSERT((got_tag == 5) || (got_tag == 11 && want_done_tag));
    if (got_tag == 11) {
      EXPECT_TRUE(srv_ctx.IsCancelled());
      want_done_tag = false;
      // Now get the other entry that we were waiting on
      EXPECT_EQ(verif.Next(cq_.get(), ignore_cq_result), 5);
    }

    cli_stream->Read(&recv_response, tag(6));
    verif.Expect(6, expected_cq_result);
    got_tag = verif.Next(cq_.get(), ignore_cq_result);
    GPR_ASSERT((got_tag == 6) || (got_tag == 11 && want_done_tag));
    if (got_tag == 11) {
      EXPECT_TRUE(srv_ctx.IsCancelled());
      want_done_tag = false;
      // Now get the other entry that we were waiting on
      EXPECT_EQ(verif.Next(cq_.get(), ignore_cq_result), 6);
    }

    // This is expected to succeed in all cases
    cli_stream->WritesDone(tag(7));
    verif.Expect(7, true);
    got_tag = verif.Next(cq_.get(), ignore_cq_result);
    GPR_ASSERT((got_tag == 7) || (got_tag == 11 && want_done_tag));
    if (got_tag == 11) {
      EXPECT_TRUE(srv_ctx.IsCancelled());
      want_done_tag = false;
      // Now get the other entry that we were waiting on
      EXPECT_EQ(verif.Next(cq_.get(), ignore_cq_result), 7);
    }

    // This is expected to fail in all cases, i.e. for all values of
    // server_try_cancel. This is because at this point, either there are no
    // more msgs from the client (because the client called WritesDone) or the
    // RPC is cancelled on the server
    srv_stream.Read(&recv_request, tag(8));
    verif.Expect(8, false);
    got_tag = verif.Next(cq_.get(), ignore_cq_result);
    GPR_ASSERT((got_tag == 8) || (got_tag == 11 && want_done_tag));
    if (got_tag == 11) {
      EXPECT_TRUE(srv_ctx.IsCancelled());
      want_done_tag = false;
      // Now get the other entry that we were waiting on
      EXPECT_EQ(verif.Next(cq_.get(), ignore_cq_result), 8);
    }

    if (server_try_cancel_thd != NULL) {
      server_try_cancel_thd->join();
      delete server_try_cancel_thd;
    }

    if (server_try_cancel == CANCEL_AFTER_PROCESSING) {
      srv_ctx.TryCancel();
      want_done_tag = true;
      verif.Expect(11, true);
    }

    if (want_done_tag) {
      verif.Verify(cq_.get());
      EXPECT_TRUE(srv_ctx.IsCancelled());
      want_done_tag = false;
    }

    // The RPC has been cancelled at this point for sure (i.e. irrespective of
    // the value of `server_try_cancel`). So, from this point forward, we know
    // that cq results are supposed to return false on the server.
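    // Server-side Finish on an already-cancelled RPC therefore completes with
    // ok=false (tag 9), while the client's Finish still succeeds and reports a
    // CANCELLED status.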
    srv_stream.Finish(Status::CANCELLED, tag(9));
    Verifier(GetParam().disable_blocking).Expect(9, false).Verify(cq_.get());

    cli_stream->Finish(&recv_status, tag(10));
    Verifier(GetParam().disable_blocking).Expect(10, true).Verify(cq_.get());
    EXPECT_FALSE(recv_status.ok());
    EXPECT_EQ(grpc::StatusCode::CANCELLED, recv_status.error_code());
  }
};

TEST_P(AsyncEnd2endServerTryCancelTest, ClientStreamingServerTryCancelBefore) {
  TestClientStreamingServerCancel(CANCEL_BEFORE_PROCESSING);
}

TEST_P(AsyncEnd2endServerTryCancelTest, ClientStreamingServerTryCancelDuring) {
  TestClientStreamingServerCancel(CANCEL_DURING_PROCESSING);
}

TEST_P(AsyncEnd2endServerTryCancelTest, ClientStreamingServerTryCancelAfter) {
  TestClientStreamingServerCancel(CANCEL_AFTER_PROCESSING);
}

TEST_P(AsyncEnd2endServerTryCancelTest, ServerStreamingServerTryCancelBefore) {
  TestServerStreamingServerCancel(CANCEL_BEFORE_PROCESSING);
}

TEST_P(AsyncEnd2endServerTryCancelTest, ServerStreamingServerTryCancelDuring) {
  TestServerStreamingServerCancel(CANCEL_DURING_PROCESSING);
}

TEST_P(AsyncEnd2endServerTryCancelTest, ServerStreamingServerTryCancelAfter) {
  TestServerStreamingServerCancel(CANCEL_AFTER_PROCESSING);
}

TEST_P(AsyncEnd2endServerTryCancelTest, ServerBidiStreamingTryCancelBefore) {
  TestBidiStreamingServerCancel(CANCEL_BEFORE_PROCESSING);
}

TEST_P(AsyncEnd2endServerTryCancelTest, ServerBidiStreamingTryCancelDuring) {
  TestBidiStreamingServerCancel(CANCEL_DURING_PROCESSING);
}

TEST_P(AsyncEnd2endServerTryCancelTest, ServerBidiStreamingTryCancelAfter) {
  TestBidiStreamingServerCancel(CANCEL_AFTER_PROCESSING);
}

std::vector<TestScenario> CreateTestScenarios(bool test_disable_blocking,
                                              bool test_secure,
                                              int test_big_limit) {
  std::vector<TestScenario> scenarios;
  std::vector<grpc::string> credentials_types;
  std::vector<grpc::string> messages;

  credentials_types.push_back(kInsecureCredentialsType);
  auto sec_list = GetSecureCredentialsTypeList();
  for (auto sec = sec_list.begin(); sec != sec_list.end(); sec++) {
    credentials_types.push_back(*sec);
  }

  messages.push_back("Hello");
  for (int sz = 1; sz < test_big_limit; sz *= 2) {
    grpc::string big_msg;
    for (int i = 0; i < sz * 1024; i++) {
      char c = 'a' + (i % 26);
      big_msg += c;
    }
    messages.push_back(big_msg);
  }

  for (auto cred = credentials_types.begin(); cred != credentials_types.end();
       ++cred) {
    for (auto msg = messages.begin(); msg != messages.end(); msg++) {
      scenarios.emplace_back(false, *cred, *msg);
      if (test_disable_blocking) {
        scenarios.emplace_back(true, *cred, *msg);
      }
    }
  }
  return scenarios;
}

INSTANTIATE_TEST_CASE_P(AsyncEnd2end, AsyncEnd2endTest,
                        ::testing::ValuesIn(CreateTestScenarios(true, true,
                                                                1024)));
INSTANTIATE_TEST_CASE_P(AsyncEnd2endServerTryCancel,
                        AsyncEnd2endServerTryCancelTest,
                        ::testing::ValuesIn(CreateTestScenarios(false, false,
                                                                0)));

}  // namespace
}  // namespace testing
}  // namespace grpc

int main(int argc, char** argv) {
  grpc_test_init(argc, argv);
  gpr_tls_init(&g_is_async_end2end_test);
  ::testing::InitGoogleTest(&argc, argv);
  int ret = RUN_ALL_TESTS();
  gpr_tls_destroy(&g_is_async_end2end_test);
  return ret;
}