
Merge remote-tracking branch 'upstream/master' into libuv_em_basic

Guantao Liu, 5 years ago
parent commit 5d24c1c6e2
100 changed files with 3308 additions and 1954 deletions
  1. 1 1
      .github/CODEOWNERS
  2. 1 1
      .github/ISSUE_TEMPLATE/bug_report.md
  3. 1 1
      .github/ISSUE_TEMPLATE/cleanup_request.md
  4. 1 1
      .github/ISSUE_TEMPLATE/feature_request.md
  5. 1 1
      .github/pull_request_template.md
  6. 2 0
      .gitignore
  7. 5 2
      BUILD
  8. 6 4
      BUILD.gn
  9. 103 19
      CMakeLists.txt
  10. 109 14
      Makefile
  11. 0 12
      OWNERS
  12. 5 1
      bazel/grpc_build_system.bzl
  13. 8 3
      bazel/grpc_deps.bzl
  14. 29 2
      build.yaml
  15. 28 0
      cmake/abseil-cpp.cmake
  16. 2 1
      cmake/benchmark.cmake
  17. 2 1
      cmake/gflags.cmake
  18. 1 2
      config.m4
  19. 1 2
      config.w32
  20. 12 1
      doc/core/moving-to-c++.md
  21. 96 100
      examples/objective-c/route_guide/ViewControllers.m
  22. 5 0
      gRPC-C++.podspec
  23. 3 2
      gRPC-Core.podspec
  24. 2 2
      grpc.gemspec
  25. 8 12
      grpc.gyp
  26. 4 0
      include/grpc/impl/codegen/grpc_types.h
  27. 0 1
      include/grpcpp/channel_impl.h
  28. 34 34
      include/grpcpp/impl/codegen/async_generic_service.h
  29. 13 5
      include/grpcpp/impl/codegen/callback_common.h
  30. 17 17
      include/grpcpp/impl/codegen/client_callback_impl.h
  31. 13 1
      include/grpcpp/impl/codegen/client_context_impl.h
  32. 4 2
      include/grpcpp/impl/codegen/completion_queue_impl.h
  33. 25 12
      include/grpcpp/impl/codegen/method_handler_impl.h
  34. 7 4
      include/grpcpp/impl/codegen/rpc_service_method.h
  35. 5 7
      include/grpcpp/impl/codegen/server_callback.h
  36. 814 0
      include/grpcpp/impl/codegen/server_callback_handlers.h
  37. 425 860
      include/grpcpp/impl/codegen/server_callback_impl.h
  38. 8 0
      include/grpcpp/impl/codegen/server_context.h
  39. 241 32
      include/grpcpp/impl/codegen/server_context_impl.h
  40. 8 6
      include/grpcpp/impl/codegen/server_interceptor.h
  41. 0 2
      include/grpcpp/server_impl.h
  42. 55 0
      include/grpcpp/test/default_reactor_test_peer.h
  43. 2 2
      package.xml
  44. 52 42
      src/compiler/cpp_generator.cc
  45. 0 2
      src/core/ext/filters/client_channel/OWNERS
  46. 2 2
      src/core/ext/filters/client_channel/client_channel.cc
  47. 3 3
      src/core/ext/filters/client_channel/client_channel_plugin.cc
  48. 0 41
      src/core/ext/filters/client_channel/connector.cc
  49. 40 45
      src/core/ext/filters/client_channel/connector.h
  50. 107 116
      src/core/ext/filters/client_channel/http_proxy.cc
  51. 5 1
      src/core/ext/filters/client_channel/http_proxy.h
  52. 0 48
      src/core/ext/filters/client_channel/proxy_mapper.cc
  53. 14 34
      src/core/ext/filters/client_channel/proxy_mapper.h
  54. 46 79
      src/core/ext/filters/client_channel/proxy_mapper_registry.cc
  55. 23 17
      src/core/ext/filters/client_channel/proxy_mapper_registry.h
  56. 14 15
      src/core/ext/filters/client_channel/subchannel.cc
  57. 4 4
      src/core/ext/filters/client_channel/subchannel.h
  58. 3 4
      src/core/ext/filters/client_idle/client_idle_filter.cc
  59. 5 0
      src/core/ext/filters/http/message_compress/message_compress_filter.cc
  60. 134 174
      src/core/ext/transport/chttp2/client/chttp2_connector.cc
  61. 31 1
      src/core/ext/transport/chttp2/client/chttp2_connector.h
  62. 2 3
      src/core/ext/transport/chttp2/client/insecure/channel_create.cc
  63. 2 3
      src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc
  64. 1 1
      src/core/lib/iomgr/endpoint_pair_posix.cc
  65. 103 0
      src/core/lib/iomgr/logical_thread.cc
  66. 52 0
      src/core/lib/iomgr/logical_thread.h
  67. 13 0
      src/core/lib/iomgr/pollset_windows.cc
  68. 3 0
      src/core/lib/iomgr/port.h
  69. 1 1
      src/core/lib/iomgr/udp_server.cc
  70. 1 1
      src/core/lib/security/credentials/oauth2/oauth2_credentials.cc
  71. 2 2
      src/core/lib/security/security_connector/alts/alts_security_connector.cc
  72. 1 0
      src/core/lib/security/security_connector/alts/alts_security_connector.h
  73. 2 1
      src/core/lib/surface/completion_queue.cc
  74. 2 1
      src/core/lib/transport/byte_stream.h
  75. 130 28
      src/core/tsi/alts/handshaker/alts_handshaker_client.cc
  76. 8 2
      src/cpp/client/channel_cc.cc
  77. 15 2
      src/cpp/client/client_context.cc
  78. 0 2
      src/cpp/server/dynamic_thread_pool.cc
  79. 0 2
      src/cpp/server/dynamic_thread_pool.h
  80. 0 1
      src/cpp/server/health/default_health_check_service.cc
  81. 0 1
      src/cpp/server/health/default_health_check_service.h
  82. 52 0
      src/cpp/server/server_callback.cc
  83. 40 16
      src/cpp/server/server_cc.cc
  84. 47 70
      src/cpp/server/server_context.cc
  85. 0 1
      src/cpp/thread_manager/thread_manager.cc
  86. 0 2
      src/cpp/thread_manager/thread_manager.h
  87. 20 0
      src/csharp/Grpc.Core.Tests/CompressionTest.cs
  88. 4 0
      src/objective-c/GRPCClient/GRPCInterceptor.h
  89. 5 2
      src/objective-c/ProtoRPC/ProtoRPC.h
  90. 2 2
      src/objective-c/ProtoRPC/ProtoRPC.m
  91. 31 0
      src/php/ext/grpc/channel_credentials.c
  92. 65 0
      src/php/ext/grpc/php_grpc.c
  93. 3 0
      src/php/ext/grpc/php_grpc.h
  94. 6 4
      src/php/lib/Grpc/BaseStub.php
  95. 10 0
      src/php/tests/unit_tests/ChannelCredentialsTest.php
  96. 31 0
      src/proto/grpc/http_over_grpc/BUILD
  97. 51 0
      src/proto/grpc/http_over_grpc/http_over_grpc.proto
  98. 11 6
      src/proto/grpc/testing/control.proto
  99. 1 0
      src/python/grpcio/commands.py
  100. 1 2
      src/python/grpcio/grpc_core_dependencies.py

+ 1 - 1
.github/CODEOWNERS

@@ -4,5 +4,5 @@
 /**/OWNERS @markdroth @nicolasnoble @a11r
 /bazel/** @nicolasnoble @jtattermusch @veblush @gnossen
 /cmake/** @jtattermusch @nicolasnoble @apolcyn
-/src/core/ext/filters/client_channel/** @markdroth @apolcyn @AspirinSJL
+/src/core/ext/filters/client_channel/** @markdroth
 /tools/dockerfile/** @jtattermusch @apolcyn @nicolasnoble

+ 1 - 1
.github/ISSUE_TEMPLATE/bug_report.md

@@ -2,7 +2,7 @@
 name: Report a bug
 about: Create a report to help us improve
 labels: kind/bug, priority/P2
-assignees: sheenaqotj
+assignees: nicolasnoble
 
 ---
 

+ 1 - 1
.github/ISSUE_TEMPLATE/cleanup_request.md

@@ -2,7 +2,7 @@
 name: Request a cleanup
 about: Suggest a cleanup in our repository
 labels: kind/internal cleanup, priority/P2
-assignees: sheenaqotj
+assignees: nicolasnoble
 
 ---
 

+ 1 - 1
.github/ISSUE_TEMPLATE/feature_request.md

@@ -2,7 +2,7 @@
 name: Request a feature
 about: Suggest an idea for this project
 labels: kind/enhancement, priority/P2
-assignees: sheenaqotj
+assignees: nicolasnoble
 
 ---
 

+ 1 - 1
.github/pull_request_template.md

@@ -8,4 +8,4 @@ If you know who should review your pull request, please remove the mentioning be
 
 -->
 
-@sheenaqotj
+@nicolasnoble

+ 2 - 0
.gitignore

@@ -19,6 +19,8 @@ py27_gevent/
 py27_native/
 py3[0-9]_gevent/
 py3[0-9]_native/
+a.out
+src/python/grpcio_*/LICENSE
 
 # Node installation output
 node_modules

+ 5 - 2
BUILD

@@ -150,6 +150,7 @@ GRPCXX_SRCS = [
     "src/cpp/server/health/health_check_service.cc",
     "src/cpp/server/health/health_check_service_server_builder_option.cc",
     "src/cpp/server/server_builder.cc",
+    "src/cpp/server/server_callback.cc",
     "src/cpp/server/server_cc.cc",
     "src/cpp/server/server_context.cc",
     "src/cpp/server/server_credentials.cc",
@@ -720,6 +721,7 @@ grpc_cc_library(
         "src/core/lib/iomgr/is_epollexclusive_available.cc",
         "src/core/lib/iomgr/load_file.cc",
         "src/core/lib/iomgr/lockfree_event.cc",
+        "src/core/lib/iomgr/logical_thread.cc",
         "src/core/lib/iomgr/polling_entity.cc",
         "src/core/lib/iomgr/pollset.cc",
         "src/core/lib/iomgr/pollset_custom.cc",
@@ -872,6 +874,7 @@ grpc_cc_library(
         "src/core/lib/iomgr/is_epollexclusive_available.h",
         "src/core/lib/iomgr/load_file.h",
         "src/core/lib/iomgr/lockfree_event.h",
+        "src/core/lib/iomgr/logical_thread.h",  
         "src/core/lib/iomgr/nameser.h",
         "src/core/lib/iomgr/polling_entity.h",
         "src/core/lib/iomgr/pollset.h",
@@ -1020,7 +1023,6 @@ grpc_cc_library(
         "src/core/ext/filters/client_channel/client_channel_channelz.cc",
         "src/core/ext/filters/client_channel/client_channel_factory.cc",
         "src/core/ext/filters/client_channel/client_channel_plugin.cc",
-        "src/core/ext/filters/client_channel/connector.cc",
         "src/core/ext/filters/client_channel/global_subchannel_pool.cc",
         "src/core/ext/filters/client_channel/health/health_check_client.cc",
         "src/core/ext/filters/client_channel/http_connect_handshaker.cc",
@@ -1029,7 +1031,6 @@ grpc_cc_library(
         "src/core/ext/filters/client_channel/lb_policy_registry.cc",
         "src/core/ext/filters/client_channel/local_subchannel_pool.cc",
         "src/core/ext/filters/client_channel/parse_address.cc",
-        "src/core/ext/filters/client_channel/proxy_mapper.cc",
         "src/core/ext/filters/client_channel/proxy_mapper_registry.cc",
         "src/core/ext/filters/client_channel/resolver.cc",
         "src/core/ext/filters/client_channel/resolver_registry.cc",
@@ -2129,6 +2130,7 @@ grpc_cc_library(
         "include/grpcpp/impl/codegen/security/auth_context.h",
         "include/grpcpp/impl/codegen/serialization_traits.h",
         "include/grpcpp/impl/codegen/server_callback.h",
+        "include/grpcpp/impl/codegen/server_callback_handlers.h",
         "include/grpcpp/impl/codegen/server_callback_impl.h",
         "include/grpcpp/impl/codegen/server_context.h",
         "include/grpcpp/impl/codegen/server_context_impl.h",
@@ -2238,6 +2240,7 @@ grpc_cc_library(
         "include/grpc++/test/server_context_test_spouse.h",
         "include/grpcpp/test/mock_stream.h",
         "include/grpcpp/test/server_context_test_spouse.h",
+        "include/grpcpp/test/default_reactor_test_peer.h",
     ],
     deps = [
         ":grpc++",

+ 6 - 4
BUILD.gn

@@ -218,7 +218,6 @@ config("grpc_config") {
         "src/core/ext/filters/client_channel/client_channel_factory.cc",
         "src/core/ext/filters/client_channel/client_channel_factory.h",
         "src/core/ext/filters/client_channel/client_channel_plugin.cc",
-        "src/core/ext/filters/client_channel/connector.cc",
         "src/core/ext/filters/client_channel/connector.h",
         "src/core/ext/filters/client_channel/global_subchannel_pool.cc",
         "src/core/ext/filters/client_channel/global_subchannel_pool.h",
@@ -253,7 +252,6 @@ config("grpc_config") {
         "src/core/ext/filters/client_channel/local_subchannel_pool.h",
         "src/core/ext/filters/client_channel/parse_address.cc",
         "src/core/ext/filters/client_channel/parse_address.h",
-        "src/core/ext/filters/client_channel/proxy_mapper.cc",
         "src/core/ext/filters/client_channel/proxy_mapper.h",
         "src/core/ext/filters/client_channel/proxy_mapper_registry.cc",
         "src/core/ext/filters/client_channel/proxy_mapper_registry.h",
@@ -597,6 +595,8 @@ config("grpc_config") {
         "src/core/lib/iomgr/load_file.h",
         "src/core/lib/iomgr/lockfree_event.cc",
         "src/core/lib/iomgr/lockfree_event.h",
+        "src/core/lib/iomgr/logical_thread.cc",
+        "src/core/lib/iomgr/logical_thread.h",
         "src/core/lib/iomgr/nameser.h",
         "src/core/lib/iomgr/poller/eventmanager_libuv.cc",
         "src/core/lib/iomgr/poller/eventmanager_libuv.h",
@@ -1122,6 +1122,7 @@ config("grpc_config") {
         "include/grpcpp/impl/codegen/security/auth_context.h",
         "include/grpcpp/impl/codegen/serialization_traits.h",
         "include/grpcpp/impl/codegen/server_callback.h",
+        "include/grpcpp/impl/codegen/server_callback_handlers.h",
         "include/grpcpp/impl/codegen/server_callback_impl.h",
         "include/grpcpp/impl/codegen/server_context.h",
         "include/grpcpp/impl/codegen/server_context_impl.h",
@@ -1204,7 +1205,6 @@ config("grpc_config") {
         "src/core/ext/filters/client_channel/client_channel_factory.cc",
         "src/core/ext/filters/client_channel/client_channel_factory.h",
         "src/core/ext/filters/client_channel/client_channel_plugin.cc",
-        "src/core/ext/filters/client_channel/connector.cc",
         "src/core/ext/filters/client_channel/connector.h",
         "src/core/ext/filters/client_channel/global_subchannel_pool.cc",
         "src/core/ext/filters/client_channel/global_subchannel_pool.h",
@@ -1223,7 +1223,6 @@ config("grpc_config") {
         "src/core/ext/filters/client_channel/local_subchannel_pool.h",
         "src/core/ext/filters/client_channel/parse_address.cc",
         "src/core/ext/filters/client_channel/parse_address.h",
-        "src/core/ext/filters/client_channel/proxy_mapper.cc",
         "src/core/ext/filters/client_channel/proxy_mapper.h",
         "src/core/ext/filters/client_channel/proxy_mapper_registry.cc",
         "src/core/ext/filters/client_channel/proxy_mapper_registry.h",
@@ -1436,6 +1435,8 @@ config("grpc_config") {
         "src/core/lib/iomgr/load_file.h",
         "src/core/lib/iomgr/lockfree_event.cc",
         "src/core/lib/iomgr/lockfree_event.h",
+        "src/core/lib/iomgr/logical_thread.cc",
+        "src/core/lib/iomgr/logical_thread.h",
         "src/core/lib/iomgr/nameser.h",
         "src/core/lib/iomgr/poller/eventmanager_libuv.cc",
         "src/core/lib/iomgr/poller/eventmanager_libuv.h",
@@ -1657,6 +1658,7 @@ config("grpc_config") {
         "src/cpp/server/secure_server_credentials.cc",
         "src/cpp/server/secure_server_credentials.h",
         "src/cpp/server/server_builder.cc",
+        "src/cpp/server/server_callback.cc",
         "src/cpp/server/server_cc.cc",
         "src/cpp/server/server_context.cc",
         "src/cpp/server/server_credentials.cc",

+ 103 - 19
CMakeLists.txt

@@ -76,11 +76,16 @@ set_property(CACHE gRPC_PROTOBUF_PROVIDER PROPERTY STRINGS "module" "package")
 set(gRPC_PROTOBUF_PACKAGE_TYPE "" CACHE STRING "Algorithm for searching protobuf package")
 set_property(CACHE gRPC_PROTOBUF_PACKAGE_TYPE PROPERTY STRINGS "CONFIG" "MODULE")
 
-set(gRPC_GFLAGS_PROVIDER "module" CACHE STRING "Provider of gflags library")
-set_property(CACHE gRPC_GFLAGS_PROVIDER PROPERTY STRINGS "module" "package")
+if(gRPC_BUILD_TESTS)
+  set(gRPC_GFLAGS_PROVIDER "module" CACHE STRING "Provider of gflags library")
+  set_property(CACHE gRPC_GFLAGS_PROVIDER PROPERTY STRINGS "module" "package")
 
-set(gRPC_BENCHMARK_PROVIDER "module" CACHE STRING "Provider of benchmark library")
-set_property(CACHE gRPC_BENCHMARK_PROVIDER PROPERTY STRINGS "module" "package")
+  set(gRPC_BENCHMARK_PROVIDER "module" CACHE STRING "Provider of benchmark library")
+  set_property(CACHE gRPC_BENCHMARK_PROVIDER PROPERTY STRINGS "module" "package")
+else()
+  set(gRPC_GFLAGS_PROVIDER "none")
+  set(gRPC_BENCHMARK_PROVIDER "none")
+endif()
 
 set(gRPC_USE_PROTO_LITE OFF CACHE BOOL "Use the protobuf-lite library")
 
@@ -775,6 +780,7 @@ if(gRPC_BUILD_TESTS)
   if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
     add_dependencies(buildtests_cxx json_run_localhost)
   endif()
+  add_dependencies(buildtests_cxx logical_thread_test)
   add_dependencies(buildtests_cxx message_allocator_end2end_test)
   add_dependencies(buildtests_cxx metrics_client)
   add_dependencies(buildtests_cxx mock_test)
@@ -782,6 +788,9 @@ if(gRPC_BUILD_TESTS)
   add_dependencies(buildtests_cxx noop-benchmark)
   add_dependencies(buildtests_cxx optional_test)
   add_dependencies(buildtests_cxx orphanable_test)
+  if(_gRPC_PLATFORM_WINDOWS)
+    add_dependencies(buildtests_cxx pollset_windows_starvation_test.cc)
+  endif()
   add_dependencies(buildtests_cxx port_sharing_end2end_test)
   add_dependencies(buildtests_cxx proto_server_reflection_test)
   add_dependencies(buildtests_cxx proto_utils_test)
@@ -1159,6 +1168,7 @@ add_library(grpc
   src/core/lib/iomgr/is_epollexclusive_available.cc
   src/core/lib/iomgr/load_file.cc
   src/core/lib/iomgr/lockfree_event.cc
+  src/core/lib/iomgr/logical_thread.cc
   src/core/lib/iomgr/poller/eventmanager_libuv.cc
   src/core/lib/iomgr/polling_entity.cc
   src/core/lib/iomgr/pollset.cc
@@ -1362,7 +1372,6 @@ add_library(grpc
   src/core/ext/filters/client_channel/client_channel_channelz.cc
   src/core/ext/filters/client_channel/client_channel_factory.cc
   src/core/ext/filters/client_channel/client_channel_plugin.cc
-  src/core/ext/filters/client_channel/connector.cc
   src/core/ext/filters/client_channel/global_subchannel_pool.cc
   src/core/ext/filters/client_channel/health/health_check_client.cc
   src/core/ext/filters/client_channel/http_connect_handshaker.cc
@@ -1371,7 +1380,6 @@ add_library(grpc
   src/core/ext/filters/client_channel/lb_policy_registry.cc
   src/core/ext/filters/client_channel/local_subchannel_pool.cc
   src/core/ext/filters/client_channel/parse_address.cc
-  src/core/ext/filters/client_channel/proxy_mapper.cc
   src/core/ext/filters/client_channel/proxy_mapper_registry.cc
   src/core/ext/filters/client_channel/resolver.cc
   src/core/ext/filters/client_channel/resolver_registry.cc
@@ -1634,6 +1642,7 @@ add_library(grpc_cronet
   src/core/lib/iomgr/is_epollexclusive_available.cc
   src/core/lib/iomgr/load_file.cc
   src/core/lib/iomgr/lockfree_event.cc
+  src/core/lib/iomgr/logical_thread.cc
   src/core/lib/iomgr/poller/eventmanager_libuv.cc
   src/core/lib/iomgr/polling_entity.cc
   src/core/lib/iomgr/pollset.cc
@@ -1769,7 +1778,6 @@ add_library(grpc_cronet
   src/core/ext/filters/client_channel/client_channel_channelz.cc
   src/core/ext/filters/client_channel/client_channel_factory.cc
   src/core/ext/filters/client_channel/client_channel_plugin.cc
-  src/core/ext/filters/client_channel/connector.cc
   src/core/ext/filters/client_channel/global_subchannel_pool.cc
   src/core/ext/filters/client_channel/health/health_check_client.cc
   src/core/ext/filters/client_channel/http_connect_handshaker.cc
@@ -1778,7 +1786,6 @@ add_library(grpc_cronet
   src/core/ext/filters/client_channel/lb_policy_registry.cc
   src/core/ext/filters/client_channel/local_subchannel_pool.cc
   src/core/ext/filters/client_channel/parse_address.cc
-  src/core/ext/filters/client_channel/proxy_mapper.cc
   src/core/ext/filters/client_channel/proxy_mapper_registry.cc
   src/core/ext/filters/client_channel/resolver.cc
   src/core/ext/filters/client_channel/resolver_registry.cc
@@ -2065,6 +2072,7 @@ add_library(grpc_test_util
   src/core/lib/iomgr/is_epollexclusive_available.cc
   src/core/lib/iomgr/load_file.cc
   src/core/lib/iomgr/lockfree_event.cc
+  src/core/lib/iomgr/logical_thread.cc
   src/core/lib/iomgr/poller/eventmanager_libuv.cc
   src/core/lib/iomgr/polling_entity.cc
   src/core/lib/iomgr/pollset.cc
@@ -2168,7 +2176,6 @@ add_library(grpc_test_util
   src/core/ext/filters/client_channel/client_channel_channelz.cc
   src/core/ext/filters/client_channel/client_channel_factory.cc
   src/core/ext/filters/client_channel/client_channel_plugin.cc
-  src/core/ext/filters/client_channel/connector.cc
   src/core/ext/filters/client_channel/global_subchannel_pool.cc
   src/core/ext/filters/client_channel/health/health_check_client.cc
   src/core/ext/filters/client_channel/http_connect_handshaker.cc
@@ -2177,7 +2184,6 @@ add_library(grpc_test_util
   src/core/ext/filters/client_channel/lb_policy_registry.cc
   src/core/ext/filters/client_channel/local_subchannel_pool.cc
   src/core/ext/filters/client_channel/parse_address.cc
-  src/core/ext/filters/client_channel/proxy_mapper.cc
   src/core/ext/filters/client_channel/proxy_mapper_registry.cc
   src/core/ext/filters/client_channel/resolver.cc
   src/core/ext/filters/client_channel/resolver_registry.cc
@@ -2410,6 +2416,7 @@ add_library(grpc_test_util_unsecure
   src/core/lib/iomgr/is_epollexclusive_available.cc
   src/core/lib/iomgr/load_file.cc
   src/core/lib/iomgr/lockfree_event.cc
+  src/core/lib/iomgr/logical_thread.cc
   src/core/lib/iomgr/poller/eventmanager_libuv.cc
   src/core/lib/iomgr/polling_entity.cc
   src/core/lib/iomgr/pollset.cc
@@ -2513,7 +2520,6 @@ add_library(grpc_test_util_unsecure
   src/core/ext/filters/client_channel/client_channel_channelz.cc
   src/core/ext/filters/client_channel/client_channel_factory.cc
   src/core/ext/filters/client_channel/client_channel_plugin.cc
-  src/core/ext/filters/client_channel/connector.cc
   src/core/ext/filters/client_channel/global_subchannel_pool.cc
   src/core/ext/filters/client_channel/health/health_check_client.cc
   src/core/ext/filters/client_channel/http_connect_handshaker.cc
@@ -2522,7 +2528,6 @@ add_library(grpc_test_util_unsecure
   src/core/ext/filters/client_channel/lb_policy_registry.cc
   src/core/ext/filters/client_channel/local_subchannel_pool.cc
   src/core/ext/filters/client_channel/parse_address.cc
-  src/core/ext/filters/client_channel/proxy_mapper.cc
   src/core/ext/filters/client_channel/proxy_mapper_registry.cc
   src/core/ext/filters/client_channel/resolver.cc
   src/core/ext/filters/client_channel/resolver_registry.cc
@@ -2731,6 +2736,7 @@ add_library(grpc_unsecure
   src/core/lib/iomgr/is_epollexclusive_available.cc
   src/core/lib/iomgr/load_file.cc
   src/core/lib/iomgr/lockfree_event.cc
+  src/core/lib/iomgr/logical_thread.cc
   src/core/lib/iomgr/poller/eventmanager_libuv.cc
   src/core/lib/iomgr/polling_entity.cc
   src/core/lib/iomgr/pollset.cc
@@ -2869,7 +2875,6 @@ add_library(grpc_unsecure
   src/core/ext/filters/client_channel/client_channel_channelz.cc
   src/core/ext/filters/client_channel/client_channel_factory.cc
   src/core/ext/filters/client_channel/client_channel_plugin.cc
-  src/core/ext/filters/client_channel/connector.cc
   src/core/ext/filters/client_channel/global_subchannel_pool.cc
   src/core/ext/filters/client_channel/health/health_check_client.cc
   src/core/ext/filters/client_channel/http_connect_handshaker.cc
@@ -2878,7 +2883,6 @@ add_library(grpc_unsecure
   src/core/ext/filters/client_channel/lb_policy_registry.cc
   src/core/ext/filters/client_channel/local_subchannel_pool.cc
   src/core/ext/filters/client_channel/parse_address.cc
-  src/core/ext/filters/client_channel/proxy_mapper.cc
   src/core/ext/filters/client_channel/proxy_mapper_registry.cc
   src/core/ext/filters/client_channel/resolver.cc
   src/core/ext/filters/client_channel/resolver_registry.cc
@@ -3291,6 +3295,7 @@ add_library(grpc++
   src/cpp/server/health/health_check_service.cc
   src/cpp/server/health/health_check_service_server_builder_option.cc
   src/cpp/server/server_builder.cc
+  src/cpp/server/server_callback.cc
   src/cpp/server/server_cc.cc
   src/cpp/server/server_context.cc
   src/cpp/server/server_credentials.cc
@@ -3307,7 +3312,6 @@ add_library(grpc++
   src/core/ext/filters/client_channel/client_channel_channelz.cc
   src/core/ext/filters/client_channel/client_channel_factory.cc
   src/core/ext/filters/client_channel/client_channel_plugin.cc
-  src/core/ext/filters/client_channel/connector.cc
   src/core/ext/filters/client_channel/global_subchannel_pool.cc
   src/core/ext/filters/client_channel/health/health_check_client.cc
   src/core/ext/filters/client_channel/http_connect_handshaker.cc
@@ -3316,7 +3320,6 @@ add_library(grpc++
   src/core/ext/filters/client_channel/lb_policy_registry.cc
   src/core/ext/filters/client_channel/local_subchannel_pool.cc
   src/core/ext/filters/client_channel/parse_address.cc
-  src/core/ext/filters/client_channel/proxy_mapper.cc
   src/core/ext/filters/client_channel/proxy_mapper_registry.cc
   src/core/ext/filters/client_channel/resolver.cc
   src/core/ext/filters/client_channel/resolver_registry.cc
@@ -3390,6 +3393,7 @@ add_library(grpc++
   src/core/lib/iomgr/is_epollexclusive_available.cc
   src/core/lib/iomgr/load_file.cc
   src/core/lib/iomgr/lockfree_event.cc
+  src/core/lib/iomgr/logical_thread.cc
   src/core/lib/iomgr/poller/eventmanager_libuv.cc
   src/core/lib/iomgr/polling_entity.cc
   src/core/lib/iomgr/pollset.cc
@@ -3784,6 +3788,7 @@ foreach(_hdr
   include/grpcpp/impl/codegen/security/auth_context.h
   include/grpcpp/impl/codegen/serialization_traits.h
   include/grpcpp/impl/codegen/server_callback.h
+  include/grpcpp/impl/codegen/server_callback_handlers.h
   include/grpcpp/impl/codegen/server_callback_impl.h
   include/grpcpp/impl/codegen/server_context.h
   include/grpcpp/impl/codegen/server_context_impl.h
@@ -4274,6 +4279,7 @@ foreach(_hdr
   include/grpcpp/impl/codegen/security/auth_context.h
   include/grpcpp/impl/codegen/serialization_traits.h
   include/grpcpp/impl/codegen/server_callback.h
+  include/grpcpp/impl/codegen/server_callback_handlers.h
   include/grpcpp/impl/codegen/server_callback_impl.h
   include/grpcpp/impl/codegen/server_context.h
   include/grpcpp/impl/codegen/server_context_impl.h
@@ -4470,6 +4476,7 @@ foreach(_hdr
   include/grpcpp/impl/codegen/security/auth_context.h
   include/grpcpp/impl/codegen/serialization_traits.h
   include/grpcpp/impl/codegen/server_callback.h
+  include/grpcpp/impl/codegen/server_callback_handlers.h
   include/grpcpp/impl/codegen/server_callback_impl.h
   include/grpcpp/impl/codegen/server_context.h
   include/grpcpp/impl/codegen/server_context_impl.h
@@ -4554,6 +4561,7 @@ add_library(grpc++_unsecure
   src/cpp/server/health/health_check_service.cc
   src/cpp/server/health/health_check_service_server_builder_option.cc
   src/cpp/server/server_builder.cc
+  src/cpp/server/server_callback.cc
   src/cpp/server/server_cc.cc
   src/cpp/server/server_context.cc
   src/cpp/server/server_credentials.cc
@@ -4570,7 +4578,6 @@ add_library(grpc++_unsecure
   src/core/ext/filters/client_channel/client_channel_channelz.cc
   src/core/ext/filters/client_channel/client_channel_factory.cc
   src/core/ext/filters/client_channel/client_channel_plugin.cc
-  src/core/ext/filters/client_channel/connector.cc
   src/core/ext/filters/client_channel/global_subchannel_pool.cc
   src/core/ext/filters/client_channel/health/health_check_client.cc
   src/core/ext/filters/client_channel/http_connect_handshaker.cc
@@ -4579,7 +4586,6 @@ add_library(grpc++_unsecure
   src/core/ext/filters/client_channel/lb_policy_registry.cc
   src/core/ext/filters/client_channel/local_subchannel_pool.cc
   src/core/ext/filters/client_channel/parse_address.cc
-  src/core/ext/filters/client_channel/proxy_mapper.cc
   src/core/ext/filters/client_channel/proxy_mapper_registry.cc
   src/core/ext/filters/client_channel/resolver.cc
   src/core/ext/filters/client_channel/resolver_registry.cc
@@ -4653,6 +4659,7 @@ add_library(grpc++_unsecure
   src/core/lib/iomgr/is_epollexclusive_available.cc
   src/core/lib/iomgr/load_file.cc
   src/core/lib/iomgr/lockfree_event.cc
+  src/core/lib/iomgr/logical_thread.cc
   src/core/lib/iomgr/poller/eventmanager_libuv.cc
   src/core/lib/iomgr/polling_entity.cc
   src/core/lib/iomgr/pollset.cc
@@ -5046,6 +5053,7 @@ foreach(_hdr
   include/grpcpp/impl/codegen/security/auth_context.h
   include/grpcpp/impl/codegen/serialization_traits.h
   include/grpcpp/impl/codegen/server_callback.h
+  include/grpcpp/impl/codegen/server_callback_handlers.h
   include/grpcpp/impl/codegen/server_callback_impl.h
   include/grpcpp/impl/codegen/server_context.h
   include/grpcpp/impl/codegen/server_context_impl.h
@@ -5263,7 +5271,9 @@ if(gRPC_INSTALL)
 endif()
 
 
-if(gRPC_BUILD_CODEGEN)
+# grpcpp_channelz doesn't build with protobuf-lite
+# See https://github.com/grpc/grpc/issues/19473
+if(gRPC_BUILD_CODEGEN AND NOT gRPC_USE_PROTO_LITE)
 add_library(grpcpp_channelz
   src/cpp/server/channelz/channelz_service.cc
   src/cpp/server/channelz/channelz_service_plugin.cc
@@ -13968,6 +13978,42 @@ endif()
 endif()
 if(gRPC_BUILD_TESTS)
 
+add_executable(logical_thread_test
+  test/core/iomgr/logical_thread_test.cc
+  third_party/googletest/googletest/src/gtest-all.cc
+  third_party/googletest/googlemock/src/gmock-all.cc
+)
+
+target_include_directories(logical_thread_test
+  PRIVATE
+    ${CMAKE_CURRENT_SOURCE_DIR}
+    ${CMAKE_CURRENT_SOURCE_DIR}/include
+    ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
+    ${_gRPC_SSL_INCLUDE_DIR}
+    ${_gRPC_UPB_GENERATED_DIR}
+    ${_gRPC_UPB_GRPC_GENERATED_DIR}
+    ${_gRPC_UPB_INCLUDE_DIR}
+    ${_gRPC_ZLIB_INCLUDE_DIR}
+    third_party/googletest/googletest/include
+    third_party/googletest/googletest
+    third_party/googletest/googlemock/include
+    third_party/googletest/googlemock
+    ${_gRPC_PROTO_GENS_DIR}
+)
+
+target_link_libraries(logical_thread_test
+  ${_gRPC_PROTOBUF_LIBRARIES}
+  ${_gRPC_ALLTARGETS_LIBRARIES}
+  grpc_test_util
+  grpc
+  gpr
+  ${_gRPC_GFLAGS_LIBRARIES}
+)
+
+
+endif()
+if(gRPC_BUILD_TESTS)
+
 add_executable(message_allocator_end2end_test
   test/cpp/end2end/message_allocator_end2end_test.cc
   third_party/googletest/googletest/src/gtest-all.cc
@@ -14228,6 +14274,44 @@ target_link_libraries(orphanable_test
 )
 
 
+endif()
+if(gRPC_BUILD_TESTS)
+if(_gRPC_PLATFORM_WINDOWS)
+
+  add_executable(pollset_windows_starvation_test.cc
+    test/core/iomgr/pollset_windows_starvation_test.cc
+    third_party/googletest/googletest/src/gtest-all.cc
+    third_party/googletest/googlemock/src/gmock-all.cc
+  )
+
+  target_include_directories(pollset_windows_starvation_test.cc
+    PRIVATE
+      ${CMAKE_CURRENT_SOURCE_DIR}
+      ${CMAKE_CURRENT_SOURCE_DIR}/include
+      ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
+      ${_gRPC_SSL_INCLUDE_DIR}
+      ${_gRPC_UPB_GENERATED_DIR}
+      ${_gRPC_UPB_GRPC_GENERATED_DIR}
+      ${_gRPC_UPB_INCLUDE_DIR}
+      ${_gRPC_ZLIB_INCLUDE_DIR}
+      third_party/googletest/googletest/include
+      third_party/googletest/googletest
+      third_party/googletest/googlemock/include
+      third_party/googletest/googlemock
+      ${_gRPC_PROTO_GENS_DIR}
+  )
+
+  target_link_libraries(pollset_windows_starvation_test.cc
+    ${_gRPC_PROTOBUF_LIBRARIES}
+    ${_gRPC_ALLTARGETS_LIBRARIES}
+    grpc_test_util
+    grpc
+    gpr
+    ${_gRPC_GFLAGS_LIBRARIES}
+  )
+
+
+endif()
 endif()
 if(gRPC_BUILD_TESTS)
 

+ 109 - 14
Makefile

@@ -1257,6 +1257,7 @@ interop_client: $(BINDIR)/$(CONFIG)/interop_client
 interop_server: $(BINDIR)/$(CONFIG)/interop_server
 interop_test: $(BINDIR)/$(CONFIG)/interop_test
 json_run_localhost: $(BINDIR)/$(CONFIG)/json_run_localhost
+logical_thread_test: $(BINDIR)/$(CONFIG)/logical_thread_test
 message_allocator_end2end_test: $(BINDIR)/$(CONFIG)/message_allocator_end2end_test
 metrics_client: $(BINDIR)/$(CONFIG)/metrics_client
 mock_test: $(BINDIR)/$(CONFIG)/mock_test
@@ -1264,6 +1265,7 @@ nonblocking_test: $(BINDIR)/$(CONFIG)/nonblocking_test
 noop-benchmark: $(BINDIR)/$(CONFIG)/noop-benchmark
 optional_test: $(BINDIR)/$(CONFIG)/optional_test
 orphanable_test: $(BINDIR)/$(CONFIG)/orphanable_test
+pollset_windows_starvation_test.cc: $(BINDIR)/$(CONFIG)/pollset_windows_starvation_test.cc
 port_sharing_end2end_test: $(BINDIR)/$(CONFIG)/port_sharing_end2end_test
 proto_server_reflection_test: $(BINDIR)/$(CONFIG)/proto_server_reflection_test
 proto_utils_test: $(BINDIR)/$(CONFIG)/proto_utils_test
@@ -1727,6 +1729,7 @@ buildtests_cxx: privatelibs_cxx \
   $(BINDIR)/$(CONFIG)/interop_server \
   $(BINDIR)/$(CONFIG)/interop_test \
   $(BINDIR)/$(CONFIG)/json_run_localhost \
+  $(BINDIR)/$(CONFIG)/logical_thread_test \
   $(BINDIR)/$(CONFIG)/message_allocator_end2end_test \
   $(BINDIR)/$(CONFIG)/metrics_client \
   $(BINDIR)/$(CONFIG)/mock_test \
@@ -1734,6 +1737,7 @@ buildtests_cxx: privatelibs_cxx \
   $(BINDIR)/$(CONFIG)/noop-benchmark \
   $(BINDIR)/$(CONFIG)/optional_test \
   $(BINDIR)/$(CONFIG)/orphanable_test \
+  $(BINDIR)/$(CONFIG)/pollset_windows_starvation_test.cc \
   $(BINDIR)/$(CONFIG)/port_sharing_end2end_test \
   $(BINDIR)/$(CONFIG)/proto_server_reflection_test \
   $(BINDIR)/$(CONFIG)/proto_utils_test \
@@ -1899,6 +1903,7 @@ buildtests_cxx: privatelibs_cxx \
   $(BINDIR)/$(CONFIG)/interop_server \
   $(BINDIR)/$(CONFIG)/interop_test \
   $(BINDIR)/$(CONFIG)/json_run_localhost \
+  $(BINDIR)/$(CONFIG)/logical_thread_test \
   $(BINDIR)/$(CONFIG)/message_allocator_end2end_test \
   $(BINDIR)/$(CONFIG)/metrics_client \
   $(BINDIR)/$(CONFIG)/mock_test \
@@ -1906,6 +1911,7 @@ buildtests_cxx: privatelibs_cxx \
   $(BINDIR)/$(CONFIG)/noop-benchmark \
   $(BINDIR)/$(CONFIG)/optional_test \
   $(BINDIR)/$(CONFIG)/orphanable_test \
+  $(BINDIR)/$(CONFIG)/pollset_windows_starvation_test.cc \
   $(BINDIR)/$(CONFIG)/port_sharing_end2end_test \
   $(BINDIR)/$(CONFIG)/proto_server_reflection_test \
   $(BINDIR)/$(CONFIG)/proto_utils_test \
@@ -2412,6 +2418,8 @@ test_cxx: buildtests_cxx
 	$(Q) $(BINDIR)/$(CONFIG)/inproc_sync_unary_ping_pong_test || ( echo test inproc_sync_unary_ping_pong_test failed ; exit 1 )
 	$(E) "[RUN]     Testing interop_test"
 	$(Q) $(BINDIR)/$(CONFIG)/interop_test || ( echo test interop_test failed ; exit 1 )
+	$(E) "[RUN]     Testing logical_thread_test"
+	$(Q) $(BINDIR)/$(CONFIG)/logical_thread_test || ( echo test logical_thread_test failed ; exit 1 )
 	$(E) "[RUN]     Testing message_allocator_end2end_test"
 	$(Q) $(BINDIR)/$(CONFIG)/message_allocator_end2end_test || ( echo test message_allocator_end2end_test failed ; exit 1 )
 	$(E) "[RUN]     Testing mock_test"
@@ -2424,6 +2432,8 @@ test_cxx: buildtests_cxx
 	$(Q) $(BINDIR)/$(CONFIG)/optional_test || ( echo test optional_test failed ; exit 1 )
 	$(E) "[RUN]     Testing orphanable_test"
 	$(Q) $(BINDIR)/$(CONFIG)/orphanable_test || ( echo test orphanable_test failed ; exit 1 )
+	$(E) "[RUN]     Testing pollset_windows_starvation_test.cc"
+	$(Q) $(BINDIR)/$(CONFIG)/pollset_windows_starvation_test.cc || ( echo test pollset_windows_starvation_test.cc failed ; exit 1 )
 	$(E) "[RUN]     Testing port_sharing_end2end_test"
 	$(Q) $(BINDIR)/$(CONFIG)/port_sharing_end2end_test || ( echo test port_sharing_end2end_test failed ; exit 1 )
 	$(E) "[RUN]     Testing proto_server_reflection_test"
@@ -3651,6 +3661,7 @@ LIBGRPC_SRC = \
     src/core/lib/iomgr/is_epollexclusive_available.cc \
     src/core/lib/iomgr/load_file.cc \
     src/core/lib/iomgr/lockfree_event.cc \
+    src/core/lib/iomgr/logical_thread.cc \
     src/core/lib/iomgr/poller/eventmanager_libuv.cc \
     src/core/lib/iomgr/polling_entity.cc \
     src/core/lib/iomgr/pollset.cc \
@@ -3854,7 +3865,6 @@ LIBGRPC_SRC = \
     src/core/ext/filters/client_channel/client_channel_channelz.cc \
     src/core/ext/filters/client_channel/client_channel_factory.cc \
     src/core/ext/filters/client_channel/client_channel_plugin.cc \
-    src/core/ext/filters/client_channel/connector.cc \
     src/core/ext/filters/client_channel/global_subchannel_pool.cc \
     src/core/ext/filters/client_channel/health/health_check_client.cc \
     src/core/ext/filters/client_channel/http_connect_handshaker.cc \
@@ -3863,7 +3873,6 @@ LIBGRPC_SRC = \
     src/core/ext/filters/client_channel/lb_policy_registry.cc \
     src/core/ext/filters/client_channel/local_subchannel_pool.cc \
     src/core/ext/filters/client_channel/parse_address.cc \
-    src/core/ext/filters/client_channel/proxy_mapper.cc \
     src/core/ext/filters/client_channel/proxy_mapper_registry.cc \
     src/core/ext/filters/client_channel/resolver.cc \
     src/core/ext/filters/client_channel/resolver_registry.cc \
@@ -4118,6 +4127,7 @@ LIBGRPC_CRONET_SRC = \
     src/core/lib/iomgr/is_epollexclusive_available.cc \
     src/core/lib/iomgr/load_file.cc \
     src/core/lib/iomgr/lockfree_event.cc \
+    src/core/lib/iomgr/logical_thread.cc \
     src/core/lib/iomgr/poller/eventmanager_libuv.cc \
     src/core/lib/iomgr/polling_entity.cc \
     src/core/lib/iomgr/pollset.cc \
@@ -4253,7 +4263,6 @@ LIBGRPC_CRONET_SRC = \
     src/core/ext/filters/client_channel/client_channel_channelz.cc \
     src/core/ext/filters/client_channel/client_channel_factory.cc \
     src/core/ext/filters/client_channel/client_channel_plugin.cc \
-    src/core/ext/filters/client_channel/connector.cc \
     src/core/ext/filters/client_channel/global_subchannel_pool.cc \
     src/core/ext/filters/client_channel/health/health_check_client.cc \
     src/core/ext/filters/client_channel/http_connect_handshaker.cc \
@@ -4262,7 +4271,6 @@ LIBGRPC_CRONET_SRC = \
     src/core/ext/filters/client_channel/lb_policy_registry.cc \
     src/core/ext/filters/client_channel/local_subchannel_pool.cc \
     src/core/ext/filters/client_channel/parse_address.cc \
-    src/core/ext/filters/client_channel/proxy_mapper.cc \
     src/core/ext/filters/client_channel/proxy_mapper_registry.cc \
     src/core/ext/filters/client_channel/resolver.cc \
     src/core/ext/filters/client_channel/resolver_registry.cc \
@@ -4540,6 +4548,7 @@ LIBGRPC_TEST_UTIL_SRC = \
     src/core/lib/iomgr/is_epollexclusive_available.cc \
     src/core/lib/iomgr/load_file.cc \
     src/core/lib/iomgr/lockfree_event.cc \
+    src/core/lib/iomgr/logical_thread.cc \
     src/core/lib/iomgr/poller/eventmanager_libuv.cc \
     src/core/lib/iomgr/polling_entity.cc \
     src/core/lib/iomgr/pollset.cc \
@@ -4643,7 +4652,6 @@ LIBGRPC_TEST_UTIL_SRC = \
     src/core/ext/filters/client_channel/client_channel_channelz.cc \
     src/core/ext/filters/client_channel/client_channel_factory.cc \
     src/core/ext/filters/client_channel/client_channel_plugin.cc \
-    src/core/ext/filters/client_channel/connector.cc \
     src/core/ext/filters/client_channel/global_subchannel_pool.cc \
     src/core/ext/filters/client_channel/health/health_check_client.cc \
     src/core/ext/filters/client_channel/http_connect_handshaker.cc \
@@ -4652,7 +4660,6 @@ LIBGRPC_TEST_UTIL_SRC = \
     src/core/ext/filters/client_channel/lb_policy_registry.cc \
     src/core/ext/filters/client_channel/local_subchannel_pool.cc \
     src/core/ext/filters/client_channel/parse_address.cc \
-    src/core/ext/filters/client_channel/proxy_mapper.cc \
     src/core/ext/filters/client_channel/proxy_mapper_registry.cc \
     src/core/ext/filters/client_channel/resolver.cc \
     src/core/ext/filters/client_channel/resolver_registry.cc \
@@ -4871,6 +4878,7 @@ LIBGRPC_TEST_UTIL_UNSECURE_SRC = \
     src/core/lib/iomgr/is_epollexclusive_available.cc \
     src/core/lib/iomgr/load_file.cc \
     src/core/lib/iomgr/lockfree_event.cc \
+    src/core/lib/iomgr/logical_thread.cc \
     src/core/lib/iomgr/poller/eventmanager_libuv.cc \
     src/core/lib/iomgr/polling_entity.cc \
     src/core/lib/iomgr/pollset.cc \
@@ -4974,7 +4982,6 @@ LIBGRPC_TEST_UTIL_UNSECURE_SRC = \
     src/core/ext/filters/client_channel/client_channel_channelz.cc \
     src/core/ext/filters/client_channel/client_channel_factory.cc \
     src/core/ext/filters/client_channel/client_channel_plugin.cc \
-    src/core/ext/filters/client_channel/connector.cc \
     src/core/ext/filters/client_channel/global_subchannel_pool.cc \
     src/core/ext/filters/client_channel/health/health_check_client.cc \
     src/core/ext/filters/client_channel/http_connect_handshaker.cc \
@@ -4983,7 +4990,6 @@ LIBGRPC_TEST_UTIL_UNSECURE_SRC = \
     src/core/ext/filters/client_channel/lb_policy_registry.cc \
     src/core/ext/filters/client_channel/local_subchannel_pool.cc \
     src/core/ext/filters/client_channel/parse_address.cc \
-    src/core/ext/filters/client_channel/proxy_mapper.cc \
     src/core/ext/filters/client_channel/proxy_mapper_registry.cc \
     src/core/ext/filters/client_channel/resolver.cc \
     src/core/ext/filters/client_channel/resolver_registry.cc \
@@ -5165,6 +5171,7 @@ LIBGRPC_UNSECURE_SRC = \
     src/core/lib/iomgr/is_epollexclusive_available.cc \
     src/core/lib/iomgr/load_file.cc \
     src/core/lib/iomgr/lockfree_event.cc \
+    src/core/lib/iomgr/logical_thread.cc \
     src/core/lib/iomgr/poller/eventmanager_libuv.cc \
     src/core/lib/iomgr/polling_entity.cc \
     src/core/lib/iomgr/pollset.cc \
@@ -5303,7 +5310,6 @@ LIBGRPC_UNSECURE_SRC = \
     src/core/ext/filters/client_channel/client_channel_channelz.cc \
     src/core/ext/filters/client_channel/client_channel_factory.cc \
     src/core/ext/filters/client_channel/client_channel_plugin.cc \
-    src/core/ext/filters/client_channel/connector.cc \
     src/core/ext/filters/client_channel/global_subchannel_pool.cc \
     src/core/ext/filters/client_channel/health/health_check_client.cc \
     src/core/ext/filters/client_channel/http_connect_handshaker.cc \
@@ -5312,7 +5318,6 @@ LIBGRPC_UNSECURE_SRC = \
     src/core/ext/filters/client_channel/lb_policy_registry.cc \
     src/core/ext/filters/client_channel/local_subchannel_pool.cc \
     src/core/ext/filters/client_channel/parse_address.cc \
-    src/core/ext/filters/client_channel/proxy_mapper.cc \
     src/core/ext/filters/client_channel/proxy_mapper_registry.cc \
     src/core/ext/filters/client_channel/resolver.cc \
     src/core/ext/filters/client_channel/resolver_registry.cc \
@@ -5690,6 +5695,7 @@ LIBGRPC++_SRC = \
     src/cpp/server/health/health_check_service.cc \
     src/cpp/server/health/health_check_service_server_builder_option.cc \
     src/cpp/server/server_builder.cc \
+    src/cpp/server/server_callback.cc \
     src/cpp/server/server_cc.cc \
     src/cpp/server/server_context.cc \
     src/cpp/server/server_credentials.cc \
@@ -5706,7 +5712,6 @@ LIBGRPC++_SRC = \
     src/core/ext/filters/client_channel/client_channel_channelz.cc \
     src/core/ext/filters/client_channel/client_channel_factory.cc \
     src/core/ext/filters/client_channel/client_channel_plugin.cc \
-    src/core/ext/filters/client_channel/connector.cc \
     src/core/ext/filters/client_channel/global_subchannel_pool.cc \
     src/core/ext/filters/client_channel/health/health_check_client.cc \
     src/core/ext/filters/client_channel/http_connect_handshaker.cc \
@@ -5715,7 +5720,6 @@ LIBGRPC++_SRC = \
     src/core/ext/filters/client_channel/lb_policy_registry.cc \
     src/core/ext/filters/client_channel/local_subchannel_pool.cc \
     src/core/ext/filters/client_channel/parse_address.cc \
-    src/core/ext/filters/client_channel/proxy_mapper.cc \
     src/core/ext/filters/client_channel/proxy_mapper_registry.cc \
     src/core/ext/filters/client_channel/resolver.cc \
     src/core/ext/filters/client_channel/resolver_registry.cc \
@@ -5789,6 +5793,7 @@ LIBGRPC++_SRC = \
     src/core/lib/iomgr/is_epollexclusive_available.cc \
     src/core/lib/iomgr/load_file.cc \
     src/core/lib/iomgr/lockfree_event.cc \
+    src/core/lib/iomgr/logical_thread.cc \
     src/core/lib/iomgr/poller/eventmanager_libuv.cc \
     src/core/lib/iomgr/polling_entity.cc \
     src/core/lib/iomgr/pollset.cc \
@@ -6144,6 +6149,7 @@ PUBLIC_HEADERS_CXX += \
     include/grpcpp/impl/codegen/security/auth_context.h \
     include/grpcpp/impl/codegen/serialization_traits.h \
     include/grpcpp/impl/codegen/server_callback.h \
+    include/grpcpp/impl/codegen/server_callback_handlers.h \
     include/grpcpp/impl/codegen/server_callback_impl.h \
     include/grpcpp/impl/codegen/server_context.h \
     include/grpcpp/impl/codegen/server_context_impl.h \
@@ -6616,6 +6622,7 @@ PUBLIC_HEADERS_CXX += \
     include/grpcpp/impl/codegen/security/auth_context.h \
     include/grpcpp/impl/codegen/serialization_traits.h \
     include/grpcpp/impl/codegen/server_callback.h \
+    include/grpcpp/impl/codegen/server_callback_handlers.h \
     include/grpcpp/impl/codegen/server_callback_impl.h \
     include/grpcpp/impl/codegen/server_context.h \
     include/grpcpp/impl/codegen/server_context_impl.h \
@@ -6795,6 +6802,7 @@ PUBLIC_HEADERS_CXX += \
     include/grpcpp/impl/codegen/security/auth_context.h \
     include/grpcpp/impl/codegen/serialization_traits.h \
     include/grpcpp/impl/codegen/server_callback.h \
+    include/grpcpp/impl/codegen/server_callback_handlers.h \
     include/grpcpp/impl/codegen/server_callback_impl.h \
     include/grpcpp/impl/codegen/server_context.h \
     include/grpcpp/impl/codegen/server_context_impl.h \
@@ -6919,6 +6927,7 @@ LIBGRPC++_UNSECURE_SRC = \
     src/cpp/server/health/health_check_service.cc \
     src/cpp/server/health/health_check_service_server_builder_option.cc \
     src/cpp/server/server_builder.cc \
+    src/cpp/server/server_callback.cc \
     src/cpp/server/server_cc.cc \
     src/cpp/server/server_context.cc \
     src/cpp/server/server_credentials.cc \
@@ -6935,7 +6944,6 @@ LIBGRPC++_UNSECURE_SRC = \
     src/core/ext/filters/client_channel/client_channel_channelz.cc \
     src/core/ext/filters/client_channel/client_channel_factory.cc \
     src/core/ext/filters/client_channel/client_channel_plugin.cc \
-    src/core/ext/filters/client_channel/connector.cc \
     src/core/ext/filters/client_channel/global_subchannel_pool.cc \
     src/core/ext/filters/client_channel/health/health_check_client.cc \
     src/core/ext/filters/client_channel/http_connect_handshaker.cc \
@@ -6944,7 +6952,6 @@ LIBGRPC++_UNSECURE_SRC = \
     src/core/ext/filters/client_channel/lb_policy_registry.cc \
     src/core/ext/filters/client_channel/local_subchannel_pool.cc \
     src/core/ext/filters/client_channel/parse_address.cc \
-    src/core/ext/filters/client_channel/proxy_mapper.cc \
     src/core/ext/filters/client_channel/proxy_mapper_registry.cc \
     src/core/ext/filters/client_channel/resolver.cc \
     src/core/ext/filters/client_channel/resolver_registry.cc \
@@ -7018,6 +7025,7 @@ LIBGRPC++_UNSECURE_SRC = \
     src/core/lib/iomgr/is_epollexclusive_available.cc \
     src/core/lib/iomgr/load_file.cc \
     src/core/lib/iomgr/lockfree_event.cc \
+    src/core/lib/iomgr/logical_thread.cc \
     src/core/lib/iomgr/poller/eventmanager_libuv.cc \
     src/core/lib/iomgr/polling_entity.cc \
     src/core/lib/iomgr/pollset.cc \
@@ -7373,6 +7381,7 @@ PUBLIC_HEADERS_CXX += \
     include/grpcpp/impl/codegen/security/auth_context.h \
     include/grpcpp/impl/codegen/serialization_traits.h \
     include/grpcpp/impl/codegen/server_callback.h \
+    include/grpcpp/impl/codegen/server_callback_handlers.h \
     include/grpcpp/impl/codegen/server_callback_impl.h \
     include/grpcpp/impl/codegen/server_context.h \
     include/grpcpp/impl/codegen/server_context_impl.h \
@@ -18259,6 +18268,49 @@ endif
 endif
 
 
+LOGICAL_THREAD_TEST_SRC = \
+    test/core/iomgr/logical_thread_test.cc \
+
+LOGICAL_THREAD_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(LOGICAL_THREAD_TEST_SRC))))
+ifeq ($(NO_SECURE),true)
+
+# You can't build secure targets if you don't have OpenSSL.
+
+$(BINDIR)/$(CONFIG)/logical_thread_test: openssl_dep_error
+
+else
+
+
+
+
+ifeq ($(NO_PROTOBUF),true)
+
+# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.5.0+.
+
+$(BINDIR)/$(CONFIG)/logical_thread_test: protobuf_dep_error
+
+else
+
+$(BINDIR)/$(CONFIG)/logical_thread_test: $(PROTOBUF_DEP) $(LOGICAL_THREAD_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
+	$(E) "[LD]      Linking $@"
+	$(Q) mkdir -p `dirname $@`
+	$(Q) $(LDXX) $(LDFLAGS) $(LOGICAL_THREAD_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/logical_thread_test
+
+endif
+
+endif
+
+$(OBJDIR)/$(CONFIG)/test/core/iomgr/logical_thread_test.o:  $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
+
+deps_logical_thread_test: $(LOGICAL_THREAD_TEST_OBJS:.o=.dep)
+
+ifneq ($(NO_SECURE),true)
+ifneq ($(NO_DEPS),true)
+-include $(LOGICAL_THREAD_TEST_OBJS:.o=.dep)
+endif
+endif
+
+
 MESSAGE_ALLOCATOR_END2END_TEST_SRC = \
     test/cpp/end2end/message_allocator_end2end_test.cc \
 
@@ -18565,6 +18617,49 @@ endif
 endif
 
 
+POLLSET_WINDOWS_STARVATION_TEST.CC_SRC = \
+    test/core/iomgr/pollset_windows_starvation_test.cc \
+
+POLLSET_WINDOWS_STARVATION_TEST.CC_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(POLLSET_WINDOWS_STARVATION_TEST.CC_SRC))))
+ifeq ($(NO_SECURE),true)
+
+# You can't build secure targets if you don't have OpenSSL.
+
+$(BINDIR)/$(CONFIG)/pollset_windows_starvation_test.cc: openssl_dep_error
+
+else
+
+
+
+
+ifeq ($(NO_PROTOBUF),true)
+
+# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.5.0+.
+
+$(BINDIR)/$(CONFIG)/pollset_windows_starvation_test.cc: protobuf_dep_error
+
+else
+
+$(BINDIR)/$(CONFIG)/pollset_windows_starvation_test.cc: $(PROTOBUF_DEP) $(POLLSET_WINDOWS_STARVATION_TEST.CC_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
+	$(E) "[LD]      Linking $@"
+	$(Q) mkdir -p `dirname $@`
+	$(Q) $(LDXX) $(LDFLAGS) $(POLLSET_WINDOWS_STARVATION_TEST.CC_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/pollset_windows_starvation_test.cc
+
+endif
+
+endif
+
+$(OBJDIR)/$(CONFIG)/test/core/iomgr/pollset_windows_starvation_test.o:  $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
+
+deps_pollset_windows_starvation_test.cc: $(POLLSET_WINDOWS_STARVATION_TEST.CC_OBJS:.o=.dep)
+
+ifneq ($(NO_SECURE),true)
+ifneq ($(NO_DEPS),true)
+-include $(POLLSET_WINDOWS_STARVATION_TEST.CC_OBJS:.o=.dep)
+endif
+endif
+
+
 PORT_SHARING_END2END_TEST_SRC = \
     test/cpp/end2end/port_sharing_end2end_test.cc \
 

+ 0 - 12
OWNERS

@@ -1,16 +1,4 @@
 # Top level ownership
-
-# nothing listed here until GitHub CODEOWNERS gets better
-# we need:
-# 1. owners to be able to self-approve
-# 2. authors to be able to select approvers
-
-# OWNERS file approvers
-# POLICY: at least three owners are needed before adding any OWNERS
-# REASON: GitHub does not recognize an author as able to give approval
-#         for a change; without this policy authors that are owners would
-#         be forced to rely on one reviewer, which would consequently
-#         lead to a bus factor of one to changes to that code
 @markdroth **/OWNERS
 @nicolasnoble **/OWNERS
 @a11r **/OWNERS

+ 5 - 1
bazel/grpc_build_system.bzl

@@ -189,10 +189,12 @@ def ios_cc_test(
             deps = ios_test_deps,
         )
 
-def grpc_cc_test(name, srcs = [], deps = [], external_deps = [], args = [], data = [], uses_polling = True, language = "C++", size = "medium", timeout = None, tags = [], exec_compatible_with = [], exec_properties = {}):
+def grpc_cc_test(name, srcs = [], deps = [], external_deps = [], args = [], data = [], uses_polling = True, language = "C++", size = "medium", timeout = None, tags = [], exec_compatible_with = [], exec_properties = {}, shard_count = None):
     copts = if_mac(["-DGRPC_CFSTREAM"])
     if language.upper() == "C":
         copts = copts + if_not_windows(["-std=c99"])
+    # NOTE: these attributes won't be used for the poller-specific versions of a test
+    # automatically, you need to set them explicitly (if applicable)
     args = {
         "srcs": srcs,
         "args": args,
@@ -204,6 +206,7 @@ def grpc_cc_test(name, srcs = [], deps = [], external_deps = [], args = [], data
         "timeout": timeout,
         "exec_compatible_with": exec_compatible_with,
         "exec_properties": exec_properties,
+        "shard_count": shard_count,
     }
     if uses_polling:
         # the vanilla version of the test should run on platforms that only
@@ -234,6 +237,7 @@ def grpc_cc_test(name, srcs = [], deps = [], external_deps = [], args = [], data
                 tags = (tags + ["no_windows", "no_mac"]),
                 exec_compatible_with = exec_compatible_with,
                 exec_properties = exec_properties,
+                shard_count = shard_count,
             )
     else:
         # the test behavior doesn't depend on polling, just generate the test

+ 8 - 3
bazel/grpc_deps.bzl

@@ -11,6 +11,11 @@ def grpc_deps():
         actual = "@upb//:upb",
     )
 
+    native.bind(
+        name = "absl",
+        actual = "@com_google_absl//absl",
+    )
+
     native.bind(
         name = "absl-base",
         actual = "@com_google_absl//absl/base",
@@ -165,9 +170,9 @@ def grpc_deps():
     if "com_google_absl" not in native.existing_rules():
         http_archive(
             name = "com_google_absl",
-            sha256 = "c5f6429c067e6b8f3c6d13d1ab2bdcd559c6f8b85317aa5b0dc8c364c37d1742",
-            strip_prefix = "abseil-cpp-846e5dbedac123d12455adcfe6f53c8b5dcbfeef",
-            url = "https://github.com/abseil/abseil-cpp/archive/846e5dbedac123d12455adcfe6f53c8b5dcbfeef.tar.gz",
+            sha256 = "ce318a8cd0fa4443c6c01d385cd28b2785b8160dd270b945d6b08cccff568ce6",
+            strip_prefix = "abseil-cpp-0514227d2547793b23e209809276375e41c76617",
+            url = "https://github.com/abseil/abseil-cpp/archive/0514227d2547793b23e209809276375e41c76617.tar.gz",
         )
 
     if "bazel_toolchains" not in native.existing_rules():

+ 29 - 2
build.yaml

@@ -397,6 +397,7 @@ filegroups:
   - include/grpcpp/impl/codegen/security/auth_context.h
   - include/grpcpp/impl/codegen/serialization_traits.h
   - include/grpcpp/impl/codegen/server_callback.h
+  - include/grpcpp/impl/codegen/server_callback_handlers.h
   - include/grpcpp/impl/codegen/server_callback_impl.h
   - include/grpcpp/impl/codegen/server_context.h
   - include/grpcpp/impl/codegen/server_context_impl.h
@@ -587,6 +588,7 @@ filegroups:
   - src/cpp/server/health/health_check_service.cc
   - src/cpp/server/health/health_check_service_server_builder_option.cc
   - src/cpp/server/server_builder.cc
+  - src/cpp/server/server_callback.cc
   - src/cpp/server/server_cc.cc
   - src/cpp/server/server_context.cc
   - src/cpp/server/server_credentials.cc
@@ -618,6 +620,7 @@ filegroups:
   public_headers:
   - include/grpc++/test/mock_stream.h
   - include/grpc++/test/server_context_test_spouse.h
+  - include/grpcpp/test/default_reactor_test_peer.h
   - include/grpcpp/test/mock_stream.h
   - include/grpcpp/test/server_context_test_spouse.h
   deps:
@@ -688,6 +691,7 @@ filegroups:
   - src/core/lib/iomgr/is_epollexclusive_available.cc
   - src/core/lib/iomgr/load_file.cc
   - src/core/lib/iomgr/lockfree_event.cc
+  - src/core/lib/iomgr/logical_thread.cc
   - src/core/lib/iomgr/poller/eventmanager_libuv.cc
   - src/core/lib/iomgr/polling_entity.cc
   - src/core/lib/iomgr/pollset.cc
@@ -868,6 +872,7 @@ filegroups:
   - src/core/lib/iomgr/is_epollexclusive_available.h
   - src/core/lib/iomgr/load_file.h
   - src/core/lib/iomgr/lockfree_event.h
+  - src/core/lib/iomgr/logical_thread.h
   - src/core/lib/iomgr/nameser.h
   - src/core/lib/iomgr/poller/eventmanager_libuv.h
   - src/core/lib/iomgr/polling_entity.h
@@ -998,7 +1003,6 @@ filegroups:
   - src/core/ext/filters/client_channel/client_channel_channelz.cc
   - src/core/ext/filters/client_channel/client_channel_factory.cc
   - src/core/ext/filters/client_channel/client_channel_plugin.cc
-  - src/core/ext/filters/client_channel/connector.cc
   - src/core/ext/filters/client_channel/global_subchannel_pool.cc
   - src/core/ext/filters/client_channel/health/health_check_client.cc
   - src/core/ext/filters/client_channel/http_connect_handshaker.cc
@@ -1007,7 +1011,6 @@ filegroups:
   - src/core/ext/filters/client_channel/lb_policy_registry.cc
   - src/core/ext/filters/client_channel/local_subchannel_pool.cc
   - src/core/ext/filters/client_channel/parse_address.cc
-  - src/core/ext/filters/client_channel/proxy_mapper.cc
   - src/core/ext/filters/client_channel/proxy_mapper_registry.cc
   - src/core/ext/filters/client_channel/resolver.cc
   - src/core/ext/filters/client_channel/resolver_registry.cc
@@ -5361,6 +5364,16 @@ targets:
   - mac
   - linux
   - posix
+- name: logical_thread_test
+  cpu_cost: 10
+  build: test
+  language: c++
+  src:
+  - test/core/iomgr/logical_thread_test.cc
+  deps:
+  - grpc_test_util
+  - grpc
+  - gpr
 - name: message_allocator_end2end_test
   gtest: true
   cpu_cost: 0.5
@@ -5445,6 +5458,20 @@ targets:
   - grpc++
   - grpc
   - gpr
+- name: pollset_windows_starvation_test.cc
+  cpu_cost: 0.5
+  build: test
+  language: c++
+  src:
+  - test/core/iomgr/pollset_windows_starvation_test.cc
+  deps:
+  - grpc_test_util
+  - grpc
+  - gpr
+  exclude_iomgrs:
+  - uv
+  platforms:
+  - windows
 - name: port_sharing_end2end_test
   gtest: true
   build: test

+ 28 - 0
cmake/abseil-cpp.cmake

@@ -0,0 +1,28 @@
+# Copyright 2019 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if(gRPC_ABSL_PROVIDER STREQUAL "module")
+  if(NOT ABSL_ROOT_DIR)
+    set(ABSL_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/abseil-cpp)
+  endif()
+  if(EXISTS "${ABSL_ROOT_DIR}/CMakeLists.txt")
+    add_subdirectory(${ABSL_ROOT_DIR} third_party/abseil-cpp)
+  else()
+    message(WARNING "gRPC_ABSL_PROVIDER is \"module\" but ABSL_ROOT_DIR is wrong")
+  endif()
+elseif(gRPC_ABSL_PROVIDER STREQUAL "package")
+  # Use "CONFIG" as there is no built-in cmake module for absl.
+  find_package(absl REQUIRED CONFIG)
+  set(_gRPC_FIND_ABSL "if(NOT absl_FOUND)\n  find_package(absl CONFIG)\nendif()")
+endif()

+ 2 - 1
cmake/benchmark.cmake

@@ -32,5 +32,6 @@ elseif(gRPC_BENCHMARK_PROVIDER STREQUAL "package")
     set(_gRPC_BENCHMARK_LIBRARIES benchmark::benchmark)
   endif()
   set(_gRPC_FIND_BENCHMARK "if(NOT benchmark_FOUND)\n  find_package(benchmark CONFIG)\nendif()")
+elseif(gRPC_BENCHMARK_PROVIDER STREQUAL "none")
+  # Benchmark is a test-only dependency and can be avoided if we're not building tests.
 endif()
-

+ 2 - 1
cmake/gflags.cmake

@@ -29,5 +29,6 @@ elseif(gRPC_GFLAGS_PROVIDER STREQUAL "package")
     set(_gRPC_GFLAGS_LIBRARIES gflags::gflags)
   endif()
   set(_gRPC_FIND_GFLAGS "if(NOT gflags_FOUND)\n  find_package(gflags CONFIG)\nendif()")
+elseif(gRPC_GFLAGS_PROVIDER STREQUAL "none")
+  # gflags is a test-only dependency and can be avoided if we're not building tests.
 endif()
-

+ 1 - 2
config.m4

@@ -45,7 +45,6 @@ if test "$PHP_GRPC" != "no"; then
     src/core/ext/filters/client_channel/client_channel_channelz.cc \
     src/core/ext/filters/client_channel/client_channel_factory.cc \
     src/core/ext/filters/client_channel/client_channel_plugin.cc \
-    src/core/ext/filters/client_channel/connector.cc \
     src/core/ext/filters/client_channel/global_subchannel_pool.cc \
     src/core/ext/filters/client_channel/health/health_check_client.cc \
     src/core/ext/filters/client_channel/http_connect_handshaker.cc \
@@ -63,7 +62,6 @@ if test "$PHP_GRPC" != "no"; then
     src/core/ext/filters/client_channel/lb_policy_registry.cc \
     src/core/ext/filters/client_channel/local_subchannel_pool.cc \
     src/core/ext/filters/client_channel/parse_address.cc \
-    src/core/ext/filters/client_channel/proxy_mapper.cc \
     src/core/ext/filters/client_channel/proxy_mapper_registry.cc \
     src/core/ext/filters/client_channel/resolver.cc \
     src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc \
@@ -282,6 +280,7 @@ if test "$PHP_GRPC" != "no"; then
     src/core/lib/iomgr/is_epollexclusive_available.cc \
     src/core/lib/iomgr/load_file.cc \
     src/core/lib/iomgr/lockfree_event.cc \
+    src/core/lib/iomgr/logical_thread.cc \
     src/core/lib/iomgr/poller/eventmanager_libuv.cc \
     src/core/lib/iomgr/polling_entity.cc \
     src/core/lib/iomgr/pollset.cc \

+ 1 - 2
config.w32

@@ -123,6 +123,7 @@ if (PHP_GRPC != "no") {
     "src\\core\\lib\\iomgr\\is_epollexclusive_available.cc " +
     "src\\core\\lib\\iomgr\\load_file.cc " +
     "src\\core\\lib\\iomgr\\lockfree_event.cc " +
+    "src\\core\\lib\\iomgr\\logical_thread.cc " +
     "src\\core\\lib\\iomgr\\poller\\eventmanager_libuv.cc " +
     "src\\core\\lib\\iomgr\\polling_entity.cc " +
     "src\\core\\lib\\iomgr\\pollset.cc " +
@@ -326,7 +327,6 @@ if (PHP_GRPC != "no") {
     "src\\core\\ext\\filters\\client_channel\\client_channel_channelz.cc " +
     "src\\core\\ext\\filters\\client_channel\\client_channel_factory.cc " +
     "src\\core\\ext\\filters\\client_channel\\client_channel_plugin.cc " +
-    "src\\core\\ext\\filters\\client_channel\\connector.cc " +
     "src\\core\\ext\\filters\\client_channel\\global_subchannel_pool.cc " +
     "src\\core\\ext\\filters\\client_channel\\health\\health_check_client.cc " +
     "src\\core\\ext\\filters\\client_channel\\http_connect_handshaker.cc " +
@@ -335,7 +335,6 @@ if (PHP_GRPC != "no") {
     "src\\core\\ext\\filters\\client_channel\\lb_policy_registry.cc " +
     "src\\core\\ext\\filters\\client_channel\\local_subchannel_pool.cc " +
     "src\\core\\ext\\filters\\client_channel\\parse_address.cc " +
-    "src\\core\\ext\\filters\\client_channel\\proxy_mapper.cc " +
     "src\\core\\ext\\filters\\client_channel\\proxy_mapper_registry.cc " +
     "src\\core\\ext\\filters\\client_channel\\resolver.cc " +
     "src\\core\\ext\\filters\\client_channel\\resolver_registry.cc " +

+ 12 - 1
doc/core/moving-to-c++.md

@@ -34,7 +34,18 @@ C++ compatible with
   You can easily see whether PR is free from this issue by checking the result of
   `Artifact Build Linux` test.
 - `thread_local` is not allowed to use on Apple's products because their old OSes
-  (e.g. ios < 9.0) don't support `thread_local`.
+  (e.g. ios < 9.0) don't support `thread_local`. Please use `GPR_TLS_DECL` instead.
+- gRPC main libraries (grpc, grpc++, and plugins) cannot use the following C++ libraries:
+  (Test and example code is relatively free from these constraints)
+  - `<thread>`. Use `grpc_core::Thread`.
+  - `<condition_variable>`. Use `grpc_core::CondVar`.
+  - `<mutex>`. Use `grpc_core::Mutex`, `grpc_core::MutexLock`, and `grpc_core::ReleasableMutexLock`.
+  - `<future>`
+  - `<ratio>`
+  - `<system_error>`
+  - `<filesystem>`
+- `grpc_core::Atomic` is preferred over `std::atomic` in the gRPC library because it provides
+  additional debugging information.
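For illustration only (editor's sketch, not part of this diff): a minimal example of the substitutions the list above asks for, assuming the grpc_core::Mutex / MutexLock / CondVar wrappers declared in src/core/lib/gprpp/sync.h.

    #include <vector>
    #include "src/core/lib/gprpp/sync.h"  // grpc_core::Mutex, MutexLock, CondVar

    // Same shape as a std::mutex / std::condition_variable work queue, but
    // using the gRPC-core wrappers required by the guideline above.
    class WorkQueue {
     public:
      void Push(int item) {
        grpc_core::MutexLock lock(&mu_);  // RAII guard, like std::lock_guard
        items_.push_back(item);
        cv_.Signal();
      }
      int BlockingPop() {
        grpc_core::MutexLock lock(&mu_);
        while (items_.empty()) cv_.Wait(&mu_);  // like condition_variable::wait
        int item = items_.back();
        items_.pop_back();
        return item;
      }

     private:
      grpc_core::Mutex mu_;
      grpc_core::CondVar cv_;
      std::vector<int> items_;
    };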
 
 ## Roadmap
 

+ 96 - 100
examples/objective-c/route_guide/ViewControllers.m

@@ -25,7 +25,7 @@
 
 #import <GRPCClient/GRPCTransport.h>
 
-static NSString * const kHostAddress = @"localhost:50051";
+static NSString *const kHostAddress = @"localhost:50051";
 
 /** Category to override RTGPoint's description. */
 @interface RTGPoint (Description)
@@ -36,9 +36,9 @@ static NSString * const kHostAddress = @"localhost:50051";
 - (NSString *)description {
   NSString *verticalDirection = self.latitude >= 0 ? @"N" : @"S";
   NSString *horizontalDirection = self.longitude >= 0 ? @"E" : @"W";
-  return [NSString stringWithFormat:@"%.02f%@ %.02f%@",
-          abs(self.latitude) / 1E7f, verticalDirection,
-          abs(self.longitude) / 1E7f, horizontalDirection];
+  return
+      [NSString stringWithFormat:@"%.02f%@ %.02f%@", abs(self.latitude) / 1E7f, verticalDirection,
+                                 abs(self.longitude) / 1E7f, horizontalDirection];
 }
 @end
 
@@ -55,22 +55,21 @@ static NSString * const kHostAddress = @"localhost:50051";
                       longitude:(float)longitude {
   RTGRouteNote *note = [self message];
   note.message = message;
-  note.location.latitude = (int32_t) latitude * 1E7;
-  note.location.longitude = (int32_t) longitude * 1E7;
+  note.location.latitude = (int32_t)latitude * 1E7;
+  note.location.longitude = (int32_t)longitude * 1E7;
   return note;
 }
 @end
 
-
 #pragma mark Demo: Get Feature
 
 /**
  * Run the getFeature demo. Calls getFeature with a point known to have a feature and a point known
  * not to have a feature.
  */
-@interface GetFeatureViewController : UIViewController<GRPCProtoResponseHandler>
+@interface GetFeatureViewController : UIViewController
 
-@property (weak, nonatomic) IBOutlet UILabel *outputLabel;
+@property(weak, nonatomic) IBOutlet UILabel *outputLabel;
 
 @end
 
@@ -78,47 +77,44 @@ static NSString * const kHostAddress = @"localhost:50051";
   RTGRouteGuide *_service;
 }
 
-- (dispatch_queue_t)dispatchQueue {
-  return dispatch_get_main_queue();
-}
-
-- (void)didReceiveProtoMessage:(GPBMessage *)message {
-  RTGFeature *response = (RTGFeature *)message;
-
-  // TODO(makdharma): Remove boilerplate by consolidating into one log function.
-  if (response.name.length != 0) {
-    NSString *str =[NSString stringWithFormat:@"%@\nFound feature called %@ at %@.", self.outputLabel.text, response.location, response.name];
-    self.outputLabel.text = str;
-    NSLog(@"Found feature called %@ at %@.", response.name, response.location);
-  } else if (response) {
-    NSString *str =[NSString stringWithFormat:@"%@\nFound no features at %@",  self.outputLabel.text,response.location];
-    self.outputLabel.text = str;
-    NSLog(@"Found no features at %@", response.location);
-  }
-}
-
-- (void)didCloseWithTrailingMetadata:(NSDictionary *)trailingMetadata error:(NSError *)error {
-  if (error) {
-    NSString *str =[NSString stringWithFormat:@"%@\nRPC error: %@", self.outputLabel.text, error];
-    self.outputLabel.text = str;
-    NSLog(@"RPC error: %@", error);
-  }
-}
-
 - (void)execRequest {
+  void (^handler)(RTGFeature *response, NSError *error) = ^(RTGFeature *response, NSError *error) {
+    // TODO(makdharma): Remove boilerplate by consolidating into one log function.
+    if (response.name.length) {
+      NSString *str =
+          [NSString stringWithFormat:@"%@\nFound feature called %@ at %@.", self.outputLabel.text,
+                                     response.location, response.name];
+      self.outputLabel.text = str;
+      NSLog(@"Found feature called %@ at %@.", response.name, response.location);
+    } else if (response) {
+      NSString *str = [NSString stringWithFormat:@"%@\nFound no features at %@",
+                                                 self.outputLabel.text, response.location];
+      self.outputLabel.text = str;
+      NSLog(@"Found no features at %@", response.location);
+    } else {
+      NSString *str =
+          [NSString stringWithFormat:@"%@\nRPC error: %@", self.outputLabel.text, error];
+      self.outputLabel.text = str;
+      NSLog(@"RPC error: %@", error);
+    }
+  };
+
   RTGPoint *point = [RTGPoint message];
   point.latitude = 409146138;
   point.longitude = -746188906;
 
-  GRPCUnaryProtoCall *call = [_service getFeatureWithMessage:point
-                                             responseHandler:self
-                                                 callOptions:nil];
+  GRPCUnaryProtoCall *call = [_service
+      getFeatureWithMessage:point
+            responseHandler:[[GRPCUnaryResponseHandler alloc] initWithResponseHandler:handler
+                                                                responseDispatchQueue:nil]
+                callOptions:nil];
   [call start];
-  call = [_service getFeatureWithMessage:[RTGPoint message]
-                         responseHandler:self
-                             callOptions:nil];
+  call = [_service
+      getFeatureWithMessage:[RTGPoint message]
+            responseHandler:[[GRPCUnaryResponseHandler alloc] initWithResponseHandler:handler
+                                                                responseDispatchQueue:nil]
+                callOptions:nil];
   [call start];
-
 }
 
 - (void)viewDidLoad {
@@ -139,16 +135,15 @@ static NSString * const kHostAddress = @"localhost:50051";
 
 @end
 
-
 #pragma mark Demo: List Features
 
 /**
  * Run the listFeatures demo. Calls listFeatures with a rectangle containing all of the features in
  * the pre-generated database. Prints each response as it comes in.
  */
-@interface ListFeaturesViewController : UIViewController<GRPCProtoResponseHandler>
+@interface ListFeaturesViewController : UIViewController <GRPCProtoResponseHandler>
 
-@property (weak, nonatomic) IBOutlet UILabel *outputLabel;
+@property(weak, nonatomic) IBOutlet UILabel *outputLabel;
 
 @end
 
@@ -177,7 +172,9 @@ static NSString * const kHostAddress = @"localhost:50051";
 - (void)didReceiveProtoMessage:(GPBMessage *)message {
   RTGFeature *response = (RTGFeature *)message;
   if (response) {
-    NSString *str =[NSString stringWithFormat:@"%@\nFound feature at %@ called %@.", self.outputLabel.text, response.location, response.name];
+    NSString *str =
+        [NSString stringWithFormat:@"%@\nFound feature at %@ called %@.", self.outputLabel.text,
+                                   response.location, response.name];
     self.outputLabel.text = str;
     NSLog(@"Found feature at %@ called %@.", response.location, response.name);
   }
@@ -185,7 +182,7 @@ static NSString * const kHostAddress = @"localhost:50051";
 
 - (void)didCloseWithTrailingMetadata:(NSDictionary *)trailingMetadata error:(NSError *)error {
   if (error) {
-    NSString *str =[NSString stringWithFormat:@"%@\nRPC error: %@", self.outputLabel.text, error];
+    NSString *str = [NSString stringWithFormat:@"%@\nRPC error: %@", self.outputLabel.text, error];
     self.outputLabel.text = str;
     NSLog(@"RPC error: %@", error);
   }
@@ -216,9 +213,9 @@ static NSString * const kHostAddress = @"localhost:50051";
  * database with a variable delay in between. Prints the statistics when they are sent from the
  * server.
  */
-@interface RecordRouteViewController : UIViewController<GRPCProtoResponseHandler>
+@interface RecordRouteViewController : UIViewController
 
-@property (weak, nonatomic) IBOutlet UILabel *outputLabel;
+@property(weak, nonatomic) IBOutlet UILabel *outputLabel;
 
 @end
 
@@ -226,16 +223,13 @@ static NSString * const kHostAddress = @"localhost:50051";
   RTGRouteGuide *_service;
 }
 
-- (dispatch_queue_t)dispatchQueue {
-  return dispatch_get_main_queue();
-}
-
 - (void)execRequest {
-  NSString *dataBasePath = [NSBundle.mainBundle pathForResource:@"route_guide_db"
-                                                         ofType:@"json"];
+  NSString *dataBasePath = [NSBundle.mainBundle pathForResource:@"route_guide_db" ofType:@"json"];
   NSData *dataBaseContent = [NSData dataWithContentsOfFile:dataBasePath];
   NSError *error;
-  NSArray *features = [NSJSONSerialization JSONObjectWithData:dataBaseContent options:0 error:&error];
+  NSArray *features = [NSJSONSerialization JSONObjectWithData:dataBaseContent
+                                                      options:0
+                                                        error:&error];
 
   if (error) {
     NSLog(@"Error reading database.");
@@ -244,14 +238,41 @@ static NSString * const kHostAddress = @"localhost:50051";
     return;
   }
 
-  GRPCStreamingProtoCall *call = [_service recordRouteWithResponseHandler:self
-                                                              callOptions:nil];
+  void (^handler)(RTGRouteSummary *response, NSError *error) =
+      ^(RTGRouteSummary *response, NSError *error) {
+        if (response) {
+          NSString *str = [NSString
+              stringWithFormat:@"%@\nFinished trip with %i points\nPassed %i features\n"
+                                "Travelled %i meters\nIt took %i seconds",
+                               self.outputLabel.text, response.pointCount, response.featureCount,
+                               response.distance, response.elapsedTime];
+          self.outputLabel.text = str;
+          NSLog(@"Finished trip with %i points", response.pointCount);
+          NSLog(@"Passed %i features", response.featureCount);
+          NSLog(@"Travelled %i meters", response.distance);
+          NSLog(@"It took %i seconds", response.elapsedTime);
+        } else {
+          NSString *str =
+              [NSString stringWithFormat:@"%@\nRPC error: %@", self.outputLabel.text, error];
+          self.outputLabel.text = str;
+          NSLog(@"RPC error: %@", error);
+        }
+      };
+
+  // We can use a unary response handler here because, despite the requests being a stream, the
+  // response of the RPC is unary.
+  GRPCStreamingProtoCall *call =
+      [_service recordRouteWithResponseHandler:[[GRPCUnaryResponseHandler alloc]
+                                                   initWithResponseHandler:handler
+                                                     responseDispatchQueue:nil]
+                                   callOptions:nil];
   [call start];
   for (id feature in features) {
     RTGPoint *location = [RTGPoint message];
-    location.longitude = [((NSNumber *) feature[@"location"][@"longitude"]) intValue];
-    location.latitude = [((NSNumber *) feature[@"location"][@"latitude"]) intValue];
-    NSString *str =[NSString stringWithFormat:@"%@\nVisiting point %@", self.outputLabel.text, location];
+    location.longitude = [((NSNumber *)feature[@"location"][@"longitude"]) intValue];
+    location.latitude = [((NSNumber *)feature[@"location"][@"latitude"]) intValue];
+    NSString *str =
+        [NSString stringWithFormat:@"%@\nVisiting point %@", self.outputLabel.text, location];
     self.outputLabel.text = str;
     NSLog(@"Visiting point %@", location);
     [call writeMessage:location];
@@ -259,31 +280,6 @@ static NSString * const kHostAddress = @"localhost:50051";
   [call finish];
 }
 
-- (void)didReceiveProtoMessage:(GPBMessage *)message {
-  RTGRouteSummary *response = (RTGRouteSummary *)message;
-
-  if (response) {
-    NSString *str =[NSString stringWithFormat:
-                    @"%@\nFinished trip with %i points\nPassed %i features\n"
-                    "Travelled %i meters\nIt took %i seconds",
-                    self.outputLabel.text, response.pointCount, response.featureCount,
-                    response.distance, response.elapsedTime];
-    self.outputLabel.text = str;
-    NSLog(@"Finished trip with %i points", response.pointCount);
-    NSLog(@"Passed %i features", response.featureCount);
-    NSLog(@"Travelled %i meters", response.distance);
-    NSLog(@"It took %i seconds", response.elapsedTime);
-  }
-}
-
-- (void)didCloseWithTrailingMetadata:(NSDictionary *)trailingMetadata error:(NSError *)error {
-  if (error) {
-    NSString *str =[NSString stringWithFormat:@"%@\nRPC error: %@", self.outputLabel.text, error];
-    self.outputLabel.text = str;
-    NSLog(@"RPC error: %@", error);
-  }
-}
-
 - (void)viewDidLoad {
   [super viewDidLoad];
 
@@ -302,16 +298,15 @@ static NSString * const kHostAddress = @"localhost:50051";
 
 @end
 
-
 #pragma mark Demo: Route Chat
 
 /**
  * Run the routeChat demo. Send some chat messages, and print any chat messages that are sent from
  * the server.
  */
-@interface RouteChatViewController : UIViewController<GRPCProtoResponseHandler>
+@interface RouteChatViewController : UIViewController <GRPCProtoResponseHandler>
 
-@property (weak, nonatomic) IBOutlet UILabel *outputLabel;
+@property(weak, nonatomic) IBOutlet UILabel *outputLabel;
 
 @end
 
@@ -324,13 +319,14 @@ static NSString * const kHostAddress = @"localhost:50051";
 }
 
 - (void)execRequest {
-  NSArray *notes = @[[RTGRouteNote noteWithMessage:@"First message" latitude:0 longitude:0],
-                     [RTGRouteNote noteWithMessage:@"Second message" latitude:0 longitude:1],
-                     [RTGRouteNote noteWithMessage:@"Third message" latitude:1 longitude:0],
-                     [RTGRouteNote noteWithMessage:@"Fourth message" latitude:0 longitude:0]];
-
-  GRPCStreamingProtoCall *call = [_service routeChatWithResponseHandler:self
-                                                            callOptions:nil];
+  NSArray *notes = @[
+    [RTGRouteNote noteWithMessage:@"First message" latitude:0 longitude:0],
+    [RTGRouteNote noteWithMessage:@"Second message" latitude:0 longitude:1],
+    [RTGRouteNote noteWithMessage:@"Third message" latitude:1 longitude:0],
+    [RTGRouteNote noteWithMessage:@"Fourth message" latitude:0 longitude:0]
+  ];
+
+  GRPCStreamingProtoCall *call = [_service routeChatWithResponseHandler:self callOptions:nil];
   [call start];
   for (RTGRouteNote *note in notes) {
     [call writeMessage:note];
@@ -341,8 +337,8 @@ static NSString * const kHostAddress = @"localhost:50051";
 - (void)didReceiveProtoMessage:(GPBMessage *)message {
   RTGRouteNote *note = (RTGRouteNote *)message;
   if (note) {
-    NSString *str =[NSString stringWithFormat:@"%@\nGot message %@ at %@",
-                    self.outputLabel.text, note.message, note.location];
+    NSString *str = [NSString stringWithFormat:@"%@\nGot message %@ at %@", self.outputLabel.text,
+                                               note.message, note.location];
     self.outputLabel.text = str;
     NSLog(@"Got message %@ at %@", note.message, note.location);
   }
@@ -352,7 +348,7 @@ static NSString * const kHostAddress = @"localhost:50051";
   if (!error) {
     NSLog(@"Chat ended.");
   } else {
-    NSString *str =[NSString stringWithFormat:@"%@\nRPC error: %@", self.outputLabel.text, error];
+    NSString *str = [NSString stringWithFormat:@"%@\nRPC error: %@", self.outputLabel.text, error];
     self.outputLabel.text = str;
     NSLog(@"RPC error: %@", error);
   }

+ 5 - 0
gRPC-C++.podspec

@@ -136,6 +136,7 @@ Pod::Spec.new do |s|
                       'include/grpcpp/impl/codegen/security/auth_context.h',
                       'include/grpcpp/impl/codegen/serialization_traits.h',
                       'include/grpcpp/impl/codegen/server_callback.h',
+                      'include/grpcpp/impl/codegen/server_callback_handlers.h',
                       'include/grpcpp/impl/codegen/server_callback_impl.h',
                       'include/grpcpp/impl/codegen/server_context.h',
                       'include/grpcpp/impl/codegen/server_context_impl.h',
@@ -260,6 +261,7 @@ Pod::Spec.new do |s|
                       'src/cpp/server/secure_server_credentials.cc',
                       'src/cpp/server/secure_server_credentials.h',
                       'src/cpp/server/server_builder.cc',
+                      'src/cpp/server/server_callback.cc',
                       'src/cpp/server/server_cc.cc',
                       'src/cpp/server/server_context.cc',
                       'src/cpp/server/server_credentials.cc',
@@ -484,6 +486,7 @@ Pod::Spec.new do |s|
                       'src/core/lib/iomgr/is_epollexclusive_available.h',
                       'src/core/lib/iomgr/load_file.h',
                       'src/core/lib/iomgr/lockfree_event.h',
+                      'src/core/lib/iomgr/logical_thread.h',
                       'src/core/lib/iomgr/nameser.h',
                       'src/core/lib/iomgr/poller/eventmanager_libuv.h',
                       'src/core/lib/iomgr/polling_entity.h',
@@ -774,6 +777,7 @@ Pod::Spec.new do |s|
                               'src/core/lib/iomgr/is_epollexclusive_available.h',
                               'src/core/lib/iomgr/load_file.h',
                               'src/core/lib/iomgr/lockfree_event.h',
+                              'src/core/lib/iomgr/logical_thread.h',
                               'src/core/lib/iomgr/nameser.h',
                               'src/core/lib/iomgr/poller/eventmanager_libuv.h',
                               'src/core/lib/iomgr/polling_entity.h',
@@ -1078,6 +1082,7 @@ Pod::Spec.new do |s|
                               'src/core/lib/iomgr/is_epollexclusive_available.h',
                               'src/core/lib/iomgr/load_file.h',
                               'src/core/lib/iomgr/lockfree_event.h',
+                              'src/core/lib/iomgr/logical_thread.h',
                               'src/core/lib/iomgr/nameser.h',
                               'src/core/lib/iomgr/poller/eventmanager_libuv.h',
                               'src/core/lib/iomgr/polling_entity.h',

+ 3 - 2
gRPC-Core.podspec

@@ -201,7 +201,6 @@ Pod::Spec.new do |s|
                       'src/core/ext/filters/client_channel/client_channel_factory.cc',
                       'src/core/ext/filters/client_channel/client_channel_factory.h',
                       'src/core/ext/filters/client_channel/client_channel_plugin.cc',
-                      'src/core/ext/filters/client_channel/connector.cc',
                       'src/core/ext/filters/client_channel/connector.h',
                       'src/core/ext/filters/client_channel/global_subchannel_pool.cc',
                       'src/core/ext/filters/client_channel/global_subchannel_pool.h',
@@ -236,7 +235,6 @@ Pod::Spec.new do |s|
                       'src/core/ext/filters/client_channel/local_subchannel_pool.h',
                       'src/core/ext/filters/client_channel/parse_address.cc',
                       'src/core/ext/filters/client_channel/parse_address.h',
-                      'src/core/ext/filters/client_channel/proxy_mapper.cc',
                       'src/core/ext/filters/client_channel/proxy_mapper.h',
                       'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
                       'src/core/ext/filters/client_channel/proxy_mapper_registry.h',
@@ -646,6 +644,8 @@ Pod::Spec.new do |s|
                       'src/core/lib/iomgr/load_file.h',
                       'src/core/lib/iomgr/lockfree_event.cc',
                       'src/core/lib/iomgr/lockfree_event.h',
+                      'src/core/lib/iomgr/logical_thread.cc',
+                      'src/core/lib/iomgr/logical_thread.h',
                       'src/core/lib/iomgr/nameser.h',
                       'src/core/lib/iomgr/poller/eventmanager_libuv.cc',
                       'src/core/lib/iomgr/poller/eventmanager_libuv.h',
@@ -1185,6 +1185,7 @@ Pod::Spec.new do |s|
                               'src/core/lib/iomgr/is_epollexclusive_available.h',
                               'src/core/lib/iomgr/load_file.h',
                               'src/core/lib/iomgr/lockfree_event.h',
+                              'src/core/lib/iomgr/logical_thread.h',
                               'src/core/lib/iomgr/nameser.h',
                               'src/core/lib/iomgr/poller/eventmanager_libuv.h',
                               'src/core/lib/iomgr/polling_entity.h',

+ 2 - 2
grpc.gemspec

@@ -113,7 +113,6 @@ Gem::Specification.new do |s|
   s.files += %w( src/core/ext/filters/client_channel/client_channel_factory.cc )
   s.files += %w( src/core/ext/filters/client_channel/client_channel_factory.h )
   s.files += %w( src/core/ext/filters/client_channel/client_channel_plugin.cc )
-  s.files += %w( src/core/ext/filters/client_channel/connector.cc )
   s.files += %w( src/core/ext/filters/client_channel/connector.h )
   s.files += %w( src/core/ext/filters/client_channel/global_subchannel_pool.cc )
   s.files += %w( src/core/ext/filters/client_channel/global_subchannel_pool.h )
@@ -148,7 +147,6 @@ Gem::Specification.new do |s|
   s.files += %w( src/core/ext/filters/client_channel/local_subchannel_pool.h )
   s.files += %w( src/core/ext/filters/client_channel/parse_address.cc )
   s.files += %w( src/core/ext/filters/client_channel/parse_address.h )
-  s.files += %w( src/core/ext/filters/client_channel/proxy_mapper.cc )
   s.files += %w( src/core/ext/filters/client_channel/proxy_mapper.h )
   s.files += %w( src/core/ext/filters/client_channel/proxy_mapper_registry.cc )
   s.files += %w( src/core/ext/filters/client_channel/proxy_mapper_registry.h )
@@ -558,6 +556,8 @@ Gem::Specification.new do |s|
   s.files += %w( src/core/lib/iomgr/load_file.h )
   s.files += %w( src/core/lib/iomgr/lockfree_event.cc )
   s.files += %w( src/core/lib/iomgr/lockfree_event.h )
+  s.files += %w( src/core/lib/iomgr/logical_thread.cc )
+  s.files += %w( src/core/lib/iomgr/logical_thread.h )
   s.files += %w( src/core/lib/iomgr/nameser.h )
   s.files += %w( src/core/lib/iomgr/poller/eventmanager_libuv.cc )
   s.files += %w( src/core/lib/iomgr/poller/eventmanager_libuv.h )

+ 8 - 12
grpc.gyp

@@ -292,6 +292,7 @@
         'src/core/lib/iomgr/is_epollexclusive_available.cc',
         'src/core/lib/iomgr/load_file.cc',
         'src/core/lib/iomgr/lockfree_event.cc',
+        'src/core/lib/iomgr/logical_thread.cc',
         'src/core/lib/iomgr/poller/eventmanager_libuv.cc',
         'src/core/lib/iomgr/polling_entity.cc',
         'src/core/lib/iomgr/pollset.cc',
@@ -495,7 +496,6 @@
         'src/core/ext/filters/client_channel/client_channel_channelz.cc',
         'src/core/ext/filters/client_channel/client_channel_factory.cc',
         'src/core/ext/filters/client_channel/client_channel_plugin.cc',
-        'src/core/ext/filters/client_channel/connector.cc',
         'src/core/ext/filters/client_channel/global_subchannel_pool.cc',
         'src/core/ext/filters/client_channel/health/health_check_client.cc',
         'src/core/ext/filters/client_channel/http_connect_handshaker.cc',
@@ -504,7 +504,6 @@
         'src/core/ext/filters/client_channel/lb_policy_registry.cc',
         'src/core/ext/filters/client_channel/local_subchannel_pool.cc',
         'src/core/ext/filters/client_channel/parse_address.cc',
-        'src/core/ext/filters/client_channel/proxy_mapper.cc',
         'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
         'src/core/ext/filters/client_channel/resolver.cc',
         'src/core/ext/filters/client_channel/resolver_registry.cc',
@@ -706,6 +705,7 @@
         'src/core/lib/iomgr/is_epollexclusive_available.cc',
         'src/core/lib/iomgr/load_file.cc',
         'src/core/lib/iomgr/lockfree_event.cc',
+        'src/core/lib/iomgr/logical_thread.cc',
         'src/core/lib/iomgr/poller/eventmanager_libuv.cc',
         'src/core/lib/iomgr/polling_entity.cc',
         'src/core/lib/iomgr/pollset.cc',
@@ -809,7 +809,6 @@
         'src/core/ext/filters/client_channel/client_channel_channelz.cc',
         'src/core/ext/filters/client_channel/client_channel_factory.cc',
         'src/core/ext/filters/client_channel/client_channel_plugin.cc',
-        'src/core/ext/filters/client_channel/connector.cc',
         'src/core/ext/filters/client_channel/global_subchannel_pool.cc',
         'src/core/ext/filters/client_channel/health/health_check_client.cc',
         'src/core/ext/filters/client_channel/http_connect_handshaker.cc',
@@ -818,7 +817,6 @@
         'src/core/ext/filters/client_channel/lb_policy_registry.cc',
         'src/core/ext/filters/client_channel/local_subchannel_pool.cc',
         'src/core/ext/filters/client_channel/parse_address.cc',
-        'src/core/ext/filters/client_channel/proxy_mapper.cc',
         'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
         'src/core/ext/filters/client_channel/resolver.cc',
         'src/core/ext/filters/client_channel/resolver_registry.cc',
@@ -971,6 +969,7 @@
         'src/core/lib/iomgr/is_epollexclusive_available.cc',
         'src/core/lib/iomgr/load_file.cc',
         'src/core/lib/iomgr/lockfree_event.cc',
+        'src/core/lib/iomgr/logical_thread.cc',
         'src/core/lib/iomgr/poller/eventmanager_libuv.cc',
         'src/core/lib/iomgr/polling_entity.cc',
         'src/core/lib/iomgr/pollset.cc',
@@ -1074,7 +1073,6 @@
         'src/core/ext/filters/client_channel/client_channel_channelz.cc',
         'src/core/ext/filters/client_channel/client_channel_factory.cc',
         'src/core/ext/filters/client_channel/client_channel_plugin.cc',
-        'src/core/ext/filters/client_channel/connector.cc',
         'src/core/ext/filters/client_channel/global_subchannel_pool.cc',
         'src/core/ext/filters/client_channel/health/health_check_client.cc',
         'src/core/ext/filters/client_channel/http_connect_handshaker.cc',
@@ -1083,7 +1081,6 @@
         'src/core/ext/filters/client_channel/lb_policy_registry.cc',
         'src/core/ext/filters/client_channel/local_subchannel_pool.cc',
         'src/core/ext/filters/client_channel/parse_address.cc',
-        'src/core/ext/filters/client_channel/proxy_mapper.cc',
         'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
         'src/core/ext/filters/client_channel/resolver.cc',
         'src/core/ext/filters/client_channel/resolver_registry.cc',
@@ -1212,6 +1209,7 @@
         'src/core/lib/iomgr/is_epollexclusive_available.cc',
         'src/core/lib/iomgr/load_file.cc',
         'src/core/lib/iomgr/lockfree_event.cc',
+        'src/core/lib/iomgr/logical_thread.cc',
         'src/core/lib/iomgr/poller/eventmanager_libuv.cc',
         'src/core/lib/iomgr/polling_entity.cc',
         'src/core/lib/iomgr/pollset.cc',
@@ -1350,7 +1348,6 @@
         'src/core/ext/filters/client_channel/client_channel_channelz.cc',
         'src/core/ext/filters/client_channel/client_channel_factory.cc',
         'src/core/ext/filters/client_channel/client_channel_plugin.cc',
-        'src/core/ext/filters/client_channel/connector.cc',
         'src/core/ext/filters/client_channel/global_subchannel_pool.cc',
         'src/core/ext/filters/client_channel/health/health_check_client.cc',
         'src/core/ext/filters/client_channel/http_connect_handshaker.cc',
@@ -1359,7 +1356,6 @@
         'src/core/ext/filters/client_channel/lb_policy_registry.cc',
         'src/core/ext/filters/client_channel/local_subchannel_pool.cc',
         'src/core/ext/filters/client_channel/parse_address.cc',
-        'src/core/ext/filters/client_channel/proxy_mapper.cc',
         'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
         'src/core/ext/filters/client_channel/resolver.cc',
         'src/core/ext/filters/client_channel/resolver_registry.cc',
@@ -1545,6 +1541,7 @@
         'src/cpp/server/health/health_check_service.cc',
         'src/cpp/server/health/health_check_service_server_builder_option.cc',
         'src/cpp/server/server_builder.cc',
+        'src/cpp/server/server_callback.cc',
         'src/cpp/server/server_cc.cc',
         'src/cpp/server/server_context.cc',
         'src/cpp/server/server_credentials.cc',
@@ -1561,7 +1558,6 @@
         'src/core/ext/filters/client_channel/client_channel_channelz.cc',
         'src/core/ext/filters/client_channel/client_channel_factory.cc',
         'src/core/ext/filters/client_channel/client_channel_plugin.cc',
-        'src/core/ext/filters/client_channel/connector.cc',
         'src/core/ext/filters/client_channel/global_subchannel_pool.cc',
         'src/core/ext/filters/client_channel/health/health_check_client.cc',
         'src/core/ext/filters/client_channel/http_connect_handshaker.cc',
@@ -1570,7 +1566,6 @@
         'src/core/ext/filters/client_channel/lb_policy_registry.cc',
         'src/core/ext/filters/client_channel/local_subchannel_pool.cc',
         'src/core/ext/filters/client_channel/parse_address.cc',
-        'src/core/ext/filters/client_channel/proxy_mapper.cc',
         'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
         'src/core/ext/filters/client_channel/resolver.cc',
         'src/core/ext/filters/client_channel/resolver_registry.cc',
@@ -1644,6 +1639,7 @@
         'src/core/lib/iomgr/is_epollexclusive_available.cc',
         'src/core/lib/iomgr/load_file.cc',
         'src/core/lib/iomgr/lockfree_event.cc',
+        'src/core/lib/iomgr/logical_thread.cc',
         'src/core/lib/iomgr/poller/eventmanager_libuv.cc',
         'src/core/lib/iomgr/polling_entity.cc',
         'src/core/lib/iomgr/pollset.cc',
@@ -1900,6 +1896,7 @@
         'src/cpp/server/health/health_check_service.cc',
         'src/cpp/server/health/health_check_service_server_builder_option.cc',
         'src/cpp/server/server_builder.cc',
+        'src/cpp/server/server_callback.cc',
         'src/cpp/server/server_cc.cc',
         'src/cpp/server/server_context.cc',
         'src/cpp/server/server_credentials.cc',
@@ -1916,7 +1913,6 @@
         'src/core/ext/filters/client_channel/client_channel_channelz.cc',
         'src/core/ext/filters/client_channel/client_channel_factory.cc',
         'src/core/ext/filters/client_channel/client_channel_plugin.cc',
-        'src/core/ext/filters/client_channel/connector.cc',
         'src/core/ext/filters/client_channel/global_subchannel_pool.cc',
         'src/core/ext/filters/client_channel/health/health_check_client.cc',
         'src/core/ext/filters/client_channel/http_connect_handshaker.cc',
@@ -1925,7 +1921,6 @@
         'src/core/ext/filters/client_channel/lb_policy_registry.cc',
         'src/core/ext/filters/client_channel/local_subchannel_pool.cc',
         'src/core/ext/filters/client_channel/parse_address.cc',
-        'src/core/ext/filters/client_channel/proxy_mapper.cc',
         'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
         'src/core/ext/filters/client_channel/resolver.cc',
         'src/core/ext/filters/client_channel/resolver_registry.cc',
@@ -1999,6 +1994,7 @@
         'src/core/lib/iomgr/is_epollexclusive_available.cc',
         'src/core/lib/iomgr/load_file.cc',
         'src/core/lib/iomgr/lockfree_event.cc',
+        'src/core/lib/iomgr/logical_thread.cc',
         'src/core/lib/iomgr/poller/eventmanager_libuv.cc',
         'src/core/lib/iomgr/polling_entity.cc',
         'src/core/lib/iomgr/pollset.cc',

+ 4 - 0
include/grpc/impl/codegen/grpc_types.h

@@ -726,6 +726,10 @@ typedef struct grpc_experimental_completion_queue_functor {
       operation succeeded (non-zero) or failed (zero) */
   void (*functor_run)(struct grpc_experimental_completion_queue_functor*, int);
 
+  /** The inlineable member specifies whether this functor can be run inline.
+      This should only be used for trivial internally-defined functors. */
+  int inlineable;
+
   /** The following fields are not API. They are meant for internal use. */
   int internal_success;
   struct grpc_experimental_completion_queue_functor* internal_next;
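For illustration only (editor's sketch, not part of this diff): how a trivial, internally-defined functor might opt in to inline execution through the new inlineable field documented above. The TrivialTag type below is hypothetical.

    #include <grpc/grpc.h>

    // Hypothetical trivial functor: it only records the result, so running it
    // inline (without a thread hop) is safe.
    struct TrivialTag : grpc_experimental_completion_queue_functor {
      TrivialTag() {
        functor_run = &TrivialTag::Run;
        inlineable = 1;  // non-trivial or application-defined functors keep 0
      }
      static void Run(grpc_experimental_completion_queue_functor* self, int ok) {
        static_cast<TrivialTag*>(self)->succeeded = (ok != 0);
      }
      bool succeeded = false;
    };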

+ 0 - 1
include/grpcpp/channel_impl.h

@@ -20,7 +20,6 @@
 #define GRPCPP_CHANNEL_IMPL_H
 
 #include <memory>
-#include <mutex>
 
 #include <grpc/grpc.h>
 #include <grpcpp/impl/call.h>

+ 34 - 34
include/grpcpp/impl/codegen/async_generic_service.h

@@ -21,6 +21,7 @@
 
 #include <grpcpp/impl/codegen/async_stream_impl.h>
 #include <grpcpp/impl/codegen/byte_buffer.h>
+#include <grpcpp/impl/codegen/server_callback_handlers.h>
 #include <grpcpp/impl/codegen/server_callback_impl.h>
 
 struct grpc_server;
@@ -42,12 +43,12 @@ class GenericServerContext final : public ::grpc_impl::ServerContext {
 
  private:
   friend class grpc_impl::Server;
-  friend class ServerInterface;
+  friend class grpc::ServerInterface;
 
   void Clear() {
     method_.clear();
     host_.clear();
-    ServerContext::Clear();
+    ::grpc_impl::ServerContext::Clear();
   }
 
   grpc::string method_;
@@ -89,39 +90,30 @@ class AsyncGenericService final {
 namespace experimental {
 
 /// \a ServerGenericBidiReactor is the reactor class for bidi streaming RPCs
-/// invoked on a CallbackGenericService. The API difference relative to
-/// ServerBidiReactor is that the argument to OnStarted is a
-/// GenericServerContext rather than a ServerContext. All other reaction and
-/// operation initiation APIs are the same as ServerBidiReactor.
-class ServerGenericBidiReactor
-    : public ::grpc_impl::experimental::ServerBidiReactor<ByteBuffer,
-                                                          ByteBuffer> {
+/// invoked on a CallbackGenericService. It is just a ServerBidiReactor with
+/// ByteBuffer arguments.
+using ServerGenericBidiReactor =
+    ::grpc_impl::experimental::ServerBidiReactor<ByteBuffer, ByteBuffer>;
+
+class GenericCallbackServerContext final
+    : public ::grpc_impl::experimental::CallbackServerContext {
  public:
-  /// Similar to ServerBidiReactor::OnStarted except for argument type.
-  ///
-  /// \param[in] context The context object associated with this RPC.
-  virtual void OnStarted(GenericServerContext* /*context*/) {}
+  const grpc::string& method() const { return method_; }
+  const grpc::string& host() const { return host_; }
 
  private:
-  void OnStarted(::grpc_impl::ServerContext* ctx) final {
-    OnStarted(static_cast<GenericServerContext*>(ctx));
-  }
-};
-
-}  // namespace experimental
+  friend class ::grpc_impl::Server;
+  friend class ::grpc::ServerInterface;
 
-namespace internal {
-class UnimplementedGenericBidiReactor
-    : public experimental::ServerGenericBidiReactor {
- public:
-  void OnDone() override { delete this; }
-  void OnStarted(GenericServerContext*) override {
-    this->Finish(Status(StatusCode::UNIMPLEMENTED, ""));
+  void Clear() {
+    method_.clear();
+    host_.clear();
+    ::grpc_impl::experimental::CallbackServerContext::Clear();
   }
-};
-}  // namespace internal
 
-namespace experimental {
+  grpc::string method_;
+  grpc::string host_;
+};
 
 /// \a CallbackGenericService is the base class for generic services implemented
 /// using the callback API and registered through the ServerBuilder using
@@ -132,10 +124,16 @@ class CallbackGenericService {
   virtual ~CallbackGenericService() {}
 
   /// The "method handler" for the generic API. This function should be
-  /// overridden to return a ServerGenericBidiReactor that implements the
-  /// application-level interface for this RPC.
-  virtual ServerGenericBidiReactor* CreateReactor() {
-    return new internal::UnimplementedGenericBidiReactor;
+  /// overridden to provide a ServerGenericBidiReactor that implements the
+  /// application-level interface for this RPC. Unimplemented by default.
+  virtual ServerGenericBidiReactor* CreateReactor(
+      GenericCallbackServerContext* /*ctx*/) {
+    class Reactor : public ServerGenericBidiReactor {
+     public:
+      Reactor() { this->Finish(Status(StatusCode::UNIMPLEMENTED, "")); }
+      void OnDone() override { delete this; }
+    };
+    return new Reactor;
   }
 
  private:
@@ -145,7 +143,9 @@ class CallbackGenericService {
   Handler() {
     return new ::grpc_impl::internal::CallbackBidiHandler<ByteBuffer,
                                                           ByteBuffer>(
-        [this] { return CreateReactor(); });
+        [this](::grpc_impl::experimental::CallbackServerContext* ctx) {
+          return CreateReactor(static_cast<GenericCallbackServerContext*>(ctx));
+        });
   }
 
   grpc_impl::Server* server_{nullptr};
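For illustration only (editor's sketch, not part of this diff): a generic service that overrides the new CreateReactor(GenericCallbackServerContext*) hook shown above and echoes each message back. The class names are hypothetical; only the hook signature and reactor operations come from the change itself.

    class EchoAnythingService : public grpc::experimental::CallbackGenericService {
      grpc::experimental::ServerGenericBidiReactor* CreateReactor(
          grpc::experimental::GenericCallbackServerContext* ctx) override {
        // ctx->method() / ctx->host() could be used to dispatch per method;
        // this sketch treats every method the same way.
        class Echoer : public grpc::experimental::ServerGenericBidiReactor {
         public:
          Echoer() { StartRead(&buf_); }
          void OnReadDone(bool ok) override {
            if (!ok) {
              Finish(grpc::Status::OK);
              return;
            }
            StartWrite(&buf_);  // write back exactly what was read
          }
          void OnWriteDone(bool ok) override {
            if (ok) StartRead(&buf_); else Finish(grpc::Status::CANCELLED);
          }
          void OnDone() override { delete this; }

         private:
          grpc::ByteBuffer buf_;
        };
        (void)ctx;
        return new Echoer;
      }
    };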

+ 13 - 5
include/grpcpp/impl/codegen/callback_common.h

@@ -47,8 +47,8 @@ void CatchingCallback(Func&& func, Args&&... args) {
 #endif  // GRPC_ALLOW_EXCEPTIONS
 }
 
-template <class ReturnType, class Func, class... Args>
-ReturnType* CatchingReactorCreator(Func&& func, Args&&... args) {
+template <class Reactor, class Func, class... Args>
+Reactor* CatchingReactorGetter(Func&& func, Args&&... args) {
 #if GRPC_ALLOW_EXCEPTIONS
   try {
     return func(std::forward<Args>(args)...);
@@ -85,6 +85,10 @@ class CallbackWithStatusTag
       : call_(call), func_(std::move(f)), ops_(ops) {
     g_core_codegen_interface->grpc_call_ref(call);
     functor_run = &CallbackWithStatusTag::StaticRun;
+    // A client-side callback should never be run inline since it will always
+    // have work to do from the user application. So, set the parent's
+    // inlineable field to false.
+    inlineable = false;
   }
   ~CallbackWithStatusTag() {}
   Status* status_ptr() { return &status_; }
@@ -147,8 +151,8 @@ class CallbackWithSuccessTag
   CallbackWithSuccessTag() : call_(nullptr) {}
 
   CallbackWithSuccessTag(grpc_call* call, std::function<void(bool)> f,
-                         CompletionQueueTag* ops) {
-    Set(call, f, ops);
+                         CompletionQueueTag* ops, bool can_inline) {
+    Set(call, f, ops, can_inline);
   }
 
   CallbackWithSuccessTag(const CallbackWithSuccessTag&) = delete;
@@ -159,14 +163,18 @@ class CallbackWithSuccessTag
   // Set can only be called on a default-constructed or Clear'ed tag.
   // It should never be called on a tag that was constructed with arguments
   // or on a tag that has been Set before unless the tag has been cleared.
+  // can_inline indicates that this particular callback can be executed inline
+  // (without needing a thread hop) and is only used for library-provided server
+  // callbacks.
   void Set(grpc_call* call, std::function<void(bool)> f,
-           CompletionQueueTag* ops) {
+           CompletionQueueTag* ops, bool can_inline) {
     GPR_CODEGEN_ASSERT(call_ == nullptr);
     g_core_codegen_interface->grpc_call_ref(call);
     call_ = call;
     func_ = std::move(f);
     ops_ = ops;
     functor_run = &CallbackWithSuccessTag::StaticRun;
+    inlineable = can_inline;
   }
 
   void Clear() {

+ 17 - 17
include/grpcpp/impl/codegen/client_callback_impl.h

@@ -285,18 +285,18 @@ class ClientBidiReactor {
   /// call of OnReadDone or OnDone.
   ///
   /// \param[in] ok Was the initial metadata read successfully? If false, no
-  ///               further read-side operation will succeed.
+  ///               new read/write operation will succeed.
   virtual void OnReadInitialMetadataDone(bool /*ok*/) {}
 
   /// Notifies the application that a StartRead operation completed.
   ///
-  /// \param[in] ok Was it successful? If false, no further read-side operation
+  /// \param[in] ok Was it successful? If false, no new read/write operation
   ///               will succeed.
   virtual void OnReadDone(bool /*ok*/) {}
 
   /// Notifies the application that a StartWrite operation completed.
   ///
-  /// \param[in] ok Was it successful? If false, no further write-side operation
+  /// \param[in] ok Was it successful? If false, no new read/write operation
   ///               will succeed.
   virtual void OnWriteDone(bool /*ok*/) {}
 
@@ -457,7 +457,7 @@ class ClientCallbackReaderWriterImpl
                      reactor_->OnReadInitialMetadataDone(ok);
                      MaybeFinish();
                    },
-                   &start_ops_);
+                   &start_ops_, /*can_inline=*/false);
     if (!start_corked_) {
       start_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
                                      context_->initial_metadata_flags());
@@ -473,7 +473,7 @@ class ClientCallbackReaderWriterImpl
                      reactor_->OnWriteDone(ok);
                      MaybeFinish();
                    },
-                   &write_ops_);
+                   &write_ops_, /*can_inline=*/false);
     write_ops_.set_core_cq_tag(&write_tag_);
 
     read_tag_.Set(call_.call(),
@@ -481,7 +481,7 @@ class ClientCallbackReaderWriterImpl
                     reactor_->OnReadDone(ok);
                     MaybeFinish();
                   },
-                  &read_ops_);
+                  &read_ops_, /*can_inline=*/false);
     read_ops_.set_core_cq_tag(&read_tag_);
     if (read_ops_at_start_) {
       call_.PerformOps(&read_ops_);
@@ -496,7 +496,7 @@ class ClientCallbackReaderWriterImpl
     }
 
     finish_tag_.Set(call_.call(), [this](bool /*ok*/) { MaybeFinish(); },
-                    &finish_ops_);
+                    &finish_ops_, /*can_inline=*/false);
     finish_ops_.ClientRecvStatus(context_, &finish_status_);
     finish_ops_.set_core_cq_tag(&finish_tag_);
     call_.PerformOps(&finish_ops_);
@@ -544,7 +544,7 @@ class ClientCallbackReaderWriterImpl
                            reactor_->OnWritesDoneDone(ok);
                            MaybeFinish();
                          },
-                         &writes_done_ops_);
+                         &writes_done_ops_, /*can_inline=*/false);
     writes_done_ops_.set_core_cq_tag(&writes_done_tag_);
     callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
     if (started_) {
@@ -668,7 +668,7 @@ class ClientCallbackReaderImpl
                      reactor_->OnReadInitialMetadataDone(ok);
                      MaybeFinish();
                    },
-                   &start_ops_);
+                   &start_ops_, /*can_inline=*/false);
     start_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
                                    context_->initial_metadata_flags());
     start_ops_.RecvInitialMetadata(context_);
@@ -681,14 +681,14 @@ class ClientCallbackReaderImpl
                     reactor_->OnReadDone(ok);
                     MaybeFinish();
                   },
-                  &read_ops_);
+                  &read_ops_, /*can_inline=*/false);
     read_ops_.set_core_cq_tag(&read_tag_);
     if (read_ops_at_start_) {
       call_.PerformOps(&read_ops_);
     }
 
     finish_tag_.Set(call_.call(), [this](bool /*ok*/) { MaybeFinish(); },
-                    &finish_ops_);
+                    &finish_ops_, /*can_inline=*/false);
     finish_ops_.ClientRecvStatus(context_, &finish_status_);
     finish_ops_.set_core_cq_tag(&finish_tag_);
     call_.PerformOps(&finish_ops_);
@@ -808,7 +808,7 @@ class ClientCallbackWriterImpl
                      reactor_->OnReadInitialMetadataDone(ok);
                      MaybeFinish();
                    },
-                   &start_ops_);
+                   &start_ops_, /*can_inline=*/false);
     if (!start_corked_) {
       start_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
                                      context_->initial_metadata_flags());
@@ -824,7 +824,7 @@ class ClientCallbackWriterImpl
                      reactor_->OnWriteDone(ok);
                      MaybeFinish();
                    },
-                   &write_ops_);
+                   &write_ops_, /*can_inline=*/false);
     write_ops_.set_core_cq_tag(&write_tag_);
 
     if (write_ops_at_start_) {
@@ -836,7 +836,7 @@ class ClientCallbackWriterImpl
     }
 
     finish_tag_.Set(call_.call(), [this](bool /*ok*/) { MaybeFinish(); },
-                    &finish_ops_);
+                    &finish_ops_, /*can_inline=*/false);
     finish_ops_.ClientRecvStatus(context_, &finish_status_);
     finish_ops_.set_core_cq_tag(&finish_tag_);
     call_.PerformOps(&finish_ops_);
@@ -874,7 +874,7 @@ class ClientCallbackWriterImpl
                            reactor_->OnWritesDoneDone(ok);
                            MaybeFinish();
                          },
-                         &writes_done_ops_);
+                         &writes_done_ops_, /*can_inline=*/false);
     writes_done_ops_.set_core_cq_tag(&writes_done_tag_);
     callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
     if (started_) {
@@ -983,7 +983,7 @@ class ClientCallbackUnaryImpl final : public experimental::ClientCallbackUnary {
                      reactor_->OnReadInitialMetadataDone(ok);
                      MaybeFinish();
                    },
-                   &start_ops_);
+                   &start_ops_, /*can_inline=*/false);
     start_ops_.SendInitialMetadata(&context_->send_initial_metadata_,
                                    context_->initial_metadata_flags());
     start_ops_.RecvInitialMetadata(context_);
@@ -991,7 +991,7 @@ class ClientCallbackUnaryImpl final : public experimental::ClientCallbackUnary {
     call_.PerformOps(&start_ops_);
 
     finish_tag_.Set(call_.call(), [this](bool /*ok*/) { MaybeFinish(); },
-                    &finish_ops_);
+                    &finish_ops_, /*can_inline=*/false);
     finish_ops_.ClientRecvStatus(context_, &finish_status_);
     finish_ops_.set_core_cq_tag(&finish_tag_);
     call_.PerformOps(&finish_ops_);
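For illustration only (editor's sketch, not part of this diff): a client reactor built on the callbacks whose documentation is adjusted above. EchoTestService, EchoRequest, EchoResponse, and the generated BidiStream entry point are hypothetical stand-ins for codegen output.

    class EchoChatter final
        : public grpc::experimental::ClientBidiReactor<EchoRequest, EchoResponse> {
     public:
      explicit EchoChatter(EchoTestService::Stub* stub) {
        request_.set_message("hello");
        stub->experimental_async()->BidiStream(&context_, this);
        StartWrite(&request_);   // one write...
        StartRead(&response_);   // ...and one read in flight
        StartCall();
      }
      void OnWriteDone(bool ok) override {
        // Per the comments above: if ok is false, no new read/write will succeed.
        if (ok) StartWritesDone();
      }
      void OnReadDone(bool ok) override {
        // If ok is true, response_ now holds a server message.
      }
      void OnDone(const grpc::Status& status) override {
        // Final status of the RPC; notify whoever owns this reactor.
      }

     private:
      grpc::ClientContext context_;
      EchoRequest request_;
      EchoResponse response_;
    };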

+ 13 - 1
include/grpcpp/impl/codegen/client_context_impl.h

@@ -36,7 +36,6 @@
 
 #include <map>
 #include <memory>
-#include <mutex>
 #include <string>
 
 #include <grpc/impl/codegen/compression_types.h>
@@ -67,6 +66,7 @@ template <class InputMessage, class OutputMessage>
 class BlockingUnaryCallImpl;
 class CallOpClientRecvStatus;
 class CallOpRecvInitialMetadata;
+class ServerContextImpl;
 }  // namespace internal
 
 namespace testing {
@@ -107,6 +107,11 @@ class ClientAsyncReaderWriter;
 template <class R>
 class ClientAsyncResponseReader;
 
+namespace experimental {
+class ServerContextBase;
+class CallbackServerContext;
+}  // namespace experimental
+
 /// Options for \a ClientContext::FromServerContext specifying which traits from
 /// the \a ServerContext to propagate (copy) from it into a new \a
 /// ClientContext.
@@ -196,6 +201,9 @@ class ClientContext {
   static std::unique_ptr<ClientContext> FromServerContext(
       const grpc_impl::ServerContext& server_context,
       PropagationOptions options = PropagationOptions());
+  static std::unique_ptr<ClientContext> FromCallbackServerContext(
+      const grpc_impl::experimental::CallbackServerContext& server_context,
+      PropagationOptions options = PropagationOptions());
 
   /// Add the (\a meta_key, \a meta_value) pair to the metadata associated with
   /// a client call. These are made available at the server side by the \a
@@ -475,6 +483,10 @@ class ClientContext {
 
   void SendCancelToInterceptors();
 
+  static std::unique_ptr<ClientContext> FromInternalServerContext(
+      const grpc_impl::experimental::ServerContextBase& server_context,
+      PropagationOptions options);
+
   bool initial_metadata_received_;
   bool wait_for_ready_;
   bool wait_for_ready_explicitly_set_;

+ 4 - 2
include/grpcpp/impl/codegen/completion_queue_impl.h

@@ -46,7 +46,6 @@ namespace grpc_impl {
 class Channel;
 class Server;
 class ServerBuilder;
-class ServerContext;
 template <class R>
 class ClientReader;
 template <class W>
@@ -57,6 +56,9 @@ template <class R>
 class ServerReader;
 template <class W>
 class ServerWriter;
+namespace experimental {
+class ServerContextBase;
+}  // namespace experimental
 namespace internal {
 template <class W, class R>
 class ServerReaderWriterBody;
@@ -275,7 +277,7 @@ class CompletionQueue : private ::grpc::GrpcLibraryCodegen {
   template <::grpc::StatusCode code>
   friend class ::grpc_impl::internal::ErrorMethodHandler;
   friend class ::grpc_impl::Server;
-  friend class ::grpc_impl::ServerContext;
+  friend class ::grpc_impl::experimental::ServerContextBase;
   friend class ::grpc::ServerInterface;
   template <class InputMessage, class OutputMessage>
   friend class ::grpc::internal::BlockingUnaryCallImpl;

+ 25 - 12
include/grpcpp/impl/codegen/method_handler_impl.h

@@ -65,8 +65,10 @@ class RpcMethodHandler : public ::grpc::internal::MethodHandler {
     ::grpc::Status status = param.status;
     if (status.ok()) {
       status = CatchingFunctionHandler([this, &param, &rsp] {
-        return func_(service_, param.server_context,
-                     static_cast<RequestType*>(param.request), &rsp);
+        return func_(
+            service_,
+            static_cast<::grpc_impl::ServerContext*>(param.server_context),
+            static_cast<RequestType*>(param.request), &rsp);
       });
       static_cast<RequestType*>(param.request)->~RequestType();
     }
@@ -128,12 +130,16 @@ class ClientStreamingHandler : public ::grpc::internal::MethodHandler {
       : func_(func), service_(service) {}
 
   void RunHandler(const HandlerParameter& param) final {
-    ::grpc_impl::ServerReader<RequestType> reader(param.call,
-                                                  param.server_context);
+    ::grpc_impl::ServerReader<RequestType> reader(
+        param.call,
+        static_cast<::grpc_impl::ServerContext*>(param.server_context));
     ResponseType rsp;
     ::grpc::Status status =
         CatchingFunctionHandler([this, &param, &reader, &rsp] {
-          return func_(service_, param.server_context, &reader, &rsp);
+          return func_(
+              service_,
+              static_cast<::grpc_impl::ServerContext*>(param.server_context),
+              &reader, &rsp);
         });
 
     ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
@@ -178,11 +184,14 @@ class ServerStreamingHandler : public ::grpc::internal::MethodHandler {
   void RunHandler(const HandlerParameter& param) final {
     ::grpc::Status status = param.status;
     if (status.ok()) {
-      ::grpc_impl::ServerWriter<ResponseType> writer(param.call,
-                                                     param.server_context);
+      ::grpc_impl::ServerWriter<ResponseType> writer(
+          param.call,
+          static_cast<::grpc_impl::ServerContext*>(param.server_context));
       status = CatchingFunctionHandler([this, &param, &writer] {
-        return func_(service_, param.server_context,
-                     static_cast<RequestType*>(param.request), &writer);
+        return func_(
+            service_,
+            static_cast<::grpc_impl::ServerContext*>(param.server_context),
+            static_cast<RequestType*>(param.request), &writer);
       });
       static_cast<RequestType*>(param.request)->~RequestType();
     }
@@ -246,9 +255,12 @@ class TemplatedBidiStreamingHandler : public ::grpc::internal::MethodHandler {
       : func_(func), write_needed_(WriteNeeded) {}
 
   void RunHandler(const HandlerParameter& param) final {
-    Streamer stream(param.call, param.server_context);
+    Streamer stream(param.call, static_cast<::grpc_impl::ServerContext*>(
+                                    param.server_context));
     ::grpc::Status status = CatchingFunctionHandler([this, &param, &stream] {
-      return func_(param.server_context, &stream);
+      return func_(
+          static_cast<::grpc_impl::ServerContext*>(param.server_context),
+          &stream);
     });
 
     ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
@@ -333,7 +345,8 @@ template <::grpc::StatusCode code>
 class ErrorMethodHandler : public ::grpc::internal::MethodHandler {
  public:
   template <class T>
-  static void FillOps(::grpc_impl::ServerContext* context, T* ops) {
+  static void FillOps(::grpc_impl::experimental::ServerContextBase* context,
+                      T* ops) {
     ::grpc::Status status(code, "");
     if (!context->sent_initial_metadata_) {
       ops->SendInitialMetadata(&context->initial_metadata_,

+ 7 - 4
include/grpcpp/impl/codegen/rpc_service_method.h

@@ -32,8 +32,10 @@
 #include <grpcpp/impl/codegen/status.h>
 
 namespace grpc_impl {
-class ServerContext;
+namespace experimental {
+class ServerContextBase;
 }
+}  // namespace grpc_impl
 
 namespace grpc {
 namespace internal {
@@ -52,8 +54,9 @@ class MethodHandler {
     /// \param requester : used only by the callback API. It is a function
     ///        called by the RPC Controller to request another RPC (and also
     ///        to set up the state required to make that request possible)
-    HandlerParameter(Call* c, ::grpc_impl::ServerContext* context, void* req,
-                     Status req_status, void* handler_data,
+    HandlerParameter(Call* c,
+                     ::grpc_impl::experimental::ServerContextBase* context,
+                     void* req, Status req_status, void* handler_data,
                      std::function<void()> requester)
         : call(c),
           server_context(context),
@@ -63,7 +66,7 @@ class MethodHandler {
           call_requester(std::move(requester)) {}
     ~HandlerParameter() {}
     Call* const call;
-    ::grpc_impl::ServerContext* const server_context;
+    ::grpc_impl::experimental::ServerContextBase* const server_context;
     void* const request;
     const Status status;
     void* const internal_data;
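
The sync handlers above now receive the context through the polymorphic ::grpc_impl::experimental::ServerContextBase pointer carried in HandlerParameter and static_cast it back to ::grpc_impl::ServerContext, presumably because each handler kind is only ever registered against the matching concrete context type. A minimal, self-contained sketch of that downcast pattern, using hypothetical stand-in types rather than the gRPC classes:

#include <cassert>

// Hypothetical stand-ins for ServerContextBase / ServerContext; only the
// knowledge of the dynamic type matters for this sketch.
struct ContextBase {
  virtual ~ContextBase() = default;
};
struct SyncContext : ContextBase {
  bool metadata_sent = false;
};

// A sync-style handler receives the base pointer, but it is only ever paired
// with a SyncContext, so a static_cast (no runtime check) is enough.
void RunSyncHandler(ContextBase* base) {
  auto* ctx = static_cast<SyncContext*>(base);
  ctx->metadata_sent = true;
}

int main() {
  SyncContext ctx;
  RunSyncHandler(&ctx);
  assert(ctx.metadata_sent);
  return 0;
}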

+ 5 - 7
include/grpcpp/impl/codegen/server_callback.h

@@ -23,20 +23,18 @@
 
 namespace grpc {
 namespace experimental {
-template <class Request, class Response>
-using ServerReadReactor =
-    ::grpc_impl::experimental::ServerReadReactor<Request, Response>;
+template <class Request>
+using ServerReadReactor = ::grpc_impl::experimental::ServerReadReactor<Request>;
 
-template <class Request, class Response>
+template <class Response>
 using ServerWriteReactor =
-    ::grpc_impl::experimental::ServerWriteReactor<Request, Response>;
+    ::grpc_impl::experimental::ServerWriteReactor<Response>;
 
 template <class Request, class Response>
 using ServerBidiReactor =
     ::grpc_impl::experimental::ServerBidiReactor<Request, Response>;
 
-typedef ::grpc_impl::experimental::ServerCallbackRpcController
-    ServerCallbackRpcController;
+using ServerUnaryReactor = ::grpc_impl::experimental::ServerUnaryReactor;
 
 }  // namespace experimental
 }  // namespace grpc
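
With ServerCallbackRpcController replaced by ServerUnaryReactor, all four RPC shapes are now expressed as reactors whose hooks default to empty reactions, except OnDone, which (per the comments later in this change) derived classes must override. A tiny, self-contained sketch of that override-only-what-you-need shape, with hypothetical class names that are not part of the gRPC API:

#include <iostream>

// Hypothetical miniature of the reactor style behind these aliases: OnDone is
// the one required hook, every other reaction has a default empty body.
class MiniUnaryReactor {
 public:
  virtual ~MiniUnaryReactor() = default;
  virtual void OnSendInitialMetadataDone(bool /*ok*/) {}
  virtual void OnCancel() {}
  virtual void OnDone() = 0;
};

class LoggingReactor : public MiniUnaryReactor {
 public:
  // Only the completion hook is interesting to this user; the other
  // reactions keep their default empty implementations.
  void OnDone() override { std::cout << "RPC finished\n"; }
};

int main() {
  LoggingReactor reactor;
  // A library driver would invoke the hooks as events arrive; simulated here.
  reactor.OnSendInitialMetadataDone(true);
  reactor.OnDone();
  return 0;
}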

+ 814 - 0
include/grpcpp/impl/codegen/server_callback_handlers.h

@@ -0,0 +1,814 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef GRPCPP_IMPL_CODEGEN_SERVER_CALLBACK_HANDLERS_H
+#define GRPCPP_IMPL_CODEGEN_SERVER_CALLBACK_HANDLERS_H
+
+#include <grpcpp/impl/codegen/message_allocator.h>
+#include <grpcpp/impl/codegen/rpc_service_method.h>
+#include <grpcpp/impl/codegen/server_callback_impl.h>
+#include <grpcpp/impl/codegen/server_context_impl.h>
+#include <grpcpp/impl/codegen/status.h>
+
+namespace grpc_impl {
+namespace internal {
+
+template <class RequestType, class ResponseType>
+class CallbackUnaryHandler : public ::grpc::internal::MethodHandler {
+ public:
+  explicit CallbackUnaryHandler(
+      std::function<experimental::ServerUnaryReactor*(
+          ::grpc_impl::experimental::CallbackServerContext*, const RequestType*,
+          ResponseType*)>
+          get_reactor)
+      : get_reactor_(std::move(get_reactor)) {}
+
+  void SetMessageAllocator(
+      ::grpc::experimental::MessageAllocator<RequestType, ResponseType>*
+          allocator) {
+    allocator_ = allocator;
+  }
+
+  void RunHandler(const HandlerParameter& param) final {
+    // Arena allocate a controller structure (that includes request/response)
+    ::grpc::g_core_codegen_interface->grpc_call_ref(param.call->call());
+    auto* allocator_state = static_cast<
+        ::grpc::experimental::MessageHolder<RequestType, ResponseType>*>(
+        param.internal_data);
+
+    auto* call = new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
+        param.call->call(), sizeof(ServerCallbackUnaryImpl)))
+        ServerCallbackUnaryImpl(
+            static_cast<::grpc_impl::experimental::CallbackServerContext*>(
+                param.server_context),
+            param.call, allocator_state, std::move(param.call_requester));
+    param.server_context->BeginCompletionOp(
+        param.call, [call](bool) { call->MaybeDone(); }, call);
+
+    experimental::ServerUnaryReactor* reactor = nullptr;
+    if (param.status.ok()) {
+      reactor = ::grpc::internal::CatchingReactorGetter<
+          experimental::ServerUnaryReactor>(
+          get_reactor_,
+          static_cast<::grpc_impl::experimental::CallbackServerContext*>(
+              param.server_context),
+          call->request(), call->response());
+    }
+
+    if (reactor == nullptr) {
+      // if deserialization or reactor creator failed, we need to fail the call
+      reactor = new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
+          param.call->call(), sizeof(UnimplementedUnaryReactor)))
+          UnimplementedUnaryReactor(
+              ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""));
+    }
+
+    // Invoke SetupReactor as the last part of the handler
+    call->SetupReactor(reactor);
+  }
+
+  void* Deserialize(grpc_call* call, grpc_byte_buffer* req,
+                    ::grpc::Status* status, void** handler_data) final {
+    ::grpc::ByteBuffer buf;
+    buf.set_buffer(req);
+    RequestType* request = nullptr;
+    ::grpc::experimental::MessageHolder<RequestType, ResponseType>*
+        allocator_state = nullptr;
+    if (allocator_ != nullptr) {
+      allocator_state = allocator_->AllocateMessages();
+    } else {
+      allocator_state =
+          new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
+              call, sizeof(DefaultMessageHolder<RequestType, ResponseType>)))
+              DefaultMessageHolder<RequestType, ResponseType>();
+    }
+    *handler_data = allocator_state;
+    request = allocator_state->request();
+    *status =
+        ::grpc::SerializationTraits<RequestType>::Deserialize(&buf, request);
+    buf.Release();
+    if (status->ok()) {
+      return request;
+    }
+    // Clean up on deserialization failure.
+    allocator_state->Release();
+    return nullptr;
+  }
+
+ private:
+  std::function<experimental::ServerUnaryReactor*(
+      ::grpc_impl::experimental::CallbackServerContext*, const RequestType*,
+      ResponseType*)>
+      get_reactor_;
+  ::grpc::experimental::MessageAllocator<RequestType, ResponseType>*
+      allocator_ = nullptr;
+
+  class ServerCallbackUnaryImpl : public experimental::ServerCallbackUnary {
+   public:
+    void Finish(::grpc::Status s) override {
+      finish_tag_.Set(
+          call_.call(), [this](bool) { MaybeDone(); }, &finish_ops_,
+          reactor_.load(std::memory_order_relaxed)->InternalInlineable());
+      finish_ops_.set_core_cq_tag(&finish_tag_);
+
+      if (!ctx_->sent_initial_metadata_) {
+        finish_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
+                                        ctx_->initial_metadata_flags());
+        if (ctx_->compression_level_set()) {
+          finish_ops_.set_compression_level(ctx_->compression_level());
+        }
+        ctx_->sent_initial_metadata_ = true;
+      }
+      // The response is dropped if the status is not OK.
+      if (s.ok()) {
+        finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_,
+                                     finish_ops_.SendMessagePtr(response()));
+      } else {
+        finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, s);
+      }
+      finish_ops_.set_core_cq_tag(&finish_tag_);
+      call_.PerformOps(&finish_ops_);
+    }
+
+    void SendInitialMetadata() override {
+      GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
+      this->Ref();
+      meta_tag_.Set(call_.call(),
+                    [this](bool ok) {
+                      reactor_.load(std::memory_order_relaxed)
+                          ->OnSendInitialMetadataDone(ok);
+                      MaybeDone();
+                    },
+                    &meta_ops_, false);
+      meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
+                                    ctx_->initial_metadata_flags());
+      if (ctx_->compression_level_set()) {
+        meta_ops_.set_compression_level(ctx_->compression_level());
+      }
+      ctx_->sent_initial_metadata_ = true;
+      meta_ops_.set_core_cq_tag(&meta_tag_);
+      call_.PerformOps(&meta_ops_);
+    }
+
+   private:
+    friend class CallbackUnaryHandler<RequestType, ResponseType>;
+
+    ServerCallbackUnaryImpl(
+        ::grpc_impl::experimental::CallbackServerContext* ctx,
+        ::grpc::internal::Call* call,
+        ::grpc::experimental::MessageHolder<RequestType, ResponseType>*
+            allocator_state,
+        std::function<void()> call_requester)
+        : ctx_(ctx),
+          call_(*call),
+          allocator_state_(allocator_state),
+          call_requester_(std::move(call_requester)) {
+      ctx_->set_message_allocator_state(allocator_state);
+    }
+
+    /// SetupReactor binds the reactor (which also releases any queued
+    /// operations), maybe calls OnCancel if possible/needed, and maybe marks
+    /// the completion of the RPC. This should be the last component of the
+    /// handler.
+    void SetupReactor(experimental::ServerUnaryReactor* reactor) {
+      reactor_.store(reactor, std::memory_order_relaxed);
+      this->BindReactor(reactor);
+      this->MaybeCallOnCancel(reactor);
+      this->MaybeDone();
+    }
+
+    const RequestType* request() { return allocator_state_->request(); }
+    ResponseType* response() { return allocator_state_->response(); }
+
+    void MaybeDone() override {
+      if (GPR_UNLIKELY(this->Unref() == 1)) {
+        reactor_.load(std::memory_order_relaxed)->OnDone();
+        grpc_call* call = call_.call();
+        auto call_requester = std::move(call_requester_);
+        allocator_state_->Release();
+        this->~ServerCallbackUnaryImpl();  // explicitly call destructor
+        ::grpc::g_core_codegen_interface->grpc_call_unref(call);
+        call_requester();
+      }
+    }
+
+    ServerReactor* reactor() override {
+      return reactor_.load(std::memory_order_relaxed);
+    }
+
+    ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
+        meta_ops_;
+    ::grpc::internal::CallbackWithSuccessTag meta_tag_;
+    ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+                                ::grpc::internal::CallOpSendMessage,
+                                ::grpc::internal::CallOpServerSendStatus>
+        finish_ops_;
+    ::grpc::internal::CallbackWithSuccessTag finish_tag_;
+
+    ::grpc_impl::experimental::CallbackServerContext* const ctx_;
+    ::grpc::internal::Call call_;
+    ::grpc::experimental::MessageHolder<RequestType, ResponseType>* const
+        allocator_state_;
+    std::function<void()> call_requester_;
+    // reactor_ can always be loaded/stored with relaxed memory ordering because
+    // its value is only set once, independently of other data in the object,
+    // and the loads that use it will always actually come provably later even
+    // though they are from different threads since they are triggered by
+    // actions initiated only by the setting up of the reactor_ variable. In
+    // a sense, it's a delayed "const": it gets its value from the SetupReactor
+    // method (not the constructor, so it's not a true const), but it doesn't
+    // change after that and it only gets used by actions caused, directly or
+    // indirectly, by that setup. This comment also applies to the reactor_
+    // variables of the other streaming objects in this file.
+    std::atomic<experimental::ServerUnaryReactor*> reactor_;
+    // callbacks_outstanding_ follows a refcount pattern
+    std::atomic<intptr_t> callbacks_outstanding_{
+        3};  // reserve for start, Finish, and CompletionOp
+  };
+};
+
+template <class RequestType, class ResponseType>
+class CallbackClientStreamingHandler : public ::grpc::internal::MethodHandler {
+ public:
+  explicit CallbackClientStreamingHandler(
+      std::function<experimental::ServerReadReactor<RequestType>*(
+          ::grpc_impl::experimental::CallbackServerContext*, ResponseType*)>
+          get_reactor)
+      : get_reactor_(std::move(get_reactor)) {}
+  void RunHandler(const HandlerParameter& param) final {
+    // Arena allocate a reader structure (that includes response)
+    ::grpc::g_core_codegen_interface->grpc_call_ref(param.call->call());
+
+    auto* reader = new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
+        param.call->call(), sizeof(ServerCallbackReaderImpl)))
+        ServerCallbackReaderImpl(
+            static_cast<::grpc_impl::experimental::CallbackServerContext*>(
+                param.server_context),
+            param.call, std::move(param.call_requester));
+    param.server_context->BeginCompletionOp(
+        param.call, [reader](bool) { reader->MaybeDone(); }, reader);
+
+    experimental::ServerReadReactor<RequestType>* reactor = nullptr;
+    if (param.status.ok()) {
+      reactor = ::grpc::internal::CatchingReactorGetter<
+          experimental::ServerReadReactor<RequestType>>(
+          get_reactor_,
+          static_cast<::grpc_impl::experimental::CallbackServerContext*>(
+              param.server_context),
+          reader->response());
+    }
+
+    if (reactor == nullptr) {
+      // if deserialization or reactor creator failed, we need to fail the call
+      reactor = new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
+          param.call->call(), sizeof(UnimplementedReadReactor<RequestType>)))
+          UnimplementedReadReactor<RequestType>(
+              ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""));
+    }
+
+    reader->SetupReactor(reactor);
+  }
+
+ private:
+  std::function<experimental::ServerReadReactor<RequestType>*(
+      ::grpc_impl::experimental::CallbackServerContext*, ResponseType*)>
+      get_reactor_;
+
+  class ServerCallbackReaderImpl
+      : public experimental::ServerCallbackReader<RequestType> {
+   public:
+    void Finish(::grpc::Status s) override {
+      finish_tag_.Set(call_.call(), [this](bool) { MaybeDone(); }, &finish_ops_,
+                      false);
+      if (!ctx_->sent_initial_metadata_) {
+        finish_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
+                                        ctx_->initial_metadata_flags());
+        if (ctx_->compression_level_set()) {
+          finish_ops_.set_compression_level(ctx_->compression_level());
+        }
+        ctx_->sent_initial_metadata_ = true;
+      }
+      // The response is dropped if the status is not OK.
+      if (s.ok()) {
+        finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_,
+                                     finish_ops_.SendMessagePtr(&resp_));
+      } else {
+        finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, s);
+      }
+      finish_ops_.set_core_cq_tag(&finish_tag_);
+      call_.PerformOps(&finish_ops_);
+    }
+
+    void SendInitialMetadata() override {
+      GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
+      this->Ref();
+      meta_tag_.Set(call_.call(),
+                    [this](bool ok) {
+                      reactor_.load(std::memory_order_relaxed)
+                          ->OnSendInitialMetadataDone(ok);
+                      MaybeDone();
+                    },
+                    &meta_ops_, false);
+      meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
+                                    ctx_->initial_metadata_flags());
+      if (ctx_->compression_level_set()) {
+        meta_ops_.set_compression_level(ctx_->compression_level());
+      }
+      ctx_->sent_initial_metadata_ = true;
+      meta_ops_.set_core_cq_tag(&meta_tag_);
+      call_.PerformOps(&meta_ops_);
+    }
+
+    void Read(RequestType* req) override {
+      this->Ref();
+      read_ops_.RecvMessage(req);
+      call_.PerformOps(&read_ops_);
+    }
+
+   private:
+    friend class CallbackClientStreamingHandler<RequestType, ResponseType>;
+
+    ServerCallbackReaderImpl(
+        ::grpc_impl::experimental::CallbackServerContext* ctx,
+        ::grpc::internal::Call* call, std::function<void()> call_requester)
+        : ctx_(ctx), call_(*call), call_requester_(std::move(call_requester)) {}
+
+    void SetupReactor(experimental::ServerReadReactor<RequestType>* reactor) {
+      reactor_.store(reactor, std::memory_order_relaxed);
+      read_tag_.Set(call_.call(),
+                    [this](bool ok) {
+                      reactor_.load(std::memory_order_relaxed)->OnReadDone(ok);
+                      MaybeDone();
+                    },
+                    &read_ops_, false);
+      read_ops_.set_core_cq_tag(&read_tag_);
+      this->BindReactor(reactor);
+      this->MaybeCallOnCancel(reactor);
+      this->MaybeDone();
+    }
+
+    ~ServerCallbackReaderImpl() {}
+
+    ResponseType* response() { return &resp_; }
+
+    void MaybeDone() override {
+      if (GPR_UNLIKELY(this->Unref() == 1)) {
+        reactor_.load(std::memory_order_relaxed)->OnDone();
+        grpc_call* call = call_.call();
+        auto call_requester = std::move(call_requester_);
+        this->~ServerCallbackReaderImpl();  // explicitly call destructor
+        ::grpc::g_core_codegen_interface->grpc_call_unref(call);
+        call_requester();
+      }
+    }
+
+    ServerReactor* reactor() override {
+      return reactor_.load(std::memory_order_relaxed);
+    }
+
+    ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
+        meta_ops_;
+    ::grpc::internal::CallbackWithSuccessTag meta_tag_;
+    ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+                                ::grpc::internal::CallOpSendMessage,
+                                ::grpc::internal::CallOpServerSendStatus>
+        finish_ops_;
+    ::grpc::internal::CallbackWithSuccessTag finish_tag_;
+    ::grpc::internal::CallOpSet<
+        ::grpc::internal::CallOpRecvMessage<RequestType>>
+        read_ops_;
+    ::grpc::internal::CallbackWithSuccessTag read_tag_;
+
+    ::grpc_impl::experimental::CallbackServerContext* const ctx_;
+    ::grpc::internal::Call call_;
+    ResponseType resp_;
+    std::function<void()> call_requester_;
+    // The memory ordering of reactor_ follows ServerCallbackUnaryImpl.
+    std::atomic<experimental::ServerReadReactor<RequestType>*> reactor_;
+    // callbacks_outstanding_ follows a refcount pattern
+    std::atomic<intptr_t> callbacks_outstanding_{
+        3};  // reserve for start, Finish, and CompletionOp
+  };
+};
+
+template <class RequestType, class ResponseType>
+class CallbackServerStreamingHandler : public ::grpc::internal::MethodHandler {
+ public:
+  explicit CallbackServerStreamingHandler(
+      std::function<experimental::ServerWriteReactor<ResponseType>*(
+          ::grpc_impl::experimental::CallbackServerContext*,
+          const RequestType*)>
+          get_reactor)
+      : get_reactor_(std::move(get_reactor)) {}
+  void RunHandler(const HandlerParameter& param) final {
+    // Arena allocate a writer structure
+    ::grpc::g_core_codegen_interface->grpc_call_ref(param.call->call());
+
+    auto* writer = new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
+        param.call->call(), sizeof(ServerCallbackWriterImpl)))
+        ServerCallbackWriterImpl(
+            static_cast<::grpc_impl::experimental::CallbackServerContext*>(
+                param.server_context),
+            param.call, static_cast<RequestType*>(param.request),
+            std::move(param.call_requester));
+    param.server_context->BeginCompletionOp(
+        param.call, [writer](bool) { writer->MaybeDone(); }, writer);
+
+    experimental::ServerWriteReactor<ResponseType>* reactor = nullptr;
+    if (param.status.ok()) {
+      reactor = ::grpc::internal::CatchingReactorGetter<
+          experimental::ServerWriteReactor<ResponseType>>(
+          get_reactor_,
+          static_cast<::grpc_impl::experimental::CallbackServerContext*>(
+              param.server_context),
+          writer->request());
+    }
+    if (reactor == nullptr) {
+      // if deserialization or reactor creator failed, we need to fail the call
+      reactor = new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
+          param.call->call(), sizeof(UnimplementedWriteReactor<ResponseType>)))
+          UnimplementedWriteReactor<ResponseType>(
+              ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""));
+    }
+
+    writer->SetupReactor(reactor);
+  }
+
+  void* Deserialize(grpc_call* call, grpc_byte_buffer* req,
+                    ::grpc::Status* status, void** /*handler_data*/) final {
+    ::grpc::ByteBuffer buf;
+    buf.set_buffer(req);
+    auto* request =
+        new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
+            call, sizeof(RequestType))) RequestType();
+    *status =
+        ::grpc::SerializationTraits<RequestType>::Deserialize(&buf, request);
+    buf.Release();
+    if (status->ok()) {
+      return request;
+    }
+    request->~RequestType();
+    return nullptr;
+  }
+
+ private:
+  std::function<experimental::ServerWriteReactor<ResponseType>*(
+      ::grpc_impl::experimental::CallbackServerContext*, const RequestType*)>
+      get_reactor_;
+
+  class ServerCallbackWriterImpl
+      : public experimental::ServerCallbackWriter<ResponseType> {
+   public:
+    void Finish(::grpc::Status s) override {
+      finish_tag_.Set(call_.call(), [this](bool) { MaybeDone(); }, &finish_ops_,
+                      false);
+      finish_ops_.set_core_cq_tag(&finish_tag_);
+
+      if (!ctx_->sent_initial_metadata_) {
+        finish_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
+                                        ctx_->initial_metadata_flags());
+        if (ctx_->compression_level_set()) {
+          finish_ops_.set_compression_level(ctx_->compression_level());
+        }
+        ctx_->sent_initial_metadata_ = true;
+      }
+      finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, s);
+      call_.PerformOps(&finish_ops_);
+    }
+
+    void SendInitialMetadata() override {
+      GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
+      this->Ref();
+      meta_tag_.Set(call_.call(),
+                    [this](bool ok) {
+                      reactor_.load(std::memory_order_relaxed)
+                          ->OnSendInitialMetadataDone(ok);
+                      MaybeDone();
+                    },
+                    &meta_ops_, false);
+      meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
+                                    ctx_->initial_metadata_flags());
+      if (ctx_->compression_level_set()) {
+        meta_ops_.set_compression_level(ctx_->compression_level());
+      }
+      ctx_->sent_initial_metadata_ = true;
+      meta_ops_.set_core_cq_tag(&meta_tag_);
+      call_.PerformOps(&meta_ops_);
+    }
+
+    void Write(const ResponseType* resp,
+               ::grpc::WriteOptions options) override {
+      this->Ref();
+      if (options.is_last_message()) {
+        options.set_buffer_hint();
+      }
+      if (!ctx_->sent_initial_metadata_) {
+        write_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
+                                       ctx_->initial_metadata_flags());
+        if (ctx_->compression_level_set()) {
+          write_ops_.set_compression_level(ctx_->compression_level());
+        }
+        ctx_->sent_initial_metadata_ = true;
+      }
+      // TODO(vjpai): don't assert
+      GPR_CODEGEN_ASSERT(write_ops_.SendMessagePtr(resp, options).ok());
+      call_.PerformOps(&write_ops_);
+    }
+
+    void WriteAndFinish(const ResponseType* resp, ::grpc::WriteOptions options,
+                        ::grpc::Status s) override {
+      // This combines the write into the finish callback
+      // Don't send any message if the status is bad
+      if (s.ok()) {
+        // TODO(vjpai): don't assert
+        GPR_CODEGEN_ASSERT(finish_ops_.SendMessagePtr(resp, options).ok());
+      }
+      Finish(std::move(s));
+    }
+
+   private:
+    friend class CallbackServerStreamingHandler<RequestType, ResponseType>;
+
+    ServerCallbackWriterImpl(
+        ::grpc_impl::experimental::CallbackServerContext* ctx,
+        ::grpc::internal::Call* call, const RequestType* req,
+        std::function<void()> call_requester)
+        : ctx_(ctx),
+          call_(*call),
+          req_(req),
+          call_requester_(std::move(call_requester)) {}
+
+    void SetupReactor(experimental::ServerWriteReactor<ResponseType>* reactor) {
+      reactor_.store(reactor, std::memory_order_relaxed);
+      write_tag_.Set(
+          call_.call(),
+          [this](bool ok) {
+            reactor_.load(std::memory_order_relaxed)->OnWriteDone(ok);
+            MaybeDone();
+          },
+          &write_ops_, false);
+      write_ops_.set_core_cq_tag(&write_tag_);
+      this->BindReactor(reactor);
+      this->MaybeCallOnCancel(reactor);
+      this->MaybeDone();
+    }
+    ~ServerCallbackWriterImpl() { req_->~RequestType(); }
+
+    const RequestType* request() { return req_; }
+
+    void MaybeDone() override {
+      if (GPR_UNLIKELY(this->Unref() == 1)) {
+        reactor_.load(std::memory_order_relaxed)->OnDone();
+        grpc_call* call = call_.call();
+        auto call_requester = std::move(call_requester_);
+        this->~ServerCallbackWriterImpl();  // explicitly call destructor
+        ::grpc::g_core_codegen_interface->grpc_call_unref(call);
+        call_requester();
+      }
+    }
+
+    ServerReactor* reactor() override {
+      return reactor_.load(std::memory_order_relaxed);
+    }
+
+    ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
+        meta_ops_;
+    ::grpc::internal::CallbackWithSuccessTag meta_tag_;
+    ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+                                ::grpc::internal::CallOpSendMessage,
+                                ::grpc::internal::CallOpServerSendStatus>
+        finish_ops_;
+    ::grpc::internal::CallbackWithSuccessTag finish_tag_;
+    ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+                                ::grpc::internal::CallOpSendMessage>
+        write_ops_;
+    ::grpc::internal::CallbackWithSuccessTag write_tag_;
+
+    ::grpc_impl::experimental::CallbackServerContext* const ctx_;
+    ::grpc::internal::Call call_;
+    const RequestType* req_;
+    std::function<void()> call_requester_;
+    // The memory ordering of reactor_ follows ServerCallbackUnaryImpl.
+    std::atomic<experimental::ServerWriteReactor<ResponseType>*> reactor_;
+    // callbacks_outstanding_ follows a refcount pattern
+    std::atomic<intptr_t> callbacks_outstanding_{
+        3};  // reserve for start, Finish, and CompletionOp
+  };
+};
+
+template <class RequestType, class ResponseType>
+class CallbackBidiHandler : public ::grpc::internal::MethodHandler {
+ public:
+  explicit CallbackBidiHandler(
+      std::function<experimental::ServerBidiReactor<RequestType, ResponseType>*(
+          ::grpc_impl::experimental::CallbackServerContext*)>
+          get_reactor)
+      : get_reactor_(std::move(get_reactor)) {}
+  void RunHandler(const HandlerParameter& param) final {
+    ::grpc::g_core_codegen_interface->grpc_call_ref(param.call->call());
+
+    auto* stream = new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
+        param.call->call(), sizeof(ServerCallbackReaderWriterImpl)))
+        ServerCallbackReaderWriterImpl(
+            static_cast<::grpc_impl::experimental::CallbackServerContext*>(
+                param.server_context),
+            param.call, std::move(param.call_requester));
+    param.server_context->BeginCompletionOp(
+        param.call, [stream](bool) { stream->MaybeDone(); }, stream);
+
+    experimental::ServerBidiReactor<RequestType, ResponseType>* reactor =
+        nullptr;
+    if (param.status.ok()) {
+      reactor = ::grpc::internal::CatchingReactorGetter<
+          experimental::ServerBidiReactor<RequestType, ResponseType>>(
+          get_reactor_,
+          static_cast<::grpc_impl::experimental::CallbackServerContext*>(
+              param.server_context));
+    }
+
+    if (reactor == nullptr) {
+      // if deserialization or reactor creator failed, we need to fail the call
+      reactor = new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
+          param.call->call(),
+          sizeof(UnimplementedBidiReactor<RequestType, ResponseType>)))
+          UnimplementedBidiReactor<RequestType, ResponseType>(
+              ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""));
+    }
+
+    stream->SetupReactor(reactor);
+  }
+
+ private:
+  std::function<experimental::ServerBidiReactor<RequestType, ResponseType>*(
+      ::grpc_impl::experimental::CallbackServerContext*)>
+      get_reactor_;
+
+  class ServerCallbackReaderWriterImpl
+      : public experimental::ServerCallbackReaderWriter<RequestType,
+                                                        ResponseType> {
+   public:
+    void Finish(::grpc::Status s) override {
+      finish_tag_.Set(call_.call(), [this](bool) { MaybeDone(); }, &finish_ops_,
+                      false);
+      finish_ops_.set_core_cq_tag(&finish_tag_);
+
+      if (!ctx_->sent_initial_metadata_) {
+        finish_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
+                                        ctx_->initial_metadata_flags());
+        if (ctx_->compression_level_set()) {
+          finish_ops_.set_compression_level(ctx_->compression_level());
+        }
+        ctx_->sent_initial_metadata_ = true;
+      }
+      finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, s);
+      call_.PerformOps(&finish_ops_);
+    }
+
+    void SendInitialMetadata() override {
+      GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
+      this->Ref();
+      meta_tag_.Set(call_.call(),
+                    [this](bool ok) {
+                      reactor_.load(std::memory_order_relaxed)
+                          ->OnSendInitialMetadataDone(ok);
+                      MaybeDone();
+                    },
+                    &meta_ops_, false);
+      meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
+                                    ctx_->initial_metadata_flags());
+      if (ctx_->compression_level_set()) {
+        meta_ops_.set_compression_level(ctx_->compression_level());
+      }
+      ctx_->sent_initial_metadata_ = true;
+      meta_ops_.set_core_cq_tag(&meta_tag_);
+      call_.PerformOps(&meta_ops_);
+    }
+
+    void Write(const ResponseType* resp,
+               ::grpc::WriteOptions options) override {
+      this->Ref();
+      if (options.is_last_message()) {
+        options.set_buffer_hint();
+      }
+      if (!ctx_->sent_initial_metadata_) {
+        write_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
+                                       ctx_->initial_metadata_flags());
+        if (ctx_->compression_level_set()) {
+          write_ops_.set_compression_level(ctx_->compression_level());
+        }
+        ctx_->sent_initial_metadata_ = true;
+      }
+      // TODO(vjpai): don't assert
+      GPR_CODEGEN_ASSERT(write_ops_.SendMessagePtr(resp, options).ok());
+      call_.PerformOps(&write_ops_);
+    }
+
+    void WriteAndFinish(const ResponseType* resp, ::grpc::WriteOptions options,
+                        ::grpc::Status s) override {
+      // Don't send any message if the status is bad
+      if (s.ok()) {
+        // TODO(vjpai): don't assert
+        GPR_CODEGEN_ASSERT(finish_ops_.SendMessagePtr(resp, options).ok());
+      }
+      Finish(std::move(s));
+    }
+
+    void Read(RequestType* req) override {
+      this->Ref();
+      read_ops_.RecvMessage(req);
+      call_.PerformOps(&read_ops_);
+    }
+
+   private:
+    friend class CallbackBidiHandler<RequestType, ResponseType>;
+
+    ServerCallbackReaderWriterImpl(
+        ::grpc_impl::experimental::CallbackServerContext* ctx,
+        ::grpc::internal::Call* call, std::function<void()> call_requester)
+        : ctx_(ctx), call_(*call), call_requester_(std::move(call_requester)) {}
+
+    void SetupReactor(
+        experimental::ServerBidiReactor<RequestType, ResponseType>* reactor) {
+      reactor_.store(reactor, std::memory_order_relaxed);
+      write_tag_.Set(
+          call_.call(),
+          [this](bool ok) {
+            reactor_.load(std::memory_order_relaxed)->OnWriteDone(ok);
+            MaybeDone();
+          },
+          &write_ops_, false);
+      write_ops_.set_core_cq_tag(&write_tag_);
+      read_tag_.Set(call_.call(),
+                    [this](bool ok) {
+                      reactor_.load(std::memory_order_relaxed)->OnReadDone(ok);
+                      MaybeDone();
+                    },
+                    &read_ops_, false);
+      read_ops_.set_core_cq_tag(&read_tag_);
+      this->BindReactor(reactor);
+      this->MaybeCallOnCancel(reactor);
+      this->MaybeDone();
+    }
+
+    void MaybeDone() override {
+      if (GPR_UNLIKELY(this->Unref() == 1)) {
+        reactor_.load(std::memory_order_relaxed)->OnDone();
+        grpc_call* call = call_.call();
+        auto call_requester = std::move(call_requester_);
+        this->~ServerCallbackReaderWriterImpl();  // explicitly call destructor
+        ::grpc::g_core_codegen_interface->grpc_call_unref(call);
+        call_requester();
+      }
+    }
+
+    ServerReactor* reactor() override {
+      return reactor_.load(std::memory_order_relaxed);
+    }
+
+    ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata>
+        meta_ops_;
+    ::grpc::internal::CallbackWithSuccessTag meta_tag_;
+    ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+                                ::grpc::internal::CallOpSendMessage,
+                                ::grpc::internal::CallOpServerSendStatus>
+        finish_ops_;
+    ::grpc::internal::CallbackWithSuccessTag finish_tag_;
+    ::grpc::internal::CallOpSet<::grpc::internal::CallOpSendInitialMetadata,
+                                ::grpc::internal::CallOpSendMessage>
+        write_ops_;
+    ::grpc::internal::CallbackWithSuccessTag write_tag_;
+    ::grpc::internal::CallOpSet<
+        ::grpc::internal::CallOpRecvMessage<RequestType>>
+        read_ops_;
+    ::grpc::internal::CallbackWithSuccessTag read_tag_;
+
+    ::grpc_impl::experimental::CallbackServerContext* const ctx_;
+    ::grpc::internal::Call call_;
+    std::function<void()> call_requester_;
+    // The memory ordering of reactor_ follows ServerCallbackUnaryImpl.
+    std::atomic<experimental::ServerBidiReactor<RequestType, ResponseType>*>
+        reactor_;
+    // callbacks_outstanding_ follows a refcount pattern
+    std::atomic<intptr_t> callbacks_outstanding_{
+        3};  // reserve for start, Finish, and CompletionOp
+  };
+};
+
+}  // namespace internal
+}  // namespace grpc_impl
+
+#endif  // GRPCPP_IMPL_CODEGEN_SERVER_CALLBACK_HANDLERS_H
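
Each of the ServerCallback*Impl classes above keeps a callbacks_outstanding_ count that starts at the number of reserved completions, is bumped for every additional operation, and triggers OnDone plus arena cleanup in whichever callback releases the last reference. A self-contained sketch of that refcount pattern, with hypothetical names and none of the gRPC plumbing:

#include <atomic>
#include <cassert>

// Hypothetical model of the callbacks_outstanding_ pattern: the count starts
// at the number of reserved completions, every extra operation adds a
// reference, and exactly one release observes the count dropping to zero.
class CompletionTracker {
 public:
  explicit CompletionTracker(int reserved) : outstanding_(reserved) {}

  void Ref() { outstanding_.fetch_add(1, std::memory_order_relaxed); }

  // Returns true for exactly one caller: the one that released the last
  // outstanding callback (fetch_sub returns the previous value).
  bool Unref() {
    return outstanding_.fetch_sub(1, std::memory_order_acq_rel) == 1;
  }

 private:
  std::atomic<int> outstanding_;
};

int main() {
  CompletionTracker tracker(3);  // e.g. start, Finish, and the completion op
  tracker.Ref();                 // one additional in-flight operation
  bool done = false;
  for (int i = 0; i < 4; ++i) {
    if (tracker.Unref()) done = true;  // only the 4th release reports done
  }
  assert(done);
  return 0;
}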

+ 425 - 860
include/grpcpp/impl/codegen/server_callback_impl.h

@@ -28,8 +28,6 @@
 #include <grpcpp/impl/codegen/config.h>
 #include <grpcpp/impl/codegen/core_codegen_interface.h>
 #include <grpcpp/impl/codegen/message_allocator.h>
-#include <grpcpp/impl/codegen/server_context_impl.h>
-#include <grpcpp/impl/codegen/server_interface.h>
 #include <grpcpp/impl/codegen/status.h>
 
 namespace grpc_impl {
@@ -39,6 +37,8 @@ namespace internal {
 
 // Forward declarations
 template <class Request, class Response>
+class CallbackUnaryHandler;
+template <class Request, class Response>
 class CallbackClientStreamingHandler;
 template <class Request, class Response>
 class CallbackServerStreamingHandler;
@@ -51,29 +51,69 @@ class ServerReactor {
   virtual void OnDone() = 0;
   virtual void OnCancel() = 0;
 
+  // The following is not API. It is for internal use only and specifies whether
+  // all reactions of this Reactor can be run without an extra executor
+  // scheduling. This should only be used for internally-defined reactors with
+  // trivial reactions.
+  virtual bool InternalInlineable() { return false; }
+
  private:
-  friend class ::grpc_impl::ServerContext;
+  template <class Request, class Response>
+  friend class CallbackUnaryHandler;
   template <class Request, class Response>
   friend class CallbackClientStreamingHandler;
   template <class Request, class Response>
   friend class CallbackServerStreamingHandler;
   template <class Request, class Response>
   friend class CallbackBidiHandler;
+};
 
-  // The ServerReactor is responsible for tracking when it is safe to call
-  // OnCancel. This function should not be called until after OnStarted is done
-  // and the RPC has completed with a cancellation. This is tracked by counting
-  // how many of these conditions have been met and calling OnCancel when none
-  // remain unmet.
+/// The base class of ServerCallbackUnary etc.
+class ServerCallbackCall {
+ public:
+  virtual ~ServerCallbackCall() {}
+
+  // This object is responsible for tracking when it is safe to call
+  // OnCancel. OnCancel should not be called until after the method handler
+  // is done and the RPC has completed with a cancellation. This is tracked
+  // by counting how many of these conditions have been met and calling
+  // OnCancel when none remain unmet.
 
-  void MaybeCallOnCancel() {
+  // Fast version called with known reactor passed in, used from derived
+  // classes, typically in non-cancel case
+  void MaybeCallOnCancel(ServerReactor* reactor) {
     if (GPR_UNLIKELY(on_cancel_conditions_remaining_.fetch_sub(
                          1, std::memory_order_acq_rel) == 1)) {
-      OnCancel();
+      CallOnCancel(reactor);
     }
   }
 
-  std::atomic<intptr_t> on_cancel_conditions_remaining_{2};
+  // Slower version called from an object that doesn't know the reactor a
+  // priori (such as the ServerContext CompletionOp, which is formed before
+  // the reactor). This is used only in cancel cases, so it's ok to be slower
+  // and invoke a virtual function.
+  void MaybeCallOnCancel() { MaybeCallOnCancel(reactor()); }
+
+ protected:
+  /// Increases the reference count
+  void Ref() { callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed); }
+
+  /// Decreases the reference count and returns the previous value
+  int Unref() {
+    return callbacks_outstanding_.fetch_sub(1, std::memory_order_acq_rel);
+  }
+
+ private:
+  virtual ServerReactor* reactor() = 0;
+  virtual void MaybeDone() = 0;
+
+  // If the OnCancel reaction is inlineable, execute it inline. Otherwise send
+  // it to an executor.
+  void CallOnCancel(ServerReactor* reactor);
+
+  std::atomic_int on_cancel_conditions_remaining_{2};
+  std::atomic_int callbacks_outstanding_{
+      3};  // reserve for start, Finish, and CompletionOp
 };
 
 template <class Request, class Response>
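
MaybeCallOnCancel above gates the cancel reaction on two independent conditions via on_cancel_conditions_remaining_: only the decrement that observes the counter at 1 actually invokes CallOnCancel. A minimal, self-contained model of that countdown (hypothetical names, single-threaded for clarity):

#include <atomic>
#include <cassert>

// Hypothetical model of on_cancel_conditions_remaining_: the cancel reaction
// must wait for two independent events, so each event decrements the counter
// and only the second one actually runs the reaction.
struct CancelGate {
  std::atomic<int> conditions_remaining{2};
  bool cancel_called = false;

  void ConditionMet() {
    if (conditions_remaining.fetch_sub(1, std::memory_order_acq_rel) == 1) {
      cancel_called = true;  // stand-in for CallOnCancel(reactor)
    }
  }
};

int main() {
  CancelGate gate;
  gate.ConditionMet();  // first condition met: no cancel reaction yet
  assert(!gate.cancel_called);
  gate.ConditionMet();  // second condition met: cancel reaction runs
  assert(gate.cancel_called);
  return 0;
}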
@@ -99,71 +139,34 @@ class DefaultMessageHolder
 namespace experimental {
 
 // Forward declarations
-template <class Request, class Response>
+class ServerUnaryReactor;
+template <class Request>
 class ServerReadReactor;
-template <class Request, class Response>
+template <class Response>
 class ServerWriteReactor;
 template <class Request, class Response>
 class ServerBidiReactor;
 
-// For unary RPCs, the exposed controller class is only an interface
-// and the actual implementation is an internal class.
-class ServerCallbackRpcController {
+// NOTE: The actual call/stream object classes are provided as API only to
+// support mocking. There are no implementations of these class interfaces in
+// the API.
+class ServerCallbackUnary : public internal::ServerCallbackCall {
  public:
-  virtual ~ServerCallbackRpcController() = default;
-
-  // The method handler must call this function when it is done so that
-  // the library knows to free its resources
+  virtual ~ServerCallbackUnary() {}
   virtual void Finish(::grpc::Status s) = 0;
+  virtual void SendInitialMetadata() = 0;
 
-  // Allow the method handler to push out the initial metadata before
-  // the response and status are ready
-  virtual void SendInitialMetadata(std::function<void(bool)>) = 0;
-
-  /// SetCancelCallback passes in a callback to be called when the RPC is
-  /// canceled for whatever reason (streaming calls have OnCancel instead). This
-  /// is an advanced and uncommon use with several important restrictions. This
-  /// function may not be called more than once on the same RPC.
-  ///
-  /// If code calls SetCancelCallback on an RPC, it must also call
-  /// ClearCancelCallback before calling Finish on the RPC controller. That
-  /// method makes sure that no cancellation callback is executed for this RPC
-  /// beyond the point of its return. ClearCancelCallback may be called even if
-  /// SetCancelCallback was not called for this RPC, and it may be called
-  /// multiple times. It _must_ be called if SetCancelCallback was called for
-  /// this RPC.
-  ///
-  /// The callback should generally be lightweight and nonblocking and primarily
-  /// concerned with clearing application state related to the RPC or causing
-  /// operations (such as cancellations) to happen on dependent RPCs.
-  ///
-  /// If the RPC is already canceled at the time that SetCancelCallback is
-  /// called, the callback is invoked immediately.
-  ///
-  /// The cancellation callback may be executed concurrently with the method
-  /// handler that invokes it but will certainly not issue or execute after the
-  /// return of ClearCancelCallback. If ClearCancelCallback is invoked while the
-  /// callback is already executing, the callback will complete its execution
-  /// before ClearCancelCallback takes effect.
-  ///
-  /// To preserve the orderings described above, the callback may be called
-  /// under a lock that is also used for ClearCancelCallback and
-  /// ServerContext::IsCancelled, so the callback CANNOT call either of those
-  /// operations on this RPC or any other function that causes those operations
-  /// to be called before the callback completes.
-  virtual void SetCancelCallback(std::function<void()> callback) = 0;
-  virtual void ClearCancelCallback() = 0;
-
-  // NOTE: This is an API for advanced users who need custom allocators.
-  // Get and maybe mutate the allocator state associated with the current RPC.
-  virtual grpc::experimental::RpcAllocatorState* GetRpcAllocatorState() = 0;
+ protected:
+  // Use a template rather than explicitly specifying ServerUnaryReactor to
+  // delay binding and avoid a circular forward declaration issue
+  template <class Reactor>
+  void BindReactor(Reactor* reactor) {
+    reactor->InternalBindCall(this);
+  }
 };
 
-// NOTE: The actual streaming object classes are provided
-// as API only to support mocking. There are no implementations of
-// these class interfaces in the API.
 template <class Request>
-class ServerCallbackReader {
+class ServerCallbackReader : public internal::ServerCallbackCall {
  public:
   virtual ~ServerCallbackReader() {}
   virtual void Finish(::grpc::Status s) = 0;
@@ -171,14 +174,13 @@ class ServerCallbackReader {
   virtual void Read(Request* msg) = 0;
 
  protected:
-  template <class Response>
-  void BindReactor(ServerReadReactor<Request, Response>* reactor) {
+  void BindReactor(ServerReadReactor<Request>* reactor) {
     reactor->InternalBindReader(this);
   }
 };
 
 template <class Response>
-class ServerCallbackWriter {
+class ServerCallbackWriter : public internal::ServerCallbackCall {
  public:
   virtual ~ServerCallbackWriter() {}
 
@@ -186,21 +188,16 @@ class ServerCallbackWriter {
   virtual void SendInitialMetadata() = 0;
   virtual void Write(const Response* msg, ::grpc::WriteOptions options) = 0;
   virtual void WriteAndFinish(const Response* msg, ::grpc::WriteOptions options,
-                              ::grpc::Status s) {
-    // Default implementation that can/should be overridden
-    Write(msg, std::move(options));
-    Finish(std::move(s));
-  }
+                              ::grpc::Status s) = 0;
 
  protected:
-  template <class Request>
-  void BindReactor(ServerWriteReactor<Request, Response>* reactor) {
+  void BindReactor(ServerWriteReactor<Response>* reactor) {
     reactor->InternalBindWriter(this);
   }
 };
 
 template <class Request, class Response>
-class ServerCallbackReaderWriter {
+class ServerCallbackReaderWriter : public internal::ServerCallbackCall {
  public:
   virtual ~ServerCallbackReaderWriter() {}
 
@@ -209,11 +206,7 @@ class ServerCallbackReaderWriter {
   virtual void Read(Request* msg) = 0;
   virtual void Write(const Response* msg, ::grpc::WriteOptions options) = 0;
   virtual void WriteAndFinish(const Response* msg, ::grpc::WriteOptions options,
-                              ::grpc::Status s) {
-    // Default implementation that can/should be overridden
-    Write(msg, std::move(options));
-    Finish(std::move(s));
-  }
+                              ::grpc::Status s) = 0;
 
  protected:
   void BindReactor(ServerBidiReactor<Request, Response>* reactor) {
@@ -222,34 +215,57 @@ class ServerCallbackReaderWriter {
 };
 
 // The following classes are the reactor interfaces that are to be implemented
-// by the user, returned as the result of the method handler for a callback
-// method, and activated by the call to OnStarted. The library guarantees that
-// OnStarted will be called for any reactor that has been created using a
-// method handler registered on a service. No operation initiation method may be
-// called until after the call to OnStarted.
-// Note that none of the classes are pure; all reactions have a default empty
-// reaction so that the user class only needs to override those classes that it
-// cares about.
+// by the user, returned as the output parameter of the method handler for a
+// callback method. Apart from OnDone, which derived classes must override,
+// all reactions have a default empty implementation so that the user class
+// only needs to override the reactions it cares about.
 
 /// \a ServerBidiReactor is the interface for a bidirectional streaming RPC.
 template <class Request, class Response>
 class ServerBidiReactor : public internal::ServerReactor {
  public:
+  // NOTE: Initializing stream_ as a constructor initializer rather than a
+  //       default initializer because gcc-4.x requires a copy constructor for
+  //       default initializing a templated member, which isn't ok for atomic.
+  // TODO(vjpai): Switch to default constructor and default initializer when
+  //              gcc-4.x is no longer supported
+  ServerBidiReactor() : stream_(nullptr) {}
   ~ServerBidiReactor() = default;
 
-  /// Do NOT call any operation initiation method (names that start with Start)
-  /// until after the library has called OnStarted on this object.
-
   /// Send any initial metadata stored in the RPC context. If not invoked,
   /// any initial metadata will be passed along with the first Write or the
   /// Finish (if there are no writes).
-  void StartSendInitialMetadata() { stream_->SendInitialMetadata(); }
+  void StartSendInitialMetadata() {
+    ServerCallbackReaderWriter<Request, Response>* stream =
+        stream_.load(std::memory_order_acquire);
+    if (stream == nullptr) {
+      grpc::internal::MutexLock l(&stream_mu_);
+      stream = stream_.load(std::memory_order_relaxed);
+      if (stream == nullptr) {
+        send_initial_metadata_wanted_ = true;
+        return;
+      }
+    }
+    stream->SendInitialMetadata();
+  }
 
   /// Initiate a read operation.
   ///
   /// \param[out] req Where to eventually store the read message. Valid when
   ///                 the library calls OnReadDone
-  void StartRead(Request* req) { stream_->Read(req); }
+  void StartRead(Request* req) {
+    ServerCallbackReaderWriter<Request, Response>* stream =
+        stream_.load(std::memory_order_acquire);
+    if (stream == nullptr) {
+      grpc::internal::MutexLock l(&stream_mu_);
+      stream = stream_.load(std::memory_order_relaxed);
+      if (stream == nullptr) {
+        read_wanted_ = req;
+        return;
+      }
+    }
+    stream->Read(req);
+  }
 
   /// Initiate a write operation.
   ///
@@ -267,7 +283,18 @@ class ServerBidiReactor : public internal::ServerReactor {
   ///                 application regains ownership of resp.
   /// \param[in] options The WriteOptions to use for writing this message
   void StartWrite(const Response* resp, ::grpc::WriteOptions options) {
-    stream_->Write(resp, std::move(options));
+    ServerCallbackReaderWriter<Request, Response>* stream =
+        stream_.load(std::memory_order_acquire);
+    if (stream == nullptr) {
+      grpc::internal::MutexLock l(&stream_mu_);
+      stream = stream_.load(std::memory_order_relaxed);
+      if (stream == nullptr) {
+        write_wanted_ = resp;
+        write_options_wanted_ = std::move(options);
+        return;
+      }
+    }
+    stream->Write(resp, std::move(options));
   }
 
   /// Initiate a write operation with specified options and final RPC Status,
@@ -279,13 +306,26 @@ class ServerBidiReactor : public internal::ServerReactor {
   /// both.
   ///
   /// \param[in] resp The message to be written. The library takes temporary
-  ///                 ownership until Onone, at which point the application
-  ///                 regains ownership of resp.
+  ///                 ownership until OnWriteDone, at which point the
+  ///                 application regains ownership of resp.
   /// \param[in] options The WriteOptions to use for writing this message
   /// \param[in] s The status outcome of this RPC
   void StartWriteAndFinish(const Response* resp, ::grpc::WriteOptions options,
                            ::grpc::Status s) {
-    stream_->WriteAndFinish(resp, std::move(options), std::move(s));
+    ServerCallbackReaderWriter<Request, Response>* stream =
+        stream_.load(std::memory_order_acquire);
+    if (stream == nullptr) {
+      grpc::internal::MutexLock l(&stream_mu_);
+      stream = stream_.load(std::memory_order_relaxed);
+      if (stream == nullptr) {
+        write_and_finish_wanted_ = true;
+        write_wanted_ = resp;
+        write_options_wanted_ = std::move(options);
+        status_wanted_ = std::move(s);
+        return;
+      }
+    }
+    stream->WriteAndFinish(resp, std::move(options), std::move(s));
   }
 
   /// Inform system of a planned write operation with specified options, but
@@ -306,15 +346,20 @@ class ServerBidiReactor : public internal::ServerReactor {
   /// cancelled.
   ///
   /// \param[in] s The status outcome of this RPC
-  void Finish(::grpc::Status s) { stream_->Finish(std::move(s)); }
-
-  /// Notify the application that a streaming RPC has started and that it is now
-  /// ok to call any operation initiation method. An RPC is considered started
-  /// after the server has received all initial metadata from the client, which
-  /// is a result of the client calling StartCall().
-  ///
-  /// \param[in] context The context object now associated with this RPC
-  virtual void OnStarted(::grpc_impl::ServerContext* /*context*/) {}
+  void Finish(::grpc::Status s) {
+    ServerCallbackReaderWriter<Request, Response>* stream =
+        stream_.load(std::memory_order_acquire);
+    if (stream == nullptr) {
+      grpc::internal::MutexLock l(&stream_mu_);
+      stream = stream_.load(std::memory_order_relaxed);
+      if (stream == nullptr) {
+        finish_wanted_ = true;
+        status_wanted_ = std::move(s);
+        return;
+      }
+    }
+    stream->Finish(std::move(s));
+  }
 
   /// Notifies the application that an explicit StartSendInitialMetadata
   /// operation completed. Not used when the sending of initial metadata
@@ -338,9 +383,9 @@ class ServerBidiReactor : public internal::ServerReactor {
   virtual void OnWriteDone(bool /*ok*/) {}
 
   /// Notifies the application that all operations associated with this RPC
-  /// have completed. This is an override (from the internal base class) but not
-  /// final, so derived classes should override it if they want to take action.
-  void OnDone() override {}
+  /// have completed. This is an override (from the internal base class) but
+  /// still abstract, so derived classes MUST override it to be instantiated.
+  void OnDone() override = 0;
 
   /// Notifies the application that this RPC has been cancelled. This is an
   /// override (from the internal base class) but not final, so derived classes
@@ -353,84 +398,219 @@ class ServerBidiReactor : public internal::ServerReactor {
   // customization point.
   virtual void InternalBindStream(
       ServerCallbackReaderWriter<Request, Response>* stream) {
-    stream_ = stream;
+    grpc::internal::ReleasableMutexLock l(&stream_mu_);
+    stream_.store(stream, std::memory_order_release);
+    if (send_initial_metadata_wanted_) {
+      stream->SendInitialMetadata();
+      send_initial_metadata_wanted_ = false;
+    }
+    if (read_wanted_ != nullptr) {
+      stream->Read(read_wanted_);
+      read_wanted_ = nullptr;
+    }
+    if (write_and_finish_wanted_) {
+      // Don't perform actual finish actions while holding lock since it could
+      // trigger OnDone that destroys this object including the still-held lock.
+      write_and_finish_wanted_ = false;
+      const Response* write_wanted = write_wanted_;
+      ::grpc::WriteOptions write_options_wanted =
+          std::move(write_options_wanted_);
+      ::grpc::Status status_wanted = std::move(status_wanted_);
+      l.Unlock();
+      stream->WriteAndFinish(write_wanted, std::move(write_options_wanted),
+                             std::move(status_wanted));
+      return;
+    } else {
+      if (write_wanted_ != nullptr) {
+        stream->Write(write_wanted_, std::move(write_options_wanted_));
+        write_wanted_ = nullptr;
+      }
+      if (finish_wanted_) {
+        finish_wanted_ = false;
+        ::grpc::Status status_wanted = std::move(status_wanted_);
+        l.Unlock();
+        stream->Finish(std::move(status_wanted));
+        return;
+      }
+    }
   }
 
-  ServerCallbackReaderWriter<Request, Response>* stream_;
+  grpc::internal::Mutex stream_mu_;
+  std::atomic<ServerCallbackReaderWriter<Request, Response>*> stream_;
+  bool send_initial_metadata_wanted_ /* GUARDED_BY(stream_mu_) */ = false;
+  bool write_and_finish_wanted_ /* GUARDED_BY(stream_mu_) */ = false;
+  bool finish_wanted_ /* GUARDED_BY(stream_mu_) */ = false;
+  Request* read_wanted_ /* GUARDED_BY(stream_mu_) */ = nullptr;
+  const Response* write_wanted_ /* GUARDED_BY(stream_mu_) */ = nullptr;
+  ::grpc::WriteOptions write_options_wanted_ /* GUARDED_BY(stream_mu_) */;
+  ::grpc::Status status_wanted_ /* GUARDED_BY(stream_mu_) */;
 };
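
The StartSendInitialMetadata/StartRead/StartWrite/Finish bodies above all follow the same shape: an acquire load of the atomic stream pointer as the fast path, and a mutex-guarded slow path that records the operation in a *_wanted_ field until InternalBindStream replays it. A self-contained sketch of that queue-until-bound pattern, reduced to a single pending write and hypothetical types:

#include <atomic>
#include <cassert>
#include <mutex>
#include <string>
#include <vector>

// Hypothetical miniature of the "wanted_" pattern: operations issued before
// the stream is bound are recorded under a mutex and replayed by the bind
// step; once the atomic pointer is published, later operations take the
// lock-free fast path.
struct Stream {
  std::vector<std::string> ops;
  void Write(const std::string& msg) { ops.push_back(msg); }
};

class MiniReactor {
 public:
  void StartWrite(const std::string& msg) {
    Stream* stream = stream_.load(std::memory_order_acquire);
    if (stream == nullptr) {
      std::lock_guard<std::mutex> lock(mu_);
      stream = stream_.load(std::memory_order_relaxed);
      if (stream == nullptr) {
        pending_writes_.push_back(msg);  // queue until the stream is bound
        return;
      }
    }
    stream->Write(msg);
  }

  void Bind(Stream* stream) {
    std::lock_guard<std::mutex> lock(mu_);
    stream_.store(stream, std::memory_order_release);
    for (const auto& msg : pending_writes_) stream->Write(msg);
    pending_writes_.clear();
  }

 private:
  std::mutex mu_;
  std::atomic<Stream*> stream_{nullptr};
  std::vector<std::string> pending_writes_;  // guarded by mu_
};

int main() {
  MiniReactor reactor;
  reactor.StartWrite("early");  // issued before binding: queued
  Stream stream;
  reactor.Bind(&stream);        // replays the queued write
  reactor.StartWrite("late");   // fast path after binding
  assert(stream.ops.size() == 2);
  return 0;
}

The real InternalBindStream additionally unlocks before replaying Finish or WriteAndFinish, since those can trigger OnDone and destroy the object that owns the mutex; the sketch skips that detail.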
 
 /// \a ServerReadReactor is the interface for a client-streaming RPC.
-template <class Request, class Response>
+template <class Request>
 class ServerReadReactor : public internal::ServerReactor {
  public:
+  ServerReadReactor() : reader_(nullptr) {}
   ~ServerReadReactor() = default;
 
   /// The following operation initiations are exactly like ServerBidiReactor.
-  void StartSendInitialMetadata() { reader_->SendInitialMetadata(); }
-  void StartRead(Request* req) { reader_->Read(req); }
-  void Finish(::grpc::Status s) { reader_->Finish(std::move(s)); }
-
-  /// Similar to ServerBidiReactor::OnStarted, except that this also provides
-  /// the response object that the stream fills in before calling Finish.
-  /// (It must be filled in if status is OK, but it may be filled in otherwise.)
-  ///
-  /// \param[in] context The context object now associated with this RPC
-  /// \param[in] resp The response object to be used by this RPC
-  virtual void OnStarted(::grpc_impl::ServerContext* /*context*/,
-                         Response* /*resp*/) {}
+  void StartSendInitialMetadata() {
+    ServerCallbackReader<Request>* reader =
+        reader_.load(std::memory_order_acquire);
+    if (reader == nullptr) {
+      grpc::internal::MutexLock l(&reader_mu_);
+      reader = reader_.load(std::memory_order_relaxed);
+      if (reader == nullptr) {
+        send_initial_metadata_wanted_ = true;
+        return;
+      }
+    }
+    reader->SendInitialMetadata();
+  }
+  void StartRead(Request* req) {
+    ServerCallbackReader<Request>* reader =
+        reader_.load(std::memory_order_acquire);
+    if (reader == nullptr) {
+      grpc::internal::MutexLock l(&reader_mu_);
+      reader = reader_.load(std::memory_order_relaxed);
+      if (reader == nullptr) {
+        read_wanted_ = req;
+        return;
+      }
+    }
+    reader->Read(req);
+  }
+  void Finish(::grpc::Status s) {
+    ServerCallbackReader<Request>* reader =
+        reader_.load(std::memory_order_acquire);
+    if (reader == nullptr) {
+      grpc::internal::MutexLock l(&reader_mu_);
+      reader = reader_.load(std::memory_order_relaxed);
+      if (reader == nullptr) {
+        finish_wanted_ = true;
+        status_wanted_ = std::move(s);
+        return;
+      }
+    }
+    reader->Finish(std::move(s));
+  }
 
   /// The following notifications are exactly like ServerBidiReactor.
   virtual void OnSendInitialMetadataDone(bool /*ok*/) {}
   virtual void OnReadDone(bool /*ok*/) {}
-  void OnDone() override {}
+  void OnDone() override = 0;
   void OnCancel() override {}
 
  private:
   friend class ServerCallbackReader<Request>;
+
   // May be overridden by internal implementation details. This is not a public
   // customization point.
   virtual void InternalBindReader(ServerCallbackReader<Request>* reader) {
-    reader_ = reader;
+    grpc::internal::ReleasableMutexLock l(&reader_mu_);
+    reader_.store(reader, std::memory_order_release);
+    if (send_initial_metadata_wanted_) {
+      reader->SendInitialMetadata();
+      send_initial_metadata_wanted_ = false;
+    }
+    if (read_wanted_ != nullptr) {
+      reader->Read(read_wanted_);
+      read_wanted_ = nullptr;
+    }
+    if (finish_wanted_) {
+      finish_wanted_ = false;
+      ::grpc::Status status_wanted = std::move(status_wanted_);
+      l.Unlock();
+      reader->Finish(std::move(status_wanted));
+      return;
+    }
   }
 
-  ServerCallbackReader<Request>* reader_;
+  grpc::internal::Mutex reader_mu_;
+  std::atomic<ServerCallbackReader<Request>*> reader_;
+  bool send_initial_metadata_wanted_ /* GUARDED_BY(reader_mu_) */ = false;
+  bool finish_wanted_ /* GUARDED_BY(reader_mu_) */ = false;
+  Request* read_wanted_ /* GUARDED_BY(reader_mu_) */ = nullptr;
+  ::grpc::Status status_wanted_ /* GUARDED_BY(reader_mu_) */;
 };
 
 /// \a ServerWriteReactor is the interface for a server-streaming RPC.
-template <class Request, class Response>
+template <class Response>
 class ServerWriteReactor : public internal::ServerReactor {
  public:
+  ServerWriteReactor() : writer_(nullptr) {}
   ~ServerWriteReactor() = default;
 
   /// The following operation initiations are exactly like ServerBidiReactor.
-  void StartSendInitialMetadata() { writer_->SendInitialMetadata(); }
+  void StartSendInitialMetadata() {
+    ServerCallbackWriter<Response>* writer =
+        writer_.load(std::memory_order_acquire);
+    if (writer == nullptr) {
+      grpc::internal::MutexLock l(&writer_mu_);
+      writer = writer_.load(std::memory_order_relaxed);
+      if (writer == nullptr) {
+        send_initial_metadata_wanted_ = true;
+        return;
+      }
+    }
+    writer->SendInitialMetadata();
+  }
   void StartWrite(const Response* resp) {
     StartWrite(resp, ::grpc::WriteOptions());
   }
   void StartWrite(const Response* resp, ::grpc::WriteOptions options) {
-    writer_->Write(resp, std::move(options));
+    ServerCallbackWriter<Response>* writer =
+        writer_.load(std::memory_order_acquire);
+    if (writer == nullptr) {
+      grpc::internal::MutexLock l(&writer_mu_);
+      writer = writer_.load(std::memory_order_relaxed);
+      if (writer == nullptr) {
+        write_wanted_ = resp;
+        write_options_wanted_ = std::move(options);
+        return;
+      }
+    }
+    writer->Write(resp, std::move(options));
   }
   void StartWriteAndFinish(const Response* resp, ::grpc::WriteOptions options,
                            ::grpc::Status s) {
-    writer_->WriteAndFinish(resp, std::move(options), std::move(s));
+    ServerCallbackWriter<Response>* writer =
+        writer_.load(std::memory_order_acquire);
+    if (writer == nullptr) {
+      grpc::internal::MutexLock l(&writer_mu_);
+      writer = writer_.load(std::memory_order_relaxed);
+      if (writer == nullptr) {
+        write_and_finish_wanted_ = true;
+        write_wanted_ = resp;
+        write_options_wanted_ = std::move(options);
+        status_wanted_ = std::move(s);
+        return;
+      }
+    }
+    writer->WriteAndFinish(resp, std::move(options), std::move(s));
   }
   void StartWriteLast(const Response* resp, ::grpc::WriteOptions options) {
     StartWrite(resp, std::move(options.set_last_message()));
   }
-  void Finish(::grpc::Status s) { writer_->Finish(std::move(s)); }
-
-  /// Similar to ServerBidiReactor::OnStarted, except that this also provides
-  /// the request object sent by the client.
-  ///
-  /// \param[in] context The context object now associated with this RPC
-  /// \param[in] req The request object sent by the client
-  virtual void OnStarted(::grpc_impl::ServerContext* /*context*/,
-                         const Request* /*req*/) {}
+  void Finish(::grpc::Status s) {
+    ServerCallbackWriter<Response>* writer =
+        writer_.load(std::memory_order_acquire);
+    if (writer == nullptr) {
+      grpc::internal::MutexLock l(&writer_mu_);
+      writer = writer_.load(std::memory_order_relaxed);
+      if (writer == nullptr) {
+        finish_wanted_ = true;
+        status_wanted_ = std::move(s);
+        return;
+      }
+    }
+    writer->Finish(std::move(s));
+  }
 
   /// The following notifications are exactly like ServerBidiReactor.
   virtual void OnSendInitialMetadataDone(bool /*ok*/) {}
   virtual void OnWriteDone(bool /*ok*/) {}
-  void OnDone() override {}
+  void OnDone() override = 0;
   void OnCancel() override {}
 
  private:
@@ -438,750 +618,135 @@ class ServerWriteReactor : public internal::ServerReactor {
   // May be overridden by internal implementation details. This is not a public
   // customization point.
   virtual void InternalBindWriter(ServerCallbackWriter<Response>* writer) {
-    writer_ = writer;
-  }
-
-  ServerCallbackWriter<Response>* writer_;
-};
-
-}  // namespace experimental
-
-namespace internal {
-
-template <class Request, class Response>
-class UnimplementedReadReactor
-    : public experimental::ServerReadReactor<Request, Response> {
- public:
-  void OnDone() override { delete this; }
-  void OnStarted(::grpc_impl::ServerContext*, Response*) override {
-    this->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""));
-  }
-};
-
-template <class Request, class Response>
-class UnimplementedWriteReactor
-    : public experimental::ServerWriteReactor<Request, Response> {
- public:
-  void OnDone() override { delete this; }
-  void OnStarted(::grpc_impl::ServerContext*, const Request*) override {
-    this->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""));
-  }
-};
-
-template <class Request, class Response>
-class UnimplementedBidiReactor
-    : public experimental::ServerBidiReactor<Request, Response> {
- public:
-  void OnDone() override { delete this; }
-  void OnStarted(::grpc_impl::ServerContext*) override {
-    this->Finish(::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""));
-  }
-};
-
-template <class RequestType, class ResponseType>
-class CallbackUnaryHandler : public grpc::internal::MethodHandler {
- public:
-  CallbackUnaryHandler(
-      std::function<void(::grpc_impl::ServerContext*, const RequestType*,
-                         ResponseType*,
-                         experimental::ServerCallbackRpcController*)>
-          func)
-      : func_(func) {}
-
-  void SetMessageAllocator(
-      ::grpc::experimental::MessageAllocator<RequestType, ResponseType>*
-          allocator) {
-    allocator_ = allocator;
-  }
-
-  void RunHandler(const HandlerParameter& param) final {
-    // Arena allocate a controller structure (that includes request/response)
-    ::grpc::g_core_codegen_interface->grpc_call_ref(param.call->call());
-    auto* allocator_state = static_cast<
-        grpc::experimental::MessageHolder<RequestType, ResponseType>*>(
-        param.internal_data);
-    auto* controller =
-        new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
-            param.call->call(), sizeof(ServerCallbackRpcControllerImpl)))
-            ServerCallbackRpcControllerImpl(param.server_context, param.call,
-                                            allocator_state,
-                                            std::move(param.call_requester));
-    ::grpc::Status status = param.status;
-    if (status.ok()) {
-      // Call the actual function handler and expect the user to call finish
-      grpc::internal::CatchingCallback(func_, param.server_context,
-                                       controller->request(),
-                                       controller->response(), controller);
-    } else {
-      // if deserialization failed, we need to fail the call
-      controller->Finish(status);
-    }
-  }
-
-  void* Deserialize(grpc_call* call, grpc_byte_buffer* req,
-                    ::grpc::Status* status, void** handler_data) final {
-    grpc::ByteBuffer buf;
-    buf.set_buffer(req);
-    RequestType* request = nullptr;
-    ::grpc::experimental::MessageHolder<RequestType, ResponseType>*
-        allocator_state = nullptr;
-    if (allocator_ != nullptr) {
-      allocator_state = allocator_->AllocateMessages();
+    grpc::internal::ReleasableMutexLock l(&writer_mu_);
+    writer_.store(writer, std::memory_order_release);
+    if (send_initial_metadata_wanted_) {
+      writer->SendInitialMetadata();
+      send_initial_metadata_wanted_ = false;
+    }
+    if (write_and_finish_wanted_) {
+      write_and_finish_wanted_ = false;
+      const Response* write_wanted = write_wanted_;
+      ::grpc::WriteOptions write_options_wanted =
+          std::move(write_options_wanted_);
+      ::grpc::Status status_wanted = std::move(status_wanted_);
+      l.Unlock();
+      writer->WriteAndFinish(write_wanted, std::move(write_options_wanted),
+                             std::move(status_wanted));
+      return;
     } else {
-      allocator_state =
-          new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
-              call, sizeof(DefaultMessageHolder<RequestType, ResponseType>)))
-              DefaultMessageHolder<RequestType, ResponseType>();
-    }
-    *handler_data = allocator_state;
-    request = allocator_state->request();
-    *status =
-        ::grpc::SerializationTraits<RequestType>::Deserialize(&buf, request);
-    buf.Release();
-    if (status->ok()) {
-      return request;
-    }
-    // Clean up on deserialization failure.
-    allocator_state->Release();
-    return nullptr;
-  }
-
- private:
-  std::function<void(::grpc_impl::ServerContext*, const RequestType*,
-                     ResponseType*, experimental::ServerCallbackRpcController*)>
-      func_;
-  grpc::experimental::MessageAllocator<RequestType, ResponseType>* allocator_ =
-      nullptr;
-
-  // The implementation class of ServerCallbackRpcController is a private member
-  // of CallbackUnaryHandler since it is never exposed anywhere, and this allows
-  // it to take advantage of CallbackUnaryHandler's friendships.
-  class ServerCallbackRpcControllerImpl
-      : public experimental::ServerCallbackRpcController {
-   public:
-    void Finish(::grpc::Status s) override {
-      finish_tag_.Set(call_.call(), [this](bool) { MaybeDone(); },
-                      &finish_ops_);
-      if (!ctx_->sent_initial_metadata_) {
-        finish_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
-                                        ctx_->initial_metadata_flags());
-        if (ctx_->compression_level_set()) {
-          finish_ops_.set_compression_level(ctx_->compression_level());
-        }
-        ctx_->sent_initial_metadata_ = true;
-      }
-      // The response is dropped if the status is not OK.
-      if (s.ok()) {
-        finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_,
-                                     finish_ops_.SendMessagePtr(response()));
-      } else {
-        finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, s);
+      if (write_wanted_ != nullptr) {
+        writer->Write(write_wanted_, std::move(write_options_wanted_));
+        write_wanted_ = nullptr;
       }
-      finish_ops_.set_core_cq_tag(&finish_tag_);
-      call_.PerformOps(&finish_ops_);
-    }
-
-    void SendInitialMetadata(std::function<void(bool)> f) override {
-      GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
-      callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
-      // TODO(vjpai): Consider taking f as a move-capture if we adopt C++14
-      //              and if performance of this operation matters
-      meta_tag_.Set(call_.call(),
-                    [this, f](bool ok) {
-                      f(ok);
-                      MaybeDone();
-                    },
-                    &meta_ops_);
-      meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
-                                    ctx_->initial_metadata_flags());
-      if (ctx_->compression_level_set()) {
-        meta_ops_.set_compression_level(ctx_->compression_level());
-      }
-      ctx_->sent_initial_metadata_ = true;
-      meta_ops_.set_core_cq_tag(&meta_tag_);
-      call_.PerformOps(&meta_ops_);
-    }
-
-    // Neither SetCancelCallback nor ClearCancelCallback should affect the
-    // callbacks_outstanding_ count since they are paired and both must precede
-    // the invocation of Finish (if they are used at all)
-    void SetCancelCallback(std::function<void()> callback) override {
-      ctx_->SetCancelCallback(std::move(callback));
-    }
-
-    void ClearCancelCallback() override { ctx_->ClearCancelCallback(); }
-
-    grpc::experimental::RpcAllocatorState* GetRpcAllocatorState() override {
-      return allocator_state_;
-    }
-
-   private:
-    friend class CallbackUnaryHandler<RequestType, ResponseType>;
-
-    ServerCallbackRpcControllerImpl(
-        ServerContext* ctx, ::grpc::internal::Call* call,
-        ::grpc::experimental::MessageHolder<RequestType, ResponseType>*
-            allocator_state,
-        std::function<void()> call_requester)
-        : ctx_(ctx),
-          call_(*call),
-          allocator_state_(allocator_state),
-          call_requester_(std::move(call_requester)) {
-      ctx_->BeginCompletionOp(call, [this](bool) { MaybeDone(); }, nullptr);
-    }
-
-    const RequestType* request() { return allocator_state_->request(); }
-    ResponseType* response() { return allocator_state_->response(); }
-
-    void MaybeDone() {
-      if (GPR_UNLIKELY(callbacks_outstanding_.fetch_sub(
-                           1, std::memory_order_acq_rel) == 1)) {
-        grpc_call* call = call_.call();
-        auto call_requester = std::move(call_requester_);
-        allocator_state_->Release();
-        this->~ServerCallbackRpcControllerImpl();  // explicitly call destructor
-        ::grpc::g_core_codegen_interface->grpc_call_unref(call);
-        call_requester();
+      if (finish_wanted_) {
+        finish_wanted_ = false;
+        ::grpc::Status status_wanted = std::move(status_wanted_);
+        l.Unlock();
+        writer->Finish(std::move(status_wanted));
+        return;
       }
     }
+  }
 
-    grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata>
-        meta_ops_;
-    grpc::internal::CallbackWithSuccessTag meta_tag_;
-    grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
-                              grpc::internal::CallOpSendMessage,
-                              grpc::internal::CallOpServerSendStatus>
-        finish_ops_;
-    grpc::internal::CallbackWithSuccessTag finish_tag_;
-
-    ::grpc_impl::ServerContext* ctx_;
-    grpc::internal::Call call_;
-    grpc::experimental::MessageHolder<RequestType, ResponseType>* const
-        allocator_state_;
-    std::function<void()> call_requester_;
-    std::atomic<intptr_t> callbacks_outstanding_{
-        2};  // reserve for Finish and CompletionOp
-  };
+  grpc::internal::Mutex writer_mu_;
+  std::atomic<ServerCallbackWriter<Response>*> writer_;
+  bool send_initial_metadata_wanted_ /* GUARDED_BY(writer_mu_) */ = false;
+  bool write_and_finish_wanted_ /* GUARDED_BY(writer_mu_) */ = false;
+  bool finish_wanted_ /* GUARDED_BY(writer_mu_) */ = false;
+  const Response* write_wanted_ /* GUARDED_BY(writer_mu_) */ = nullptr;
+  ::grpc::WriteOptions write_options_wanted_ /* GUARDED_BY(writer_mu_) */;
+  ::grpc::Status status_wanted_ /* GUARDED_BY(writer_mu_) */;
 };
 
-template <class RequestType, class ResponseType>
-class CallbackClientStreamingHandler : public grpc::internal::MethodHandler {
+class ServerUnaryReactor : public internal::ServerReactor {
  public:
-  CallbackClientStreamingHandler(
-      std::function<
-          experimental::ServerReadReactor<RequestType, ResponseType>*()>
-          func)
-      : func_(std::move(func)) {}
-  void RunHandler(const HandlerParameter& param) final {
-    // Arena allocate a reader structure (that includes response)
-    ::grpc::g_core_codegen_interface->grpc_call_ref(param.call->call());
-
-    experimental::ServerReadReactor<RequestType, ResponseType>* reactor =
-        param.status.ok()
-            ? ::grpc::internal::CatchingReactorCreator<
-                  experimental::ServerReadReactor<RequestType, ResponseType>>(
-                  func_)
-            : nullptr;
-
-    if (reactor == nullptr) {
-      // if deserialization or reactor creator failed, we need to fail the call
-      reactor = new UnimplementedReadReactor<RequestType, ResponseType>;
-    }
-
-    auto* reader = new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
-        param.call->call(), sizeof(ServerCallbackReaderImpl)))
-        ServerCallbackReaderImpl(param.server_context, param.call,
-                                 std::move(param.call_requester), reactor);
-
-    reader->BindReactor(reactor);
-    reactor->OnStarted(param.server_context, reader->response());
-    // The earliest that OnCancel can be called is after OnStarted is done.
-    reactor->MaybeCallOnCancel();
-    reader->MaybeDone();
-  }
+  ServerUnaryReactor() : call_(nullptr) {}
+  ~ServerUnaryReactor() = default;
 
- private:
-  std::function<experimental::ServerReadReactor<RequestType, ResponseType>*()>
-      func_;
-
-  class ServerCallbackReaderImpl
-      : public experimental::ServerCallbackReader<RequestType> {
-   public:
-    void Finish(::grpc::Status s) override {
-      finish_tag_.Set(call_.call(), [this](bool) { MaybeDone(); },
-                      &finish_ops_);
-      if (!ctx_->sent_initial_metadata_) {
-        finish_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
-                                        ctx_->initial_metadata_flags());
-        if (ctx_->compression_level_set()) {
-          finish_ops_.set_compression_level(ctx_->compression_level());
-        }
-        ctx_->sent_initial_metadata_ = true;
-      }
-      // The response is dropped if the status is not OK.
-      if (s.ok()) {
-        finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_,
-                                     finish_ops_.SendMessagePtr(&resp_));
-      } else {
-        finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, s);
-      }
-      finish_ops_.set_core_cq_tag(&finish_tag_);
-      call_.PerformOps(&finish_ops_);
-    }
-
-    void SendInitialMetadata() override {
-      GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
-      callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
-      meta_tag_.Set(call_.call(),
-                    [this](bool ok) {
-                      reactor_->OnSendInitialMetadataDone(ok);
-                      MaybeDone();
-                    },
-                    &meta_ops_);
-      meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
-                                    ctx_->initial_metadata_flags());
-      if (ctx_->compression_level_set()) {
-        meta_ops_.set_compression_level(ctx_->compression_level());
+  /// The following operation initiations are exactly like ServerBidiReactor.
+  void StartSendInitialMetadata() {
+    ServerCallbackUnary* call = call_.load(std::memory_order_acquire);
+    if (call == nullptr) {
+      grpc::internal::MutexLock l(&call_mu_);
+      call = call_.load(std::memory_order_relaxed);
+      if (call == nullptr) {
+        send_initial_metadata_wanted_ = true;
+        return;
       }
-      ctx_->sent_initial_metadata_ = true;
-      meta_ops_.set_core_cq_tag(&meta_tag_);
-      call_.PerformOps(&meta_ops_);
-    }
-
-    void Read(RequestType* req) override {
-      callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
-      read_ops_.RecvMessage(req);
-      call_.PerformOps(&read_ops_);
-    }
-
-   private:
-    friend class CallbackClientStreamingHandler<RequestType, ResponseType>;
-
-    ServerCallbackReaderImpl(
-        ::grpc_impl::ServerContext* ctx, grpc::internal::Call* call,
-        std::function<void()> call_requester,
-        experimental::ServerReadReactor<RequestType, ResponseType>* reactor)
-        : ctx_(ctx),
-          call_(*call),
-          call_requester_(std::move(call_requester)),
-          reactor_(reactor) {
-      ctx_->BeginCompletionOp(call, [this](bool) { MaybeDone(); }, reactor);
-      read_tag_.Set(call_.call(),
-                    [this](bool ok) {
-                      reactor_->OnReadDone(ok);
-                      MaybeDone();
-                    },
-                    &read_ops_);
-      read_ops_.set_core_cq_tag(&read_tag_);
     }
-
-    ~ServerCallbackReaderImpl() {}
-
-    ResponseType* response() { return &resp_; }
-
-    void MaybeDone() {
-      if (GPR_UNLIKELY(callbacks_outstanding_.fetch_sub(
-                           1, std::memory_order_acq_rel) == 1)) {
-        reactor_->OnDone();
-        grpc_call* call = call_.call();
-        auto call_requester = std::move(call_requester_);
-        this->~ServerCallbackReaderImpl();  // explicitly call destructor
-        ::grpc::g_core_codegen_interface->grpc_call_unref(call);
-        call_requester();
+    call->SendInitialMetadata();
+  }
+  void Finish(::grpc::Status s) {
+    ServerCallbackUnary* call = call_.load(std::memory_order_acquire);
+    if (call == nullptr) {
+      grpc::internal::MutexLock l(&call_mu_);
+      call = call_.load(std::memory_order_relaxed);
+      if (call == nullptr) {
+        finish_wanted_ = true;
+        status_wanted_ = std::move(s);
+        return;
       }
     }
-
-    grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata>
-        meta_ops_;
-    grpc::internal::CallbackWithSuccessTag meta_tag_;
-    grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
-                              grpc::internal::CallOpSendMessage,
-                              grpc::internal::CallOpServerSendStatus>
-        finish_ops_;
-    grpc::internal::CallbackWithSuccessTag finish_tag_;
-    grpc::internal::CallOpSet<grpc::internal::CallOpRecvMessage<RequestType>>
-        read_ops_;
-    grpc::internal::CallbackWithSuccessTag read_tag_;
-
-    ::grpc_impl::ServerContext* ctx_;
-    grpc::internal::Call call_;
-    ResponseType resp_;
-    std::function<void()> call_requester_;
-    experimental::ServerReadReactor<RequestType, ResponseType>* reactor_;
-    std::atomic<intptr_t> callbacks_outstanding_{
-        3};  // reserve for OnStarted, Finish, and CompletionOp
-  };
-};
-
-template <class RequestType, class ResponseType>
-class CallbackServerStreamingHandler : public grpc::internal::MethodHandler {
- public:
-  CallbackServerStreamingHandler(
-      std::function<
-          experimental::ServerWriteReactor<RequestType, ResponseType>*()>
-          func)
-      : func_(std::move(func)) {}
-  void RunHandler(const HandlerParameter& param) final {
-    // Arena allocate a writer structure
-    ::grpc::g_core_codegen_interface->grpc_call_ref(param.call->call());
-
-    experimental::ServerWriteReactor<RequestType, ResponseType>* reactor =
-        param.status.ok()
-            ? ::grpc::internal::CatchingReactorCreator<
-                  experimental::ServerWriteReactor<RequestType, ResponseType>>(
-                  func_)
-            : nullptr;
-
-    if (reactor == nullptr) {
-      // if deserialization or reactor creator failed, we need to fail the call
-      reactor = new UnimplementedWriteReactor<RequestType, ResponseType>;
-    }
-
-    auto* writer = new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
-        param.call->call(), sizeof(ServerCallbackWriterImpl)))
-        ServerCallbackWriterImpl(param.server_context, param.call,
-                                 static_cast<RequestType*>(param.request),
-                                 std::move(param.call_requester), reactor);
-    writer->BindReactor(reactor);
-    reactor->OnStarted(param.server_context, writer->request());
-    // The earliest that OnCancel can be called is after OnStarted is done.
-    reactor->MaybeCallOnCancel();
-    writer->MaybeDone();
+    call->Finish(std::move(s));
   }
 
-  void* Deserialize(grpc_call* call, grpc_byte_buffer* req,
-                    ::grpc::Status* status, void** /*handler_data*/) final {
-    ::grpc::ByteBuffer buf;
-    buf.set_buffer(req);
-    auto* request =
-        new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
-            call, sizeof(RequestType))) RequestType();
-    *status =
-        ::grpc::SerializationTraits<RequestType>::Deserialize(&buf, request);
-    buf.Release();
-    if (status->ok()) {
-      return request;
-    }
-    request->~RequestType();
-    return nullptr;
-  }
+  /// The following notifications are exactly like ServerBidiReactor.
+  virtual void OnSendInitialMetadataDone(bool /*ok*/) {}
+  void OnDone() override = 0;
+  void OnCancel() override {}
 
  private:
-  std::function<experimental::ServerWriteReactor<RequestType, ResponseType>*()>
-      func_;
-
-  class ServerCallbackWriterImpl
-      : public experimental::ServerCallbackWriter<ResponseType> {
-   public:
-    void Finish(::grpc::Status s) override {
-      finish_tag_.Set(call_.call(), [this](bool) { MaybeDone(); },
-                      &finish_ops_);
-      finish_ops_.set_core_cq_tag(&finish_tag_);
-
-      if (!ctx_->sent_initial_metadata_) {
-        finish_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
-                                        ctx_->initial_metadata_flags());
-        if (ctx_->compression_level_set()) {
-          finish_ops_.set_compression_level(ctx_->compression_level());
-        }
-        ctx_->sent_initial_metadata_ = true;
-      }
-      finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, s);
-      call_.PerformOps(&finish_ops_);
-    }
-
-    void SendInitialMetadata() override {
-      GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
-      callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
-      meta_tag_.Set(call_.call(),
-                    [this](bool ok) {
-                      reactor_->OnSendInitialMetadataDone(ok);
-                      MaybeDone();
-                    },
-                    &meta_ops_);
-      meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
-                                    ctx_->initial_metadata_flags());
-      if (ctx_->compression_level_set()) {
-        meta_ops_.set_compression_level(ctx_->compression_level());
-      }
-      ctx_->sent_initial_metadata_ = true;
-      meta_ops_.set_core_cq_tag(&meta_tag_);
-      call_.PerformOps(&meta_ops_);
-    }
-
-    void Write(const ResponseType* resp,
-               ::grpc::WriteOptions options) override {
-      callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
-      if (options.is_last_message()) {
-        options.set_buffer_hint();
-      }
-      if (!ctx_->sent_initial_metadata_) {
-        write_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
-                                       ctx_->initial_metadata_flags());
-        if (ctx_->compression_level_set()) {
-          write_ops_.set_compression_level(ctx_->compression_level());
-        }
-        ctx_->sent_initial_metadata_ = true;
-      }
-      // TODO(vjpai): don't assert
-      GPR_CODEGEN_ASSERT(write_ops_.SendMessagePtr(resp, options).ok());
-      call_.PerformOps(&write_ops_);
-    }
-
-    void WriteAndFinish(const ResponseType* resp, ::grpc::WriteOptions options,
-                        ::grpc::Status s) override {
-      // This combines the write into the finish callback
-      // Don't send any message if the status is bad
-      if (s.ok()) {
-        // TODO(vjpai): don't assert
-        GPR_CODEGEN_ASSERT(finish_ops_.SendMessagePtr(resp, options).ok());
-      }
-      Finish(std::move(s));
-    }
-
-   private:
-    friend class CallbackServerStreamingHandler<RequestType, ResponseType>;
-
-    ServerCallbackWriterImpl(
-        ::grpc_impl::ServerContext* ctx, grpc::internal::Call* call,
-        const RequestType* req, std::function<void()> call_requester,
-        experimental::ServerWriteReactor<RequestType, ResponseType>* reactor)
-        : ctx_(ctx),
-          call_(*call),
-          req_(req),
-          call_requester_(std::move(call_requester)),
-          reactor_(reactor) {
-      ctx_->BeginCompletionOp(call, [this](bool) { MaybeDone(); }, reactor);
-      write_tag_.Set(call_.call(),
-                     [this](bool ok) {
-                       reactor_->OnWriteDone(ok);
-                       MaybeDone();
-                     },
-                     &write_ops_);
-      write_ops_.set_core_cq_tag(&write_tag_);
-    }
-    ~ServerCallbackWriterImpl() { req_->~RequestType(); }
-
-    const RequestType* request() { return req_; }
-
-    void MaybeDone() {
-      if (GPR_UNLIKELY(callbacks_outstanding_.fetch_sub(
-                           1, std::memory_order_acq_rel) == 1)) {
-        reactor_->OnDone();
-        grpc_call* call = call_.call();
-        auto call_requester = std::move(call_requester_);
-        this->~ServerCallbackWriterImpl();  // explicitly call destructor
-        ::grpc::g_core_codegen_interface->grpc_call_unref(call);
-        call_requester();
-      }
-    }
-
-    grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata>
-        meta_ops_;
-    grpc::internal::CallbackWithSuccessTag meta_tag_;
-    grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
-                              grpc::internal::CallOpSendMessage,
-                              grpc::internal::CallOpServerSendStatus>
-        finish_ops_;
-    grpc::internal::CallbackWithSuccessTag finish_tag_;
-    grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
-                              grpc::internal::CallOpSendMessage>
-        write_ops_;
-    grpc::internal::CallbackWithSuccessTag write_tag_;
-
-    ::grpc_impl::ServerContext* ctx_;
-    grpc::internal::Call call_;
-    const RequestType* req_;
-    std::function<void()> call_requester_;
-    experimental::ServerWriteReactor<RequestType, ResponseType>* reactor_;
-    std::atomic<intptr_t> callbacks_outstanding_{
-        3};  // reserve for OnStarted, Finish, and CompletionOp
-  };
-};
-
-template <class RequestType, class ResponseType>
-class CallbackBidiHandler : public grpc::internal::MethodHandler {
- public:
-  CallbackBidiHandler(
-      std::function<
-          experimental::ServerBidiReactor<RequestType, ResponseType>*()>
-          func)
-      : func_(std::move(func)) {}
-  void RunHandler(const HandlerParameter& param) final {
-    ::grpc::g_core_codegen_interface->grpc_call_ref(param.call->call());
-
-    experimental::ServerBidiReactor<RequestType, ResponseType>* reactor =
-        param.status.ok()
-            ? ::grpc::internal::CatchingReactorCreator<
-                  experimental::ServerBidiReactor<RequestType, ResponseType>>(
-                  func_)
-            : nullptr;
-
-    if (reactor == nullptr) {
-      // if deserialization or reactor creator failed, we need to fail the call
-      reactor = new UnimplementedBidiReactor<RequestType, ResponseType>;
+  friend class ServerCallbackUnary;
+  // May be overridden by internal implementation details. This is not a public
+  // customization point.
+  virtual void InternalBindCall(ServerCallbackUnary* call) {
+    grpc::internal::ReleasableMutexLock l(&call_mu_);
+    call_.store(call, std::memory_order_release);
+    if (send_initial_metadata_wanted_) {
+      call->SendInitialMetadata();
+      send_initial_metadata_wanted_ = false;
+    }
+    if (finish_wanted_) {
+      finish_wanted_ = false;
+      ::grpc::Status status_wanted = std::move(status_wanted_);
+      l.Unlock();
+      call->Finish(std::move(status_wanted));
+      return;
     }
-
-    auto* stream = new (::grpc::g_core_codegen_interface->grpc_call_arena_alloc(
-        param.call->call(), sizeof(ServerCallbackReaderWriterImpl)))
-        ServerCallbackReaderWriterImpl(param.server_context, param.call,
-                                       std::move(param.call_requester),
-                                       reactor);
-
-    stream->BindReactor(reactor);
-    reactor->OnStarted(param.server_context);
-    // The earliest that OnCancel can be called is after OnStarted is done.
-    reactor->MaybeCallOnCancel();
-    stream->MaybeDone();
   }
 
- private:
-  std::function<experimental::ServerBidiReactor<RequestType, ResponseType>*()>
-      func_;
-
-  class ServerCallbackReaderWriterImpl
-      : public experimental::ServerCallbackReaderWriter<RequestType,
-                                                        ResponseType> {
-   public:
-    void Finish(::grpc::Status s) override {
-      finish_tag_.Set(call_.call(), [this](bool) { MaybeDone(); },
-                      &finish_ops_);
-      finish_ops_.set_core_cq_tag(&finish_tag_);
-
-      if (!ctx_->sent_initial_metadata_) {
-        finish_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
-                                        ctx_->initial_metadata_flags());
-        if (ctx_->compression_level_set()) {
-          finish_ops_.set_compression_level(ctx_->compression_level());
-        }
-        ctx_->sent_initial_metadata_ = true;
-      }
-      finish_ops_.ServerSendStatus(&ctx_->trailing_metadata_, s);
-      call_.PerformOps(&finish_ops_);
-    }
-
-    void SendInitialMetadata() override {
-      GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
-      callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
-      meta_tag_.Set(call_.call(),
-                    [this](bool ok) {
-                      reactor_->OnSendInitialMetadataDone(ok);
-                      MaybeDone();
-                    },
-                    &meta_ops_);
-      meta_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
-                                    ctx_->initial_metadata_flags());
-      if (ctx_->compression_level_set()) {
-        meta_ops_.set_compression_level(ctx_->compression_level());
-      }
-      ctx_->sent_initial_metadata_ = true;
-      meta_ops_.set_core_cq_tag(&meta_tag_);
-      call_.PerformOps(&meta_ops_);
-    }
-
-    void Write(const ResponseType* resp,
-               ::grpc::WriteOptions options) override {
-      callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
-      if (options.is_last_message()) {
-        options.set_buffer_hint();
-      }
-      if (!ctx_->sent_initial_metadata_) {
-        write_ops_.SendInitialMetadata(&ctx_->initial_metadata_,
-                                       ctx_->initial_metadata_flags());
-        if (ctx_->compression_level_set()) {
-          write_ops_.set_compression_level(ctx_->compression_level());
-        }
-        ctx_->sent_initial_metadata_ = true;
-      }
-      // TODO(vjpai): don't assert
-      GPR_CODEGEN_ASSERT(write_ops_.SendMessagePtr(resp, options).ok());
-      call_.PerformOps(&write_ops_);
-    }
-
-    void WriteAndFinish(const ResponseType* resp, ::grpc::WriteOptions options,
-                        ::grpc::Status s) override {
-      // Don't send any message if the status is bad
-      if (s.ok()) {
-        // TODO(vjpai): don't assert
-        GPR_CODEGEN_ASSERT(finish_ops_.SendMessagePtr(resp, options).ok());
-      }
-      Finish(std::move(s));
-    }
+  grpc::internal::Mutex call_mu_;
+  std::atomic<ServerCallbackUnary*> call_;
+  bool send_initial_metadata_wanted_ /* GUARDED_BY(call_mu_) */ = false;
+  bool finish_wanted_ /* GUARDED_BY(call_mu_) */ = false;
+  ::grpc::Status status_wanted_ /* GUARDED_BY(call_mu_) */;
+};
 
-    void Read(RequestType* req) override {
-      callbacks_outstanding_.fetch_add(1, std::memory_order_relaxed);
-      read_ops_.RecvMessage(req);
-      call_.PerformOps(&read_ops_);
-    }
+}  // namespace experimental
 
-   private:
-    friend class CallbackBidiHandler<RequestType, ResponseType>;
-
-    ServerCallbackReaderWriterImpl(
-        ::grpc_impl::ServerContext* ctx, grpc::internal::Call* call,
-        std::function<void()> call_requester,
-        experimental::ServerBidiReactor<RequestType, ResponseType>* reactor)
-        : ctx_(ctx),
-          call_(*call),
-          call_requester_(std::move(call_requester)),
-          reactor_(reactor) {
-      ctx_->BeginCompletionOp(call, [this](bool) { MaybeDone(); }, reactor);
-      write_tag_.Set(call_.call(),
-                     [this](bool ok) {
-                       reactor_->OnWriteDone(ok);
-                       MaybeDone();
-                     },
-                     &write_ops_);
-      write_ops_.set_core_cq_tag(&write_tag_);
-      read_tag_.Set(call_.call(),
-                    [this](bool ok) {
-                      reactor_->OnReadDone(ok);
-                      MaybeDone();
-                    },
-                    &read_ops_);
-      read_ops_.set_core_cq_tag(&read_tag_);
-    }
-    ~ServerCallbackReaderWriterImpl() {}
-
-    void MaybeDone() {
-      if (GPR_UNLIKELY(callbacks_outstanding_.fetch_sub(
-                           1, std::memory_order_acq_rel) == 1)) {
-        reactor_->OnDone();
-        grpc_call* call = call_.call();
-        auto call_requester = std::move(call_requester_);
-        this->~ServerCallbackReaderWriterImpl();  // explicitly call destructor
-        ::grpc::g_core_codegen_interface->grpc_call_unref(call);
-        call_requester();
-      }
-    }
+namespace internal {
 
-    grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata>
-        meta_ops_;
-    grpc::internal::CallbackWithSuccessTag meta_tag_;
-    grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
-                              grpc::internal::CallOpSendMessage,
-                              grpc::internal::CallOpServerSendStatus>
-        finish_ops_;
-    grpc::internal::CallbackWithSuccessTag finish_tag_;
-    grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata,
-                              grpc::internal::CallOpSendMessage>
-        write_ops_;
-    grpc::internal::CallbackWithSuccessTag write_tag_;
-    grpc::internal::CallOpSet<grpc::internal::CallOpRecvMessage<RequestType>>
-        read_ops_;
-    grpc::internal::CallbackWithSuccessTag read_tag_;
-
-    ::grpc_impl::ServerContext* ctx_;
-    grpc::internal::Call call_;
-    std::function<void()> call_requester_;
-    experimental::ServerBidiReactor<RequestType, ResponseType>* reactor_;
-    std::atomic<intptr_t> callbacks_outstanding_{
-        3};  // reserve for OnStarted, Finish, and CompletionOp
-  };
+template <class Base>
+class FinishOnlyReactor : public Base {
+ public:
+  explicit FinishOnlyReactor(::grpc::Status s) { this->Finish(std::move(s)); }
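+  // Destroy in place rather than delete: these reactors are constructed by the
+  // library into storage it already owns (typically the call arena), so OnDone
+  // only needs to run the destructor.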
+  void OnDone() override { this->~FinishOnlyReactor(); }
 };
 
-}  // namespace internal
+using UnimplementedUnaryReactor =
+    FinishOnlyReactor<experimental::ServerUnaryReactor>;
+template <class Request>
+using UnimplementedReadReactor =
+    FinishOnlyReactor<experimental::ServerReadReactor<Request>>;
+template <class Response>
+using UnimplementedWriteReactor =
+    FinishOnlyReactor<experimental::ServerWriteReactor<Response>>;
+template <class Request, class Response>
+using UnimplementedBidiReactor =
+    FinishOnlyReactor<experimental::ServerBidiReactor<Request, Response>>;
 
+}  // namespace internal
 }  // namespace grpc_impl
 
 #endif  // GRPCPP_IMPL_CODEGEN_SERVER_CALLBACK_IMPL_H
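
With this change each reactor buffers operations that are started before the library binds the underlying ServerCallbackReader/Writer/ReaderWriter: the request is recorded under the reactor's mutex and replayed from InternalBind*(), so StartRead/StartWrite/Finish may be issued even from the reactor's constructor. A minimal sketch of a client-streaming reactor built on the ServerReadReactor<Request> shown above; the EchoRequest/EchoResponse message types, their fields, and the surrounding service wiring are hypothetical and not part of this diff:

    // Hypothetical client-streaming reactor: counts incoming messages, then
    // fills in the response and finishes. Assumes <grpcpp/grpcpp.h> plus
    // generated headers providing EchoRequest/EchoResponse.
    class CountingReader
        : public ::grpc_impl::experimental::ServerReadReactor<EchoRequest> {
     public:
      explicit CountingReader(EchoResponse* response) : response_(response) {
        // Legal even though no ServerCallbackReader is bound yet: the read is
        // queued under reader_mu_ and issued from InternalBindReader().
        StartRead(&request_);
      }
      void OnReadDone(bool ok) override {
        if (!ok) {  // client finished writing (or the read failed)
          response_->set_count(count_);  // hypothetical field
          Finish(::grpc::Status::OK);
          return;
        }
        ++count_;
        StartRead(&request_);  // keep reading into the same request object
      }
      void OnDone() override { delete this; }  // assumes this reactor was new'd

     private:
      EchoRequest request_;
      EchoResponse* const response_;
      int count_ = 0;
    };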

+ 8 - 0
include/grpcpp/impl/codegen/server_context.h

@@ -22,7 +22,15 @@
 #include <grpcpp/impl/codegen/server_context_impl.h>
 
 namespace grpc {
+
 typedef ::grpc_impl::ServerContext ServerContext;
+
+namespace experimental {
+
+typedef ::grpc_impl::experimental::ServerContextBase ServerContextBase;
+typedef ::grpc_impl::experimental::CallbackServerContext CallbackServerContext;
+
+}  // namespace experimental
 }  // namespace grpc
 
 #endif  // GRPCPP_IMPL_CODEGEN_SERVER_CONTEXT_H

+ 241 - 32
include/grpcpp/impl/codegen/server_context_impl.h

@@ -18,6 +18,8 @@
 
 #ifndef GRPCPP_IMPL_CODEGEN_SERVER_CONTEXT_IMPL_H
 #define GRPCPP_IMPL_CODEGEN_SERVER_CONTEXT_IMPL_H
+
+#include <atomic>
 #include <map>
 #include <memory>
 #include <vector>
@@ -30,9 +32,12 @@
 #include <grpcpp/impl/codegen/completion_queue_tag.h>
 #include <grpcpp/impl/codegen/config.h>
 #include <grpcpp/impl/codegen/create_auth_context.h>
+#include <grpcpp/impl/codegen/message_allocator.h>
 #include <grpcpp/impl/codegen/metadata_map.h>
 #include <grpcpp/impl/codegen/security/auth_context.h>
+#include <grpcpp/impl/codegen/server_callback_impl.h>
 #include <grpcpp/impl/codegen/server_interceptor.h>
+#include <grpcpp/impl/codegen/status.h>
 #include <grpcpp/impl/codegen/string_ref.h>
 #include <grpcpp/impl/codegen/time.h>
 
@@ -72,6 +77,8 @@ template <class ServiceType, class RequestType, class ResponseType>
 class ClientStreamingHandler;
 template <class ServiceType, class RequestType, class ResponseType>
 class RpcMethodHandler;
+template <class Base>
+class FinishOnlyReactor;
 template <class W, class R>
 class ServerReaderWriterBody;
 template <class ServiceType, class RequestType, class ResponseType>
@@ -88,6 +95,10 @@ namespace grpc {
 class GenericServerContext;
 class ServerInterface;
 
+namespace experimental {
+class GenericCallbackServerContext;
+}  // namespace experimental
+
 namespace internal {
 class Call;
 }  // namespace internal
@@ -95,29 +106,18 @@ class Call;
 namespace testing {
 class InteropServerContextInspector;
 class ServerContextTestSpouse;
+class DefaultReactorTestPeer;
 }  // namespace testing
+
 }  // namespace grpc
 
 namespace grpc_impl {
-/// A ServerContext allows the person implementing a service handler to:
-///
-/// - Add custom initial and trailing metadata key-value pairs that will
-///   propagated to the client side.
-/// - Control call settings such as compression and authentication.
-/// - Access metadata coming from the client.
-/// - Get performance metrics (ie, census).
-///
-/// Context settings are only relevant to the call handler they are supplied to,
-/// that is to say, they aren't sticky across multiple calls. Some of these
-/// settings, such as the compression options, can be made persistent at server
-/// construction time by specifying the appropriate \a ChannelArguments
-/// to a \a grpc::ServerBuilder, via \a ServerBuilder::AddChannelArgument.
-///
-/// \warning ServerContext instances should \em not be reused across rpcs.
-class ServerContext {
+namespace experimental {
+
+/// Base class of ServerContext. Experimental until callback API is final.
+class ServerContextBase {
  public:
-  ServerContext();  // for async calls
-  ~ServerContext();
+  virtual ~ServerContextBase();
 
   /// Return the deadline for the server call.
   std::chrono::system_clock::time_point deadline() const {
@@ -171,7 +171,7 @@ class ServerContext {
 
   /// IsCancelled is always safe to call when using sync or callback API.
   /// When using async API, it is only safe to call IsCancelled after
-  /// the AsyncNotifyWhenDone tag has been delivered.
+  /// the AsyncNotifyWhenDone tag has been delivered. Thread-safe.
   bool IsCancelled() const;
 
   /// Cancel the Call from the server. This is a best-effort API and
@@ -258,6 +258,11 @@ class ServerContext {
   /// Get the census context associated with this server call.
   const struct census_context* census_context() const;
 
+  /// Should be used for framework-level extensions only.
+  /// Applications never need to call this method.
+  grpc_call* c_call() { return call_; }
+
+ protected:
   /// Async only. Has to be called before the rpc starts.
   /// Returns the tag in completion queue when the rpc finishes.
   /// IsCancelled() can then be called to check whether the rpc was cancelled.
@@ -268,13 +273,44 @@ class ServerContext {
     async_notify_when_done_tag_ = tag;
   }
 
-  /// Should be used for framework-level extensions only.
-  /// Applications never need to call this method.
-  grpc_call* c_call() { return call_; }
+  /// NOTE: This is an API for advanced users who need custom allocators.
+  /// Get and maybe mutate the allocator state associated with the current RPC.
+  /// Currently only applicable for callback unary RPC methods.
+  /// WARNING: This is experimental API and could be changed or removed.
+  ::grpc::experimental::RpcAllocatorState* GetRpcAllocatorState() {
+    return message_allocator_state_;
+  }
+
+  /// Get a library-owned default unary reactor for use in minimal reaction
+  /// cases. This supports typical unary RPC usage of providing a response and
+  /// status. It supports immediate Finish (finish from within the method
+  /// handler) or delayed Finish (finish called after the method handler
+  /// invocation). It does not support reacting to cancellation or completion,
+  /// or early sending of initial metadata. Since this is a library-owned
+  /// reactor, it should not be delete'd or freed in any way. This is more
+  /// efficient than creating a user-owned reactor, both because it avoids an
+  /// allocation and because its minimal reactions are optimized using a core
+  /// surface flag that allows them to run inline without any thread-hop.
+  ///
+  /// This method should not be called more than once or called after return
+  /// from the method handler.
+  ///
+  /// WARNING: This is experimental API and could be changed or removed.
+  ::grpc_impl::experimental::ServerUnaryReactor* DefaultReactor() {
+    auto reactor = &default_reactor_;
+    default_reactor_used_.store(true, std::memory_order_relaxed);
+    return reactor;
+  }
+
+  /// Constructors for use by derived classes
+  ServerContextBase();
+  ServerContextBase(gpr_timespec deadline, grpc_metadata_array* arr);
 
  private:
   friend class ::grpc::testing::InteropServerContextInspector;
   friend class ::grpc::testing::ServerContextTestSpouse;
+  friend class ::grpc::testing::DefaultReactorTestPeer;
   friend class ::grpc::ServerInterface;
   friend class ::grpc_impl::Server;
   template <class W, class R>
@@ -309,23 +345,24 @@ class ServerContext {
   friend class ::grpc_impl::internal::CallbackBidiHandler;
   template <::grpc::StatusCode code>
   friend class ::grpc_impl::internal::ErrorMethodHandler;
+  template <class Base>
+  friend class ::grpc_impl::internal::FinishOnlyReactor;
   friend class ::grpc_impl::ClientContext;
   friend class ::grpc::GenericServerContext;
+  friend class ::grpc::experimental::GenericCallbackServerContext;
 
   /// Prevent copying.
-  ServerContext(const ServerContext&);
-  ServerContext& operator=(const ServerContext&);
+  ServerContextBase(const ServerContextBase&);
+  ServerContextBase& operator=(const ServerContextBase&);
 
   class CompletionOp;
 
-  void BeginCompletionOp(::grpc::internal::Call* call,
-                         std::function<void(bool)> callback,
-                         ::grpc_impl::internal::ServerReactor* reactor);
+  void BeginCompletionOp(
+      ::grpc::internal::Call* call, std::function<void(bool)> callback,
+      ::grpc_impl::internal::ServerCallbackCall* callback_controller);
   /// Return the tag queued by BeginCompletionOp()
   ::grpc::internal::CompletionQueueTag* GetCompletionOpTag();
 
-  ServerContext(gpr_timespec deadline, grpc_metadata_array* arr);
-
   void set_call(grpc_call* call) { call_ = call; }
 
   void BindDeadlineAndMetadata(gpr_timespec deadline, grpc_metadata_array* arr);
@@ -336,9 +373,6 @@ class ServerContext {
 
   uint32_t initial_metadata_flags() const { return 0; }
 
-  void SetCancelCallback(std::function<void()> callback);
-  void ClearCancelCallback();
-
   ::grpc::experimental::ServerRpcInfo* set_server_rpc_info(
       const char* method, ::grpc::internal::RpcMethod::RpcType type,
       const std::vector<std::unique_ptr<
@@ -350,6 +384,11 @@ class ServerContext {
     return rpc_info_;
   }
 
+  void set_message_allocator_state(
+      ::grpc::experimental::RpcAllocatorState* allocator_state) {
+    message_allocator_state_ = allocator_state;
+  }
+
   CompletionOp* completion_op_;
   bool has_notify_when_done_tag_;
   void* async_notify_when_done_tag_;
@@ -374,6 +413,176 @@ class ServerContext {
   bool has_pending_ops_;
 
   ::grpc::experimental::ServerRpcInfo* rpc_info_;
+  ::grpc::experimental::RpcAllocatorState* message_allocator_state_ = nullptr;
+
+  class Reactor : public experimental::ServerUnaryReactor {
+   public:
+    void OnCancel() override {}
+    void OnDone() override {}
+    // Override InternalInlineable for this class since its reactions are
+    // trivial and thus do not need to be run from the executor (triggering a
+    // thread hop). This should only be used by internal reactors (thus the
+    // name) and not by user application code.
+    bool InternalInlineable() override { return true; }
+  };
+
+  void SetupTestDefaultReactor(std::function<void(::grpc::Status)> func) {
+    test_unary_.reset(new TestServerCallbackUnary(this, std::move(func)));
+  }
+  bool test_status_set() const {
+    return (test_unary_ != nullptr) && test_unary_->status_set();
+  }
+  ::grpc::Status test_status() const { return test_unary_->status(); }
+
+  class TestServerCallbackUnary
+      : public ::grpc_impl::experimental::ServerCallbackUnary {
+   public:
+    TestServerCallbackUnary(ServerContextBase* ctx,
+                            std::function<void(::grpc::Status)> func)
+        : reactor_(&ctx->default_reactor_), func_(std::move(func)) {
+      this->BindReactor(reactor_);
+    }
+    void Finish(::grpc::Status s) override {
+      status_ = s;
+      func_(std::move(s));
+      status_set_.store(true, std::memory_order_release);
+    }
+    void SendInitialMetadata() override {}
+
+    bool status_set() const {
+      return status_set_.load(std::memory_order_acquire);
+    }
+    ::grpc::Status status() const { return status_; }
+
+   private:
+    void MaybeDone() override {}
+    ::grpc_impl::internal::ServerReactor* reactor() override {
+      return reactor_;
+    }
+
+    ::grpc_impl::experimental::ServerUnaryReactor* const reactor_;
+    std::atomic_bool status_set_{false};
+    ::grpc::Status status_;
+    const std::function<void(::grpc::Status s)> func_;
+  };
+
+  Reactor default_reactor_;
+  std::atomic_bool default_reactor_used_{false};
+  std::unique_ptr<TestServerCallbackUnary> test_unary_;
+};
+
+}  // namespace experimental
+
+/// A ServerContext or CallbackServerContext allows the code implementing a
+/// service handler to:
+///
+/// - Add custom initial and trailing metadata key-value pairs that will be
+///   propagated to the client side.
+/// - Control call settings such as compression and authentication.
+/// - Access metadata coming from the client.
+/// - Get performance metrics (i.e., census).
+///
+/// Context settings are only relevant to the call handler they are supplied to,
+/// that is to say, they aren't sticky across multiple calls. Some of these
+/// settings, such as the compression options, can be made persistent at server
+/// construction time by specifying the appropriate \a ChannelArguments
+/// to a \a grpc::ServerBuilder, via \a ServerBuilder::AddChannelArgument.
+///
+/// \warning ServerContext instances should \em not be reused across rpcs.
+class ServerContext : public experimental::ServerContextBase {
+ public:
+  ServerContext() {}  // for async calls
+
+  using experimental::ServerContextBase::AddInitialMetadata;
+  using experimental::ServerContextBase::AddTrailingMetadata;
+  using experimental::ServerContextBase::IsCancelled;
+  using experimental::ServerContextBase::SetLoadReportingCosts;
+  using experimental::ServerContextBase::TryCancel;
+  using experimental::ServerContextBase::auth_context;
+  using experimental::ServerContextBase::c_call;
+  using experimental::ServerContextBase::census_context;
+  using experimental::ServerContextBase::client_metadata;
+  using experimental::ServerContextBase::compression_algorithm;
+  using experimental::ServerContextBase::compression_level;
+  using experimental::ServerContextBase::compression_level_set;
+  using experimental::ServerContextBase::deadline;
+  using experimental::ServerContextBase::peer;
+  using experimental::ServerContextBase::raw_deadline;
+  using experimental::ServerContextBase::set_compression_algorithm;
+  using experimental::ServerContextBase::set_compression_level;
+
+  // Sync/CQ-based Async ServerContext only
+  using experimental::ServerContextBase::AsyncNotifyWhenDone;
+
+ private:
+  // Constructor for internal use by server only
+  friend class ::grpc_impl::Server;
+  ServerContext(gpr_timespec deadline, grpc_metadata_array* arr)
+      : experimental::ServerContextBase(deadline, arr) {}
+
+  // CallbackServerContext only
+  using experimental::ServerContextBase::DefaultReactor;
+  using experimental::ServerContextBase::GetRpcAllocatorState;
+
+  /// Prevent copying.
+  ServerContext(const ServerContext&) = delete;
+  ServerContext& operator=(const ServerContext&) = delete;
+};
+
+namespace experimental {
+
+class CallbackServerContext : public ServerContextBase {
+ public:
+  /// Public constructors are for direct use only by mocking tests. In practice,
+  /// these objects will be owned by the library.
+  CallbackServerContext() {}
+
+  using ServerContextBase::AddInitialMetadata;
+  using ServerContextBase::AddTrailingMetadata;
+  using ServerContextBase::IsCancelled;
+  using ServerContextBase::SetLoadReportingCosts;
+  using ServerContextBase::TryCancel;
+  using ServerContextBase::auth_context;
+  using ServerContextBase::c_call;
+  using ServerContextBase::census_context;
+  using ServerContextBase::client_metadata;
+  using ServerContextBase::compression_algorithm;
+  using ServerContextBase::compression_level;
+  using ServerContextBase::compression_level_set;
+  using ServerContextBase::deadline;
+  using ServerContextBase::peer;
+  using ServerContextBase::raw_deadline;
+  using ServerContextBase::set_compression_algorithm;
+  using ServerContextBase::set_compression_level;
+
+  // CallbackServerContext only
+  using ServerContextBase::DefaultReactor;
+  using ServerContextBase::GetRpcAllocatorState;
+
+ private:
+  // Sync/CQ-based Async ServerContext only
+  using ServerContextBase::AsyncNotifyWhenDone;
+
+  /// Prevent copying.
+  CallbackServerContext(const CallbackServerContext&) = delete;
+  CallbackServerContext& operator=(const CallbackServerContext&) = delete;
 };
+
+}  // namespace experimental
 }  // namespace grpc_impl
+
+static_assert(std::is_base_of<::grpc_impl::experimental::ServerContextBase,
+                              ::grpc_impl::ServerContext>::value,
+              "improper base class");
+static_assert(
+    std::is_base_of<::grpc_impl::experimental::ServerContextBase,
+                    ::grpc_impl::experimental::CallbackServerContext>::value,
+    "improper base class");
+static_assert(sizeof(::grpc_impl::experimental::ServerContextBase) ==
+                  sizeof(::grpc_impl::ServerContext),
+              "wrong size");
+static_assert(sizeof(::grpc_impl::experimental::ServerContextBase) ==
+                  sizeof(::grpc_impl::experimental::CallbackServerContext),
+              "wrong size");
+
 #endif  // GRPCPP_IMPL_CODEGEN_SERVER_CONTEXT_IMPL_H
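
As a usage sketch of the DefaultReactor() path described above, a trivial unary callback handler can hand back the context-owned reactor instead of allocating its own. The Echo method name, the EchoRequest/EchoResponse types, and the generated-method signature are assumptions for illustration, not part of this diff:

    // Hypothetical generated-style unary callback method using the
    // library-owned default reactor. Names and signature are assumptions.
    ::grpc_impl::experimental::ServerUnaryReactor* Echo(
        ::grpc_impl::experimental::CallbackServerContext* ctx,
        const EchoRequest* request, EchoResponse* response) {
      response->set_message(request->message());  // hypothetical fields
      auto* reactor = ctx->DefaultReactor();       // no per-call allocation
      reactor->Finish(::grpc::Status::OK);
      return reactor;
    }

Because the default reactor lives inside ServerContextBase and its reactions are InternalInlineable(), this path skips both the reactor allocation and the executor thread-hop for handlers that only need Finish.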

+ 8 - 6
include/grpcpp/impl/codegen/server_interceptor.h

@@ -27,8 +27,10 @@
 #include <grpcpp/impl/codegen/string_ref.h>
 
 namespace grpc_impl {
-class ServerContext;
+namespace experimental {
+class ServerContextBase;
 }
+}  // namespace grpc_impl
 
 namespace grpc {
 
@@ -80,7 +82,7 @@ class ServerRpcInfo {
 
   /// Return a pointer to the underlying ServerContext structure associated
   /// with the RPC to support features that apply to it
-  grpc_impl::ServerContext* server_context() { return ctx_; }
+  grpc_impl::experimental::ServerContextBase* server_context() { return ctx_; }
 
  private:
   static_assert(Type::UNARY ==
@@ -96,8 +98,8 @@ class ServerRpcInfo {
                     static_cast<Type>(internal::RpcMethod::BIDI_STREAMING),
                 "violated expectation about Type enum");
 
-  ServerRpcInfo(grpc_impl::ServerContext* ctx, const char* method,
-                internal::RpcMethod::RpcType type)
+  ServerRpcInfo(grpc_impl::experimental::ServerContextBase* ctx,
+                const char* method, internal::RpcMethod::RpcType type)
       : ctx_(ctx), method_(method), type_(static_cast<Type>(type)) {}
 
   // Runs interceptor at pos \a pos.
@@ -127,14 +129,14 @@ class ServerRpcInfo {
     }
   }
 
-  grpc_impl::ServerContext* ctx_ = nullptr;
+  grpc_impl::experimental::ServerContextBase* ctx_ = nullptr;
   const char* method_ = nullptr;
   const Type type_;
   std::atomic<intptr_t> ref_{1};
   std::vector<std::unique_ptr<experimental::Interceptor>> interceptors_;
 
   friend class internal::InterceptorBatchMethodsImpl;
-  friend class grpc_impl::ServerContext;
+  friend class grpc_impl::experimental::ServerContextBase;
 };
 
 }  // namespace experimental

+ 0 - 2
include/grpcpp/server_impl.h

@@ -19,10 +19,8 @@
 #ifndef GRPCPP_SERVER_IMPL_H
 #define GRPCPP_SERVER_IMPL_H
 
-#include <condition_variable>
 #include <list>
 #include <memory>
-#include <mutex>
 #include <vector>
 
 #include <grpc/compression.h>

+ 55 - 0
include/grpcpp/test/default_reactor_test_peer.h

@@ -0,0 +1,55 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPCPP_TEST_DEFAULT_REACTOR_TEST_PEER_H
+#define GRPCPP_TEST_DEFAULT_REACTOR_TEST_PEER_H
+
+#include <grpcpp/server_context.h>
+#include <grpcpp/support/server_callback.h>
+
+namespace grpc {
+namespace testing {
+
+/// A test-only class to monitor the behavior of the ServerContext's
+/// DefaultReactor. It is intended to allow unit-testing of a callback API
+/// service via direct invocation of the service methods rather than through
+/// RPCs. It is only applicable for unary RPC methods that use the
+/// DefaultReactor rather than any user-defined reactor.
+class DefaultReactorTestPeer {
+ public:
+  explicit DefaultReactorTestPeer(experimental::CallbackServerContext* ctx)
+      : DefaultReactorTestPeer(ctx, [](::grpc::Status) {}) {}
+  DefaultReactorTestPeer(experimental::CallbackServerContext* ctx,
+                         std::function<void(::grpc::Status)> finish_func)
+      : ctx_(ctx) {
+    ctx->SetupTestDefaultReactor(std::move(finish_func));
+  }
+  ::grpc::experimental::ServerUnaryReactor* reactor() const {
+    return &ctx_->default_reactor_;
+  }
+  bool test_status_set() const { return ctx_->test_status_set(); }
+  Status test_status() const { return ctx_->test_status(); }
+
+ private:
+  experimental::CallbackServerContext* const ctx_;  // not owned
+};
+
+}  // namespace testing
+}  // namespace grpc
+
+#endif  // GRPCPP_TEST_DEFAULT_REACTOR_TEST_PEER_H
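
A rough test sketch of this peer, reusing the hypothetical Echo service from the earlier example; the EXPECT_* macros are assumed to come from googletest.

    TEST(EchoServiceTest, DirectUnaryCall) {
      EchoServiceImpl service;
      ::grpc::experimental::CallbackServerContext ctx;
      ::grpc::testing::DefaultReactorTestPeer peer(&ctx);
      EchoRequest request;
      EchoResponse response;
      // Invoke the handler directly; no server, channel, or completion queue.
      auto* reactor = service.Echo(&ctx, &request, &response);
      EXPECT_EQ(reactor, peer.reactor());    // the default reactor was used
      EXPECT_TRUE(peer.test_status_set());   // Finish() was called
      EXPECT_TRUE(peer.test_status().ok());
    }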

+ 2 - 2
package.xml

@@ -96,7 +96,6 @@
     <file baseinstalldir="/" name="src/core/ext/filters/client_channel/client_channel_factory.cc" role="src" />
     <file baseinstalldir="/" name="src/core/ext/filters/client_channel/client_channel_factory.h" role="src" />
     <file baseinstalldir="/" name="src/core/ext/filters/client_channel/client_channel_plugin.cc" role="src" />
-    <file baseinstalldir="/" name="src/core/ext/filters/client_channel/connector.cc" role="src" />
     <file baseinstalldir="/" name="src/core/ext/filters/client_channel/connector.h" role="src" />
     <file baseinstalldir="/" name="src/core/ext/filters/client_channel/global_subchannel_pool.cc" role="src" />
     <file baseinstalldir="/" name="src/core/ext/filters/client_channel/global_subchannel_pool.h" role="src" />
@@ -131,7 +130,6 @@
     <file baseinstalldir="/" name="src/core/ext/filters/client_channel/local_subchannel_pool.h" role="src" />
     <file baseinstalldir="/" name="src/core/ext/filters/client_channel/parse_address.cc" role="src" />
     <file baseinstalldir="/" name="src/core/ext/filters/client_channel/parse_address.h" role="src" />
-    <file baseinstalldir="/" name="src/core/ext/filters/client_channel/proxy_mapper.cc" role="src" />
     <file baseinstalldir="/" name="src/core/ext/filters/client_channel/proxy_mapper.h" role="src" />
     <file baseinstalldir="/" name="src/core/ext/filters/client_channel/proxy_mapper_registry.cc" role="src" />
     <file baseinstalldir="/" name="src/core/ext/filters/client_channel/proxy_mapper_registry.h" role="src" />
@@ -541,6 +539,8 @@
     <file baseinstalldir="/" name="src/core/lib/iomgr/load_file.h" role="src" />
     <file baseinstalldir="/" name="src/core/lib/iomgr/lockfree_event.cc" role="src" />
     <file baseinstalldir="/" name="src/core/lib/iomgr/lockfree_event.h" role="src" />
+    <file baseinstalldir="/" name="src/core/lib/iomgr/logical_thread.cc" role="src" />
+    <file baseinstalldir="/" name="src/core/lib/iomgr/logical_thread.h" role="src" />
     <file baseinstalldir="/" name="src/core/lib/iomgr/nameser.h" role="src" />
     <file baseinstalldir="/" name="src/core/lib/iomgr/poller/eventmanager_libuv.cc" role="src" />
     <file baseinstalldir="/" name="src/core/lib/iomgr/poller/eventmanager_libuv.h" role="src" />

+ 52 - 42
src/compiler/cpp_generator.cc

@@ -148,6 +148,7 @@ grpc::string GetHeaderIncludes(grpc_generator::File* file,
         "grpcpp/impl/codegen/proto_utils.h",
         "grpcpp/impl/codegen/rpc_method.h",
         "grpcpp/impl/codegen/server_callback.h",
+        "grpcpp/impl/codegen/server_callback_handlers.h",
         "grpcpp/impl/codegen/server_context.h",
         "grpcpp/impl/codegen/service_type.h",
         "grpcpp/impl/codegen/status.h",
@@ -922,14 +923,12 @@ void PrintHeaderServerCallbackMethodsHelper(
         "  abort();\n"
         "  return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"
         "}\n");
-    printer->Print(
-        *vars,
-        "virtual void $Method$("
-        "::grpc::ServerContext* /*context*/, const $RealRequest$* /*request*/, "
-        "$RealResponse$* /*response*/, "
-        "::grpc::experimental::ServerCallbackRpcController* "
-        "controller) { controller->Finish(::grpc::Status("
-        "::grpc::StatusCode::UNIMPLEMENTED, \"\")); }\n");
+    printer->Print(*vars,
+                   "virtual ::grpc::experimental::ServerUnaryReactor* "
+                   "$Method$(::grpc::experimental::CallbackServerContext* "
+                   "/*context*/, const $RealRequest$* "
+                   "/*request*/, $RealResponse$* /*response*/) { "
+                   "return nullptr; }\n");
   } else if (ClientOnlyStreaming(method)) {
     printer->Print(
         *vars,
@@ -941,12 +940,12 @@ void PrintHeaderServerCallbackMethodsHelper(
         "  abort();\n"
         "  return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"
         "}\n");
-    printer->Print(
-        *vars,
-        "virtual ::grpc::experimental::ServerReadReactor< "
-        "$RealRequest$, $RealResponse$>* $Method$() {\n"
-        "  return new ::grpc_impl::internal::UnimplementedReadReactor<\n"
-        "    $RealRequest$, $RealResponse$>;}\n");
+    printer->Print(*vars,
+                   "virtual ::grpc::experimental::ServerReadReactor< "
+                   "$RealRequest$>* $Method$("
+                   "::grpc::experimental::CallbackServerContext* /*context*/, "
+                   "$RealResponse$* /*response*/) { "
+                   "return nullptr; }\n");
   } else if (ServerOnlyStreaming(method)) {
     printer->Print(
         *vars,
@@ -961,9 +960,10 @@ void PrintHeaderServerCallbackMethodsHelper(
     printer->Print(
         *vars,
         "virtual ::grpc::experimental::ServerWriteReactor< "
-        "$RealRequest$, $RealResponse$>* $Method$() {\n"
-        "  return new ::grpc_impl::internal::UnimplementedWriteReactor<\n"
-        "    $RealRequest$, $RealResponse$>;}\n");
+        "$RealResponse$>* "
+        "$Method$(::grpc::experimental::CallbackServerContext* /*context*/, "
+        "const $RealRequest$* /*request*/) { "
+        "return nullptr; }\n");
   } else if (method->BidiStreaming()) {
     printer->Print(
         *vars,
@@ -978,9 +978,9 @@ void PrintHeaderServerCallbackMethodsHelper(
     printer->Print(
         *vars,
         "virtual ::grpc::experimental::ServerBidiReactor< "
-        "$RealRequest$, $RealResponse$>* $Method$() {\n"
-        "  return new ::grpc_impl::internal::UnimplementedBidiReactor<\n"
-        "    $RealRequest$, $RealResponse$>;}\n");
+        "$RealRequest$, $RealResponse$>* "
+        "$Method$(::grpc::experimental::CallbackServerContext* /*context*/) { "
+        "return nullptr; }\n");
   }
 }
 
@@ -1011,14 +1011,11 @@ void PrintHeaderServerMethodCallback(
         "  ::grpc::Service::experimental().MarkMethodCallback($Idx$,\n"
         "    new ::grpc_impl::internal::CallbackUnaryHandler< "
         "$RealRequest$, $RealResponse$>(\n"
-        "      [this](::grpc::ServerContext* context,\n"
-        "             const $RealRequest$* request,\n"
-        "             $RealResponse$* response,\n"
-        "             ::grpc::experimental::ServerCallbackRpcController* "
-        "controller) {\n"
-        "               return this->$"
-        "Method$(context, request, response, controller);\n"
-        "             }));\n}\n");
+        "      [this](::grpc::experimental::CallbackServerContext* context, "
+        "const $RealRequest$* "
+        "request, "
+        "$RealResponse$* response) { "
+        "return this->$Method$(context, request, response); }));}\n");
     printer->Print(*vars,
                    "void SetMessageAllocatorFor_$Method$(\n"
                    "    ::grpc::experimental::MessageAllocator< "
@@ -1033,21 +1030,28 @@ void PrintHeaderServerMethodCallback(
         "  ::grpc::Service::experimental().MarkMethodCallback($Idx$,\n"
         "    new ::grpc_impl::internal::CallbackClientStreamingHandler< "
         "$RealRequest$, $RealResponse$>(\n"
-        "      [this] { return this->$Method$(); }));\n");
+        "      [this](::grpc::experimental::CallbackServerContext* context, "
+        "$RealResponse$* "
+        "response) { "
+        "return this->$Method$(context, response); }));\n");
   } else if (ServerOnlyStreaming(method)) {
     printer->Print(
         *vars,
         "  ::grpc::Service::experimental().MarkMethodCallback($Idx$,\n"
         "    new ::grpc_impl::internal::CallbackServerStreamingHandler< "
         "$RealRequest$, $RealResponse$>(\n"
-        "      [this] { return this->$Method$(); }));\n");
+        "      [this](::grpc::experimental::CallbackServerContext* context, "
+        "const $RealRequest$* "
+        "request) { "
+        "return this->$Method$(context, request); }));\n");
   } else if (method->BidiStreaming()) {
     printer->Print(
         *vars,
         "  ::grpc::Service::experimental().MarkMethodCallback($Idx$,\n"
         "    new ::grpc_impl::internal::CallbackBidiHandler< "
         "$RealRequest$, $RealResponse$>(\n"
-        "      [this] { return this->$Method$(); }));\n");
+        "      [this](::grpc::experimental::CallbackServerContext* context) { "
+        "return this->$Method$(context); }));\n");
   }
   printer->Print(*vars, "}\n");
   printer->Print(*vars,
@@ -1086,35 +1090,39 @@ void PrintHeaderServerMethodRawCallback(
         "  ::grpc::Service::experimental().MarkMethodRawCallback($Idx$,\n"
         "    new ::grpc_impl::internal::CallbackUnaryHandler< "
         "$RealRequest$, $RealResponse$>(\n"
-        "      [this](::grpc::ServerContext* context,\n"
-        "             const $RealRequest$* request,\n"
-        "             $RealResponse$* response,\n"
-        "             ::grpc::experimental::ServerCallbackRpcController* "
-        "controller) {\n"
-        "               this->$"
-        "Method$(context, request, response, controller);\n"
-        "             }));\n");
+        "      [this](::grpc::experimental::CallbackServerContext* context, "
+        "const $RealRequest$* "
+        "request, "
+        "$RealResponse$* response) { return "
+        "this->$Method$(context, request, response); }));\n");
   } else if (ClientOnlyStreaming(method)) {
     printer->Print(
         *vars,
         "  ::grpc::Service::experimental().MarkMethodRawCallback($Idx$,\n"
         "    new ::grpc_impl::internal::CallbackClientStreamingHandler< "
         "$RealRequest$, $RealResponse$>(\n"
-        "      [this] { return this->$Method$(); }));\n");
+        "      [this](::grpc::experimental::CallbackServerContext* context, "
+        "$RealResponse$* response) "
+        "{ return this->$Method$(context, response); }));\n");
   } else if (ServerOnlyStreaming(method)) {
     printer->Print(
         *vars,
         "  ::grpc::Service::experimental().MarkMethodRawCallback($Idx$,\n"
         "    new ::grpc_impl::internal::CallbackServerStreamingHandler< "
         "$RealRequest$, $RealResponse$>(\n"
-        "      [this] { return this->$Method$(); }));\n");
+        "      [this](::grpc::experimental::CallbackServerContext* context, "
+        "const"
+        "$RealRequest$* request) { return "
+        "this->$Method$(context, request); }));\n");
   } else if (method->BidiStreaming()) {
     printer->Print(
         *vars,
         "  ::grpc::Service::experimental().MarkMethodRawCallback($Idx$,\n"
         "    new ::grpc_impl::internal::CallbackBidiHandler< "
         "$RealRequest$, $RealResponse$>(\n"
-        "      [this] { return this->$Method$(); }));\n");
+        "      [this](::grpc::experimental::CallbackServerContext* context) { "
+        "return "
+        "this->$Method$(context); }));\n");
   }
   printer->Print(*vars, "}\n");
   printer->Print(*vars,
@@ -1657,6 +1665,8 @@ grpc::string GetSourceIncludes(grpc_generator::File* file,
         "grpcpp/impl/codegen/method_handler.h",
         "grpcpp/impl/codegen/rpc_service_method.h",
         "grpcpp/impl/codegen/server_callback.h",
+        "grpcpp/impl/codegen/server_callback_handlers.h",
+        "grpcpp/impl/codegen/server_context.h",
         "grpcpp/impl/codegen/service_type.h",
         "grpcpp/impl/codegen/sync_stream.h"};
     std::vector<grpc::string> headers(headers_strs, array_end(headers_strs));
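
The generator changes above move the streaming callback methods to reactor-returning signatures that receive the CallbackServerContext directly. A sketch of a server-streaming override under those signatures; the service, message types, and field names are hypothetical.

    ::grpc::experimental::ServerWriteReactor<NumberReply>* Range(
        ::grpc::experimental::CallbackServerContext* /*ctx*/,
        const RangeRequest* request) override {
      // Self-deleting reactor that streams request->limit() replies.
      class Writer
          : public ::grpc::experimental::ServerWriteReactor<NumberReply> {
       public:
        explicit Writer(int limit) : limit_(limit) { NextWrite(); }
        void OnWriteDone(bool ok) override {
          if (!ok) {
            Finish(::grpc::Status(::grpc::StatusCode::UNKNOWN, "write failed"));
            return;
          }
          NextWrite();
        }
        void OnDone() override { delete this; }

       private:
        void NextWrite() {
          if (next_ < limit_) {
            reply_.set_value(next_++);
            StartWrite(&reply_);
          } else {
            Finish(::grpc::Status::OK);
          }
        }
        NumberReply reply_;
        int next_ = 0;
        int limit_;
      };
      return new Writer(request->limit());
    }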

+ 0 - 2
src/core/ext/filters/client_channel/OWNERS

@@ -1,4 +1,2 @@
 set noparent
 @markdroth
-@apolcyn
-@AspirinSJL

+ 2 - 2
src/core/ext/filters/client_channel/client_channel.cc

@@ -1465,8 +1465,8 @@ ChannelData::ChannelData(grpc_channel_element_args* args, grpc_error** error)
   grpc_uri_destroy(uri);
   char* proxy_name = nullptr;
   grpc_channel_args* new_args = nullptr;
-  grpc_proxy_mappers_map_name(server_uri, args->channel_args, &proxy_name,
-                              &new_args);
+  ProxyMapperRegistry::MapName(server_uri, args->channel_args, &proxy_name,
+                               &new_args);
   target_uri_.reset(proxy_name != nullptr ? proxy_name
                                           : gpr_strdup(server_uri));
   channel_args_ = new_args != nullptr

+ 3 - 3
src/core/ext/filters/client_channel/client_channel_plugin.cc

@@ -48,8 +48,8 @@ void grpc_client_channel_init(void) {
   grpc_core::LoadBalancingPolicyRegistry::Builder::InitRegistry();
   grpc_core::ResolverRegistry::Builder::InitRegistry();
   grpc_core::internal::ServerRetryThrottleMap::Init();
-  grpc_proxy_mapper_registry_init();
-  grpc_register_http_proxy_mapper();
+  grpc_core::ProxyMapperRegistry::Init();
+  grpc_core::RegisterHttpProxyMapper();
   grpc_core::GlobalSubchannelPool::Init();
   grpc_channel_init_register_stage(
       GRPC_CLIENT_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY, append_filter,
@@ -61,7 +61,7 @@ void grpc_client_channel_init(void) {
 void grpc_client_channel_shutdown(void) {
   grpc_core::GlobalSubchannelPool::Shutdown();
   grpc_channel_init_shutdown();
-  grpc_proxy_mapper_registry_shutdown();
+  grpc_core::ProxyMapperRegistry::Shutdown();
   grpc_core::internal::ServerRetryThrottleMap::Shutdown();
   grpc_core::ResolverRegistry::Builder::ShutdownRegistry();
   grpc_core::LoadBalancingPolicyRegistry::Builder::ShutdownRegistry();

+ 0 - 41
src/core/ext/filters/client_channel/connector.cc

@@ -1,41 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/support/port_platform.h>
-
-#include "src/core/ext/filters/client_channel/connector.h"
-
-grpc_connector* grpc_connector_ref(grpc_connector* connector) {
-  connector->vtable->ref(connector);
-  return connector;
-}
-
-void grpc_connector_unref(grpc_connector* connector) {
-  connector->vtable->unref(connector);
-}
-
-void grpc_connector_connect(grpc_connector* connector,
-                            const grpc_connect_in_args* in_args,
-                            grpc_connect_out_args* out_args,
-                            grpc_closure* notify) {
-  connector->vtable->connect(connector, in_args, out_args, notify);
-}
-
-void grpc_connector_shutdown(grpc_connector* connector, grpc_error* why) {
-  connector->vtable->shutdown(connector, why);
-}

+ 40 - 45
src/core/ext/filters/client_channel/connector.h

@@ -23,62 +23,57 @@
 
 #include "src/core/lib/channel/channel_stack.h"
 #include "src/core/lib/channel/channelz.h"
+#include "src/core/lib/gprpp/orphanable.h"
 #include "src/core/lib/iomgr/resolve_address.h"
 #include "src/core/lib/transport/transport.h"
 
-typedef struct grpc_connector grpc_connector;
-typedef struct grpc_connector_vtable grpc_connector_vtable;
+namespace grpc_core {
 
-struct grpc_connector {
-  const grpc_connector_vtable* vtable;
-};
+// Interface for connection-establishment functionality.
+// Each transport that supports client channels (e.g., not inproc) must
+// supply an implementation of this.
+class SubchannelConnector : public InternallyRefCounted<SubchannelConnector> {
+ public:
+  struct Args {
+    // Set of pollsets interested in this connection.
+    grpc_pollset_set* interested_parties;
+    // Deadline for connection.
+    grpc_millis deadline;
+    // Channel args to be passed to handshakers and transport.
+    const grpc_channel_args* channel_args;
+  };
 
-typedef struct {
-  /** set of pollsets interested in this connection */
-  grpc_pollset_set* interested_parties;
-  /** deadline for connection */
-  grpc_millis deadline;
-  /** channel arguments (to be passed to transport) */
-  const grpc_channel_args* channel_args;
-} grpc_connect_in_args;
+  struct Result {
+    // The connected transport.
+    grpc_transport* transport = nullptr;
+    // Channel args to be passed to filters.
+    const grpc_channel_args* channel_args = nullptr;
+    // Channelz socket node of the connected transport, if any.
+    RefCountedPtr<channelz::SocketNode> socket_node;
 
-typedef struct {
-  /** the connected transport */
-  grpc_transport* transport;
+    void Reset() {
+      transport = nullptr;
+      channel_args = nullptr;
+      socket_node.reset();
+    }
+  };
 
-  /** channel arguments (to be passed to the filters) */
-  grpc_channel_args* channel_args;
+  // Attempts to connect.
+  // When complete, populates *result and invokes notify.
+  // Only one connection attempt may be in progress at any one time.
+  virtual void Connect(const Args& args, Result* result,
+                       grpc_closure* notify) = 0;
 
-  /** channelz socket node of the connected transport. nullptr if not available
-   */
-  grpc_core::RefCountedPtr<grpc_core::channelz::SocketNode> socket;
+  // Cancels any in-flight connection attempt and shuts down the
+  // connector.
+  virtual void Shutdown(grpc_error* error) = 0;
 
-  void reset() {
-    transport = nullptr;
-    channel_args = nullptr;
-    socket = nullptr;
+  void Orphan() override {
+    Shutdown(GRPC_ERROR_CREATE_FROM_STATIC_STRING("Subchannel disconnected"));
+    Unref();
   }
-} grpc_connect_out_args;
-
-struct grpc_connector_vtable {
-  void (*ref)(grpc_connector* connector);
-  void (*unref)(grpc_connector* connector);
-  /** Implementation of grpc_connector_shutdown */
-  void (*shutdown)(grpc_connector* connector, grpc_error* why);
-  /** Implementation of grpc_connector_connect */
-  void (*connect)(grpc_connector* connector,
-                  const grpc_connect_in_args* in_args,
-                  grpc_connect_out_args* out_args, grpc_closure* notify);
 };
 
-grpc_connector* grpc_connector_ref(grpc_connector* connector);
-void grpc_connector_unref(grpc_connector* connector);
-/** Connect using the connector: max one outstanding call at a time */
-void grpc_connector_connect(grpc_connector* connector,
-                            const grpc_connect_in_args* in_args,
-                            grpc_connect_out_args* out_args,
-                            grpc_closure* notify);
-/** Cancel any pending connection */
-void grpc_connector_shutdown(grpc_connector* connector, grpc_error* why);
+}  // namespace grpc_core
 
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CONNECTOR_H */
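
A minimal, purely illustrative skeleton of the new SubchannelConnector interface (the rewritten Chttp2Connector later in this diff is the real in-tree implementation); the class name is hypothetical and the attempt simply fails.

    class FailingConnector : public grpc_core::SubchannelConnector {
     public:
      void Connect(const Args& /*args*/, Result* result,
                   grpc_closure* notify) override {
        // A real connector establishes a transport asynchronously, fills in
        // *result, and then schedules `notify`; here we only report failure.
        result->Reset();
        grpc_core::ExecCtx::Run(
            DEBUG_LOCATION, notify,
            GRPC_ERROR_CREATE_FROM_STATIC_STRING("connect not implemented"));
      }
      void Shutdown(grpc_error* error) override { GRPC_ERROR_UNREF(error); }
    };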

+ 107 - 116
src/core/ext/filters/client_channel/http_proxy.cc

@@ -36,13 +36,16 @@
 #include "src/core/lib/slice/b64.h"
 #include "src/core/lib/uri/uri_parser.h"
 
+namespace grpc_core {
+namespace {
+
 /**
  * Parses the 'https_proxy' env var (fallback on 'http_proxy') and returns the
  * proxy hostname to resolve or nullptr on error. Also sets 'user_cred' to user
  * credentials if present in the 'http_proxy' env var, otherwise leaves it
  * unchanged. It is caller's responsibility to gpr_free user_cred.
  */
-static char* get_http_proxy_server(char** user_cred) {
+char* GetHttpProxyServer(char** user_cred) {
   GPR_ASSERT(user_cred != nullptr);
   char* proxy_name = nullptr;
   char** authority_strs = nullptr;
@@ -89,127 +92,115 @@ done:
   return proxy_name;
 }
 
-/**
- * Checks the value of GRPC_ARG_ENABLE_HTTP_PROXY to determine if http_proxy
- * should be used.
- */
-bool http_proxy_enabled(const grpc_channel_args* args) {
-  const grpc_arg* arg =
-      grpc_channel_args_find(args, GRPC_ARG_ENABLE_HTTP_PROXY);
-  return grpc_channel_arg_get_bool(arg, true);
-}
-
-static bool proxy_mapper_map_name(grpc_proxy_mapper* /*mapper*/,
-                                  const char* server_uri,
-                                  const grpc_channel_args* args,
-                                  char** name_to_resolve,
-                                  grpc_channel_args** new_args) {
-  if (!http_proxy_enabled(args)) {
-    return false;
-  }
-  char* user_cred = nullptr;
-  *name_to_resolve = get_http_proxy_server(&user_cred);
-  if (*name_to_resolve == nullptr) return false;
-  char* no_proxy_str = nullptr;
-  grpc_uri* uri = grpc_uri_parse(server_uri, false /* suppress_errors */);
-  if (uri == nullptr || uri->path[0] == '\0') {
-    gpr_log(GPR_ERROR,
-            "'http_proxy' environment variable set, but cannot "
-            "parse server URI '%s' -- not using proxy",
-            server_uri);
-    goto no_use_proxy;
-  }
-  if (strcmp(uri->scheme, "unix") == 0) {
-    gpr_log(GPR_INFO, "not using proxy for Unix domain socket '%s'",
-            server_uri);
-    goto no_use_proxy;
-  }
-  /* Prefer using 'no_grpc_proxy'. Fallback on 'no_proxy' if it is not set. */
-  no_proxy_str = gpr_getenv("no_grpc_proxy");
-  if (no_proxy_str == nullptr) no_proxy_str = gpr_getenv("no_proxy");
-  if (no_proxy_str != nullptr) {
-    static const char* NO_PROXY_SEPARATOR = ",";
-    bool use_proxy = true;
-    grpc_core::UniquePtr<char> server_host;
-    grpc_core::UniquePtr<char> server_port;
-    if (!grpc_core::SplitHostPort(
-            uri->path[0] == '/' ? uri->path + 1 : uri->path, &server_host,
-            &server_port)) {
-      gpr_log(GPR_INFO,
-              "unable to split host and port, not checking no_proxy list for "
-              "host '%s'",
+class HttpProxyMapper : public ProxyMapperInterface {
+ public:
+  bool MapName(const char* server_uri, const grpc_channel_args* args,
+               char** name_to_resolve, grpc_channel_args** new_args) override {
+    if (!grpc_channel_args_find_bool(args, GRPC_ARG_ENABLE_HTTP_PROXY, true)) {
+      return false;
+    }
+    char* user_cred = nullptr;
+    *name_to_resolve = GetHttpProxyServer(&user_cred);
+    if (*name_to_resolve == nullptr) return false;
+    char* no_proxy_str = nullptr;
+    grpc_uri* uri = grpc_uri_parse(server_uri, false /* suppress_errors */);
+    if (uri == nullptr || uri->path[0] == '\0') {
+      gpr_log(GPR_ERROR,
+              "'http_proxy' environment variable set, but cannot "
+              "parse server URI '%s' -- not using proxy",
               server_uri);
-      gpr_free(no_proxy_str);
-    } else {
-      size_t uri_len = strlen(server_host.get());
-      char** no_proxy_hosts;
-      size_t num_no_proxy_hosts;
-      gpr_string_split(no_proxy_str, NO_PROXY_SEPARATOR, &no_proxy_hosts,
-                       &num_no_proxy_hosts);
-      for (size_t i = 0; i < num_no_proxy_hosts; i++) {
-        char* no_proxy_entry = no_proxy_hosts[i];
-        size_t no_proxy_len = strlen(no_proxy_entry);
-        if (no_proxy_len <= uri_len &&
-            gpr_stricmp(no_proxy_entry,
-                        &(server_host.get()[uri_len - no_proxy_len])) == 0) {
-          gpr_log(GPR_INFO, "not using proxy for host in no_proxy list '%s'",
-                  server_uri);
-          use_proxy = false;
-          break;
+      goto no_use_proxy;
+    }
+    if (strcmp(uri->scheme, "unix") == 0) {
+      gpr_log(GPR_INFO, "not using proxy for Unix domain socket '%s'",
+              server_uri);
+      goto no_use_proxy;
+    }
+    /* Prefer using 'no_grpc_proxy'. Fall back on 'no_proxy' if it is not set. */
+    no_proxy_str = gpr_getenv("no_grpc_proxy");
+    if (no_proxy_str == nullptr) no_proxy_str = gpr_getenv("no_proxy");
+    if (no_proxy_str != nullptr) {
+      static const char* NO_PROXY_SEPARATOR = ",";
+      bool use_proxy = true;
+      grpc_core::UniquePtr<char> server_host;
+      grpc_core::UniquePtr<char> server_port;
+      if (!grpc_core::SplitHostPort(
+              uri->path[0] == '/' ? uri->path + 1 : uri->path, &server_host,
+              &server_port)) {
+        gpr_log(GPR_INFO,
+                "unable to split host and port, not checking no_proxy list for "
+                "host '%s'",
+                server_uri);
+        gpr_free(no_proxy_str);
+      } else {
+        size_t uri_len = strlen(server_host.get());
+        char** no_proxy_hosts;
+        size_t num_no_proxy_hosts;
+        gpr_string_split(no_proxy_str, NO_PROXY_SEPARATOR, &no_proxy_hosts,
+                         &num_no_proxy_hosts);
+        for (size_t i = 0; i < num_no_proxy_hosts; i++) {
+          char* no_proxy_entry = no_proxy_hosts[i];
+          size_t no_proxy_len = strlen(no_proxy_entry);
+          if (no_proxy_len <= uri_len &&
+              gpr_stricmp(no_proxy_entry,
+                          &(server_host.get()[uri_len - no_proxy_len])) == 0) {
+            gpr_log(GPR_INFO, "not using proxy for host in no_proxy list '%s'",
+                    server_uri);
+            use_proxy = false;
+            break;
+          }
         }
+        for (size_t i = 0; i < num_no_proxy_hosts; i++) {
+          gpr_free(no_proxy_hosts[i]);
+        }
+        gpr_free(no_proxy_hosts);
+        gpr_free(no_proxy_str);
+        if (!use_proxy) goto no_use_proxy;
       }
-      for (size_t i = 0; i < num_no_proxy_hosts; i++) {
-        gpr_free(no_proxy_hosts[i]);
-      }
-      gpr_free(no_proxy_hosts);
-      gpr_free(no_proxy_str);
-      if (!use_proxy) goto no_use_proxy;
     }
+    grpc_arg args_to_add[2];
+    args_to_add[0] = grpc_channel_arg_string_create(
+        (char*)GRPC_ARG_HTTP_CONNECT_SERVER,
+        uri->path[0] == '/' ? uri->path + 1 : uri->path);
+    if (user_cred != nullptr) {
+      /* Use base64 encoding for user credentials as stated in RFC 7617 */
+      char* encoded_user_cred =
+          grpc_base64_encode(user_cred, strlen(user_cred), 0, 0);
+      char* header;
+      gpr_asprintf(&header, "Proxy-Authorization:Basic %s", encoded_user_cred);
+      gpr_free(encoded_user_cred);
+      args_to_add[1] = grpc_channel_arg_string_create(
+          (char*)GRPC_ARG_HTTP_CONNECT_HEADERS, header);
+      *new_args = grpc_channel_args_copy_and_add(args, args_to_add, 2);
+      gpr_free(header);
+    } else {
+      *new_args = grpc_channel_args_copy_and_add(args, args_to_add, 1);
+    }
+    grpc_uri_destroy(uri);
+    gpr_free(user_cred);
+    return true;
+  no_use_proxy:
+    if (uri != nullptr) grpc_uri_destroy(uri);
+    gpr_free(*name_to_resolve);
+    *name_to_resolve = nullptr;
+    gpr_free(user_cred);
+    return false;
   }
-  grpc_arg args_to_add[2];
-  args_to_add[0] = grpc_channel_arg_string_create(
-      (char*)GRPC_ARG_HTTP_CONNECT_SERVER,
-      uri->path[0] == '/' ? uri->path + 1 : uri->path);
-  if (user_cred != nullptr) {
-    /* Use base64 encoding for user credentials as stated in RFC 7617 */
-    char* encoded_user_cred =
-        grpc_base64_encode(user_cred, strlen(user_cred), 0, 0);
-    char* header;
-    gpr_asprintf(&header, "Proxy-Authorization:Basic %s", encoded_user_cred);
-    gpr_free(encoded_user_cred);
-    args_to_add[1] = grpc_channel_arg_string_create(
-        (char*)GRPC_ARG_HTTP_CONNECT_HEADERS, header);
-    *new_args = grpc_channel_args_copy_and_add(args, args_to_add, 2);
-    gpr_free(header);
-  } else {
-    *new_args = grpc_channel_args_copy_and_add(args, args_to_add, 1);
-  }
-  grpc_uri_destroy(uri);
-  gpr_free(user_cred);
-  return true;
-no_use_proxy:
-  if (uri != nullptr) grpc_uri_destroy(uri);
-  gpr_free(*name_to_resolve);
-  *name_to_resolve = nullptr;
-  gpr_free(user_cred);
-  return false;
-}
-
-static bool proxy_mapper_map_address(grpc_proxy_mapper* /*mapper*/,
-                                     const grpc_resolved_address* /*address*/,
-                                     const grpc_channel_args* /*args*/,
-                                     grpc_resolved_address** /*new_address*/,
-                                     grpc_channel_args** /*new_args*/) {
-  return false;
-}
-
-static void proxy_mapper_destroy(grpc_proxy_mapper* /*mapper*/) {}
 
-static const grpc_proxy_mapper_vtable proxy_mapper_vtable = {
-    proxy_mapper_map_name, proxy_mapper_map_address, proxy_mapper_destroy};
+  bool MapAddress(const grpc_resolved_address& address,
+                  const grpc_channel_args* args,
+                  grpc_resolved_address** new_address,
+                  grpc_channel_args** new_args) override {
+    return false;
+  }
+};
 
-static grpc_proxy_mapper proxy_mapper = {&proxy_mapper_vtable};
+}  // namespace
 
-void grpc_register_http_proxy_mapper() {
-  grpc_proxy_mapper_register(true /* at_start */, &proxy_mapper);
+void RegisterHttpProxyMapper() {
+  ProxyMapperRegistry::Register(
+      true /* at_start */,
+      std::unique_ptr<ProxyMapperInterface>(new HttpProxyMapper()));
 }
+
+}  // namespace grpc_core
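
The mapper keyed off the https_proxy/http_proxy environment variables checks GRPC_ARG_ENABLE_HTTP_PROXY first. A sketch of opting a single channel out from application code using the public C++ channel-argument API (not part of this change); the target and credentials are placeholders.

    grpc::ChannelArguments channel_args;
    channel_args.SetInt(GRPC_ARG_ENABLE_HTTP_PROXY, 0);  // skip HttpProxyMapper
    auto channel = grpc::CreateCustomChannel(
        "dns:///my.backend.example.com:443",
        grpc::SslCredentials(grpc::SslCredentialsOptions()), channel_args);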

+ 5 - 1
src/core/ext/filters/client_channel/http_proxy.h

@@ -19,6 +19,10 @@
 #ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_HTTP_PROXY_H
 #define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_HTTP_PROXY_H
 
-void grpc_register_http_proxy_mapper();
+namespace grpc_core {
+
+void RegisterHttpProxyMapper();
+
+}  // namespace grpc_core
 
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_HTTP_PROXY_H */

+ 0 - 48
src/core/ext/filters/client_channel/proxy_mapper.cc

@@ -1,48 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/support/port_platform.h>
-
-#include "src/core/ext/filters/client_channel/proxy_mapper.h"
-
-void grpc_proxy_mapper_init(const grpc_proxy_mapper_vtable* vtable,
-                            grpc_proxy_mapper* mapper) {
-  mapper->vtable = vtable;
-}
-
-bool grpc_proxy_mapper_map_name(grpc_proxy_mapper* mapper,
-                                const char* server_uri,
-                                const grpc_channel_args* args,
-                                char** name_to_resolve,
-                                grpc_channel_args** new_args) {
-  return mapper->vtable->map_name(mapper, server_uri, args, name_to_resolve,
-                                  new_args);
-}
-
-bool grpc_proxy_mapper_map_address(grpc_proxy_mapper* mapper,
-                                   const grpc_resolved_address* address,
-                                   const grpc_channel_args* args,
-                                   grpc_resolved_address** new_address,
-                                   grpc_channel_args** new_args) {
-  return mapper->vtable->map_address(mapper, address, args, new_address,
-                                     new_args);
-}
-
-void grpc_proxy_mapper_destroy(grpc_proxy_mapper* mapper) {
-  mapper->vtable->destroy(mapper);
-}

+ 14 - 34
src/core/ext/filters/client_channel/proxy_mapper.h

@@ -21,54 +21,34 @@
 
 #include <grpc/support/port_platform.h>
 
-#include <stdbool.h>
-
 #include <grpc/impl/codegen/grpc_types.h>
 
 #include "src/core/lib/iomgr/resolve_address.h"
 
-typedef struct grpc_proxy_mapper grpc_proxy_mapper;
+namespace grpc_core {
+
+class ProxyMapperInterface {
+ public:
+  virtual ~ProxyMapperInterface() = default;
 
-typedef struct {
   /// Determines the proxy name to resolve for \a server_uri.
   /// If no proxy is needed, returns false.
   /// Otherwise, sets \a name_to_resolve, optionally sets \a new_args,
   /// and returns true.
-  bool (*map_name)(grpc_proxy_mapper* mapper, const char* server_uri,
-                   const grpc_channel_args* args, char** name_to_resolve,
-                   grpc_channel_args** new_args);
+  virtual bool MapName(const char* server_uri, const grpc_channel_args* args,
+                       char** name_to_resolve,
+                       grpc_channel_args** new_args) = 0;
+
   /// Determines the proxy address to use to contact \a address.
   /// If no proxy is needed, returns false.
   /// Otherwise, sets \a new_address, optionally sets \a new_args, and
   /// returns true.
-  bool (*map_address)(grpc_proxy_mapper* mapper,
-                      const grpc_resolved_address* address,
-                      const grpc_channel_args* args,
-                      grpc_resolved_address** new_address,
-                      grpc_channel_args** new_args);
-  /// Destroys \a mapper.
-  void (*destroy)(grpc_proxy_mapper* mapper);
-} grpc_proxy_mapper_vtable;
-
-struct grpc_proxy_mapper {
-  const grpc_proxy_mapper_vtable* vtable;
+  virtual bool MapAddress(const grpc_resolved_address& address,
+                          const grpc_channel_args* args,
+                          grpc_resolved_address** new_address,
+                          grpc_channel_args** new_args) = 0;
 };
 
-void grpc_proxy_mapper_init(const grpc_proxy_mapper_vtable* vtable,
-                            grpc_proxy_mapper* mapper);
-
-bool grpc_proxy_mapper_map_name(grpc_proxy_mapper* mapper,
-                                const char* server_uri,
-                                const grpc_channel_args* args,
-                                char** name_to_resolve,
-                                grpc_channel_args** new_args);
-
-bool grpc_proxy_mapper_map_address(grpc_proxy_mapper* mapper,
-                                   const grpc_resolved_address* address,
-                                   const grpc_channel_args* args,
-                                   grpc_resolved_address** new_address,
-                                   grpc_channel_args** new_args);
-
-void grpc_proxy_mapper_destroy(grpc_proxy_mapper* mapper);
+}  // namespace grpc_core
 
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PROXY_MAPPER_H */

+ 46 - 79
src/core/ext/filters/client_channel/proxy_mapper_registry.cc

@@ -20,103 +20,70 @@
 
 #include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
 
-#include <string.h>
+#include <memory>
+#include <vector>
 
-#include <grpc/support/alloc.h>
+namespace grpc_core {
 
-//
-// grpc_proxy_mapper_list
-//
+namespace {
 
-typedef struct {
-  grpc_proxy_mapper** list;
-  size_t num_mappers;
-} grpc_proxy_mapper_list;
+using ProxyMapperList = std::vector<std::unique_ptr<ProxyMapperInterface>>;
+ProxyMapperList* g_proxy_mapper_list;
 
-static void grpc_proxy_mapper_list_register(grpc_proxy_mapper_list* list,
-                                            bool at_start,
-                                            grpc_proxy_mapper* mapper) {
-  list->list = static_cast<grpc_proxy_mapper**>(gpr_realloc(
-      list->list, (list->num_mappers + 1) * sizeof(grpc_proxy_mapper*)));
+}  // namespace
+
+void ProxyMapperRegistry::Init() {
+  if (g_proxy_mapper_list == nullptr) {
+    g_proxy_mapper_list = new ProxyMapperList();
+  }
+}
+
+void ProxyMapperRegistry::Shutdown() {
+  delete g_proxy_mapper_list;
+  // Clean up in case we re-initialize later.
+  // TODO(roth): This should ideally live in Init().  However, if we did this
+  // there, then we would do it AFTER we start registering proxy mappers from
+  // third-party plugins, so they'd never show up (and would leak memory).
+  // We probably need some sort of dependency system for plugins to fix
+  // this.
+  g_proxy_mapper_list = nullptr;
+}
+
+void ProxyMapperRegistry::Register(
+    bool at_start, std::unique_ptr<ProxyMapperInterface> mapper) {
+  Init();
   if (at_start) {
-    memmove(list->list + 1, list->list,
-            sizeof(grpc_proxy_mapper*) * list->num_mappers);
-    list->list[0] = mapper;
+    g_proxy_mapper_list->insert(g_proxy_mapper_list->begin(),
+                                std::move(mapper));
   } else {
-    list->list[list->num_mappers] = mapper;
+    g_proxy_mapper_list->emplace_back(std::move(mapper));
   }
-  ++list->num_mappers;
 }
 
-static bool grpc_proxy_mapper_list_map_name(grpc_proxy_mapper_list* list,
-                                            const char* server_uri,
-                                            const grpc_channel_args* args,
-                                            char** name_to_resolve,
-                                            grpc_channel_args** new_args) {
-  for (size_t i = 0; i < list->num_mappers; ++i) {
-    if (grpc_proxy_mapper_map_name(list->list[i], server_uri, args,
-                                   name_to_resolve, new_args)) {
+bool ProxyMapperRegistry::MapName(const char* server_uri,
+                                  const grpc_channel_args* args,
+                                  char** name_to_resolve,
+                                  grpc_channel_args** new_args) {
+  Init();
+  for (const auto& mapper : *g_proxy_mapper_list) {
+    if (mapper->MapName(server_uri, args, name_to_resolve, new_args)) {
       return true;
     }
   }
   return false;
 }
 
-static bool grpc_proxy_mapper_list_map_address(
-    grpc_proxy_mapper_list* list, const grpc_resolved_address* address,
-    const grpc_channel_args* args, grpc_resolved_address** new_address,
-    grpc_channel_args** new_args) {
-  for (size_t i = 0; i < list->num_mappers; ++i) {
-    if (grpc_proxy_mapper_map_address(list->list[i], address, args, new_address,
-                                      new_args)) {
+bool ProxyMapperRegistry::MapAddress(const grpc_resolved_address& address,
+                                     const grpc_channel_args* args,
+                                     grpc_resolved_address** new_address,
+                                     grpc_channel_args** new_args) {
+  Init();
+  for (const auto& mapper : *g_proxy_mapper_list) {
+    if (mapper->MapAddress(address, args, new_address, new_args)) {
       return true;
     }
   }
   return false;
 }
 
-static void grpc_proxy_mapper_list_destroy(grpc_proxy_mapper_list* list) {
-  for (size_t i = 0; i < list->num_mappers; ++i) {
-    grpc_proxy_mapper_destroy(list->list[i]);
-  }
-  gpr_free(list->list);
-  // Clean up in case we re-initialze later.
-  // TODO(ctiller): This should ideally live in
-  // grpc_proxy_mapper_registry_init().  However, if we did this there,
-  // then we would do it AFTER we start registering proxy mappers from
-  // third-party plugins, so they'd never show up (and would leak memory).
-  // We probably need some sort of dependency system for plugins to fix
-  // this.
-  memset(list, 0, sizeof(*list));
-}
-
-//
-// plugin
-//
-
-static grpc_proxy_mapper_list g_proxy_mapper_list;
-
-void grpc_proxy_mapper_registry_init() {}
-
-void grpc_proxy_mapper_registry_shutdown() {
-  grpc_proxy_mapper_list_destroy(&g_proxy_mapper_list);
-}
-
-void grpc_proxy_mapper_register(bool at_start, grpc_proxy_mapper* mapper) {
-  grpc_proxy_mapper_list_register(&g_proxy_mapper_list, at_start, mapper);
-}
-
-bool grpc_proxy_mappers_map_name(const char* server_uri,
-                                 const grpc_channel_args* args,
-                                 char** name_to_resolve,
-                                 grpc_channel_args** new_args) {
-  return grpc_proxy_mapper_list_map_name(&g_proxy_mapper_list, server_uri, args,
-                                         name_to_resolve, new_args);
-}
-bool grpc_proxy_mappers_map_address(const grpc_resolved_address* address,
-                                    const grpc_channel_args* args,
-                                    grpc_resolved_address** new_address,
-                                    grpc_channel_args** new_args) {
-  return grpc_proxy_mapper_list_map_address(&g_proxy_mapper_list, address, args,
-                                            new_address, new_args);
-}
+}  // namespace grpc_core

+ 23 - 17
src/core/ext/filters/client_channel/proxy_mapper_registry.h

@@ -23,22 +23,28 @@
 
 #include "src/core/ext/filters/client_channel/proxy_mapper.h"
 
-void grpc_proxy_mapper_registry_init();
-void grpc_proxy_mapper_registry_shutdown();
-
-/// Registers a new proxy mapper.  Takes ownership.
-/// If \a at_start is true, the new mapper will be at the beginning of
-/// the list.  Otherwise, it will be added to the end.
-void grpc_proxy_mapper_register(bool at_start, grpc_proxy_mapper* mapper);
-
-bool grpc_proxy_mappers_map_name(const char* server_uri,
-                                 const grpc_channel_args* args,
-                                 char** name_to_resolve,
-                                 grpc_channel_args** new_args);
-
-bool grpc_proxy_mappers_map_address(const grpc_resolved_address* address,
-                                    const grpc_channel_args* args,
-                                    grpc_resolved_address** new_address,
-                                    grpc_channel_args** new_args);
+namespace grpc_core {
+
+class ProxyMapperRegistry {
+ public:
+  static void Init();
+  static void Shutdown();
+
+  /// Registers a new proxy mapper.
+  /// If \a at_start is true, the new mapper will be at the beginning of
+  /// the list.  Otherwise, it will be added to the end.
+  static void Register(bool at_start,
+                       std::unique_ptr<ProxyMapperInterface> mapper);
+
+  static bool MapName(const char* server_uri, const grpc_channel_args* args,
+                      char** name_to_resolve, grpc_channel_args** new_args);
+
+  static bool MapAddress(const grpc_resolved_address& address,
+                         const grpc_channel_args* args,
+                         grpc_resolved_address** new_address,
+                         grpc_channel_args** new_args);
+};
+
+}  // namespace grpc_core
 
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PROXY_MAPPER_REGISTRY_H */
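
A hypothetical plugin sketch against the new registry API: a mapper that never rewrites names or addresses, registered at the front of the list.

    class NoOpProxyMapper : public grpc_core::ProxyMapperInterface {
     public:
      bool MapName(const char* /*server_uri*/, const grpc_channel_args* /*args*/,
                   char** /*name_to_resolve*/,
                   grpc_channel_args** /*new_args*/) override {
        return false;  // No proxy needed for this name.
      }
      bool MapAddress(const grpc_resolved_address& /*address*/,
                      const grpc_channel_args* /*args*/,
                      grpc_resolved_address** /*new_address*/,
                      grpc_channel_args** /*new_args*/) override {
        return false;  // No proxy needed for this address.
      }
    };

    void RegisterNoOpProxyMapper() {
      grpc_core::ProxyMapperRegistry::Register(
          /*at_start=*/true,
          std::unique_ptr<grpc_core::ProxyMapperInterface>(new NoOpProxyMapper()));
    }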

+ 14 - 15
src/core/ext/filters/client_channel/subchannel.cc

@@ -613,21 +613,21 @@ BackOff::Options ParseArgsForBackoffValues(
 
 }  // namespace
 
-Subchannel::Subchannel(SubchannelKey* key, grpc_connector* connector,
+Subchannel::Subchannel(SubchannelKey* key,
+                       OrphanablePtr<SubchannelConnector> connector,
                        const grpc_channel_args* args)
     : key_(key),
-      connector_(connector),
+      connector_(std::move(connector)),
       backoff_(ParseArgsForBackoffValues(args, &min_connect_timeout_ms_)) {
   GRPC_STATS_INC_CLIENT_SUBCHANNELS_CREATED();
   gpr_atm_no_barrier_store(&ref_pair_, 1 << INTERNAL_REF_BITS);
-  grpc_connector_ref(connector_);
   pollset_set_ = grpc_pollset_set_create();
   grpc_resolved_address* addr =
       static_cast<grpc_resolved_address*>(gpr_malloc(sizeof(*addr)));
   GetAddressFromSubchannelAddressArg(args, addr);
   grpc_resolved_address* new_address = nullptr;
   grpc_channel_args* new_args = nullptr;
-  if (grpc_proxy_mappers_map_address(addr, args, &new_address, &new_args)) {
+  if (ProxyMapperRegistry::MapAddress(*addr, args, &new_address, &new_args)) {
     GPR_ASSERT(new_address != nullptr);
     gpr_free(addr);
     addr = new_address;
@@ -668,12 +668,12 @@ Subchannel::~Subchannel() {
     channelz_node_->UpdateConnectivityState(GRPC_CHANNEL_SHUTDOWN);
   }
   grpc_channel_args_destroy(args_);
-  grpc_connector_unref(connector_);
+  connector_.reset();
   grpc_pollset_set_destroy(pollset_set_);
   delete key_;
 }
 
-Subchannel* Subchannel::Create(grpc_connector* connector,
+Subchannel* Subchannel::Create(OrphanablePtr<SubchannelConnector> connector,
                                const grpc_channel_args* args) {
   SubchannelKey* key = new SubchannelKey(args);
   SubchannelPoolInterface* subchannel_pool =
@@ -684,7 +684,7 @@ Subchannel* Subchannel::Create(grpc_connector* connector,
     delete key;
     return c;
   }
-  c = new Subchannel(key, connector, args);
+  c = new Subchannel(key, std::move(connector), args);
   // Try to register the subchannel before setting the subchannel pool.
   // Otherwise, in case of a registration race, unreffing c in
   // RegisterSubchannel() will cause c to be tried to be unregistered, while
@@ -975,7 +975,7 @@ void Subchannel::OnRetryAlarm(void* arg, grpc_error* error) {
 }
 
 void Subchannel::ContinueConnectingLocked() {
-  grpc_connect_in_args args;
+  SubchannelConnector::Args args;
   args.interested_parties = pollset_set_;
   const grpc_millis min_deadline =
       min_connect_timeout_ms_ + ExecCtx::Get()->Now();
@@ -983,13 +983,13 @@ void Subchannel::ContinueConnectingLocked() {
   args.deadline = std::max(next_attempt_deadline_, min_deadline);
   args.channel_args = args_;
   SetConnectivityStateLocked(GRPC_CHANNEL_CONNECTING);
-  grpc_connector_connect(connector_, &args, &connecting_result_,
-                         &on_connecting_finished_);
+  connector_->Connect(args, &connecting_result_, &on_connecting_finished_);
 }
 
 void Subchannel::OnConnectingFinished(void* arg, grpc_error* error) {
   auto* c = static_cast<Subchannel*>(arg);
-  grpc_channel_args* delete_channel_args = c->connecting_result_.channel_args;
+  const grpc_channel_args* delete_channel_args =
+      c->connecting_result_.channel_args;
   GRPC_SUBCHANNEL_WEAK_REF(c, "on_connecting_finished");
   {
     MutexLock lock(&c->mu_);
@@ -1042,8 +1042,8 @@ bool Subchannel::PublishTransportLocked() {
     return false;
   }
   RefCountedPtr<channelz::SocketNode> socket =
-      std::move(connecting_result_.socket);
-  connecting_result_.reset();
+      std::move(connecting_result_.socket_node);
+  connecting_result_.Reset();
   if (disconnected_) {
     grpc_channel_stack_destroy(stk);
     gpr_free(stk);
@@ -1075,8 +1075,7 @@ void Subchannel::Disconnect() {
   MutexLock lock(&mu_);
   GPR_ASSERT(!disconnected_);
   disconnected_ = true;
-  grpc_connector_shutdown(connector_, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
-                                          "Subchannel disconnected"));
+  connector_.reset();
   connected_subchannel_.reset();
   health_watcher_map_.ShutdownLocked();
 }

+ 4 - 4
src/core/ext/filters/client_channel/subchannel.h

@@ -197,12 +197,12 @@ class Subchannel {
   };
 
   // The ctor and dtor are not intended to use directly.
-  Subchannel(SubchannelKey* key, grpc_connector* connector,
+  Subchannel(SubchannelKey* key, OrphanablePtr<SubchannelConnector> connector,
              const grpc_channel_args* args);
   ~Subchannel();
 
   // Creates a subchannel given \a connector and \a args.
-  static Subchannel* Create(grpc_connector* connector,
+  static Subchannel* Create(OrphanablePtr<SubchannelConnector> connector,
                             const grpc_channel_args* args);
 
   // Strong and weak refcounting.
@@ -365,9 +365,9 @@ class Subchannel {
   gpr_atm ref_pair_;
 
   // Connection states.
-  grpc_connector* connector_ = nullptr;
+  OrphanablePtr<SubchannelConnector> connector_;
   // Set during connection.
-  grpc_connect_out_args connecting_result_;
+  SubchannelConnector::Result connecting_result_;
   grpc_closure on_connecting_finished_;
   // Active connection, or null.
   RefCountedPtr<ConnectedSubchannel> connected_subchannel_;
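
With the connector now owned via OrphanablePtr, creation sites hand it straight to the subchannel. A rough sketch mirroring what the chttp2 client factories do elsewhere in this change; the wrapper function is hypothetical.

    grpc_core::Subchannel* CreateHttp2Subchannel(const grpc_channel_args* args) {
      return grpc_core::Subchannel::Create(
          grpc_core::MakeOrphanable<grpc_core::Chttp2Connector>(), args);
    }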

+ 3 - 4
src/core/ext/filters/client_idle/client_idle_filter.cc

@@ -27,10 +27,9 @@
 #include "src/core/lib/surface/channel_init.h"
 #include "src/core/lib/transport/http2_errors.h"
 
-// The idle filter is enabled in client channel by default.
-// Set GRPC_ARG_CLIENT_IDLE_TIMEOUT_MS to [1000, INT_MAX) in channel args to
-// configure the idle timeout.
-#define DEFAULT_IDLE_TIMEOUT_MS (30 /*minutes*/ * 60 * 1000)
+// TODO(juanlishen): The idle filter is disabled in client channel by default
+// due to b/143502997. Try to fix the bug and enable the filter by default.
+#define DEFAULT_IDLE_TIMEOUT_MS INT_MAX
 // The user input idle timeout smaller than this would be capped to it.
 #define MIN_IDLE_TIMEOUT_MS (1 /*second*/ * 1000)
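
With the default now effectively disabling idleness, a per-channel opt-in remains available through GRPC_ARG_CLIENT_IDLE_TIMEOUT_MS (as the removed comment noted). A sketch using the public C++ API (not part of this change); the five-minute value is arbitrary.

    grpc::ChannelArguments idle_args;
    idle_args.SetInt(GRPC_ARG_CLIENT_IDLE_TIMEOUT_MS, 5 * 60 * 1000);
    auto channel = grpc::CreateCustomChannel(
        "localhost:50051", grpc::InsecureChannelCredentials(), idle_args);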
 

+ 5 - 0
src/core/ext/filters/http/message_compress/message_compress_filter.cc

@@ -321,6 +321,11 @@ static grpc_error* pull_slice_from_send_message(call_data* calld) {
 // eventually result in calling on_send_message_next_done().
 static void continue_reading_send_message(grpc_call_element* elem) {
   call_data* calld = static_cast<call_data*>(elem->call_data);
+  if (calld->slices.length ==
+      calld->send_message_batch->payload->send_message.send_message->length()) {
+    finish_send_message(elem);
+    return;
+  }
   while (calld->send_message_batch->payload->send_message.send_message->Next(
       ~static_cast<size_t>(0), &calld->on_send_message_next_done)) {
     grpc_error* error = pull_slice_from_send_message(calld);

+ 134 - 174
src/core/ext/transport/chttp2/client/chttp2_connector.cc

@@ -38,202 +38,162 @@
 #include "src/core/lib/iomgr/tcp_client.h"
 #include "src/core/lib/slice/slice_internal.h"
 
-typedef struct {
-  grpc_connector base;
+namespace grpc_core {
 
-  gpr_mu mu;
-  gpr_refcount refs;
-
-  bool shutdown;
-  bool connecting;
-
-  grpc_closure* notify;
-  grpc_connect_in_args args;
-  grpc_connect_out_args* result;
-
-  grpc_endpoint* endpoint;  // Non-NULL until handshaking starts.
-
-  grpc_closure connected;
-
-  grpc_core::RefCountedPtr<grpc_core::HandshakeManager> handshake_mgr;
-} chttp2_connector;
+Chttp2Connector::Chttp2Connector() {
+  GRPC_CLOSURE_INIT(&connected_, Connected, this, grpc_schedule_on_exec_ctx);
+}
 
-static void chttp2_connector_ref(grpc_connector* con) {
-  chttp2_connector* c = reinterpret_cast<chttp2_connector*>(con);
-  gpr_ref(&c->refs);
+Chttp2Connector::~Chttp2Connector() {
+  if (endpoint_ != nullptr) grpc_endpoint_destroy(endpoint_);
 }
 
-static void chttp2_connector_unref(grpc_connector* con) {
-  chttp2_connector* c = reinterpret_cast<chttp2_connector*>(con);
-  if (gpr_unref(&c->refs)) {
-    gpr_mu_destroy(&c->mu);
-    // If handshaking is not yet in progress, destroy the endpoint.
-    // Otherwise, the handshaker will do this for us.
-    if (c->endpoint != nullptr) grpc_endpoint_destroy(c->endpoint);
-    gpr_free(c);
+void Chttp2Connector::Connect(const Args& args, Result* result,
+                              grpc_closure* notify) {
+  grpc_resolved_address addr;
+  Subchannel::GetAddressFromSubchannelAddressArg(args.channel_args, &addr);
+  grpc_endpoint** ep;
+  {
+    MutexLock lock(&mu_);
+    GPR_ASSERT(notify_ == nullptr);
+    args_ = args;
+    result_ = result;
+    notify_ = notify;
+    GPR_ASSERT(!connecting_);
+    connecting_ = true;
+    GPR_ASSERT(endpoint_ == nullptr);
+    ep = &endpoint_;
   }
+  // In some implementations, the closure can be flushed before
+  // grpc_tcp_client_connect() returns, and since the closure requires access
+  // to mu_, this can result in a deadlock (see
+  // https://github.com/grpc/grpc/issues/16427 for details).
+  // grpc_tcp_client_connect() will fill endpoint_ with proper contents, and we
+  // make sure that we still exist at that point by taking a ref.
+  Ref().release();  // Ref held by callback.
+  grpc_tcp_client_connect(&connected_, ep, args.interested_parties,
+                          args.channel_args, &addr, args.deadline);
 }
 
-static void chttp2_connector_shutdown(grpc_connector* con, grpc_error* why) {
-  chttp2_connector* c = reinterpret_cast<chttp2_connector*>(con);
-  gpr_mu_lock(&c->mu);
-  c->shutdown = true;
-  if (c->handshake_mgr != nullptr) {
-    c->handshake_mgr->Shutdown(GRPC_ERROR_REF(why));
+void Chttp2Connector::Shutdown(grpc_error* error) {
+  MutexLock lock(&mu_);
+  shutdown_ = true;
+  if (handshake_mgr_ != nullptr) {
+    handshake_mgr_->Shutdown(GRPC_ERROR_REF(error));
   }
   // If handshaking is not yet in progress, shutdown the endpoint.
   // Otherwise, the handshaker will do this for us.
-  if (!c->connecting && c->endpoint != nullptr) {
-    grpc_endpoint_shutdown(c->endpoint, GRPC_ERROR_REF(why));
+  if (!connecting_ && endpoint_ != nullptr) {
+    grpc_endpoint_shutdown(endpoint_, GRPC_ERROR_REF(error));
   }
-  gpr_mu_unlock(&c->mu);
-  GRPC_ERROR_UNREF(why);
+  GRPC_ERROR_UNREF(error);
 }
 
-static void on_handshake_done(void* arg, grpc_error* error) {
-  auto* args = static_cast<grpc_core::HandshakerArgs*>(arg);
-  chttp2_connector* c = static_cast<chttp2_connector*>(args->user_data);
-  gpr_mu_lock(&c->mu);
-  if (error != GRPC_ERROR_NONE || c->shutdown) {
-    if (error == GRPC_ERROR_NONE) {
-      error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("connector shutdown");
-      // We were shut down after handshaking completed successfully, so
-      // destroy the endpoint here.
-      // TODO(ctiller): It is currently necessary to shutdown endpoints
-      // before destroying them, even if we know that there are no
-      // pending read/write callbacks.  This should be fixed, at which
-      // point this can be removed.
-      grpc_endpoint_shutdown(args->endpoint, GRPC_ERROR_REF(error));
-      grpc_endpoint_destroy(args->endpoint);
-      grpc_channel_args_destroy(args->args);
-      grpc_slice_buffer_destroy_internal(args->read_buffer);
-      gpr_free(args->read_buffer);
+void Chttp2Connector::Connected(void* arg, grpc_error* error) {
+  Chttp2Connector* self = static_cast<Chttp2Connector*>(arg);
+  bool unref = false;
+  {
+    MutexLock lock(&self->mu_);
+    GPR_ASSERT(self->connecting_);
+    self->connecting_ = false;
+    if (error != GRPC_ERROR_NONE || self->shutdown_) {
+      if (error == GRPC_ERROR_NONE) {
+        error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("connector shutdown");
+      } else {
+        error = GRPC_ERROR_REF(error);
+      }
+      if (self->endpoint_ != nullptr) {
+        grpc_endpoint_shutdown(self->endpoint_, GRPC_ERROR_REF(error));
+      }
+      self->result_->Reset();
+      grpc_closure* notify = self->notify_;
+      self->notify_ = nullptr;
+      ExecCtx::Run(DEBUG_LOCATION, notify, error);
+      unref = true;
     } else {
-      error = GRPC_ERROR_REF(error);
+      GPR_ASSERT(self->endpoint_ != nullptr);
+      self->StartHandshakeLocked();
     }
-    c->result->reset();
-  } else {
-    grpc_endpoint_delete_from_pollset_set(args->endpoint,
-                                          c->args.interested_parties);
-    c->result->transport =
-        grpc_create_chttp2_transport(args->args, args->endpoint, true);
-    c->result->socket =
-        grpc_chttp2_transport_get_socket_node(c->result->transport);
-    GPR_ASSERT(c->result->transport);
-    // TODO(roth): We ideally want to wait until we receive HTTP/2
-    // settings from the server before we consider the connection
-    // established.  If that doesn't happen before the connection
-    // timeout expires, then we should consider the connection attempt a
-    // failure and feed that information back into the backoff code.
-    // We could pass a notify_on_receive_settings callback to
-    // grpc_chttp2_transport_start_reading() to let us know when
-    // settings are received, but we would need to figure out how to use
-    // that information here.
-    //
-    // Unfortunately, we don't currently have a way to split apart the two
-    // effects of scheduling c->notify: we start sending RPCs immediately
-    // (which we want to do) and we consider the connection attempt successful
-    // (which we don't want to do until we get the notify_on_receive_settings
-    // callback from the transport).  If we could split those things
-    // apart, then we could start sending RPCs but then wait for our
-    // timeout before deciding if the connection attempt is successful.
-    // If the attempt is not successful, then we would tear down the
-    // transport and feed the failure back into the backoff code.
-    //
-    // In addition, even if we did that, we would probably not want to do
-    // so until after transparent retries is implemented.  Otherwise, any
-    // RPC that we attempt to send on the connection before the timeout
-    // would fail instead of being retried on a subsequent attempt.
-    grpc_chttp2_transport_start_reading(c->result->transport, args->read_buffer,
-                                        nullptr);
-    c->result->channel_args = args->args;
   }
-  grpc_closure* notify = c->notify;
-  c->notify = nullptr;
-  grpc_core::ExecCtx::Run(DEBUG_LOCATION, notify, error);
-  c->handshake_mgr.reset();
-  gpr_mu_unlock(&c->mu);
-  chttp2_connector_unref(reinterpret_cast<grpc_connector*>(c));
+  if (unref) self->Unref();
 }
 
-static void start_handshake_locked(chttp2_connector* c) {
-  c->handshake_mgr = grpc_core::MakeRefCounted<grpc_core::HandshakeManager>();
-  grpc_core::HandshakerRegistry::AddHandshakers(
-      grpc_core::HANDSHAKER_CLIENT, c->args.channel_args,
-      c->args.interested_parties, c->handshake_mgr.get());
-  grpc_endpoint_add_to_pollset_set(c->endpoint, c->args.interested_parties);
-  c->handshake_mgr->DoHandshake(c->endpoint, c->args.channel_args,
-                                c->args.deadline, nullptr /* acceptor */,
-                                on_handshake_done, c);
-  c->endpoint = nullptr;  // Endpoint handed off to handshake manager.
+void Chttp2Connector::StartHandshakeLocked() {
+  handshake_mgr_ = MakeRefCounted<HandshakeManager>();
+  HandshakerRegistry::AddHandshakers(HANDSHAKER_CLIENT, args_.channel_args,
+                                     args_.interested_parties,
+                                     handshake_mgr_.get());
+  grpc_endpoint_add_to_pollset_set(endpoint_, args_.interested_parties);
+  handshake_mgr_->DoHandshake(endpoint_, args_.channel_args, args_.deadline,
+                              nullptr /* acceptor */, OnHandshakeDone, this);
+  endpoint_ = nullptr;  // Endpoint handed off to handshake manager.
 }
 
-static void connected(void* arg, grpc_error* error) {
-  chttp2_connector* c = static_cast<chttp2_connector*>(arg);
-  gpr_mu_lock(&c->mu);
-  GPR_ASSERT(c->connecting);
-  c->connecting = false;
-  if (error != GRPC_ERROR_NONE || c->shutdown) {
-    if (error == GRPC_ERROR_NONE) {
-      error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("connector shutdown");
+void Chttp2Connector::OnHandshakeDone(void* arg, grpc_error* error) {
+  auto* args = static_cast<HandshakerArgs*>(arg);
+  Chttp2Connector* self = static_cast<Chttp2Connector*>(args->user_data);
+  {
+    MutexLock lock(&self->mu_);
+    if (error != GRPC_ERROR_NONE || self->shutdown_) {
+      if (error == GRPC_ERROR_NONE) {
+        error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("connector shutdown");
+        // We were shut down after handshaking completed successfully, so
+        // destroy the endpoint here.
+        // TODO(ctiller): It is currently necessary to shutdown endpoints
+        // before destroying them, even if we know that there are no
+        // pending read/write callbacks.  This should be fixed, at which
+        // point this can be removed.
+        grpc_endpoint_shutdown(args->endpoint, GRPC_ERROR_REF(error));
+        grpc_endpoint_destroy(args->endpoint);
+        grpc_channel_args_destroy(args->args);
+        grpc_slice_buffer_destroy_internal(args->read_buffer);
+        gpr_free(args->read_buffer);
+      } else {
+        error = GRPC_ERROR_REF(error);
+      }
+      self->result_->Reset();
     } else {
-      error = GRPC_ERROR_REF(error);
+      grpc_endpoint_delete_from_pollset_set(args->endpoint,
+                                            self->args_.interested_parties);
+      self->result_->transport =
+          grpc_create_chttp2_transport(args->args, args->endpoint, true);
+      self->result_->socket_node =
+          grpc_chttp2_transport_get_socket_node(self->result_->transport);
+      GPR_ASSERT(self->result_->transport != nullptr);
+      // TODO(roth): We ideally want to wait until we receive HTTP/2
+      // settings from the server before we consider the connection
+      // established.  If that doesn't happen before the connection
+      // timeout expires, then we should consider the connection attempt a
+      // failure and feed that information back into the backoff code.
+      // We could pass a notify_on_receive_settings callback to
+      // grpc_chttp2_transport_start_reading() to let us know when
+      // settings are received, but we would need to figure out how to use
+      // that information here.
+      //
+      // Unfortunately, we don't currently have a way to split apart the two
+      // effects of scheduling c->notify: we start sending RPCs immediately
+      // (which we want to do) and we consider the connection attempt successful
+      // (which we don't want to do until we get the notify_on_receive_settings
+      // callback from the transport).  If we could split those things
+      // apart, then we could start sending RPCs but then wait for our
+      // timeout before deciding if the connection attempt is successful.
+      // If the attempt is not successful, then we would tear down the
+      // transport and feed the failure back into the backoff code.
+      //
+      // In addition, even if we did that, we would probably not want to do
+      // so until after transparent retries is implemented.  Otherwise, any
+      // RPC that we attempt to send on the connection before the timeout
+      // would fail instead of being retried on a subsequent attempt.
+      grpc_chttp2_transport_start_reading(self->result_->transport,
+                                          args->read_buffer, nullptr);
+      self->result_->channel_args = args->args;
     }
-    c->result->reset();
-    grpc_closure* notify = c->notify;
-    c->notify = nullptr;
-    grpc_core::ExecCtx::Run(DEBUG_LOCATION, notify, error);
-    if (c->endpoint != nullptr) {
-      grpc_endpoint_shutdown(c->endpoint, GRPC_ERROR_REF(error));
-    }
-    gpr_mu_unlock(&c->mu);
-    chttp2_connector_unref(static_cast<grpc_connector*>(arg));
-  } else {
-    GPR_ASSERT(c->endpoint != nullptr);
-    start_handshake_locked(c);
-    gpr_mu_unlock(&c->mu);
+    grpc_closure* notify = self->notify_;
+    self->notify_ = nullptr;
+    ExecCtx::Run(DEBUG_LOCATION, notify, error);
+    self->handshake_mgr_.reset();
   }
+  self->Unref();
 }
 
-static void chttp2_connector_connect(grpc_connector* con,
-                                     const grpc_connect_in_args* args,
-                                     grpc_connect_out_args* result,
-                                     grpc_closure* notify) {
-  chttp2_connector* c = reinterpret_cast<chttp2_connector*>(con);
-  grpc_resolved_address addr;
-  grpc_core::Subchannel::GetAddressFromSubchannelAddressArg(args->channel_args,
-                                                            &addr);
-  gpr_mu_lock(&c->mu);
-  GPR_ASSERT(c->notify == nullptr);
-  c->notify = notify;
-  c->args = *args;
-  c->result = result;
-  GPR_ASSERT(c->endpoint == nullptr);
-  chttp2_connector_ref(con);  // Ref taken for callback.
-  GRPC_CLOSURE_INIT(&c->connected, connected, c, grpc_schedule_on_exec_ctx);
-  GPR_ASSERT(!c->connecting);
-  c->connecting = true;
-  grpc_closure* closure = &c->connected;
-  grpc_endpoint** ep = &c->endpoint;
-  gpr_mu_unlock(&c->mu);
-  // In some implementations, the closure can be flushed before
-  // grpc_tcp_client_connect and since the closure requires access to c->mu,
-  // this can result in a deadlock. Refer
-  // https://github.com/grpc/grpc/issues/16427
-  // grpc_tcp_client_connect would fill c->endpoint with proper contents and we
-  // make sure that we would still exist at that point by taking a ref.
-  grpc_tcp_client_connect(closure, ep, args->interested_parties,
-                          args->channel_args, &addr, args->deadline);
-}
-
-static const grpc_connector_vtable chttp2_connector_vtable = {
-    chttp2_connector_ref, chttp2_connector_unref, chttp2_connector_shutdown,
-    chttp2_connector_connect};
-
-grpc_connector* grpc_chttp2_connector_create() {
-  chttp2_connector* c = static_cast<chttp2_connector*>(gpr_zalloc(sizeof(*c)));
-  c->base.vtable = &chttp2_connector_vtable;
-  gpr_mu_init(&c->mu);
-  gpr_ref_init(&c->refs, 1);
-  return &c->base;
-}
+}  // namespace grpc_core

+ 31 - 1
src/core/ext/transport/chttp2/client/chttp2_connector.h

@@ -22,7 +22,37 @@
 #include <grpc/support/port_platform.h>
 
 #include "src/core/ext/filters/client_channel/connector.h"
+#include "src/core/lib/channel/handshaker.h"
+#include "src/core/lib/channel/handshaker_registry.h"
 
-grpc_connector* grpc_chttp2_connector_create();
+namespace grpc_core {
+
+class Chttp2Connector : public SubchannelConnector {
+ public:
+  Chttp2Connector();
+  ~Chttp2Connector();
+
+  void Connect(const Args& args, Result* result, grpc_closure* notify) override;
+  void Shutdown(grpc_error* error) override;
+
+ private:
+  static void Connected(void* arg, grpc_error* error);
+  void StartHandshakeLocked();
+  static void OnHandshakeDone(void* arg, grpc_error* error);
+
+  Mutex mu_;
+  Args args_;
+  Result* result_ = nullptr;
+  grpc_closure* notify_ = nullptr;
+  bool shutdown_ = false;
+  bool connecting_ = false;
+  // Holds the endpoint when first created before being handed off to
+  // the handshake manager.
+  grpc_endpoint* endpoint_ = nullptr;
+  grpc_closure connected_;
+  RefCountedPtr<HandshakeManager> handshake_mgr_;
+};
+
+}  // namespace grpc_core
 
 #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_CLIENT_CHTTP2_CONNECTOR_H */

+ 2 - 3
src/core/ext/transport/chttp2/client/insecure/channel_create.cc

@@ -40,9 +40,8 @@ class Chttp2InsecureClientChannelFactory : public ClientChannelFactory {
   Subchannel* CreateSubchannel(const grpc_channel_args* args) override {
     grpc_channel_args* new_args =
         grpc_default_authority_add_if_not_present(args);
-    grpc_connector* connector = grpc_chttp2_connector_create();
-    Subchannel* s = Subchannel::Create(connector, new_args);
-    grpc_connector_unref(connector);
+    Subchannel* s =
+        Subchannel::Create(MakeOrphanable<Chttp2Connector>(), new_args);
     grpc_channel_args_destroy(new_args);
     return s;
   }

+ 2 - 3
src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc

@@ -51,9 +51,8 @@ class Chttp2SecureClientChannelFactory : public ClientChannelFactory {
               "Failed to create channel args during subchannel creation.");
       return nullptr;
     }
-    grpc_connector* connector = grpc_chttp2_connector_create();
-    Subchannel* s = Subchannel::Create(connector, new_args);
-    grpc_connector_unref(connector);
+    Subchannel* s =
+        Subchannel::Create(MakeOrphanable<Chttp2Connector>(), new_args);
     grpc_channel_args_destroy(new_args);
     return s;
   }

+ 1 - 1
src/core/lib/iomgr/endpoint_pair_posix.cc

@@ -20,7 +20,7 @@
 
 #include "src/core/lib/iomgr/port.h"
 
-#ifdef GRPC_POSIX_SOCKET
+#ifdef GRPC_POSIX_SOCKET_TCP
 
 #include "src/core/lib/iomgr/endpoint_pair.h"
 #include "src/core/lib/iomgr/socket_utils_posix.h"

+ 103 - 0
src/core/lib/iomgr/logical_thread.cc

@@ -0,0 +1,103 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/lib/iomgr/logical_thread.h"
+
+namespace grpc_core {
+
+DebugOnlyTraceFlag grpc_logical_thread_trace(false, "logical_thread");
+
+struct CallbackWrapper {
+  CallbackWrapper(std::function<void()> cb, const grpc_core::DebugLocation& loc)
+      : callback(std::move(cb)), location(loc) {}
+
+  MultiProducerSingleConsumerQueue::Node mpscq_node;
+  const std::function<void()> callback;
+  const DebugLocation location;
+};
+
+void LogicalThread::Run(std::function<void()> callback,
+                        const grpc_core::DebugLocation& location) {
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_logical_thread_trace)) {
+    gpr_log(GPR_INFO, "LogicalThread::Run() %p Scheduling callback [%s:%d]",
+            this, location.file(), location.line());
+  }
+  const size_t prev_size = size_.FetchAdd(1);
+  if (prev_size == 0) {
+    // There is no other closure executing right now on this logical thread.
+    // Execute this closure immediately.
+    if (GRPC_TRACE_FLAG_ENABLED(grpc_logical_thread_trace)) {
+      gpr_log(GPR_INFO, "  Executing immediately");
+    }
+    callback();
+    // Loan this thread to the logical thread and drain the queue.
+    DrainQueue();
+  } else {
+    CallbackWrapper* cb_wrapper =
+        new CallbackWrapper(std::move(callback), location);
+    // There already are closures executing on this logical thread. Simply add
+    // this closure to the queue.
+    if (GRPC_TRACE_FLAG_ENABLED(grpc_logical_thread_trace)) {
+      gpr_log(GPR_INFO, "  Scheduling on queue : item %p", cb_wrapper);
+    }
+    queue_.Push(&cb_wrapper->mpscq_node);
+  }
+}
+
+// The thread that calls this loans itself to the logical thread so as to
+// execute all the scheduled callbacks. This is called from within
+// LogicalThread::Run() after executing a callback immediately, and hence size_
+// is at least 1.
+void LogicalThread::DrainQueue() {
+  while (true) {
+    if (GRPC_TRACE_FLAG_ENABLED(grpc_logical_thread_trace)) {
+      gpr_log(GPR_INFO, "LogicalThread::DrainQueue() %p", this);
+    }
+    size_t prev_size = size_.FetchSub(1);
+    // prev_size should be at least 1, since the callback that was just run is
+    // still accounted for in size_.
+    GPR_DEBUG_ASSERT(prev_size >= 1);
+    if (prev_size == 1) {
+      if (GRPC_TRACE_FLAG_ENABLED(grpc_logical_thread_trace)) {
+        gpr_log(GPR_INFO, "  Queue Drained");
+      }
+      break;
+    }
+    // There is at least one callback on the queue. Pop the callback from the
+    // queue and execute it.
+    CallbackWrapper* cb_wrapper = nullptr;
+    bool empty_unused;
+    while ((cb_wrapper = reinterpret_cast<CallbackWrapper*>(
+                queue_.PopAndCheckEnd(&empty_unused))) == nullptr) {
+      // This can happen either due to a race condition within the mpscq
+      // implementation or because of a race with Run().
+      if (GRPC_TRACE_FLAG_ENABLED(grpc_logical_thread_trace)) {
+        gpr_log(GPR_INFO, "  Queue returned nullptr, trying again");
+      }
+    }
+    if (GRPC_TRACE_FLAG_ENABLED(grpc_logical_thread_trace)) {
+      gpr_log(GPR_INFO, "  Running item %p : callback scheduled at [%s:%d]",
+              cb_wrapper, cb_wrapper->location.file(),
+              cb_wrapper->location.line());
+    }
+    cb_wrapper->callback();
+    delete cb_wrapper;
+  }
+}
+}  // namespace grpc_core
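
DrainQueue() works because Run() only loans the calling thread to the queue when its FetchAdd sees size_ == 0; every other caller just enqueues and returns, and the draining thread keeps popping until its FetchSub brings the count back to zero, which preserves FIFO order without a dedicated thread. A simplified, self-contained analogue of that "first scheduler borrows the thread" idea, using a plain mutex-guarded queue instead of the lock-free MPSC queue and atomic counter (illustrative names only, not gRPC APIs):

#include <cstdio>
#include <functional>
#include <mutex>
#include <queue>

class SerialExecutor {
 public:
  void Run(std::function<void()> cb) {
    bool drain = false;
    {
      std::lock_guard<std::mutex> lock(mu_);
      queue_.push(std::move(cb));
      if (!draining_) {  // first scheduler becomes the draining thread
        draining_ = true;
        drain = true;
      }
    }
    if (!drain) return;  // someone else is draining; FIFO order is preserved
    while (true) {
      std::function<void()> next;
      {
        std::lock_guard<std::mutex> lock(mu_);
        if (queue_.empty()) {
          draining_ = false;
          return;
        }
        next = std::move(queue_.front());
        queue_.pop();
      }
      next();  // executed on the borrowed (calling) thread, outside the lock
    }
  }

 private:
  std::mutex mu_;
  std::queue<std::function<void()>> queue_;
  bool draining_ = false;
};

int main() {
  SerialExecutor exec;
  exec.Run([&] {
    std::puts("first");
    exec.Run([] { std::puts("queued, runs after first returns"); });
  });
}
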

+ 52 - 0
src/core/lib/iomgr/logical_thread.h

@@ -0,0 +1,52 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include <functional>
+
+#include "src/core/lib/debug/trace.h"
+#include "src/core/lib/gprpp/atomic.h"
+#include "src/core/lib/gprpp/debug_location.h"
+#include "src/core/lib/gprpp/mpscq.h"
+#include "src/core/lib/gprpp/ref_counted.h"
+
+#ifndef GRPC_CORE_LIB_IOMGR_LOGICAL_THREAD_H
+#define GRPC_CORE_LIB_IOMGR_LOGICAL_THREAD_H
+
+namespace grpc_core {
+extern DebugOnlyTraceFlag grpc_logical_thread_trace;
+
+// LogicalThread is a mechanism to schedule callbacks in a synchronized manner.
+// All callbacks scheduled on a LogicalThread instance will be executed serially
+// in a borrowed thread. The API provides a FIFO guarantee to the execution of
+// callbacks scheduled on the thread.
+class LogicalThread : public RefCounted<LogicalThread> {
+ public:
+  void Run(std::function<void()> callback,
+           const grpc_core::DebugLocation& location);
+
+ private:
+  void DrainQueue();
+
+  Atomic<size_t> size_{0};
+  MultiProducerSingleConsumerQueue queue_;
+};
+} /* namespace grpc_core */
+
+#endif /* GRPC_CORE_LIB_IOMGR_LOGICAL_THREAD_H */

+ 13 - 0
src/core/lib/iomgr/pollset_windows.cc

@@ -185,19 +185,23 @@ done:
 
 static grpc_error* pollset_kick(grpc_pollset* p,
                                 grpc_pollset_worker* specific_worker) {
+  bool should_kick_global = false;
   if (specific_worker != NULL) {
     if (specific_worker == GRPC_POLLSET_KICK_BROADCAST) {
+      should_kick_global = true;
       for (specific_worker =
                p->root_worker.links[GRPC_POLLSET_WORKER_LINK_POLLSET].next;
            specific_worker != &p->root_worker;
            specific_worker =
                specific_worker->links[GRPC_POLLSET_WORKER_LINK_POLLSET].next) {
         specific_worker->kicked = 1;
+        should_kick_global = false;
         gpr_cv_signal(&specific_worker->cv);
       }
       p->kicked_without_pollers = 1;
       if (p->is_iocp_worker) {
         grpc_iocp_kick();
+        should_kick_global = false;
       }
     } else {
       if (p->is_iocp_worker && g_active_poller == specific_worker) {
@@ -216,6 +220,15 @@ static grpc_error* pollset_kick(grpc_pollset* p,
       grpc_iocp_kick();
     } else {
       p->kicked_without_pollers = 1;
+      should_kick_global = true;
+    }
+  }
+  if (should_kick_global && g_active_poller == NULL) {
+    grpc_pollset_worker* next_global_worker = pop_front_worker(
+        &g_global_root_worker, GRPC_POLLSET_WORKER_LINK_GLOBAL);
+    if (next_global_worker != NULL) {
+      next_global_worker->kicked = 1;
+      gpr_cv_signal(&next_global_worker->cv);
     }
   }
   return GRPC_ERROR_NONE;

+ 3 - 0
src/core/lib/iomgr/port.h

@@ -134,6 +134,7 @@
 #define GRPC_POSIX_SOCKET_EV_EPOLL1 1
 #define GRPC_POSIX_SOCKET_EV_EPOLLEX 1
 #define GRPC_POSIX_SOCKET_EV_POLL 1
+#define GRPC_POSIX_SOCKET_IF_NAMETOINDEX 1
 #define GRPC_POSIX_SOCKET_RESOLVE_ADDRESS 1
 #define GRPC_POSIX_SOCKET_SOCKADDR 1
 #define GRPC_POSIX_SOCKET_SOCKET_FACTORY 1
@@ -141,6 +142,7 @@
 #define GRPC_POSIX_SOCKET_TCP_CLIENT 1
 #define GRPC_POSIX_SOCKET_TCP_SERVER 1
 #define GRPC_POSIX_SOCKET_TCP_SERVER_UTILS_COMMON 1
+#define GRPC_POSIX_SOCKET_UDP_SERVER 1
 #define GRPC_POSIX_SOCKET_UTILS_COMMON 1
 #else
 #define GRPC_POSIX_SOCKET 1
@@ -227,6 +229,7 @@
 #define GRPC_POSIX_SOCKET_TCP_CLIENT 1
 #define GRPC_POSIX_SOCKET_TCP_SERVER 1
 #define GRPC_POSIX_SOCKET_TCP_SERVER_UTILS_COMMON 1
+#define GRPC_POSIX_SOCKET_UDP_SERVER 1
 #define GRPC_POSIX_SOCKET_UTILS_COMMON 1
 #endif
 

+ 1 - 1
src/core/lib/iomgr/udp_server.cc

@@ -29,7 +29,7 @@
 
 #include "src/core/lib/iomgr/port.h"
 
-#ifdef GRPC_POSIX_SOCKET
+#ifdef GRPC_POSIX_SOCKET_UDP_SERVER
 
 #include "src/core/lib/iomgr/udp_server.h"
 

+ 1 - 1
src/core/lib/security/credentials/oauth2/oauth2_credentials.cc

@@ -616,7 +616,7 @@ class StsTokenFetcherCredentials
       if (err != GRPC_ERROR_NONE) return cleanup();
       MaybeAddToBody(
           &body_strvec, "actor_token",
-          reinterpret_cast<const char*>(GRPC_SLICE_START_PTR(subject_token)));
+          reinterpret_cast<const char*>(GRPC_SLICE_START_PTR(actor_token)));
       MaybeAddToBody(&body_strvec, "actor_token_type", actor_token_type_.get());
     }
     return cleanup();

+ 2 - 2
src/core/lib/security/security_connector/alts/alts_security_connector.cc

@@ -69,7 +69,7 @@ class grpc_alts_channel_security_connector final
       grpc_core::RefCountedPtr<grpc_channel_credentials> channel_creds,
       grpc_core::RefCountedPtr<grpc_call_credentials> request_metadata_creds,
       const char* target_name)
-      : grpc_channel_security_connector(/*url_scheme=*/nullptr,
+      : grpc_channel_security_connector(GRPC_ALTS_URL_SCHEME,
                                         std::move(channel_creds),
                                         std::move(request_metadata_creds)),
         target_name_(gpr_strdup(target_name)) {}
@@ -129,7 +129,7 @@ class grpc_alts_server_security_connector final
  public:
   grpc_alts_server_security_connector(
       grpc_core::RefCountedPtr<grpc_server_credentials> server_creds)
-      : grpc_server_security_connector(/*url_scheme=*/nullptr,
+      : grpc_server_security_connector(GRPC_ALTS_URL_SCHEME,
                                        std::move(server_creds)) {}
 
   ~grpc_alts_server_security_connector() override = default;

+ 1 - 0
src/core/lib/security/security_connector/alts/alts_security_connector.h

@@ -25,6 +25,7 @@
 #include "src/core/lib/security/credentials/alts/grpc_alts_credentials_options.h"
 
 #define GRPC_ALTS_TRANSPORT_SECURITY_TYPE "alts"
+#define GRPC_ALTS_URL_SCHEME "https"
 
 /**
  * This method creates an ALTS channel security connector.

+ 2 - 1
src/core/lib/surface/completion_queue.cc

@@ -854,7 +854,8 @@ static void cq_end_op_for_callback(
   }
 
   auto* functor = static_cast<grpc_experimental_completion_queue_functor*>(tag);
-  if (internal || grpc_iomgr_is_any_background_poller_thread()) {
+  if (internal || functor->inlineable ||
+      grpc_iomgr_is_any_background_poller_thread()) {
     grpc_core::ApplicationCallbackExecCtx::Enqueue(functor,
                                                    (error == GRPC_ERROR_NONE));
     GRPC_ERROR_UNREF(error);

+ 2 - 1
src/core/lib/transport/byte_stream.h

@@ -40,7 +40,8 @@ class ByteStream : public Orphanable {
   // Returns true if the bytes are available immediately (in which case
   // on_complete will not be called), or false if the bytes will be available
   // asynchronously (in which case on_complete will be called when they
-  // are available).
+  // are available). Should not be called if there is no data left on the
+  // stream.
   //
   // max_size_hint can be set as a hint as to the maximum number
   // of bytes that would be acceptable to read.

+ 130 - 28
src/core/tsi/alts/handshaker/alts_handshaker_client.cc

@@ -18,6 +18,8 @@
 
 #include <grpc/support/port_platform.h>
 
+#include <list>
+
 #include "src/core/tsi/alts/handshaker/alts_handshaker_client.h"
 
 #include <grpc/byte_buffer.h>
@@ -171,26 +173,6 @@ static void maybe_complete_tsi_next(
   gpr_free(r);
 }
 
-static void on_status_received(void* arg, grpc_error* error) {
-  alts_grpc_handshaker_client* client =
-      static_cast<alts_grpc_handshaker_client*>(arg);
-  if (client->handshake_status_code != GRPC_STATUS_OK) {
-    // TODO(apolcyn): consider overriding the handshake result's
-    // status from the final ALTS message with the status here.
-    char* status_details =
-        grpc_slice_to_c_string(client->handshake_status_details);
-    gpr_log(GPR_INFO,
-            "alts_grpc_handshaker_client:%p on_status_received "
-            "status:%d details:|%s| error:|%s|",
-            client, client->handshake_status_code, status_details,
-            grpc_error_string(error));
-    gpr_free(status_details);
-  }
-  maybe_complete_tsi_next(client, true /* receive_status_finished */,
-                          nullptr /* pending_recv_message_result */);
-  alts_grpc_handshaker_client_unref(client);
-}
-
 static void handle_response_done(alts_grpc_handshaker_client* client,
                                  tsi_result status,
                                  const unsigned char* bytes_to_send,
@@ -301,14 +283,9 @@ void alts_handshaker_client_handle_response(alts_handshaker_client* c,
                        bytes_to_send, bytes_to_send_size, result);
 }
 
-/**
- * Populate grpc operation data with the fields of ALTS handshaker client and
- * make a grpc call.
- */
-static tsi_result make_grpc_call(alts_handshaker_client* c, bool is_start) {
-  GPR_ASSERT(c != nullptr);
-  alts_grpc_handshaker_client* client =
-      reinterpret_cast<alts_grpc_handshaker_client*>(c);
+static tsi_result continue_make_grpc_call(alts_grpc_handshaker_client* client,
+                                          bool is_start) {
+  GPR_ASSERT(client != nullptr);
   grpc_op ops[kHandshakerClientOpNum];
   memset(ops, 0, sizeof(ops));
   grpc_op* op = ops;
@@ -358,6 +335,125 @@ static tsi_result make_grpc_call(alts_handshaker_client* c, bool is_start) {
   return TSI_OK;
 }
 
+// TODO(apolcyn): remove this global queue when we can safely rely
+// on a MAX_CONCURRENT_STREAMS setting in the ALTS handshake server to
+// limit the number of concurrent handshakes.
+namespace {
+
+class HandshakeQueue {
+ public:
+  explicit HandshakeQueue(size_t max_outstanding_handshakes)
+      : max_outstanding_handshakes_(max_outstanding_handshakes) {}
+
+  void RequestHandshake(alts_grpc_handshaker_client* client) {
+    {
+      grpc_core::MutexLock lock(&mu_);
+      if (outstanding_handshakes_ == max_outstanding_handshakes_) {
+        // Max number already running, add to queue.
+        queued_handshakes_.push_back(client);
+        return;
+      }
+      // Start the handshake immediately.
+      ++outstanding_handshakes_;
+    }
+    continue_make_grpc_call(client, true /* is_start */);
+  }
+
+  void HandshakeDone() {
+    alts_grpc_handshaker_client* client = nullptr;
+    {
+      grpc_core::MutexLock lock(&mu_);
+      if (queued_handshakes_.empty()) {
+        // Nothing more in queue.  Decrement count and return immediately.
+        --outstanding_handshakes_;
+        return;
+      }
+      // Remove next entry from queue and start the handshake.
+      client = queued_handshakes_.front();
+      queued_handshakes_.pop_front();
+    }
+    continue_make_grpc_call(client, true /* is_start */);
+  }
+
+ private:
+  grpc_core::Mutex mu_;
+  std::list<alts_grpc_handshaker_client*> queued_handshakes_;
+  size_t outstanding_handshakes_ = 0;
+  const size_t max_outstanding_handshakes_;
+};
+
+gpr_once g_queued_handshakes_init = GPR_ONCE_INIT;
+/* Using separate queues for client and server handshakes is a
+ * hack that's mainly intended to satisfy the alts_concurrent_connectivity_test,
+ * which runs many concurrent handshakes where both endpoints
+ * are in the same process; this situation is problematic with a
+ * single queue because we have a high chance of using up all outstanding
+ * slots in the queue, such that there aren't any
+ * mutual client/server handshakes outstanding at the same time and
+ * able to make progress. */
+HandshakeQueue* g_client_handshake_queue;
+HandshakeQueue* g_server_handshake_queue;
+
+void DoHandshakeQueuesInit(void) {
+  const size_t per_queue_max_outstanding_handshakes = 40;
+  g_client_handshake_queue =
+      new HandshakeQueue(per_queue_max_outstanding_handshakes);
+  g_server_handshake_queue =
+      new HandshakeQueue(per_queue_max_outstanding_handshakes);
+}
+
+void RequestHandshake(alts_grpc_handshaker_client* client, bool is_client) {
+  gpr_once_init(&g_queued_handshakes_init, DoHandshakeQueuesInit);
+  HandshakeQueue* queue =
+      is_client ? g_client_handshake_queue : g_server_handshake_queue;
+  queue->RequestHandshake(client);
+}
+
+void HandshakeDone(bool is_client) {
+  HandshakeQueue* queue =
+      is_client ? g_client_handshake_queue : g_server_handshake_queue;
+  queue->HandshakeDone();
+}
+
+};  // namespace
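
The queue above is the whole throttling mechanism: up to max_outstanding_handshakes_ handshakes run immediately, later requests are parked in queued_handshakes_, and each HandshakeDone() either frees a slot or starts the next parked handshake in its place. A self-contained toy version of the same bounded-concurrency pattern, reduced to std types so the slot/park/release flow is easy to trace (names are illustrative):

#include <cstdio>
#include <functional>
#include <list>
#include <mutex>

class BoundedQueue {
 public:
  explicit BoundedQueue(size_t max_outstanding) : max_outstanding_(max_outstanding) {}

  // Starts `work` now if a slot is free, otherwise queues it.
  void Request(std::function<void()> work) {
    {
      std::lock_guard<std::mutex> lock(mu_);
      if (outstanding_ == max_outstanding_) {
        queued_.push_back(std::move(work));
        return;
      }
      ++outstanding_;
    }
    work();
  }

  // Called when one unit of work finishes; starts the next queued item, if any.
  void Done() {
    std::function<void()> next;
    {
      std::lock_guard<std::mutex> lock(mu_);
      if (queued_.empty()) {
        --outstanding_;
        return;
      }
      next = std::move(queued_.front());  // reuse the freed slot
      queued_.pop_front();
    }
    next();
  }

 private:
  std::mutex mu_;
  std::list<std::function<void()>> queued_;
  size_t outstanding_ = 0;
  const size_t max_outstanding_;
};

int main() {
  BoundedQueue q(/*max_outstanding=*/2);
  q.Request([] { std::puts("handshake 1 started"); });
  q.Request([] { std::puts("handshake 2 started"); });
  q.Request([] { std::puts("handshake 3 started (was parked)"); });  // parked here
  q.Done();  // handshake 1 finishes; handshake 3 starts now
  q.Done();  // handshake 2 finishes
  q.Done();  // handshake 3 finishes
}
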
+
+/**
+ * Populate grpc operation data with the fields of ALTS handshaker client and
+ * make a grpc call.
+ */
+static tsi_result make_grpc_call(alts_handshaker_client* c, bool is_start) {
+  GPR_ASSERT(c != nullptr);
+  alts_grpc_handshaker_client* client =
+      reinterpret_cast<alts_grpc_handshaker_client*>(c);
+  if (is_start) {
+    RequestHandshake(client, client->is_client);
+    return TSI_OK;
+  } else {
+    return continue_make_grpc_call(client, is_start);
+  }
+}
+
+static void on_status_received(void* arg, grpc_error* error) {
+  alts_grpc_handshaker_client* client =
+      static_cast<alts_grpc_handshaker_client*>(arg);
+  if (client->handshake_status_code != GRPC_STATUS_OK) {
+    // TODO(apolcyn): consider overriding the handshake result's
+    // status from the final ALTS message with the status here.
+    char* status_details =
+        grpc_slice_to_c_string(client->handshake_status_details);
+    gpr_log(GPR_INFO,
+            "alts_grpc_handshaker_client:%p on_status_received "
+            "status:%d details:|%s| error:|%s|",
+            client, client->handshake_status_code, status_details,
+            grpc_error_string(error));
+    gpr_free(status_details);
+  }
+  maybe_complete_tsi_next(client, true /* receive_status_finished */,
+                          nullptr /* pending_recv_message_result */);
+  HandshakeDone(client->is_client);
+  alts_grpc_handshaker_client_unref(client);
+}
+
 /* Serializes a grpc_gcp_HandshakerReq message into a buffer and returns newly
  * grpc_byte_buffer holding it. */
 static grpc_byte_buffer* get_serialized_handshaker_req(
@@ -732,6 +828,12 @@ void alts_handshaker_client_ref_for_testing(alts_handshaker_client* c) {
 
 void alts_handshaker_client_on_status_received_for_testing(
     alts_handshaker_client* c, grpc_status_code status, grpc_error* error) {
+  // We first make sure that the handshake queue has been initialized
+  // here because there are tests that use this API that mock out
+  // other parts of the alts_handshaker_client in such a way that the
+  // code path that would normally ensure that the handshake queue
+  // has been initialized isn't taken.
+  gpr_once_init(&g_queued_handshakes_init, DoHandshakeQueuesInit);
   alts_grpc_handshaker_client* client =
       reinterpret_cast<alts_grpc_handshaker_client*>(c);
   client->handshake_status_code = status;

+ 8 - 2
src/cpp/client/channel_cc.cc

@@ -20,7 +20,6 @@
 
 #include <cstring>
 #include <memory>
-#include <mutex>
 
 #include <grpc/grpc.h>
 #include <grpc/slice.h>
@@ -214,7 +213,14 @@ bool Channel::WaitForStateChangeImpl(grpc_connectivity_state last_observed,
 namespace {
 class ShutdownCallback : public grpc_experimental_completion_queue_functor {
  public:
-  ShutdownCallback() { functor_run = &ShutdownCallback::Run; }
+  ShutdownCallback() {
+    functor_run = &ShutdownCallback::Run;
+    // Set inlineable to true since this callback is trivial and thus does not
+    // need to be run from the executor (triggering a thread hop). This should
+    // only be used by internal callbacks like this and not by user application
+    // code.
+    inlineable = true;
+  }
   // TakeCQ takes ownership of the cq into the shutdown callback
   // so that the shutdown callback will be responsible for destroying it
   void TakeCQ(::grpc::CompletionQueue* cq) { cq_ = cq; }

+ 15 - 2
src/cpp/client/client_context.cc

@@ -88,14 +88,27 @@ void ClientContext::set_credentials(
   }
 }
 
-std::unique_ptr<ClientContext> ClientContext::FromServerContext(
-    const grpc::ServerContext& context, PropagationOptions options) {
+std::unique_ptr<ClientContext> ClientContext::FromInternalServerContext(
+    const grpc_impl::experimental::ServerContextBase& context,
+    PropagationOptions options) {
   std::unique_ptr<ClientContext> ctx(new ClientContext);
   ctx->propagate_from_call_ = context.call_;
   ctx->propagation_options_ = options;
   return ctx;
 }
 
+std::unique_ptr<ClientContext> ClientContext::FromServerContext(
+    const grpc_impl::ServerContext& server_context,
+    PropagationOptions options) {
+  return FromInternalServerContext(server_context, options);
+}
+
+std::unique_ptr<ClientContext> ClientContext::FromCallbackServerContext(
+    const grpc_impl::experimental::CallbackServerContext& server_context,
+    PropagationOptions options) {
+  return FromInternalServerContext(server_context, options);
+}
+
 void ClientContext::AddMetadata(const grpc::string& meta_key,
                                 const grpc::string& meta_value) {
   send_initial_metadata_.insert(std::make_pair(meta_key, meta_value));
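
FromCallbackServerContext() gives the callback API the same context-propagation entry point that FromServerContext() provides for the sync/async APIs: the returned ClientContext inherits deadline, cancellation, and census data from the inbound call according to the given PropagationOptions. A hedged usage sketch; the surrounding handler and any downstream RPC are assumed application code, not part of this change:

#include <memory>

#include <grpcpp/grpcpp.h>

// Inside a callback-API server handler; `server_ctx` is the inbound call's
// context, and the nested outgoing RPC issued with `client_ctx` is up to the
// application.
void PropagateToDownstreamCall(
    grpc::experimental::CallbackServerContext* server_ctx) {
  std::unique_ptr<grpc::ClientContext> client_ctx =
      grpc::ClientContext::FromCallbackServerContext(*server_ctx,
                                                     grpc::PropagationOptions());
  // ... start the downstream RPC with *client_ctx so it shares the parent
  // call's deadline and is cancelled when the parent call is cancelled ...
}
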

+ 0 - 2
src/cpp/server/dynamic_thread_pool.cc

@@ -18,8 +18,6 @@
 
 #include "src/cpp/server/dynamic_thread_pool.h"
 
-#include <mutex>
-
 #include <grpc/support/log.h>
 #include <grpcpp/impl/codegen/sync.h>
 

+ 0 - 2
src/cpp/server/dynamic_thread_pool.h

@@ -19,10 +19,8 @@
 #ifndef GRPC_INTERNAL_CPP_DYNAMIC_THREAD_POOL_H
 #define GRPC_INTERNAL_CPP_DYNAMIC_THREAD_POOL_H
 
-#include <condition_variable>
 #include <list>
 #include <memory>
-#include <mutex>
 #include <queue>
 
 #include <grpcpp/support/config.h>

+ 0 - 1
src/cpp/server/health/default_health_check_service.cc

@@ -17,7 +17,6 @@
  */
 
 #include <memory>
-#include <mutex>
 
 #include <grpc/slice.h>
 #include <grpc/support/alloc.h>

+ 0 - 1
src/cpp/server/health/default_health_check_service.h

@@ -20,7 +20,6 @@
 #define GRPC_INTERNAL_CPP_SERVER_DEFAULT_HEALTH_CHECK_SERVICE_H
 
 #include <atomic>
-#include <mutex>
 #include <set>
 
 #include <grpc/support/log.h>

+ 52 - 0
src/cpp/server/server_callback.cc

@@ -0,0 +1,52 @@
+/*
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpcpp/impl/codegen/server_callback_impl.h>
+
+#include "src/core/lib/iomgr/closure.h"
+#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/iomgr/executor.h"
+
+namespace grpc_impl {
+namespace internal {
+
+void ServerCallbackCall::CallOnCancel(ServerReactor* reactor) {
+  if (reactor->InternalInlineable()) {
+    reactor->OnCancel();
+  } else {
+    Ref();
+    grpc_core::ExecCtx exec_ctx;
+    struct ClosureArg {
+      ServerCallbackCall* call;
+      ServerReactor* reactor;
+    };
+    ClosureArg* arg = new ClosureArg{this, reactor};
+    grpc_core::Executor::Run(GRPC_CLOSURE_CREATE(
+                                 [](void* void_arg, grpc_error*) {
+                                   ClosureArg* arg =
+                                       static_cast<ClosureArg*>(void_arg);
+                                   arg->reactor->OnCancel();
+                                   arg->call->MaybeDone();
+                                   delete arg;
+                                 },
+                                 arg, nullptr),
+                             GRPC_ERROR_NONE);
+  }
+}
+
+}  // namespace internal
+}  // namespace grpc_impl

+ 40 - 16
src/cpp/server/server_cc.cc

@@ -258,7 +258,14 @@ bool ServerInterface::GenericAsyncRequest::FinalizeResult(void** tag,
 namespace {
 class ShutdownCallback : public grpc_experimental_completion_queue_functor {
  public:
-  ShutdownCallback() { functor_run = &ShutdownCallback::Run; }
+  ShutdownCallback() {
+    functor_run = &ShutdownCallback::Run;
+    // Set inlineable to true since this callback is trivial and thus does not
+    // need to be run from the executor (triggering a thread hop). This should
+    // only be used by internal callbacks like this and not by user application
+    // code.
+    inlineable = true;
+  }
   // TakeCQ takes ownership of the cq into the shutdown callback
   // so that the shutdown callback will be responsible for destroying it
   void TakeCQ(CompletionQueue* cq) { cq_ = cq; }
@@ -536,8 +543,9 @@ class Server::CallbackRequestBase : public grpc::internal::CompletionQueueTag {
 template <class ServerContextType>
 class Server::CallbackRequest final : public Server::CallbackRequestBase {
  public:
-  static_assert(std::is_base_of<grpc::ServerContext, ServerContextType>::value,
-                "ServerContextType must be derived from ServerContext");
+  static_assert(std::is_base_of<grpc::experimental::CallbackServerContext,
+                                ServerContextType>::value,
+                "ServerContextType must be derived from CallbackServerContext");
 
   // The constructor needs to know the server for this callback request and its
   // index in the server's request count array to allow for proper dynamic
@@ -609,6 +617,13 @@ class Server::CallbackRequest final : public Server::CallbackRequestBase {
     CallbackCallTag(Server::CallbackRequest<ServerContextType>* req)
         : req_(req) {
       functor_run = &CallbackCallTag::StaticRun;
+      // Set inlineable to true since this callback is internally-controlled
+      // without taking any locks, and thus does not need to be run from the
+      // executor (which triggers a thread hop). This should only be used by
+      // internal callbacks like this and not by user application code. The work
+      // here is actually non-trivial, but there is no chance of having user
+      // locks conflict with each other so it's ok to run inlined.
+      inlineable = true;
     }
 
     // force_run can not be performed on a tag if operations using this tag
@@ -784,14 +799,14 @@ class Server::CallbackRequest final : public Server::CallbackRequestBase {
 };
 
 template <>
-bool Server::CallbackRequest<grpc::ServerContext>::FinalizeResult(
-    void** /*tag*/, bool* /*status*/) {
+bool Server::CallbackRequest<grpc::experimental::CallbackServerContext>::
+    FinalizeResult(void** /*tag*/, bool* /*status*/) {
   return false;
 }
 
 template <>
-bool Server::CallbackRequest<grpc::GenericServerContext>::FinalizeResult(
-    void** /*tag*/, bool* status) {
+bool Server::CallbackRequest<grpc::experimental::GenericCallbackServerContext>::
+    FinalizeResult(void** /*tag*/, bool* status) {
   if (*status) {
     // TODO(yangg) remove the copy here
     ctx_.method_ = grpc::StringFromCopiedSlice(call_details_->method);
@@ -803,13 +818,14 @@ bool Server::CallbackRequest<grpc::GenericServerContext>::FinalizeResult(
 }
 
 template <>
-const char* Server::CallbackRequest<grpc::ServerContext>::method_name() const {
+const char* Server::CallbackRequest<
+    grpc::experimental::CallbackServerContext>::method_name() const {
   return method_->name();
 }
 
 template <>
-const char* Server::CallbackRequest<grpc::GenericServerContext>::method_name()
-    const {
+const char* Server::CallbackRequest<
+    grpc::experimental::GenericCallbackServerContext>::method_name() const {
   return ctx_.method().c_str();
 }
 
@@ -1009,9 +1025,6 @@ Server::Server(
 Server::~Server() {
   {
     grpc::internal::ReleasableMutexLock lock(&mu_);
-    if (callback_cq_ != nullptr) {
-      callback_cq_->Shutdown();
-    }
     if (started_ && !shutdown_) {
       lock.Unlock();
       Shutdown();
@@ -1020,6 +1033,10 @@ Server::~Server() {
       for (const auto& value : sync_req_mgrs_) {
         value->Shutdown();
       }
+      if (callback_cq_ != nullptr) {
+        callback_cq_->Shutdown();
+        callback_cq_ = nullptr;
+      }
     }
   }
 
@@ -1114,7 +1131,7 @@ bool Server::RegisterService(const grpc::string* host, grpc::Service* service) {
       // TODO(vjpai): Register these dynamically based on need
       for (int i = 0; i < DEFAULT_CALLBACK_REQS_PER_METHOD; i++) {
         callback_reqs_to_start_.push_back(
-            new CallbackRequest<grpc::ServerContext>(
+            new CallbackRequest<grpc::experimental::CallbackServerContext>(
                 this, method_index, method.get(), method_registration_tag));
       }
       // Enqueue it so that it will be Request'ed later after all request
@@ -1157,8 +1174,8 @@ void Server::RegisterCallbackGenericService(
   // TODO(vjpai): Register these dynamically based on need
   for (int i = 0; i < DEFAULT_CALLBACK_REQS_PER_METHOD; i++) {
     callback_reqs_to_start_.push_back(
-        new CallbackRequest<grpc::GenericServerContext>(this, method_index,
-                                                        nullptr, nullptr));
+        new CallbackRequest<grpc::experimental::GenericCallbackServerContext>(
+            this, method_index, nullptr, nullptr));
   }
 }
 
@@ -1315,6 +1332,13 @@ void Server::ShutdownInternal(gpr_timespec deadline) {
         &callback_reqs_mu_, [this] { return callback_reqs_outstanding_ == 0; });
   }
 
+  // Shutdown the callback CQ. The CQ is owned by its own shutdown tag, so it
+  // will delete itself at true shutdown.
+  if (callback_cq_ != nullptr) {
+    callback_cq_->Shutdown();
+    callback_cq_ = nullptr;
+  }
+
   // Drain the shutdown queue (if the previous call to AsyncNext() timed out
   // and we didn't remove the tag from the queue yet)
   while (shutdown_cq.Next(&tag, &ok)) {

+ 47 - 70
src/cpp/server/server_context.cc

@@ -19,7 +19,6 @@
 #include <grpcpp/impl/codegen/server_context_impl.h>
 
 #include <algorithm>
-#include <mutex>
 #include <utility>
 
 #include <grpc/compression.h>
@@ -37,17 +36,19 @@
 #include "src/core/lib/surface/call.h"
 
 namespace grpc_impl {
+namespace experimental {
 
 // CompletionOp
 
-class ServerContext::CompletionOp final
+class ServerContextBase::CompletionOp final
     : public ::grpc::internal::CallOpSetInterface {
  public:
   // initial refs: one in the server context, one in the cq
   // must ref the call before calling constructor and after deleting this
-  CompletionOp(::grpc::internal::Call* call, internal::ServerReactor* reactor)
+  CompletionOp(::grpc::internal::Call* call,
+               ::grpc_impl::internal::ServerCallbackCall* callback_controller)
       : call_(*call),
-        reactor_(reactor),
+        callback_controller_(callback_controller),
         has_tag_(false),
         tag_(nullptr),
         core_cq_tag_(this),
@@ -100,22 +101,6 @@ class ServerContext::CompletionOp final
     tag_ = tag;
   }
 
-  void SetCancelCallback(std::function<void()> callback) {
-    grpc_core::MutexLock lock(&mu_);
-
-    if (finalized_ && (cancelled_ != 0)) {
-      callback();
-      return;
-    }
-
-    cancel_callback_ = std::move(callback);
-  }
-
-  void ClearCancelCallback() {
-    grpc_core::MutexLock g(&mu_);
-    cancel_callback_ = nullptr;
-  }
-
   void set_core_cq_tag(void* core_cq_tag) { core_cq_tag_ = core_cq_tag; }
 
   void* core_cq_tag() override { return core_cq_tag_; }
@@ -153,7 +138,7 @@ class ServerContext::CompletionOp final
   }
 
   ::grpc::internal::Call call_;
-  internal::ServerReactor* const reactor_;
+  ::grpc_impl::internal::ServerCallbackCall* const callback_controller_;
   bool has_tag_;
   void* tag_;
   void* core_cq_tag_;
@@ -161,12 +146,11 @@ class ServerContext::CompletionOp final
   grpc_core::Mutex mu_;
   bool finalized_;
   int cancelled_;  // This is an int (not bool) because it is passed to core
-  std::function<void()> cancel_callback_;
   bool done_intercepting_;
   ::grpc::internal::InterceptorBatchMethodsImpl interceptor_methods_;
 };
 
-void ServerContext::CompletionOp::Unref() {
+void ServerContextBase::CompletionOp::Unref() {
   if (refs_.Unref()) {
     grpc_call* call = call_.call();
     delete this;
@@ -174,7 +158,7 @@ void ServerContext::CompletionOp::Unref() {
   }
 }
 
-void ServerContext::CompletionOp::FillOps(::grpc::internal::Call* call) {
+void ServerContextBase::CompletionOp::FillOps(::grpc::internal::Call* call) {
   grpc_op ops;
   ops.op = GRPC_OP_RECV_CLOSE_ON_SERVER;
   ops.data.recv_close_on_server.cancelled = &cancelled_;
@@ -190,7 +174,7 @@ void ServerContext::CompletionOp::FillOps(::grpc::internal::Call* call) {
   /* No interceptors to run here */
 }
 
-bool ServerContext::CompletionOp::FinalizeResult(void** tag, bool* status) {
+bool ServerContextBase::CompletionOp::FinalizeResult(void** tag, bool* status) {
   bool ret = false;
   grpc_core::ReleasableMutexLock lock(&mu_);
   if (done_intercepting_) {
@@ -214,21 +198,11 @@ bool ServerContext::CompletionOp::FinalizeResult(void** tag, bool* status) {
   // Decide whether to call the cancel callback before releasing the lock
   bool call_cancel = (cancelled_ != 0);
 
-  // If it's a unary cancel callback, call it under the lock so that it doesn't
-  // race with ClearCancelCallback. Although we don't normally call callbacks
-  // under a lock, this is a special case since the user needs a guarantee that
-  // the callback won't issue or run after ClearCancelCallback has returned.
-  // This requirement imposes certain restrictions on the callback, documented
-  // in the API comments of SetCancelCallback.
-  if (cancel_callback_) {
-    cancel_callback_();
-  }
-
   // Release the lock since we may call a callback and interceptors now.
   lock.Unlock();
 
-  if (call_cancel && reactor_ != nullptr) {
-    reactor_->MaybeCallOnCancel();
+  if (call_cancel && callback_controller_ != nullptr) {
+    callback_controller_->MaybeCallOnCancel();
   }
   /* Add interception point and run through interceptors */
   interceptor_methods_.AddInterceptionHookPoint(
@@ -246,16 +220,19 @@ bool ServerContext::CompletionOp::FinalizeResult(void** tag, bool* status) {
   return false;
 }
 
-// ServerContext body
+// ServerContextBase body
 
-ServerContext::ServerContext() { Setup(gpr_inf_future(GPR_CLOCK_REALTIME)); }
+ServerContextBase::ServerContextBase() {
+  Setup(gpr_inf_future(GPR_CLOCK_REALTIME));
+}
 
-ServerContext::ServerContext(gpr_timespec deadline, grpc_metadata_array* arr) {
+ServerContextBase::ServerContextBase(gpr_timespec deadline,
+                                     grpc_metadata_array* arr) {
   Setup(deadline);
   std::swap(*client_metadata_.arr(), *arr);
 }
 
-void ServerContext::Setup(gpr_timespec deadline) {
+void ServerContextBase::Setup(gpr_timespec deadline) {
   completion_op_ = nullptr;
   has_notify_when_done_tag_ = false;
   async_notify_when_done_tag_ = nullptr;
@@ -268,15 +245,15 @@ void ServerContext::Setup(gpr_timespec deadline) {
   rpc_info_ = nullptr;
 }
 
-void ServerContext::BindDeadlineAndMetadata(gpr_timespec deadline,
-                                            grpc_metadata_array* arr) {
+void ServerContextBase::BindDeadlineAndMetadata(gpr_timespec deadline,
+                                                grpc_metadata_array* arr) {
   deadline_ = deadline;
   std::swap(*client_metadata_.arr(), *arr);
 }
 
-ServerContext::~ServerContext() { Clear(); }
+ServerContextBase::~ServerContextBase() { Clear(); }
 
-void ServerContext::Clear() {
+void ServerContextBase::Clear() {
   auth_context_.reset();
   initial_metadata_.clear();
   trailing_metadata_.clear();
@@ -295,11 +272,17 @@ void ServerContext::Clear() {
     call_ = nullptr;
     grpc_call_unref(call);
   }
+  if (default_reactor_used_.load(std::memory_order_relaxed)) {
+    default_reactor_.~Reactor();
+    new (&default_reactor_) Reactor;
+    default_reactor_used_.store(false, std::memory_order_relaxed);
+  }
+  test_unary_.reset();
 }
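
The default_reactor_ reset above uses the destroy-then-placement-new idiom: the reactor embedded in the context is destructed in place and a fresh one is constructed in the same storage, so the context can be reused across calls without a heap allocation. A minimal, self-contained illustration of that idiom with generic types (not the gRPC classes):

#include <cstdio>
#include <new>

struct Reactor {
  int events = 0;
  ~Reactor() { std::printf("reactor destroyed after %d events\n", events); }
};

struct Holder {
  Reactor reactor;  // storage reused across "calls"

  void Reset() {
    reactor.~Reactor();       // run the destructor in place...
    new (&reactor) Reactor;   // ...then rebuild a fresh object in the same storage
  }
};

int main() {
  Holder h;
  h.reactor.events = 3;
  h.Reset();  // prints the destructor message; the reactor is now back to its initial state
  std::printf("events after reset: %d\n", h.reactor.events);
}
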
 
-void ServerContext::BeginCompletionOp(::grpc::internal::Call* call,
-                                      std::function<void(bool)> callback,
-                                      internal::ServerReactor* reactor) {
+void ServerContextBase::BeginCompletionOp(
+    ::grpc::internal::Call* call, std::function<void(bool)> callback,
+    ::grpc_impl::internal::ServerCallbackCall* callback_controller) {
   GPR_ASSERT(!completion_op_);
   if (rpc_info_) {
     rpc_info_->Ref();
@@ -307,9 +290,10 @@ void ServerContext::BeginCompletionOp(::grpc::internal::Call* call,
   grpc_call_ref(call->call());
   completion_op_ =
       new (grpc_call_arena_alloc(call->call(), sizeof(CompletionOp)))
-          CompletionOp(call, reactor);
-  if (callback != nullptr) {
-    completion_tag_.Set(call->call(), std::move(callback), completion_op_);
+          CompletionOp(call, callback_controller);
+  if (callback_controller != nullptr) {
+    completion_tag_.Set(call->call(), std::move(callback), completion_op_,
+                        true);
     completion_op_->set_core_cq_tag(&completion_tag_);
     completion_op_->set_tag(completion_op_);
   } else if (has_notify_when_done_tag_) {
@@ -318,21 +302,21 @@ void ServerContext::BeginCompletionOp(::grpc::internal::Call* call,
   call->PerformOps(completion_op_);
 }
 
-::grpc::internal::CompletionQueueTag* ServerContext::GetCompletionOpTag() {
+::grpc::internal::CompletionQueueTag* ServerContextBase::GetCompletionOpTag() {
   return static_cast<::grpc::internal::CompletionQueueTag*>(completion_op_);
 }
 
-void ServerContext::AddInitialMetadata(const grpc::string& key,
-                                       const grpc::string& value) {
+void ServerContextBase::AddInitialMetadata(const grpc::string& key,
+                                           const grpc::string& value) {
   initial_metadata_.insert(std::make_pair(key, value));
 }
 
-void ServerContext::AddTrailingMetadata(const grpc::string& key,
-                                        const grpc::string& value) {
+void ServerContextBase::AddTrailingMetadata(const grpc::string& key,
+                                            const grpc::string& value) {
   trailing_metadata_.insert(std::make_pair(key, value));
 }
 
-void ServerContext::TryCancel() const {
+void ServerContextBase::TryCancel() const {
   ::grpc::internal::CancelInterceptorBatchMethods cancel_methods;
   if (rpc_info_) {
     for (size_t i = 0; i < rpc_info_->interceptors_.size(); i++) {
@@ -346,15 +330,7 @@ void ServerContext::TryCancel() const {
   }
 }
 
-void ServerContext::SetCancelCallback(std::function<void()> callback) {
-  completion_op_->SetCancelCallback(std::move(callback));
-}
-
-void ServerContext::ClearCancelCallback() {
-  completion_op_->ClearCancelCallback();
-}
-
-bool ServerContext::IsCancelled() const {
+bool ServerContextBase::IsCancelled() const {
   if (completion_tag_) {
     // When using callback API, this result is always valid.
     return completion_op_->CheckCancelledAsync();
@@ -368,7 +344,7 @@ bool ServerContext::IsCancelled() const {
   }
 }
 
-void ServerContext::set_compression_algorithm(
+void ServerContextBase::set_compression_algorithm(
     grpc_compression_algorithm algorithm) {
   compression_algorithm_ = algorithm;
   const char* algorithm_name = nullptr;
@@ -381,7 +357,7 @@ void ServerContext::set_compression_algorithm(
   AddInitialMetadata(GRPC_COMPRESSION_REQUEST_ALGORITHM_MD_KEY, algorithm_name);
 }
 
-grpc::string ServerContext::peer() const {
+grpc::string ServerContextBase::peer() const {
   grpc::string peer;
   if (call_) {
     char* c_peer = grpc_call_get_peer(call_);
@@ -391,11 +367,11 @@ grpc::string ServerContext::peer() const {
   return peer;
 }
 
-const struct census_context* ServerContext::census_context() const {
+const struct census_context* ServerContextBase::census_context() const {
   return grpc_census_call_get_context(call_);
 }
 
-void ServerContext::SetLoadReportingCosts(
+void ServerContextBase::SetLoadReportingCosts(
     const std::vector<grpc::string>& cost_data) {
   if (call_ == nullptr) return;
   for (const auto& cost_datum : cost_data) {
@@ -403,4 +379,5 @@ void ServerContext::SetLoadReportingCosts(
   }
 }
 
+}  // namespace experimental
 }  // namespace grpc_impl

+ 0 - 1
src/cpp/thread_manager/thread_manager.cc

@@ -19,7 +19,6 @@
 #include "src/cpp/thread_manager/thread_manager.h"
 
 #include <climits>
-#include <mutex>
 
 #include <grpc/support/log.h>
 #include "src/core/lib/gprpp/thd.h"

+ 0 - 2
src/cpp/thread_manager/thread_manager.h

@@ -19,10 +19,8 @@
 #ifndef GRPC_INTERNAL_CPP_THREAD_MANAGER_H
 #define GRPC_INTERNAL_CPP_THREAD_MANAGER_H
 
-#include <condition_variable>
 #include <list>
 #include <memory>
-#include <mutex>
 
 #include <grpcpp/support/config.h>
 

+ 20 - 0
src/csharp/Grpc.Core.Tests/CompressionTest.cs

@@ -129,5 +129,25 @@ namespace Grpc.Core.Tests
 
             Assert.AreEqual(request, response);
         }
+
+        [Test]
+        public void CanReadCompressedMessages_EmptyPayload()
+        {
+            var compressionMetadata = new Metadata
+            {
+                { new Metadata.Entry(Metadata.CompressionRequestAlgorithmMetadataKey, "gzip") }
+            };
+
+            helper.UnaryHandler = new UnaryServerMethod<string, string>(async (req, context) =>
+            {
+                await context.WriteResponseHeadersAsync(compressionMetadata);
+                return req;
+            });
+
+            var request = "";
+            var response = Calls.BlockingUnaryCall(helper.CreateUnaryCall(new CallOptions(compressionMetadata)), request);
+
+            Assert.AreEqual(request, response);
+        }
     }
 }

+ 4 - 0
src/objective-c/GRPCClient/GRPCInterceptor.h

@@ -97,6 +97,10 @@
                                                    receiveNextMessages
    \endverbatim
  *
+ * An interceptor must forward responses to its previous interceptor in the order of initial
+ * metadata, message(s), and trailing metadata. Forwarding responses out of this order (e.g.
+ * forwarding a message before initial metadata) is not allowed.
+ *
  * Events of requests and responses are dispatched to interceptor objects using the interceptor's
  * dispatch queue. The dispatch queue should be serial queue to make sure the events are processed
  * in order. Interceptor implementations must derive from GRPCInterceptor class. The class makes

+ 5 - 2
src/objective-c/ProtoRPC/ProtoRPC.h

@@ -73,8 +73,11 @@ NS_ASSUME_NONNULL_BEGIN
 /**
  * A convenience class of objects that act as response handlers of calls. Issues
  * response to a single handler when the response is completed.
+ *
+ * The object is stateful and should not be reused for multiple calls. If multiple calls share the
+ * same response handling logic, create separate GRPCUnaryResponseHandler objects for each call.
  */
-@interface GRPCUnaryResponseHandler : NSObject<GRPCProtoResponseHandler>
+@interface GRPCUnaryResponseHandler<ResponseType> : NSObject<GRPCProtoResponseHandler>
 
 /**
  * Creates a responsehandler object with a unary call handler.
@@ -83,7 +86,7 @@ NS_ASSUME_NONNULL_BEGIN
  * responseDispatchQueue: the dispatch queue on which the response handler
  * should be issued. If it's nil, the handler will use the main queue.
  */
-- (nullable instancetype)initWithResponseHandler:(void (^)(GPBMessage *, NSError *))handler
+- (nullable instancetype)initWithResponseHandler:(void (^)(ResponseType, NSError *))handler
                            responseDispatchQueue:(nullable dispatch_queue_t)dispatchQueue;
 
 /** Response headers received during the call. */

+ 2 - 2
src/objective-c/ProtoRPC/ProtoRPC.m

@@ -28,13 +28,13 @@
 #import <RxLibrary/GRXWriter+Transformations.h>
 
 @implementation GRPCUnaryResponseHandler {
-  void (^_responseHandler)(GPBMessage *, NSError *);
+  void (^_responseHandler)(id, NSError *);
   dispatch_queue_t _responseDispatchQueue;
 
   GPBMessage *_message;
 }
 
-- (nullable instancetype)initWithResponseHandler:(void (^)(GPBMessage *, NSError *))handler
+- (nullable instancetype)initWithResponseHandler:(void (^)(id, NSError *))handler
                            responseDispatchQueue:(dispatch_queue_t)dispatchQueue {
   if ((self = [super init])) {
     _responseHandler = handler;

+ 31 - 0
src/php/ext/grpc/channel_credentials.c

@@ -100,6 +100,27 @@ PHP_METHOD(ChannelCredentials, setDefaultRootsPem) {
   memcpy(default_pem_root_certs, pem_roots, pem_roots_length + 1);
 }
 
+/**
+ * Check whether the default roots PEM has been set.
+ * @return TRUE if set, FALSE otherwise
+ */
+PHP_METHOD(ChannelCredentials, isDefaultRootsPemSet) {
+  if (default_pem_root_certs) {
+    RETURN_TRUE;
+  }
+  RETURN_FALSE;
+}
+
+/**
+ * Free the default roots PEM, if it has been set.
+ */
+PHP_METHOD(ChannelCredentials, invalidateDefaultRootsPem) {
+  if (default_pem_root_certs) {
+    gpr_free(default_pem_root_certs);
+    default_pem_root_certs = NULL;
+  }
+}
+
 /**
  * Create a default channel credentials object.
  * @return ChannelCredentials The new default channel credentials object
@@ -214,6 +235,12 @@ ZEND_BEGIN_ARG_INFO_EX(arginfo_setDefaultRootsPem, 0, 0, 1)
   ZEND_ARG_INFO(0, pem_roots)
 ZEND_END_ARG_INFO()
 
+ZEND_BEGIN_ARG_INFO_EX(arginfo_isDefaultRootsPemSet, 0, 0, 0)
+ZEND_END_ARG_INFO()
+
+ZEND_BEGIN_ARG_INFO_EX(arginfo_invalidateDefaultRootsPem, 0, 0, 0)
+ZEND_END_ARG_INFO()
+
 ZEND_BEGIN_ARG_INFO_EX(arginfo_createDefault, 0, 0, 0)
 ZEND_END_ARG_INFO()
 
@@ -234,6 +261,10 @@ ZEND_END_ARG_INFO()
 static zend_function_entry channel_credentials_methods[] = {
   PHP_ME(ChannelCredentials, setDefaultRootsPem, arginfo_setDefaultRootsPem,
          ZEND_ACC_PUBLIC | ZEND_ACC_STATIC)
+  PHP_ME(ChannelCredentials, isDefaultRootsPemSet, arginfo_isDefaultRootsPemSet,
+         ZEND_ACC_PUBLIC | ZEND_ACC_STATIC)
+  PHP_ME(ChannelCredentials, invalidateDefaultRootsPem, arginfo_invalidateDefaultRootsPem,
+         ZEND_ACC_PUBLIC | ZEND_ACC_STATIC)
   PHP_ME(ChannelCredentials, createDefault, arginfo_createDefault,
          ZEND_ACC_PUBLIC | ZEND_ACC_STATIC)
   PHP_ME(ChannelCredentials, createSsl, arginfo_createSsl,

+ 65 - 0
src/php/ext/grpc/php_grpc.c

@@ -26,6 +26,10 @@
 #include "call_credentials.h"
 #include "server_credentials.h"
 #include "completion_queue.h"
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/time.h>
 #include <ext/spl/spl_exceptions.h>
 #include <zend_exceptions.h>
 
@@ -74,6 +78,12 @@ ZEND_GET_MODULE(grpc)
                      enable_fork_support, zend_grpc_globals, grpc_globals)
    STD_PHP_INI_ENTRY("grpc.poll_strategy", NULL, PHP_INI_SYSTEM, OnUpdateString,
                      poll_strategy, zend_grpc_globals, grpc_globals)
+   STD_PHP_INI_ENTRY("grpc.grpc_verbosity", NULL, PHP_INI_SYSTEM, OnUpdateString,
+                     grpc_verbosity, zend_grpc_globals, grpc_globals)
+   STD_PHP_INI_ENTRY("grpc.grpc_trace", NULL, PHP_INI_SYSTEM, OnUpdateString,
+                     grpc_trace, zend_grpc_globals, grpc_globals)
+   STD_PHP_INI_ENTRY("grpc.log_filename", NULL, PHP_INI_SYSTEM, OnUpdateString,
+                     log_filename, zend_grpc_globals, grpc_globals)
    PHP_INI_END()
 /* }}} */
 
@@ -222,6 +232,55 @@ void apply_ini_settings(TSRMLS_D) {
     strcat(poll_str, GRPC_G(poll_strategy));
     putenv(poll_str);
   }
+
+  if (GRPC_G(grpc_verbosity)) {
+    char *verbosity_str = malloc(sizeof("GRPC_VERBOSITY=") +
+                                 strlen(GRPC_G(grpc_verbosity)));
+    strcpy(verbosity_str, "GRPC_VERBOSITY=");
+    strcat(verbosity_str, GRPC_G(grpc_verbosity));
+    putenv(verbosity_str);
+  }
+
+  if (GRPC_G(grpc_trace)) {
+    char *trace_str = malloc(sizeof("GRPC_TRACE=") +
+                             strlen(GRPC_G(grpc_trace)));
+    strcpy(trace_str, "GRPC_TRACE=");
+    strcat(trace_str, GRPC_G(grpc_trace));
+    putenv(trace_str);
+  }
+}
+
+static void custom_logger(gpr_log_func_args* args) {
+  TSRMLS_FETCH();
+
+  const char* final_slash;
+  const char* display_file;
+  char* prefix;
+  char* final;
+  gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
+
+  final_slash = strrchr(args->file, '/');
+  if (final_slash) {
+    display_file = final_slash + 1;
+  } else {
+    display_file = args->file;
+  }
+
+  FILE *fp = fopen(GRPC_G(log_filename), "ab");
+  if (!fp) {
+    return;
+  }
+
+  gpr_asprintf(&prefix, "%s%" PRId64 ".%09" PRId32 " %s:%d]",
+               gpr_log_severity_string(args->severity), now.tv_sec,
+               now.tv_nsec, display_file, args->line);
+
+  gpr_asprintf(&final, "%-60s %s\n", prefix, args->message);
+
+  fprintf(fp, "%s", final);
+  fclose(fp);
+  gpr_free(prefix);
+  gpr_free(final);
 }
 
 /* {{{ PHP_MINIT_FUNCTION
@@ -397,6 +456,9 @@ PHP_MINFO_FUNCTION(grpc) {
 PHP_RINIT_FUNCTION(grpc) {
   if (!GRPC_G(initialized)) {
     apply_ini_settings(TSRMLS_C);
+    if (GRPC_G(log_filename)) {
+      gpr_set_log_function(custom_logger);
+    }
     grpc_init();
     register_fork_handlers();
     grpc_php_init_completion_queue(TSRMLS_C);
@@ -412,6 +474,9 @@ static PHP_GINIT_FUNCTION(grpc) {
   grpc_globals->initialized = 0;
   grpc_globals->enable_fork_support = 0;
   grpc_globals->poll_strategy = NULL;
+  grpc_globals->grpc_verbosity = NULL;
+  grpc_globals->grpc_trace = NULL;
+  grpc_globals->log_filename = NULL;
 }
 /* }}} */
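
The three new php.ini settings above map onto existing gRPC debugging knobs: grpc.grpc_verbosity and grpc.grpc_trace are exported to the GRPC_VERBOSITY and GRPC_TRACE environment variables in apply_ini_settings() before grpc_init() runs, and grpc.log_filename installs the custom logger so gRPC log lines are appended to that file. As an illustrative (not verified) configuration, a deployment could add lines such as grpc.grpc_verbosity=debug, grpc.grpc_trace=api,channel, and grpc.log_filename=/var/log/grpc.log to php.ini.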
 

+ 3 - 0
src/php/ext/grpc/php_grpc.h

@@ -68,6 +68,9 @@ ZEND_BEGIN_MODULE_GLOBALS(grpc)
   zend_bool initialized;
   zend_bool enable_fork_support;
   char *poll_strategy;
+  char *grpc_verbosity;
+  char *grpc_trace;
+  char *log_filename;
 ZEND_END_MODULE_GLOBALS(grpc)
 
 ZEND_EXTERN_MODULE_GLOBALS(grpc);

+ 6 - 4
src/php/lib/Grpc/BaseStub.php

@@ -43,10 +43,12 @@ class BaseStub
      */
     public function __construct($hostname, $opts, $channel = null)
     {
-        $ssl_roots = file_get_contents(
-            dirname(__FILE__).'/../../../../etc/roots.pem'
-        );
-        ChannelCredentials::setDefaultRootsPem($ssl_roots);
+        if (!ChannelCredentials::isDefaultRootsPemSet()) {
+            $ssl_roots = file_get_contents(
+                dirname(__FILE__).'/../../../../etc/roots.pem'
+            );
+            ChannelCredentials::setDefaultRootsPem($ssl_roots);
+        }
 
         $this->hostname = $hostname;
         $this->update_metadata = null;

+ 10 - 0
src/php/tests/unit_tests/ChannelCredentialsTest.php

@@ -46,6 +46,16 @@ class ChanellCredentialsTest extends PHPUnit_Framework_TestCase
         $this->assertNull($channel_credentials);
     }
 
+    public function testDefaultRootsPem()
+    {
+        Grpc\ChannelCredentials::setDefaultRootsPem("Pem-Content-Not-Verified");
+        $this->assertTrue(Grpc\ChannelCredentials::isDefaultRootsPemSet());
+        Grpc\ChannelCredentials::invalidateDefaultRootsPem();
+        $this->assertFalse(Grpc\ChannelCredentials::isDefaultRootsPemSet());
+        Grpc\ChannelCredentials::setDefaultRootsPem("Content-Not-Verified");
+        $this->assertTrue(Grpc\ChannelCredentials::isDefaultRootsPemSet());
+    }
+
     /**
      * @expectedException InvalidArgumentException
      */

+ 31 - 0
src/proto/grpc/http_over_grpc/BUILD

@@ -0,0 +1,31 @@
+# Copyright 2019 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("//bazel:grpc_build_system.bzl", "grpc_package", "grpc_proto_library")
+
+licenses(["notice"])  # Apache v2
+
+grpc_package(
+    name = "http_over_grpc",
+    visibility = "public",
+)
+
+grpc_proto_library(
+    name = "http_over_grpc_proto",
+    srcs = [
+        "http_over_grpc.proto",
+    ],
+    has_services = True,
+    well_known_protos = True,
+)

+ 51 - 0
src/proto/grpc/http_over_grpc/http_over_grpc.proto

@@ -0,0 +1,51 @@
+// Copyright 2019 The gRPC Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package grpc.http_over_grpc;
+
+// Represents an HTTP 1.1 header.
+message Header {
+  string key = 1;
+  repeated string values = 2;
+}
+
+// An HTTP 1.1 request encapsulated in an RPC.
+message HTTPOverGRPCRequest {
+  // The HTTP request method.
+  string method = 1;
+  // The HTTP request URL.
+  string url = 2;
+  // The HTTP request headers.
+  repeated Header headers = 3;
+  // HTTP request body.
+  bytes body = 4;
+}
+
+// An HTTP 1.1 reply encapsulated in an RPC.
+message HTTPOverGRPCReply {
+  // The HTTP status code (e.g. 200, 400, 404).
+  int32 status = 1;
+  // The HTTP response headers.
+  repeated Header headers = 2;
+  // The HTTP response body.
+  bytes body = 3;
+}
+
+// Currently does not support HTTP chunked transfer encoding.
+service HTTPOverGRPC {
+  // Perform the given HTTP request.
+  rpc HTTPRequest(HTTPOverGRPCRequest) returns (HTTPOverGRPCReply) {}
+}
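
A minimal client sketch for the new HTTPOverGRPC service, assuming the standard protoc Python outputs; the module names http_over_grpc_pb2 / http_over_grpc_pb2_grpc and the target address are illustrative assumptions, not part of this change.

import grpc

import http_over_grpc_pb2
import http_over_grpc_pb2_grpc


def fetch(target, url):
    # Open a plaintext channel and issue one encapsulated HTTP GET.
    with grpc.insecure_channel(target) as channel:
        stub = http_over_grpc_pb2_grpc.HTTPOverGRPCStub(channel)
        request = http_over_grpc_pb2.HTTPOverGRPCRequest(
            method='GET',
            url=url,
            headers=[http_over_grpc_pb2.Header(key='accept', values=['*/*'])],
        )
        reply = stub.HTTPRequest(request)
        return reply.status, reply.body


if __name__ == '__main__':
    status, body = fetch('localhost:50051', 'http://example.com/')
    print(status, len(body))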

+ 11 - 6
src/proto/grpc/testing/control.proto

@@ -220,17 +220,22 @@ message Scenarios {
 // once the scenario has finished.
 message ScenarioResultSummary
 {
-  // Total number of operations per second over all clients.
+  // Total number of operations per second over all clients. What counts as one 'operation' depends on the benchmark scenario:
+  // for unary benchmarks, an operation is the processing of a single unary RPC;
+  // for streaming benchmarks, an operation is the processing of a single request/response ping-pong.
   double qps = 1;
-  // QPS per one server core.
+  // QPS per server core.
   double qps_per_server_core = 2;
-  // server load based on system_time (0.85 => 85%)
+  // The total server cpu load based on system time across all server processes, expressed as percentage of a single cpu core.
+  // For example, 85 implies 85% of a cpu core and 125 implies 125% of a cpu core. Since the cpu load is accumulated across all server
+  // processes, the value can exceed 100 when there are multiple servers, or a single server using multiple threads and cores.
+  // The same explanation applies to the client cpu load values below.
   double server_system_time = 3;
-  // server load based on user_time (0.85 => 85%)
+  // The total server cpu load based on user time across all server processes, expressed as percentage of a single cpu core. (85 => 85%, 125 => 125%)
   double server_user_time = 4;
-  // client load based on system_time (0.85 => 85%)
+  // The total client cpu load based on system time across all client processes, expressed as percentage of a single cpu core. (85 => 85%, 125 => 125%)
   double client_system_time = 5;
-  // client load based on user_time (0.85 => 85%)
+  // The total client cpu load based on user time across all client processes, expressed as percentage of a single cpu core. (85 => 85%, 125 => 125%)
   double client_user_time = 6;
 
   // X% latency percentiles (in nanoseconds)
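
The cpu-load fields above are easiest to read as "percent of one core". The following sketch is illustrative arithmetic only, not part of the benchmark code, showing how cpu-seconds accumulated across all processes translate into such a value.

def cpu_load_percent(total_cpu_seconds, wall_clock_seconds):
    # One core fully busy for the whole run yields 100.0.
    return 100.0 * total_cpu_seconds / wall_clock_seconds

print(cpu_load_percent(8.5, 10.0))   # 85.0  -> 85% of a single core
print(cpu_load_percent(12.5, 10.0))  # 125.0 -> more than one core in use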

+ 1 - 0
src/python/grpcio/commands.py

@@ -217,6 +217,7 @@ class BuildExt(build_ext.build_ext):
             """Test if default compiler is okay with specifying c++ version
             when invoked in C mode. GCC is okay with this, while clang is not.
             """
+            # TODO(lidiz) Remove the generated a.out for success tests.
             cc_test = subprocess.Popen(
                 ['cc', '-x', 'c', '-std=c++11', '-'],
                 stdin=subprocess.PIPE,

+ 1 - 2
src/python/grpcio/grpc_core_dependencies.py

@@ -24,7 +24,6 @@ CORE_SOURCE_FILES = [
     'src/core/ext/filters/client_channel/client_channel_channelz.cc',
     'src/core/ext/filters/client_channel/client_channel_factory.cc',
     'src/core/ext/filters/client_channel/client_channel_plugin.cc',
-    'src/core/ext/filters/client_channel/connector.cc',
     'src/core/ext/filters/client_channel/global_subchannel_pool.cc',
     'src/core/ext/filters/client_channel/health/health_check_client.cc',
     'src/core/ext/filters/client_channel/http_connect_handshaker.cc',
@@ -42,7 +41,6 @@ CORE_SOURCE_FILES = [
     'src/core/ext/filters/client_channel/lb_policy_registry.cc',
     'src/core/ext/filters/client_channel/local_subchannel_pool.cc',
     'src/core/ext/filters/client_channel/parse_address.cc',
-    'src/core/ext/filters/client_channel/proxy_mapper.cc',
     'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
     'src/core/ext/filters/client_channel/resolver.cc',
     'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc',
@@ -261,6 +259,7 @@ CORE_SOURCE_FILES = [
     'src/core/lib/iomgr/is_epollexclusive_available.cc',
     'src/core/lib/iomgr/load_file.cc',
     'src/core/lib/iomgr/lockfree_event.cc',
+    'src/core/lib/iomgr/logical_thread.cc',
     'src/core/lib/iomgr/poller/eventmanager_libuv.cc',
     'src/core/lib/iomgr/polling_entity.cc',
     'src/core/lib/iomgr/pollset.cc',

Too many files were changed in this diff, so some files are not shown.