
Merge branch 'master' into updatefiles

Jan Tattermusch, 6 years ago
parent
commit
18856a87fc
100 changed files with 3015 additions and 1719 deletions
  1. BUILD (+2, -0)
  2. CMakeLists.txt (+41, -0)
  3. Makefile (+39, -0)
  4. bazel/grpc_build_system.bzl (+1, -1)
  5. build.yaml (+17, -0)
  6. config.m4 (+2, -0)
  7. config.w32 (+2, -0)
  8. doc/interop-test-descriptions.md (+38, -0)
  9. gRPC-C++.podspec (+4, -1)
  10. gRPC-Core.podspec (+6, -1)
  11. gRPC-ProtoRPC.podspec (+1, -0)
  12. gRPC-RxLibrary.podspec (+1, -0)
  13. gRPC.podspec (+1, -0)
  14. grpc.def (+9, -0)
  15. grpc.gemspec (+2, -0)
  16. grpc.gyp (+1, -0)
  17. include/grpc/grpc_security.h (+195, -0)
  18. include/grpc/impl/codegen/grpc_types.h (+4, -0)
  19. include/grpc/impl/codegen/port_platform.h (+31, -0)
  20. include/grpc/impl/codegen/slice.h (+1, -1)
  21. include/grpcpp/impl/codegen/call_op_set.h (+9, -0)
  22. include/grpcpp/impl/codegen/completion_queue.h (+12, -1)
  23. include/grpcpp/impl/codegen/core_codegen.h (+1, -0)
  24. include/grpcpp/impl/codegen/core_codegen_interface.h (+1, -0)
  25. include/grpcpp/impl/codegen/interceptor.h (+23, -4)
  26. include/grpcpp/impl/codegen/interceptor_common.h (+23, -4)
  27. include/grpcpp/server.h (+21, -2)
  28. package.xml (+2, -0)
  29. src/compiler/csharp_generator.cc (+25, -5)
  30. src/core/ext/filters/client_channel/client_channel.cc (+45, -69)
  31. src/core/ext/filters/client_channel/client_channel.h (+2, -2)
  32. src/core/ext/filters/client_channel/client_channel_channelz.cc (+5, -6)
  33. src/core/ext/filters/client_channel/client_channel_channelz.h (+5, -4)
  34. src/core/ext/filters/client_channel/client_channel_factory.cc (+1, -1)
  35. src/core/ext/filters/client_channel/client_channel_factory.h (+3, -3)
  36. src/core/ext/filters/client_channel/global_subchannel_pool.cc (+9, -10)
  37. src/core/ext/filters/client_channel/global_subchannel_pool.h (+3, -3)
  38. src/core/ext/filters/client_channel/health/health_check_client.cc (+10, -8)
  39. src/core/ext/filters/client_channel/health/health_check_client.h (+1, -1)
  40. src/core/ext/filters/client_channel/lb_policy.cc (+29, -2)
  41. src/core/ext/filters/client_channel/lb_policy.h (+10, -6)
  42. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc (+10, -10)
  43. src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc (+5, -5)
  44. src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc (+5, -5)
  45. src/core/ext/filters/client_channel/lb_policy/subchannel_list.h (+20, -21)
  46. src/core/ext/filters/client_channel/lb_policy/xds/xds.cc (+80, -13)
  47. src/core/ext/filters/client_channel/lb_policy_factory.h (+6, -1)
  48. src/core/ext/filters/client_channel/lb_policy_registry.cc (+2, -2)
  49. src/core/ext/filters/client_channel/lb_policy_registry.h (+1, -1)
  50. src/core/ext/filters/client_channel/local_subchannel_pool.cc (+7, -7)
  51. src/core/ext/filters/client_channel/local_subchannel_pool.h (+3, -3)
  52. src/core/ext/filters/client_channel/request_routing.cc (+1, -1)
  53. src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc (+1, -2)
  54. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc (+2, -2)
  55. src/core/ext/filters/client_channel/resolver_result_parsing.cc (+7, -35)
  56. src/core/ext/filters/client_channel/subchannel.cc (+686, -770)
  57. src/core/ext/filters/client_channel/subchannel.h (+220, -111)
  58. src/core/ext/filters/client_channel/subchannel_pool_interface.h (+5, -5)
  59. src/core/ext/transport/chttp2/client/chttp2_connector.cc (+2, -1)
  60. src/core/ext/transport/chttp2/client/insecure/channel_create.cc (+2, -2)
  61. src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc (+4, -3)
  62. src/core/ext/transport/chttp2/transport/chttp2_transport.cc (+5, -0)
  63. src/core/ext/transport/chttp2/transport/hpack_encoder.cc (+1, -1)
  64. src/core/ext/transport/chttp2/transport/writing.cc (+8, -5)
  65. src/core/lib/gprpp/optional.h (+2, -2)
  66. src/core/lib/iomgr/buffer_list.h (+3, -1)
  67. src/core/lib/iomgr/ev_epollex_linux.cc (+138, -162)
  68. src/core/lib/iomgr/exec_ctx.cc (+1, -0)
  69. src/core/lib/iomgr/exec_ctx.h (+59, -3)
  70. src/core/lib/iomgr/executor.cc (+7, -0)
  71. src/core/lib/iomgr/tcp_posix.cc (+25, -25)
  72. src/core/lib/iomgr/timer_manager.cc (+7, -0)
  73. src/core/lib/security/credentials/alts/alts_credentials.cc (+1, -1)
  74. src/core/lib/security/credentials/alts/check_gcp_environment_no_op.cc (+2, -2)
  75. src/core/lib/security/credentials/credentials.h (+1, -1)
  76. src/core/lib/security/credentials/google_default/google_default_credentials.cc (+1, -1)
  77. src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc (+192, -0)
  78. src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h (+213, -0)
  79. src/core/lib/security/security_connector/ssl_utils.h (+33, -0)
  80. src/core/lib/surface/call.cc (+5, -1)
  81. src/core/lib/surface/completion_queue.cc (+4, -7)
  82. src/core/lib/surface/init.cc (+2, -0)
  83. src/core/lib/surface/server.cc (+16, -15)
  84. src/core/lib/transport/static_metadata.cc (+279, -279)
  85. src/core/lib/transport/transport.cc (+4, -2)
  86. src/core/tsi/ssl_transport_security.cc (+10, -4)
  87. src/cpp/common/alarm.cc (+3, -0)
  88. src/cpp/common/completion_queue_cc.cc (+0, -8)
  89. src/cpp/common/core_codegen.cc (+4, -0)
  90. src/cpp/server/load_reporter/load_reporter_async_service_impl.cc (+2, -2)
  91. src/cpp/server/server_cc.cc (+149, -59)
  92. src/csharp/Grpc.Core.Api/.gitignore (+2, -0)
  93. src/csharp/Grpc.Core.Api/AuthContext.cs (+0, -1)
  94. src/csharp/Grpc.Core.Api/AuthProperty.cs (+3, -2)
  95. src/csharp/Grpc.Core.Api/ContextPropagationOptions.cs (+59, -0)
  96. src/csharp/Grpc.Core.Api/ContextPropagationToken.cs (+35, -0)
  97. src/csharp/Grpc.Core.Api/DeserializationContext.cs (+0, -0)
  98. src/csharp/Grpc.Core.Api/Grpc.Core.Api.csproj (+32, -0)
  99. src/csharp/Grpc.Core.Api/IAsyncStreamReader.cs (+0, -0)
  100. src/csharp/Grpc.Core.Api/IAsyncStreamWriter.cs (+0, -0)

+ 2 - 0
BUILD

@@ -1609,6 +1609,7 @@ grpc_cc_library(
         "src/core/lib/security/credentials/oauth2/oauth2_credentials.cc",
         "src/core/lib/security/credentials/plugin/plugin_credentials.cc",
         "src/core/lib/security/credentials/ssl/ssl_credentials.cc",
+        "src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc",
         "src/core/lib/security/security_connector/alts/alts_security_connector.cc",
         "src/core/lib/security/security_connector/fake/fake_security_connector.cc",
         "src/core/lib/security/security_connector/load_system_roots_fallback.cc",
@@ -1643,6 +1644,7 @@ grpc_cc_library(
         "src/core/lib/security/credentials/oauth2/oauth2_credentials.h",
         "src/core/lib/security/credentials/plugin/plugin_credentials.h",
         "src/core/lib/security/credentials/ssl/ssl_credentials.h",
+        "src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h",
         "src/core/lib/security/security_connector/alts/alts_security_connector.h",
         "src/core/lib/security/security_connector/fake/fake_security_connector.h",
         "src/core/lib/security/security_connector/load_system_roots.h",

+ 41 - 0
CMakeLists.txt

@@ -257,6 +257,9 @@ add_dependencies(buildtests_c channel_create_test)
 add_dependencies(buildtests_c chttp2_hpack_encoder_test)
 add_dependencies(buildtests_c chttp2_stream_map_test)
 add_dependencies(buildtests_c chttp2_varint_test)
+if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
+add_dependencies(buildtests_c close_fd_test)
+endif()
 add_dependencies(buildtests_c cmdline_test)
 add_dependencies(buildtests_c combiner_test)
 add_dependencies(buildtests_c compression_test)
@@ -1151,6 +1154,7 @@ add_library(grpc
   src/core/lib/security/credentials/oauth2/oauth2_credentials.cc
   src/core/lib/security/credentials/plugin/plugin_credentials.cc
   src/core/lib/security/credentials/ssl/ssl_credentials.cc
+  src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc
   src/core/lib/security/security_connector/alts/alts_security_connector.cc
   src/core/lib/security/security_connector/fake/fake_security_connector.cc
   src/core/lib/security/security_connector/load_system_roots_fallback.cc
@@ -1609,6 +1613,7 @@ add_library(grpc_cronet
   src/core/lib/security/credentials/oauth2/oauth2_credentials.cc
   src/core/lib/security/credentials/plugin/plugin_credentials.cc
   src/core/lib/security/credentials/ssl/ssl_credentials.cc
+  src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc
   src/core/lib/security/security_connector/alts/alts_security_connector.cc
   src/core/lib/security/security_connector/fake/fake_security_connector.cc
   src/core/lib/security/security_connector/load_system_roots_fallback.cc
@@ -6300,6 +6305,42 @@ target_link_libraries(chttp2_varint_test
 
 endif (gRPC_BUILD_TESTS)
 if (gRPC_BUILD_TESTS)
+if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
+
+add_executable(close_fd_test
+  test/core/bad_connection/close_fd_test.cc
+)
+
+
+target_include_directories(close_fd_test
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
+  PRIVATE ${_gRPC_PROTOBUF_INCLUDE_DIR}
+  PRIVATE ${_gRPC_ZLIB_INCLUDE_DIR}
+  PRIVATE ${_gRPC_BENCHMARK_INCLUDE_DIR}
+  PRIVATE ${_gRPC_CARES_INCLUDE_DIR}
+  PRIVATE ${_gRPC_GFLAGS_INCLUDE_DIR}
+  PRIVATE ${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
+  PRIVATE ${_gRPC_NANOPB_INCLUDE_DIR}
+)
+
+target_link_libraries(close_fd_test
+  ${_gRPC_ALLTARGETS_LIBRARIES}
+  grpc_test_util
+  grpc
+  gpr
+)
+
+  # avoid dependency on libstdc++
+  if (_gRPC_CORE_NOSTDCXX_FLAGS)
+    set_target_properties(close_fd_test PROPERTIES LINKER_LANGUAGE C)
+    target_compile_options(close_fd_test PRIVATE $<$<COMPILE_LANGUAGE:CXX>:${_gRPC_CORE_NOSTDCXX_FLAGS}>)
+  endif()
+
+endif()
+endif (gRPC_BUILD_TESTS)
+if (gRPC_BUILD_TESTS)
 
 add_executable(cmdline_test
   test/core/util/cmdline_test.cc

+ 39 - 0
Makefile

@@ -986,6 +986,7 @@ chttp2_hpack_encoder_test: $(BINDIR)/$(CONFIG)/chttp2_hpack_encoder_test
 chttp2_stream_map_test: $(BINDIR)/$(CONFIG)/chttp2_stream_map_test
 chttp2_varint_test: $(BINDIR)/$(CONFIG)/chttp2_varint_test
 client_fuzzer: $(BINDIR)/$(CONFIG)/client_fuzzer
+close_fd_test: $(BINDIR)/$(CONFIG)/close_fd_test
 cmdline_test: $(BINDIR)/$(CONFIG)/cmdline_test
 combiner_test: $(BINDIR)/$(CONFIG)/combiner_test
 compression_test: $(BINDIR)/$(CONFIG)/compression_test
@@ -1450,6 +1451,7 @@ buildtests_c: privatelibs_c \
   $(BINDIR)/$(CONFIG)/chttp2_hpack_encoder_test \
   $(BINDIR)/$(CONFIG)/chttp2_stream_map_test \
   $(BINDIR)/$(CONFIG)/chttp2_varint_test \
+  $(BINDIR)/$(CONFIG)/close_fd_test \
   $(BINDIR)/$(CONFIG)/cmdline_test \
   $(BINDIR)/$(CONFIG)/combiner_test \
   $(BINDIR)/$(CONFIG)/compression_test \
@@ -1988,6 +1990,8 @@ test_c: buildtests_c
 	$(Q) $(BINDIR)/$(CONFIG)/chttp2_stream_map_test || ( echo test chttp2_stream_map_test failed ; exit 1 )
 	$(E) "[RUN]     Testing chttp2_varint_test"
 	$(Q) $(BINDIR)/$(CONFIG)/chttp2_varint_test || ( echo test chttp2_varint_test failed ; exit 1 )
+	$(E) "[RUN]     Testing close_fd_test"
+	$(Q) $(BINDIR)/$(CONFIG)/close_fd_test || ( echo test close_fd_test failed ; exit 1 )
 	$(E) "[RUN]     Testing cmdline_test"
 	$(Q) $(BINDIR)/$(CONFIG)/cmdline_test || ( echo test cmdline_test failed ; exit 1 )
 	$(E) "[RUN]     Testing combiner_test"
@@ -3672,6 +3676,7 @@ LIBGRPC_SRC = \
     src/core/lib/security/credentials/oauth2/oauth2_credentials.cc \
     src/core/lib/security/credentials/plugin/plugin_credentials.cc \
     src/core/lib/security/credentials/ssl/ssl_credentials.cc \
+    src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc \
     src/core/lib/security/security_connector/alts/alts_security_connector.cc \
     src/core/lib/security/security_connector/fake/fake_security_connector.cc \
     src/core/lib/security/security_connector/load_system_roots_fallback.cc \
@@ -4124,6 +4129,7 @@ LIBGRPC_CRONET_SRC = \
     src/core/lib/security/credentials/oauth2/oauth2_credentials.cc \
     src/core/lib/security/credentials/plugin/plugin_credentials.cc \
     src/core/lib/security/credentials/ssl/ssl_credentials.cc \
+    src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc \
     src/core/lib/security/security_connector/alts/alts_security_connector.cc \
     src/core/lib/security/security_connector/fake/fake_security_connector.cc \
     src/core/lib/security/security_connector/load_system_roots_fallback.cc \
@@ -11121,6 +11127,38 @@ endif
 endif
 
 
+CLOSE_FD_TEST_SRC = \
+    test/core/bad_connection/close_fd_test.cc \
+
+CLOSE_FD_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(CLOSE_FD_TEST_SRC))))
+ifeq ($(NO_SECURE),true)
+
+# You can't build secure targets if you don't have OpenSSL.
+
+$(BINDIR)/$(CONFIG)/close_fd_test: openssl_dep_error
+
+else
+
+
+
+$(BINDIR)/$(CONFIG)/close_fd_test: $(CLOSE_FD_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
+	$(E) "[LD]      Linking $@"
+	$(Q) mkdir -p `dirname $@`
+	$(Q) $(LD) $(LDFLAGS) $(CLOSE_FD_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/close_fd_test
+
+endif
+
+$(OBJDIR)/$(CONFIG)/test/core/bad_connection/close_fd_test.o:  $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
+
+deps_close_fd_test: $(CLOSE_FD_TEST_OBJS:.o=.dep)
+
+ifneq ($(NO_SECURE),true)
+ifneq ($(NO_DEPS),true)
+-include $(CLOSE_FD_TEST_OBJS:.o=.dep)
+endif
+endif
+
+
 CMDLINE_TEST_SRC = \
     test/core/util/cmdline_test.cc \
 
@@ -25370,6 +25408,7 @@ src/core/lib/security/credentials/local/local_credentials.cc: $(OPENSSL_DEP)
 src/core/lib/security/credentials/oauth2/oauth2_credentials.cc: $(OPENSSL_DEP)
 src/core/lib/security/credentials/plugin/plugin_credentials.cc: $(OPENSSL_DEP)
 src/core/lib/security/credentials/ssl/ssl_credentials.cc: $(OPENSSL_DEP)
+src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc: $(OPENSSL_DEP)
 src/core/lib/security/security_connector/alts/alts_security_connector.cc: $(OPENSSL_DEP)
 src/core/lib/security/security_connector/fake/fake_security_connector.cc: $(OPENSSL_DEP)
 src/core/lib/security/security_connector/load_system_roots_fallback.cc: $(OPENSSL_DEP)

+ 1 - 1
bazel/grpc_build_system.bzl

@@ -132,7 +132,7 @@ def grpc_proto_library(
         generate_mocks = generate_mocks,
     )
 
-def grpc_cc_test(name, srcs = [], deps = [], external_deps = [], args = [], data = [], uses_polling = True, language = "C++", size = "medium", timeout = "moderate", tags = [], exec_compatible_with = []):
+def grpc_cc_test(name, srcs = [], deps = [], external_deps = [], args = [], data = [], uses_polling = True, language = "C++", size = "medium", timeout = None, tags = [], exec_compatible_with = []):
     copts = []
     if language.upper() == "C":
         copts = if_not_windows(["-std=c99"])

+ 17 - 0
build.yaml

@@ -837,6 +837,7 @@ filegroups:
   - src/core/lib/security/credentials/oauth2/oauth2_credentials.h
   - src/core/lib/security/credentials/plugin/plugin_credentials.h
   - src/core/lib/security/credentials/ssl/ssl_credentials.h
+  - src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h
   - src/core/lib/security/security_connector/alts/alts_security_connector.h
   - src/core/lib/security/security_connector/fake/fake_security_connector.h
   - src/core/lib/security/security_connector/load_system_roots.h
@@ -869,6 +870,7 @@ filegroups:
   - src/core/lib/security/credentials/oauth2/oauth2_credentials.cc
   - src/core/lib/security/credentials/plugin/plugin_credentials.cc
   - src/core/lib/security/credentials/ssl/ssl_credentials.cc
+  - src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc
   - src/core/lib/security/security_connector/alts/alts_security_connector.cc
   - src/core/lib/security/security_connector/fake/fake_security_connector.cc
   - src/core/lib/security/security_connector/load_system_roots_fallback.cc
@@ -2244,6 +2246,21 @@ targets:
   - test/core/end2end/fuzzers/client_fuzzer_corpus
   dict: test/core/end2end/fuzzers/hpack.dictionary
   maxlen: 2048
+- name: close_fd_test
+  build: test
+  language: c
+  src:
+  - test/core/bad_connection/close_fd_test.cc
+  deps:
+  - grpc_test_util
+  - grpc
+  - gpr
+  exclude_configs:
+  - tsan
+  platforms:
+  - mac
+  - linux
+  - posix
 - name: cmdline_test
   build: test
   language: c

+ 2 - 0
config.m4

@@ -283,6 +283,7 @@ if test "$PHP_GRPC" != "no"; then
     src/core/lib/security/credentials/oauth2/oauth2_credentials.cc \
     src/core/lib/security/credentials/plugin/plugin_credentials.cc \
     src/core/lib/security/credentials/ssl/ssl_credentials.cc \
+    src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc \
     src/core/lib/security/security_connector/alts/alts_security_connector.cc \
     src/core/lib/security/security_connector/fake/fake_security_connector.cc \
     src/core/lib/security/security_connector/load_system_roots_fallback.cc \
@@ -728,6 +729,7 @@ if test "$PHP_GRPC" != "no"; then
   PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/security/credentials/oauth2)
   PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/security/credentials/plugin)
   PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/security/credentials/ssl)
+  PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/security/credentials/tls)
   PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/security/security_connector)
   PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/security/security_connector/alts)
   PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/security/security_connector/fake)

+ 2 - 0
config.w32

@@ -258,6 +258,7 @@ if (PHP_GRPC != "no") {
     "src\\core\\lib\\security\\credentials\\oauth2\\oauth2_credentials.cc " +
     "src\\core\\lib\\security\\credentials\\plugin\\plugin_credentials.cc " +
     "src\\core\\lib\\security\\credentials\\ssl\\ssl_credentials.cc " +
+    "src\\core\\lib\\security\\credentials\\tls\\grpc_tls_credentials_options.cc " +
     "src\\core\\lib\\security\\security_connector\\alts\\alts_security_connector.cc " +
     "src\\core\\lib\\security\\security_connector\\fake\\fake_security_connector.cc " +
     "src\\core\\lib\\security\\security_connector\\load_system_roots_fallback.cc " +
@@ -743,6 +744,7 @@ if (PHP_GRPC != "no") {
   FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\security\\credentials\\oauth2");
   FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\security\\credentials\\plugin");
   FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\security\\credentials\\ssl");
+  FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\security\\credentials\\tls");
   FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\security\\security_connector");
   FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\security\\security_connector\\alts");
   FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\security\\security_connector\\fake");

+ 38 - 0
doc/interop-test-descriptions.md

@@ -679,6 +679,44 @@ Client asserts:
 by the auth library. The client can optionally check the username matches the
 email address in the key file.
 
+### google_default_credentials
+
+Similar to the other auth tests, this test should only be run against prod
+servers. Different from some of the other auth tests, however, this test
+may also be run from outside of GCP.
+
+This test verifies unary calls succeed when the client uses
+GoogleDefaultCredentials. The path to a service account key file in the
+GOOGLE_APPLICATION_CREDENTIALS environment variable may or may not be
+provided by the test runner. For example, the test runner might set
+this environment when outside of GCP but keep it unset when on GCP.
+
+The test uses `--default_service_account` with GCE service account email.
+
+Server features:
+* [UnaryCall][]
+* [Echo Authenticated Username][]
+
+Procedure:
+ 1. Client configures the channel to use GoogleDefaultCredentials
+     * Note: the term `GoogleDefaultCredentials` within the context
+       of this test description refers to an API which encapsulates
+       both "transport credentials" and "call credentials" and which
+       is capable of transport creds auto-selection (including ALTS).
+       Similar APIs involving only auto-selection of OAuth mechanisms
+       might work for this test but aren't the intended subjects.
+ 2. Client calls UnaryCall with:
+
+    ```
+    {
+      fill_username: true
+    }
+    ```
+
+Client asserts:
+* call was successful
+* received SimpleResponse.username matches the value of
+  `--default_service_account`
 
 ### custom_metadata
 

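A minimal C++ sketch (not part of this commit) of step 1 of the procedure above, using the existing `grpc::GoogleDefaultCredentials()` helper; the target URI is a placeholder:

```cpp
#include <memory>

#include <grpcpp/grpcpp.h>

int main() {
  // GoogleDefaultCredentials() bundles transport credentials (with ALTS
  // auto-selection) and call credentials, as described above.
  std::shared_ptr<grpc::ChannelCredentials> creds =
      grpc::GoogleDefaultCredentials();
  // Placeholder target; the interop client would then issue UnaryCall with
  // fill_username: true through its generated stub.
  auto channel = grpc::CreateChannel("grpc-test.example.com:443", creds);
  return channel != nullptr ? 0 : 1;
}
```
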
+ 4 - 1
gRPC-C++.podspec

@@ -24,7 +24,7 @@ Pod::Spec.new do |s|
   s.name     = 'gRPC-C++'
   # TODO (mxyan): use version that match gRPC version when pod is stabilized
   # version = '1.19.0-dev'
-  version = '0.0.6-dev'
+  version = '0.0.8-dev'
   s.version  = version
   s.summary  = 'gRPC C++ library'
   s.homepage = 'https://grpc.io'
@@ -40,6 +40,8 @@ Pod::Spec.new do |s|
 
   s.ios.deployment_target = '7.0'
   s.osx.deployment_target = '10.9'
+  s.tvos.deployment_target = '10.0'
+
   s.requires_arc = false
 
   name = 'grpcpp'
@@ -298,6 +300,7 @@ Pod::Spec.new do |s|
                       'src/core/lib/security/credentials/oauth2/oauth2_credentials.h',
                       'src/core/lib/security/credentials/plugin/plugin_credentials.h',
                       'src/core/lib/security/credentials/ssl/ssl_credentials.h',
+                      'src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h',
                       'src/core/lib/security/security_connector/alts/alts_security_connector.h',
                       'src/core/lib/security/security_connector/fake/fake_security_connector.h',
                       'src/core/lib/security/security_connector/load_system_roots.h',

+ 6 - 1
gRPC-Core.podspec

@@ -40,6 +40,8 @@ Pod::Spec.new do |s|
 
   s.ios.deployment_target = '7.0'
   s.osx.deployment_target = '10.9'
+  s.tvos.deployment_target = '10.0'
+  
   s.requires_arc = false
 
   name = 'grpc'
@@ -181,7 +183,7 @@ Pod::Spec.new do |s|
     ss.header_mappings_dir = '.'
     ss.libraries = 'z'
     ss.dependency "#{s.name}/Interface", version
-    ss.dependency 'BoringSSL-GRPC', '0.0.2'
+    ss.dependency 'BoringSSL-GRPC', '0.0.3'
     ss.dependency 'nanopb', '~> 0.3'
     ss.compiler_flags = '-DGRPC_SHADOW_BORINGSSL_SYMBOLS'
 
@@ -292,6 +294,7 @@ Pod::Spec.new do |s|
                       'src/core/lib/security/credentials/oauth2/oauth2_credentials.h',
                       'src/core/lib/security/credentials/plugin/plugin_credentials.h',
                       'src/core/lib/security/credentials/ssl/ssl_credentials.h',
+                      'src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h',
                       'src/core/lib/security/security_connector/alts/alts_security_connector.h',
                       'src/core/lib/security/security_connector/fake/fake_security_connector.h',
                       'src/core/lib/security/security_connector/load_system_roots.h',
@@ -729,6 +732,7 @@ Pod::Spec.new do |s|
                       'src/core/lib/security/credentials/oauth2/oauth2_credentials.cc',
                       'src/core/lib/security/credentials/plugin/plugin_credentials.cc',
                       'src/core/lib/security/credentials/ssl/ssl_credentials.cc',
+                      'src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc',
                       'src/core/lib/security/security_connector/alts/alts_security_connector.cc',
                       'src/core/lib/security/security_connector/fake/fake_security_connector.cc',
                       'src/core/lib/security/security_connector/load_system_roots_fallback.cc',
@@ -921,6 +925,7 @@ Pod::Spec.new do |s|
                               'src/core/lib/security/credentials/oauth2/oauth2_credentials.h',
                               'src/core/lib/security/credentials/plugin/plugin_credentials.h',
                               'src/core/lib/security/credentials/ssl/ssl_credentials.h',
+                              'src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h',
                               'src/core/lib/security/security_connector/alts/alts_security_connector.h',
                               'src/core/lib/security/security_connector/fake/fake_security_connector.h',
                               'src/core/lib/security/security_connector/load_system_roots.h',

+ 1 - 0
gRPC-ProtoRPC.podspec

@@ -35,6 +35,7 @@ Pod::Spec.new do |s|
 
   s.ios.deployment_target = '7.0'
   s.osx.deployment_target = '10.9'
+  s.tvos.deployment_target = '10.0'
 
   name = 'ProtoRPC'
   s.module_name = name

+ 1 - 0
gRPC-RxLibrary.podspec

@@ -35,6 +35,7 @@ Pod::Spec.new do |s|
 
   s.ios.deployment_target = '7.0'
   s.osx.deployment_target = '10.9'
+  s.tvos.deployment_target = '10.0'
 
   name = 'RxLibrary'
   s.module_name = name

+ 1 - 0
gRPC.podspec

@@ -34,6 +34,7 @@ Pod::Spec.new do |s|
 
   s.ios.deployment_target = '7.0'
   s.osx.deployment_target = '10.9'
+  s.tvos.deployment_target = '10.0'
 
   name = 'GRPCClient'
   s.module_name = name

+ 9 - 0
grpc.def

@@ -131,6 +131,15 @@ EXPORTS
     grpc_alts_server_credentials_create
     grpc_local_credentials_create
     grpc_local_server_credentials_create
+    grpc_tls_credentials_options_create
+    grpc_tls_credentials_options_set_cert_request_type
+    grpc_tls_credentials_options_set_key_materials_config
+    grpc_tls_credentials_options_set_credential_reload_config
+    grpc_tls_credentials_options_set_server_authorization_check_config
+    grpc_tls_key_materials_config_create
+    grpc_tls_key_materials_config_set_key_materials
+    grpc_tls_credential_reload_config_create
+    grpc_tls_server_authorization_check_config_create
     grpc_raw_byte_buffer_create
     grpc_raw_compressed_byte_buffer_create
     grpc_byte_buffer_copy

+ 2 - 0
grpc.gemspec

@@ -224,6 +224,7 @@ Gem::Specification.new do |s|
   s.files += %w( src/core/lib/security/credentials/oauth2/oauth2_credentials.h )
   s.files += %w( src/core/lib/security/credentials/plugin/plugin_credentials.h )
   s.files += %w( src/core/lib/security/credentials/ssl/ssl_credentials.h )
+  s.files += %w( src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h )
   s.files += %w( src/core/lib/security/security_connector/alts/alts_security_connector.h )
   s.files += %w( src/core/lib/security/security_connector/fake/fake_security_connector.h )
   s.files += %w( src/core/lib/security/security_connector/load_system_roots.h )
@@ -665,6 +666,7 @@ Gem::Specification.new do |s|
   s.files += %w( src/core/lib/security/credentials/oauth2/oauth2_credentials.cc )
   s.files += %w( src/core/lib/security/credentials/plugin/plugin_credentials.cc )
   s.files += %w( src/core/lib/security/credentials/ssl/ssl_credentials.cc )
+  s.files += %w( src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc )
   s.files += %w( src/core/lib/security/security_connector/alts/alts_security_connector.cc )
   s.files += %w( src/core/lib/security/security_connector/fake/fake_security_connector.cc )
   s.files += %w( src/core/lib/security/security_connector/load_system_roots_fallback.cc )

+ 1 - 0
grpc.gyp

@@ -465,6 +465,7 @@
         'src/core/lib/security/credentials/oauth2/oauth2_credentials.cc',
         'src/core/lib/security/credentials/plugin/plugin_credentials.cc',
         'src/core/lib/security/credentials/ssl/ssl_credentials.cc',
+        'src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc',
         'src/core/lib/security/security_connector/alts/alts_security_connector.cc',
         'src/core/lib/security/security_connector/fake/fake_security_connector.cc',
         'src/core/lib/security/security_connector/load_system_roots_fallback.cc',

+ 195 - 0
include/grpc/grpc_security.h

@@ -609,6 +609,201 @@ GRPCAPI grpc_channel_credentials* grpc_local_credentials_create(
 GRPCAPI grpc_server_credentials* grpc_local_server_credentials_create(
     grpc_local_connect_type type);
 
+/** --- SPIFFE and HTTPS-based TLS channel/server credentials ---
+ * It is used for experimental purpose for now and subject to change. */
+
+/** Config for TLS key materials. It is used for
+ *  experimental purpose for now and subject to change. */
+typedef struct grpc_tls_key_materials_config grpc_tls_key_materials_config;
+
+/** Config for TLS credential reload. It is used for
+ *  experimental purpose for now and subject to change. */
+typedef struct grpc_tls_credential_reload_config
+    grpc_tls_credential_reload_config;
+
+/** Config for TLS server authorization check. It is used for
+ *  experimental purpose for now and subject to change. */
+typedef struct grpc_tls_server_authorization_check_config
+    grpc_tls_server_authorization_check_config;
+
+/** TLS credentials options. It is used for
+ *  experimental purpose for now and subject to change. */
+typedef struct grpc_tls_credentials_options grpc_tls_credentials_options;
+
+/** Create an empty TLS credentials options. It is used for
+ *  experimental purpose for now and subject to change. */
+GRPCAPI grpc_tls_credentials_options* grpc_tls_credentials_options_create();
+
+/** Set grpc_ssl_client_certificate_request_type field in credentials options
+    with the provided type. options should not be NULL.
+    It returns 1 on success and 0 on failure. It is used for
+    experimental purpose for now and subject to change. */
+GRPCAPI int grpc_tls_credentials_options_set_cert_request_type(
+    grpc_tls_credentials_options* options,
+    grpc_ssl_client_certificate_request_type type);
+
+/** Set grpc_tls_key_materials_config field in credentials options
+    with the provided config struct whose ownership is transferred.
+    Both parameters should not be NULL.
+    It returns 1 on success and 0 on failure. It is used for
+    experimental purpose for now and subject to change. */
+GRPCAPI int grpc_tls_credentials_options_set_key_materials_config(
+    grpc_tls_credentials_options* options,
+    grpc_tls_key_materials_config* config);
+
+/** Set grpc_tls_credential_reload_config field in credentials options
+    with the provided config struct whose ownership is transferred.
+    Both parameters should not be NULL.
+    It returns 1 on success and 0 on failure. It is used for
+    experimental purpose for now and subject to change. */
+GRPCAPI int grpc_tls_credentials_options_set_credential_reload_config(
+    grpc_tls_credentials_options* options,
+    grpc_tls_credential_reload_config* config);
+
+/** Set grpc_tls_server_authorization_check_config field in credentials options
+    with the provided config struct whose ownership is transferred.
+    Both parameters should not be NULL.
+    It returns 1 on success and 0 on failure. It is used for
+    experimental purpose for now and subject to change. */
+GRPCAPI int grpc_tls_credentials_options_set_server_authorization_check_config(
+    grpc_tls_credentials_options* options,
+    grpc_tls_server_authorization_check_config* config);
+
+/** --- TLS key materials config. ---
+    It is used for experimental purpose for now and subject to change. */
+
+/** Create an empty grpc_tls_key_materials_config instance.
+ *  It is used for experimental purpose for now and subject to change. */
+GRPCAPI grpc_tls_key_materials_config* grpc_tls_key_materials_config_create();
+
+/** Set grpc_tls_key_materials_config instance with provided a TLS certificate.
+    config will take the ownership of pem_root_certs and pem_key_cert_pairs.
+    It's valid for the caller to provide nullptr pem_root_certs, in which case
+    the gRPC-provided root cert will be used. pem_key_cert_pairs should not be
+    NULL. It returns 1 on success and 0 on failure. It is used for
+    experimental purpose for now and subject to change.
+ */
+GRPCAPI int grpc_tls_key_materials_config_set_key_materials(
+    grpc_tls_key_materials_config* config, const char* pem_root_certs,
+    const grpc_ssl_pem_key_cert_pair** pem_key_cert_pairs,
+    size_t num_key_cert_pairs);
+
+/** --- TLS credential reload config. ---
+    It is used for experimental purpose for now and subject to change.*/
+
+typedef struct grpc_tls_credential_reload_arg grpc_tls_credential_reload_arg;
+
+/** A callback function provided by gRPC to handle the result of credential
+    reload. It is used when schedule API is implemented asynchronously and
+    serves to bring the control back to grpc C core. It is used for
+    experimental purpose for now and subject to change. */
+typedef void (*grpc_tls_on_credential_reload_done_cb)(
+    grpc_tls_credential_reload_arg* arg);
+
+/** A struct containing all information necessary to schedule/cancel
+    a credential reload request. cb and cb_user_data represent a gRPC-provided
+    callback and an argument passed to it. key_materials is an in/output
+    parameter containing currently used/newly reloaded credentials. status and
+    error_details are used to hold information about errors occurred when a
+    credential reload request is scheduled/cancelled. It is used for
+    experimental purpose for now and subject to change. */
+struct grpc_tls_credential_reload_arg {
+  grpc_tls_on_credential_reload_done_cb cb;
+  void* cb_user_data;
+  grpc_tls_key_materials_config* key_materials_config;
+  grpc_status_code status;
+  const char* error_details;
+};
+
+/** Create a grpc_tls_credential_reload_config instance.
+    - config_user_data is config-specific, read-only user data
+      that works for all channels created with a credential using the config.
+    - schedule is a pointer to an application-provided callback used to invoke
+      credential reload API. The implementation of this method has to be
+      non-blocking, but can be performed synchronously or asynchronously.
+      1) If processing occurs synchronously, it populates arg->key_materials,
+      arg->status, and arg->error_details and returns zero.
+      2) If processing occurs asynchronously, it returns a non-zero value.
+      The application then invokes arg->cb when processing is completed. Note
+      that arg->cb cannot be invoked before schedule API returns.
+    - cancel is a pointer to an application-provided callback used to cancel
+      a credential reload request scheduled via an asynchronous schedule API.
+      arg is used to pinpoint an exact reloading request to be cancelled.
+      The operation may not have any effect if the request has already been
+      processed.
+    - destruct is a pointer to an application-provided callback used to clean up
+      any data associated with the config.
+    It is used for experimental purpose for now and subject to change.
+*/
+GRPCAPI grpc_tls_credential_reload_config*
+grpc_tls_credential_reload_config_create(
+    const void* config_user_data,
+    int (*schedule)(void* config_user_data,
+                    grpc_tls_credential_reload_arg* arg),
+    void (*cancel)(void* config_user_data, grpc_tls_credential_reload_arg* arg),
+    void (*destruct)(void* config_user_data));
+
+/** --- TLS server authorization check config. ---
+ *  It is used for experimental purpose for now and subject to change. */
+
+typedef struct grpc_tls_server_authorization_check_arg
+    grpc_tls_server_authorization_check_arg;
+
+/** callback function provided by gRPC used to handle the result of server
+    authorization check. It is used when schedule API is implemented
+    asynchronously, and serves to bring the control back to gRPC C core. It is
+    used for experimental purpose for now and subject to change. */
+typedef void (*grpc_tls_on_server_authorization_check_done_cb)(
+    grpc_tls_server_authorization_check_arg* arg);
+
+/** A struct containing all information necessary to schedule/cancel a server
+   authorization check request. cb and cb_user_data represent a gRPC-provided
+   callback and an argument passed to it. result will store the result of
+   server authorization check. target_name is the name of an endpoint the
+   channel is connecting to and certificate represents a complete certificate
+   chain including both signing and leaf certificates. status and error_details
+   contain information about errors occurred when a server authorization check
+   request is scheduled/cancelled. It is used for experimental purpose for now
+   and subject to change.*/
+struct grpc_tls_server_authorization_check_arg {
+  grpc_tls_on_server_authorization_check_done_cb cb;
+  void* cb_user_data;
+  int result;
+  const char* target_name;
+  const char* peer_cert;
+  grpc_status_code status;
+  const char* error_details;
+};
+
+/** Create a grpc_tls_server_authorization_check_config instance.
+    - config_user_data is config-specific, read-only user data
+      that works for all channels created with a credential using the config.
+    - schedule is a pointer to an application-provided callback used to invoke
+      server authorization check API. The implementation of this method has to
+      be non-blocking, but can be performed synchronously or asynchronously.
+      1) If processing occurs synchronously, it populates arg->result,
+      arg->status, and arg->error_details and returns zero.
+      2) If processing occurs asynchronously, it returns a non-zero value. The
+      application then invokes arg->cb when processing is completed. Note that
+      arg->cb cannot be invoked before schedule API returns.
+    - cancel is a pointer to an application-provided callback used to cancel a
+      server authorization check request scheduled via an asynchronous schedule
+      API. arg is used to pinpoint an exact check request to be cancelled. The
+      operation may not have any effect if the request has already been
+      processed.
+    - destruct is a pointer to an application-provided callback used to clean up
+      any data associated with the config.
+    It is used for experimental purpose for now and subject to change.
+*/
+GRPCAPI grpc_tls_server_authorization_check_config*
+grpc_tls_server_authorization_check_config_create(
+    const void* config_user_data,
+    int (*schedule)(void* config_user_data,
+                    grpc_tls_server_authorization_check_arg* arg),
+    void (*cancel)(void* config_user_data,
+                   grpc_tls_server_authorization_check_arg* arg),
+    void (*destruct)(void* config_user_data));
+
 #ifdef __cplusplus
 }
 #endif
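
For orientation, a hedged sketch of how the new options API declared above might be wired together. The certificate strings are placeholders, the synchronous reload callback follows the schedule contract documented above, and passing nullptr for the cancel/destruct callbacks is an assumption of this sketch, not something the header promises:

```cpp
#include <grpc/grpc_security.h>
#include <grpc/status.h>

// Synchronous reload: fill in the arg and return 0, per the contract above.
static int schedule_reload(void* /*config_user_data*/,
                           grpc_tls_credential_reload_arg* arg) {
  arg->status = GRPC_STATUS_OK;
  arg->error_details = nullptr;
  return 0;
}

void configure_tls_options() {
  grpc_tls_credentials_options* options = grpc_tls_credentials_options_create();
  grpc_tls_credentials_options_set_cert_request_type(
      options, GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE);

  // Placeholder PEM strings; ownership of the config transfers to options.
  grpc_tls_key_materials_config* key_materials =
      grpc_tls_key_materials_config_create();
  grpc_ssl_pem_key_cert_pair pair = {"<private-key-pem>", "<cert-chain-pem>"};
  const grpc_ssl_pem_key_cert_pair* pairs[] = {&pair};
  grpc_tls_key_materials_config_set_key_materials(
      key_materials, /*pem_root_certs=*/nullptr, pairs, 1);
  grpc_tls_credentials_options_set_key_materials_config(options, key_materials);

  // Assumption: nullptr cancel/destruct are acceptable when schedule is
  // purely synchronous.
  grpc_tls_credential_reload_config* reload =
      grpc_tls_credential_reload_config_create(
          /*config_user_data=*/nullptr, schedule_reload,
          /*cancel=*/nullptr, /*destruct=*/nullptr);
  grpc_tls_credentials_options_set_credential_reload_config(options, reload);
}
```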

+ 4 - 0
include/grpc/impl/codegen/grpc_types.h

@@ -693,6 +693,10 @@ typedef struct grpc_experimental_completion_queue_functor {
       pointer to this functor and a boolean that indicates whether the
       operation succeeded (non-zero) or failed (zero) */
   void (*functor_run)(struct grpc_experimental_completion_queue_functor*, int);
+
+  /** The following fields are not API. They are meant for internal use. */
+  int internal_success;
+  struct grpc_experimental_completion_queue_functor* internal_next;
 } grpc_experimental_completion_queue_functor;
 
 /* The upgrade to version 2 is currently experimental. */
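
As an illustration of the intended usage (the tag type below is hypothetical), application code supplies only functor_run and leaves the new internal_* fields to the library:

```cpp
#include <grpc/impl/codegen/grpc_types.h>

// Hypothetical callback tag embedding the functor.
struct EchoDoneTag : grpc_experimental_completion_queue_functor {
  EchoDoneTag() { functor_run = &EchoDoneTag::Run; }
  static void Run(grpc_experimental_completion_queue_functor* f, int ok) {
    auto* self = static_cast<EchoDoneTag*>(f);
    (void)self;
    // ok != 0 means the operation succeeded.
    (void)ok;
  }
};
```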

+ 31 - 0
include/grpc/impl/codegen/port_platform.h

@@ -189,6 +189,8 @@
 #define GPR_PLATFORM_STRING "ios"
 #define GPR_CPU_IPHONE 1
 #define GPR_PTHREAD_TLS 1
+/* the c-ares resolver isn't safe to enable on iOS */
+#define GRPC_ARES 0
 #else /* TARGET_OS_IPHONE */
 #define GPR_PLATFORM_STRING "osx"
 #ifdef __MAC_OS_X_VERSION_MIN_REQUIRED
@@ -520,6 +522,35 @@ typedef unsigned __int64 uint64_t;
 #define CENSUSAPI GRPCAPI
 #endif
 
+#ifndef GPR_HAS_ATTRIBUTE
+#ifdef __has_attribute
+#define GPR_HAS_ATTRIBUTE(a) __has_attribute(a)
+#else
+#define GPR_HAS_ATTRIBUTE(a) 0
+#endif
+#endif /* GPR_HAS_ATTRIBUTE */
+
+#ifndef GPR_ATTRIBUTE_NOINLINE
+#if GPR_HAS_ATTRIBUTE(noinline) || (defined(__GNUC__) && !defined(__clang__))
+#define GPR_ATTRIBUTE_NOINLINE __attribute__((noinline))
+#define GPR_HAS_ATTRIBUTE_NOINLINE 1
+#else
+#define GPR_ATTRIBUTE_NOINLINE
+#endif
+#endif /* GPR_ATTRIBUTE_NOINLINE */
+
+#ifndef GPR_ATTRIBUTE_WEAK
+/* Attribute weak is broken on LLVM/windows:
+ * https://bugs.llvm.org/show_bug.cgi?id=37598 */
+#if (GPR_HAS_ATTRIBUTE(weak) || (defined(__GNUC__) && !defined(__clang__))) && \
+    !(defined(__llvm__) && defined(_WIN32))
+#define GPR_ATTRIBUTE_WEAK __attribute__((weak))
+#define GPR_HAS_ATTRIBUTE_WEAK 1
+#else
+#define GPR_ATTRIBUTE_WEAK
+#endif
+#endif /* GPR_ATTRIBUTE_WEAK */
+
 #ifndef GPR_ATTRIBUTE_NO_TSAN /* (1) */
 #if defined(__has_feature)
 #if __has_feature(thread_sanitizer)
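
A small sketch (hypothetical function names) of how the new attribute macros are meant to be applied; on toolchains without the corresponding attribute they expand to nothing, so the declarations remain portable:

```cpp
#include <grpc/impl/codegen/port_platform.h>

// Keep a rarely taken path out of the caller's hot code.
GPR_ATTRIBUTE_NOINLINE void log_rare_error(const char* msg);

// A weak definition that a platform-specific object may override
// (unsupported on LLVM/Windows per the comment above).
GPR_ATTRIBUTE_WEAK void grpc_platform_hook() {}
```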

+ 1 - 1
include/grpc/impl/codegen/slice.h

@@ -81,8 +81,8 @@ struct grpc_slice {
   struct grpc_slice_refcount* refcount;
   union grpc_slice_data {
     struct grpc_slice_refcounted {
-      uint8_t* bytes;
       size_t length;
+      uint8_t* bytes;
     } refcounted;
     struct grpc_slice_inlined {
       uint8_t length;

+ 9 - 0
include/grpcpp/impl/codegen/call_op_set.h

@@ -32,6 +32,7 @@
 #include <grpcpp/impl/codegen/call_hook.h>
 #include <grpcpp/impl/codegen/call_op_set_interface.h>
 #include <grpcpp/impl/codegen/client_context.h>
+#include <grpcpp/impl/codegen/completion_queue.h>
 #include <grpcpp/impl/codegen/completion_queue_tag.h>
 #include <grpcpp/impl/codegen/config.h>
 #include <grpcpp/impl/codegen/core_codegen_interface.h>
@@ -877,6 +878,8 @@ class CallOpSet : public CallOpSetInterface,
 
   bool FinalizeResult(void** tag, bool* status) override {
     if (done_intercepting_) {
+      // Complete the avalanching since we are done with this batch of ops
+      call_.cq()->CompleteAvalanching();
       // We have already finished intercepting and filling in the results. This
       // round trip from the core needed to be made because interceptors were
       // run
@@ -961,6 +964,12 @@ class CallOpSet : public CallOpSetInterface,
     this->Op4::SetInterceptionHookPoint(&interceptor_methods_);
     this->Op5::SetInterceptionHookPoint(&interceptor_methods_);
     this->Op6::SetInterceptionHookPoint(&interceptor_methods_);
+    if (interceptor_methods_.InterceptorsListEmpty()) {
+      return true;
+    }
+    // This call will go through interceptors and would need to
+    // schedule new batches, so delay completion queue shutdown
+    call_.cq()->RegisterAvalanching();
     return interceptor_methods_.RunInterceptors();
   }
   // Returns true if no interceptors need to be run

+ 12 - 1
include/grpcpp/impl/codegen/completion_queue.h

@@ -84,6 +84,8 @@ template <StatusCode code>
 class ErrorMethodHandler;
 template <class InputMessage, class OutputMessage>
 class BlockingUnaryCallImpl;
+template <class Op1, class Op2, class Op3, class Op4, class Op5, class Op6>
+class CallOpSet;
 }  // namespace internal
 
 extern CoreCodegenInterface* g_core_codegen_interface;
@@ -278,6 +280,10 @@ class CompletionQueue : private GrpcLibraryCodegen {
   // Friends that need access to constructor for callback CQ
   friend class ::grpc::Channel;
 
+  // For access to Register/CompleteAvalanching
+  template <class Op1, class Op2, class Op3, class Op4, class Op5, class Op6>
+  friend class ::grpc::internal::CallOpSet;
+
   /// EXPERIMENTAL
   /// Creates a Thread Local cache to store the first event
   /// On this completion queue queued from this thread.  Once
@@ -361,7 +367,12 @@ class CompletionQueue : private GrpcLibraryCodegen {
     gpr_atm_no_barrier_fetch_add(&avalanches_in_flight_,
                                  static_cast<gpr_atm>(1));
   }
-  void CompleteAvalanching();
+  void CompleteAvalanching() {
+    if (gpr_atm_no_barrier_fetch_add(&avalanches_in_flight_,
+                                     static_cast<gpr_atm>(-1)) == 1) {
+      g_core_codegen_interface->grpc_completion_queue_shutdown(cq_);
+    }
+  }
 
   grpc_completion_queue* cq_;  // owned
 

+ 1 - 0
include/grpcpp/impl/codegen/core_codegen.h

@@ -42,6 +42,7 @@ class CoreCodegen final : public CoreCodegenInterface {
       void* reserved) override;
   grpc_completion_queue* grpc_completion_queue_create_for_pluck(
       void* reserved) override;
+  void grpc_completion_queue_shutdown(grpc_completion_queue* cq) override;
   void grpc_completion_queue_destroy(grpc_completion_queue* cq) override;
   grpc_event grpc_completion_queue_pluck(grpc_completion_queue* cq, void* tag,
                                          gpr_timespec deadline,

+ 1 - 0
include/grpcpp/impl/codegen/core_codegen_interface.h

@@ -52,6 +52,7 @@ class CoreCodegenInterface {
       void* reserved) = 0;
   virtual grpc_completion_queue* grpc_completion_queue_create_for_pluck(
       void* reserved) = 0;
+  virtual void grpc_completion_queue_shutdown(grpc_completion_queue* cq) = 0;
   virtual void grpc_completion_queue_destroy(grpc_completion_queue* cq) = 0;
   virtual grpc_event grpc_completion_queue_pluck(grpc_completion_queue* cq,
                                                  void* tag,

+ 23 - 4
include/grpcpp/impl/codegen/interceptor.h

@@ -107,6 +107,24 @@ class InterceptorBatchMethods {
   /// of the hijacking interceptor.
   virtual void Hijack() = 0;
 
+  /// Send Message Methods
+  /// GetSerializedSendMessage and GetSendMessage/ModifySendMessage are the
+  /// available methods to view and modify the request payload. An interceptor
+  /// can access the payload in either serialized form or non-serialized form
+  /// but not both at the same time.
+  /// gRPC performs serialization in a lazy manner, which means
+  /// that a call to GetSerializedSendMessage will result in a serialization
+  /// operation if the payload stored is not in the serialized form already; the
+  /// non-serialized form will be lost and GetSendMessage will no longer return
+  /// a valid pointer, and this will remain true for later interceptors too.
+  /// This can change however if ModifySendMessage is used to replace the
+  /// current payload. Note that ModifySendMessage requires a new payload
+  /// message in the non-serialized form. This will overwrite the existing
+  /// payload irrespective of whether it had been serialized earlier. Also note
+  /// that gRPC Async API requires early serialization of the payload which
+  /// means that the payload would be available in the serialized form only
+  /// unless an interceptor replaces the payload with ModifySendMessage.
+
  /// Returns a modifiable ByteBuffer holding the serialized form of the message
   /// that is going to be sent. Valid for PRE_SEND_MESSAGE interceptions.
   /// A return value of nullptr indicates that this ByteBuffer is not valid.
@@ -114,15 +132,16 @@ class InterceptorBatchMethods {
 
   /// Returns a non-modifiable pointer to the non-serialized form of the message
   /// to be sent. Valid for PRE_SEND_MESSAGE interceptions. A return value of
-  /// nullptr indicates that this field is not valid. Also note that this is
-  /// only supported for sync and callback APIs at the present moment.
+  /// nullptr indicates that this field is not valid.
   virtual const void* GetSendMessage() = 0;
 
   /// Overwrites the message to be sent with \a message. \a message should be in
   /// the non-serialized form expected by the method. Valid for PRE_SEND_MESSAGE
   /// interceptions. Note that the interceptor is responsible for maintaining
-  /// the life of the message for the duration on the send operation, i.e., till
-  /// POST_SEND_MESSAGE.
+  /// the life of the message till it is serialized or it receives the
+  /// POST_SEND_MESSAGE interception point, whichever happens earlier. The
+  /// modifying interceptor may itself force early serialization by calling
+  /// GetSerializedSendMessage.
   virtual void ModifySendMessage(const void* message) = 0;
 
   /// Checks whether the SEND MESSAGE op succeeded. Valid for POST_SEND_MESSAGE
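
A minimal client interceptor sketch built on the methods documented above; the surrounding class names come from the public experimental interceptor API, and the header path is assumed for this gRPC version:

```cpp
#include <grpcpp/support/client_interceptor.h>  // assumed public header path

class PayloadPeekInterceptor : public grpc::experimental::Interceptor {
 public:
  void Intercept(grpc::experimental::InterceptorBatchMethods* methods) override {
    if (methods->QueryInterceptionHookPoint(
            grpc::experimental::InterceptionHookPoints::PRE_SEND_MESSAGE)) {
      // Non-serialized view; may be nullptr once the payload has been
      // serialized (e.g. by the async API's early serialization).
      const void* msg = methods->GetSendMessage();
      if (msg == nullptr) {
        // Fall back to the serialized form; this may trigger serialization.
        grpc::ByteBuffer* buf = methods->GetSerializedSendMessage();
        (void)buf;
      }
    }
    methods->Proceed();  // continue the batch unchanged
  }
};
```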

+ 23 - 4
include/grpcpp/impl/codegen/interceptor_common.h

@@ -219,10 +219,29 @@ class InterceptorBatchMethodsImpl
   // Alternatively, RunInterceptors(std::function<void(void)> f) can be used.
   void SetCallOpSetInterface(CallOpSetInterface* ops) { ops_ = ops; }
 
-  // Returns true if no interceptors are run. This should be used only by
-  // subclasses of CallOpSetInterface. SetCall and SetCallOpSetInterface should
-  // have been called before this. After all the interceptors are done running,
-  // either ContinueFillOpsAfterInterception or
+  // SetCall should have been called before this.
+  // Returns true if the interceptors list is empty
+  bool InterceptorsListEmpty() {
+    auto* client_rpc_info = call_->client_rpc_info();
+    if (client_rpc_info != nullptr) {
+      if (client_rpc_info->interceptors_.size() == 0) {
+        return true;
+      } else {
+        return false;
+      }
+    }
+
+    auto* server_rpc_info = call_->server_rpc_info();
+    if (server_rpc_info == nullptr ||
+        server_rpc_info->interceptors_.size() == 0) {
+      return true;
+    }
+    return false;
+  }
+
+  // This should be used only by subclasses of CallOpSetInterface. SetCall and
+  // SetCallOpSetInterface should have been called before this. After all the
+  // interceptors are done running, either ContinueFillOpsAfterInterception or
   // ContinueFinalizeOpsAfterInterception will be called. Note that neither of
   // them is invoked if there were no interceptors registered.
   bool RunInterceptors() {

+ 21 - 2
include/grpcpp/server.h

@@ -26,6 +26,7 @@
 #include <vector>
 
 #include <grpc/compression.h>
+#include <grpc/support/atm.h>
 #include <grpcpp/completion_queue.h>
 #include <grpcpp/impl/call.h>
 #include <grpcpp/impl/codegen/client_interceptor.h>
@@ -248,8 +249,15 @@ class Server : public ServerInterface, private GrpcLibraryCodegen {
   /// the \a sync_server_cqs)
   std::vector<std::unique_ptr<SyncRequestThreadManager>> sync_req_mgrs_;
 
-  /// Outstanding callback requests
-  std::vector<std::unique_ptr<CallbackRequest>> callback_reqs_;
+  // Outstanding unmatched callback requests, indexed by method.
+  // NOTE: Using a gpr_atm rather than atomic_int because atomic_int isn't
+  //       copyable or movable and thus will cause compilation errors. We
+  //       actually only want to extend the vector before the threaded use
+  //       starts, but this is still a limitation.
+  std::vector<gpr_atm> callback_unmatched_reqs_count_;
+
+  // List of callback requests to start when server actually starts.
+  std::list<CallbackRequest*> callback_reqs_to_start_;
 
   // Server status
   std::mutex mu_;
@@ -259,6 +267,17 @@ class Server : public ServerInterface, private GrpcLibraryCodegen {
 
   std::condition_variable shutdown_cv_;
 
+  // It is ok (but not required) to nest callback_reqs_mu_ under mu_ .
+  // Incrementing callback_reqs_outstanding_ is ok without a lock but it must be
+  // decremented under the lock in case it is the last request and enables the
+  // server shutdown. The increment is performance-critical since it happens
+  // during periods of increasing load; the decrement happens only when memory
+  // is maxed out, during server shutdown, or (possibly in a future version)
+  // during decreasing load, so it is less performance-critical.
+  std::mutex callback_reqs_mu_;
+  std::condition_variable callback_reqs_done_cv_;
+  std::atomic_int callback_reqs_outstanding_{0};
+
   std::shared_ptr<GlobalCallbacks> global_callbacks_;
 
   std::vector<grpc::string> services_;
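
A standalone illustration of the locking discipline described in the comment above (hypothetical names): increments may race freely, but the final decrement happens under the mutex so a waiter cannot miss the wake-up:

```cpp
#include <atomic>
#include <condition_variable>
#include <mutex>

class OutstandingCallbackRequests {
 public:
  // Fast path: no lock needed to record a new outstanding request.
  void Add() { count_.fetch_add(1, std::memory_order_relaxed); }

  // Slow path: decrement under the mutex so that, if this was the last
  // request, the waiter below is guaranteed to observe the notification.
  void Remove() {
    std::lock_guard<std::mutex> lock(mu_);
    if (count_.fetch_sub(1, std::memory_order_acq_rel) == 1) {
      done_cv_.notify_all();
    }
  }

  // Shutdown path: wait until every outstanding request has drained.
  void WaitUntilDrained() {
    std::unique_lock<std::mutex> lock(mu_);
    done_cv_.wait(lock, [this] { return count_.load() == 0; });
  }

 private:
  std::mutex mu_;
  std::condition_variable done_cv_;
  std::atomic<int> count_{0};
};
```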

+ 2 - 0
package.xml

@@ -229,6 +229,7 @@
     <file baseinstalldir="/" name="src/core/lib/security/credentials/oauth2/oauth2_credentials.h" role="src" />
     <file baseinstalldir="/" name="src/core/lib/security/credentials/plugin/plugin_credentials.h" role="src" />
     <file baseinstalldir="/" name="src/core/lib/security/credentials/ssl/ssl_credentials.h" role="src" />
+    <file baseinstalldir="/" name="src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h" role="src" />
     <file baseinstalldir="/" name="src/core/lib/security/security_connector/alts/alts_security_connector.h" role="src" />
     <file baseinstalldir="/" name="src/core/lib/security/security_connector/fake/fake_security_connector.h" role="src" />
     <file baseinstalldir="/" name="src/core/lib/security/security_connector/load_system_roots.h" role="src" />
@@ -670,6 +671,7 @@
     <file baseinstalldir="/" name="src/core/lib/security/credentials/oauth2/oauth2_credentials.cc" role="src" />
     <file baseinstalldir="/" name="src/core/lib/security/credentials/plugin/plugin_credentials.cc" role="src" />
     <file baseinstalldir="/" name="src/core/lib/security/credentials/ssl/ssl_credentials.cc" role="src" />
+    <file baseinstalldir="/" name="src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc" role="src" />
     <file baseinstalldir="/" name="src/core/lib/security/security_connector/alts/alts_security_connector.cc" role="src" />
     <file baseinstalldir="/" name="src/core/lib/security/security_connector/fake/fake_security_connector.cc" role="src" />
     <file baseinstalldir="/" name="src/core/lib/security/security_connector/load_system_roots_fallback.cc" role="src" />

+ 25 - 5
src/compiler/csharp_generator.cc

@@ -199,6 +199,21 @@ std::string GetCSharpMethodType(MethodType method_type) {
   return "";
 }
 
+std::string GetCSharpServerMethodType(MethodType method_type) {
+  switch (method_type) {
+    case METHODTYPE_NO_STREAMING:
+      return "grpc::UnaryServerMethod";
+    case METHODTYPE_CLIENT_STREAMING:
+      return "grpc::ClientStreamingServerMethod";
+    case METHODTYPE_SERVER_STREAMING:
+      return "grpc::ServerStreamingServerMethod";
+    case METHODTYPE_BIDI_STREAMING:
+      return "grpc::DuplexStreamingServerMethod";
+  }
+  GOOGLE_LOG(FATAL) << "Can't get here.";
+  return "";
+}
+
 std::string GetServiceNameFieldName() { return "__ServiceName"; }
 
 std::string GetMarshallerFieldName(const Descriptor* message) {
@@ -612,8 +627,9 @@ void GenerateBindServiceMethod(Printer* out, const ServiceDescriptor* service) {
 void GenerateBindServiceWithBinderMethod(Printer* out,
                                          const ServiceDescriptor* service) {
   out->Print(
-      "/// <summary>Register service method implementations with a service "
-      "binder. Useful when customizing the service binding logic.\n"
+      "/// <summary>Register service method with a service "
+      "binder with or without implementation. Useful when customizing the  "
+      "service binding logic.\n"
       "/// Note: this method is part of an experimental API that can change or "
       "be "
       "removed without any prior notice.</summary>\n");
@@ -635,9 +651,13 @@ void GenerateBindServiceWithBinderMethod(Printer* out,
   for (int i = 0; i < service->method_count(); i++) {
     const MethodDescriptor* method = service->method(i);
     out->Print(
-        "serviceBinder.AddMethod($methodfield$, serviceImpl.$methodname$);\n",
-        "methodfield", GetMethodFieldName(method), "methodname",
-        method->name());
+        "serviceBinder.AddMethod($methodfield$, serviceImpl == null ? null : "
+        "new $servermethodtype$<$inputtype$, $outputtype$>("
+        "serviceImpl.$methodname$));\n",
+        "methodfield", GetMethodFieldName(method), "servermethodtype",
+        GetCSharpServerMethodType(GetMethodType(method)), "inputtype",
+        GetClassName(method->input_type()), "outputtype",
+        GetClassName(method->output_type()), "methodname", method->name());
   }
 
   out->Outdent();

+ 45 - 69
src/core/ext/filters/client_channel/client_channel.cc

@@ -394,7 +394,7 @@ struct subchannel_batch_data {
 
   gpr_refcount refs;
   grpc_call_element* elem;
-  grpc_subchannel_call* subchannel_call;  // Holds a ref.
+  grpc_core::RefCountedPtr<grpc_core::SubchannelCall> subchannel_call;
   // The batch to use in the subchannel call.
   // Its payload field points to subchannel_call_retry_state.batch_payload.
   grpc_transport_stream_op_batch batch;
@@ -478,7 +478,7 @@ struct pending_batch {
   bool send_ops_cached;
 };
 
-/** Call data.  Holds a pointer to grpc_subchannel_call and the
+/** Call data.  Holds a pointer to SubchannelCall and the
     associated machinery to create such a pointer.
     Handles queueing of stream ops until a call object is ready, waiting
     for initial metadata before trying to create a call object,
@@ -504,10 +504,6 @@ struct call_data {
         last_attempt_got_server_pushback(false) {}
 
   ~call_data() {
-    if (GPR_LIKELY(subchannel_call != nullptr)) {
-      GRPC_SUBCHANNEL_CALL_UNREF(subchannel_call,
-                                 "client_channel_destroy_call");
-    }
     grpc_slice_unref_internal(path);
     GRPC_ERROR_UNREF(cancel_error);
     for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches); ++i) {
@@ -536,7 +532,7 @@ struct call_data {
   grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
   grpc_core::RefCountedPtr<ClientChannelMethodParams> method_params;
 
-  grpc_subchannel_call* subchannel_call = nullptr;
+  grpc_core::RefCountedPtr<grpc_core::SubchannelCall> subchannel_call;
 
   // Set when we get a cancel_stream op.
   grpc_error* cancel_error = GRPC_ERROR_NONE;
@@ -807,8 +803,8 @@ static void pending_batches_add(grpc_call_element* elem,
           calld->subchannel_call == nullptr
               ? nullptr
               : static_cast<subchannel_call_retry_state*>(
-                    grpc_connected_subchannel_call_get_parent_data(
-                        calld->subchannel_call));
+                    calld->subchannel_call->GetParentData());
       retry_commit(elem, retry_state);
       // If we are not going to retry and have not yet started, pretend
       // retries are disabled so that we don't bother with retry overhead.
@@ -896,10 +892,10 @@ static void resume_pending_batch_in_call_combiner(void* arg,
                                                   grpc_error* ignored) {
   grpc_transport_stream_op_batch* batch =
       static_cast<grpc_transport_stream_op_batch*>(arg);
-  grpc_subchannel_call* subchannel_call =
-      static_cast<grpc_subchannel_call*>(batch->handler_private.extra_arg);
+  grpc_core::SubchannelCall* subchannel_call =
+      static_cast<grpc_core::SubchannelCall*>(batch->handler_private.extra_arg);
   // Note: This will release the call combiner.
-  grpc_subchannel_call_process_op(subchannel_call, batch);
+  subchannel_call->StartTransportStreamOpBatch(batch);
 }
 
 // This is called via the call combiner, so access to calld is synchronized.
@@ -919,7 +915,7 @@ static void pending_batches_resume(grpc_call_element* elem) {
     gpr_log(GPR_INFO,
             "chand=%p calld=%p: starting %" PRIuPTR
             " pending batches on subchannel_call=%p",
-            chand, calld, num_batches, calld->subchannel_call);
+            chand, calld, num_batches, calld->subchannel_call.get());
   }
   grpc_core::CallCombinerClosureList closures;
   for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
@@ -930,7 +926,7 @@ static void pending_batches_resume(grpc_call_element* elem) {
         maybe_inject_recv_trailing_metadata_ready_for_lb(
             *calld->request->pick(), batch);
       }
-      batch->handler_private.extra_arg = calld->subchannel_call;
+      batch->handler_private.extra_arg = calld->subchannel_call.get();
       GRPC_CLOSURE_INIT(&batch->handler_private.closure,
                         resume_pending_batch_in_call_combiner, batch,
                         grpc_schedule_on_exec_ctx);
@@ -1019,12 +1015,7 @@ static void do_retry(grpc_call_element* elem,
   const ClientChannelMethodParams::RetryPolicy* retry_policy =
       calld->method_params->retry_policy();
   GPR_ASSERT(retry_policy != nullptr);
-  // Reset subchannel call and connected subchannel.
-  if (calld->subchannel_call != nullptr) {
-    GRPC_SUBCHANNEL_CALL_UNREF(calld->subchannel_call,
-                               "client_channel_call_retry");
-    calld->subchannel_call = nullptr;
-  }
+  calld->subchannel_call.reset();
   if (calld->have_request) {
     calld->have_request = false;
     calld->request.Destroy();
@@ -1078,8 +1069,7 @@ static bool maybe_retry(grpc_call_element* elem,
   subchannel_call_retry_state* retry_state = nullptr;
   if (batch_data != nullptr) {
     retry_state = static_cast<subchannel_call_retry_state*>(
-        grpc_connected_subchannel_call_get_parent_data(
-            batch_data->subchannel_call));
+        batch_data->subchannel_call->GetParentData());
     if (retry_state->retry_dispatched) {
       if (grpc_client_channel_trace.enabled()) {
         gpr_log(GPR_INFO, "chand=%p calld=%p: retry already dispatched", chand,
@@ -1180,13 +1170,10 @@ namespace {
 subchannel_batch_data::subchannel_batch_data(grpc_call_element* elem,
                                              call_data* calld, int refcount,
                                              bool set_on_complete)
-    : elem(elem),
-      subchannel_call(GRPC_SUBCHANNEL_CALL_REF(calld->subchannel_call,
-                                               "batch_data_create")) {
+    : elem(elem), subchannel_call(calld->subchannel_call) {
   subchannel_call_retry_state* retry_state =
       static_cast<subchannel_call_retry_state*>(
-          grpc_connected_subchannel_call_get_parent_data(
-              calld->subchannel_call));
+          calld->subchannel_call->GetParentData());
   batch.payload = &retry_state->batch_payload;
   gpr_ref_init(&refs, refcount);
   if (set_on_complete) {
@@ -1200,7 +1187,7 @@ subchannel_batch_data::subchannel_batch_data(grpc_call_element* elem,
 void subchannel_batch_data::destroy() {
   subchannel_call_retry_state* retry_state =
       static_cast<subchannel_call_retry_state*>(
-          grpc_connected_subchannel_call_get_parent_data(subchannel_call));
+          subchannel_call->GetParentData());
   if (batch.send_initial_metadata) {
     grpc_metadata_batch_destroy(&retry_state->send_initial_metadata);
   }
@@ -1213,7 +1200,7 @@ void subchannel_batch_data::destroy() {
   if (batch.recv_trailing_metadata) {
     grpc_metadata_batch_destroy(&retry_state->recv_trailing_metadata);
   }
-  GRPC_SUBCHANNEL_CALL_UNREF(subchannel_call, "batch_data_unref");
+  subchannel_call.reset();
   call_data* calld = static_cast<call_data*>(elem->call_data);
   GRPC_CALL_STACK_UNREF(calld->owning_call, "batch_data");
 }
@@ -1260,8 +1247,7 @@ static void invoke_recv_initial_metadata_callback(void* arg,
   // Return metadata.
   subchannel_call_retry_state* retry_state =
       static_cast<subchannel_call_retry_state*>(
-          grpc_connected_subchannel_call_get_parent_data(
-              batch_data->subchannel_call));
+          batch_data->subchannel_call->GetParentData());
   grpc_metadata_batch_move(
       &retry_state->recv_initial_metadata,
       pending->batch->payload->recv_initial_metadata.recv_initial_metadata);
@@ -1293,8 +1279,7 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
   }
   subchannel_call_retry_state* retry_state =
       static_cast<subchannel_call_retry_state*>(
-          grpc_connected_subchannel_call_get_parent_data(
-              batch_data->subchannel_call));
+          batch_data->subchannel_call->GetParentData());
   retry_state->completed_recv_initial_metadata = true;
   // If a retry was already dispatched, then we're not going to use the
   // result of this recv_initial_metadata op, so do nothing.
@@ -1355,8 +1340,7 @@ static void invoke_recv_message_callback(void* arg, grpc_error* error) {
   // Return payload.
   subchannel_call_retry_state* retry_state =
       static_cast<subchannel_call_retry_state*>(
-          grpc_connected_subchannel_call_get_parent_data(
-              batch_data->subchannel_call));
+          batch_data->subchannel_call->GetParentData());
   *pending->batch->payload->recv_message.recv_message =
       std::move(retry_state->recv_message);
   // Update bookkeeping.
@@ -1384,8 +1368,7 @@ static void recv_message_ready(void* arg, grpc_error* error) {
   }
   subchannel_call_retry_state* retry_state =
       static_cast<subchannel_call_retry_state*>(
-          grpc_connected_subchannel_call_get_parent_data(
-              batch_data->subchannel_call));
+          batch_data->subchannel_call->GetParentData());
   ++retry_state->completed_recv_message_count;
   // If a retry was already dispatched, then we're not going to use the
   // result of this recv_message op, so do nothing.
@@ -1473,8 +1456,7 @@ static void add_closure_for_recv_trailing_metadata_ready(
   // Return metadata.
   subchannel_call_retry_state* retry_state =
       static_cast<subchannel_call_retry_state*>(
-          grpc_connected_subchannel_call_get_parent_data(
-              batch_data->subchannel_call));
+          batch_data->subchannel_call->GetParentData());
   grpc_metadata_batch_move(
       &retry_state->recv_trailing_metadata,
       pending->batch->payload->recv_trailing_metadata.recv_trailing_metadata);
@@ -1576,8 +1558,7 @@ static void run_closures_for_completed_call(subchannel_batch_data* batch_data,
   call_data* calld = static_cast<call_data*>(elem->call_data);
   subchannel_call_retry_state* retry_state =
       static_cast<subchannel_call_retry_state*>(
-          grpc_connected_subchannel_call_get_parent_data(
-              batch_data->subchannel_call));
+          batch_data->subchannel_call->GetParentData());
   // Construct list of closures to execute.
   grpc_core::CallCombinerClosureList closures;
   // First, add closure for recv_trailing_metadata_ready.
@@ -1611,8 +1592,7 @@ static void recv_trailing_metadata_ready(void* arg, grpc_error* error) {
   }
   subchannel_call_retry_state* retry_state =
       static_cast<subchannel_call_retry_state*>(
-          grpc_connected_subchannel_call_get_parent_data(
-              batch_data->subchannel_call));
+          batch_data->subchannel_call->GetParentData());
   retry_state->completed_recv_trailing_metadata = true;
   // Get the call's status and check for server pushback metadata.
   grpc_status_code status = GRPC_STATUS_OK;
@@ -1735,8 +1715,7 @@ static void on_complete(void* arg, grpc_error* error) {
   }
   subchannel_call_retry_state* retry_state =
       static_cast<subchannel_call_retry_state*>(
-          grpc_connected_subchannel_call_get_parent_data(
-              batch_data->subchannel_call));
+          batch_data->subchannel_call->GetParentData());
   // Update bookkeeping in retry_state.
   if (batch_data->batch.send_initial_metadata) {
     retry_state->completed_send_initial_metadata = true;
@@ -1792,10 +1771,10 @@ static void on_complete(void* arg, grpc_error* error) {
 static void start_batch_in_call_combiner(void* arg, grpc_error* ignored) {
   grpc_transport_stream_op_batch* batch =
       static_cast<grpc_transport_stream_op_batch*>(arg);
-  grpc_subchannel_call* subchannel_call =
-      static_cast<grpc_subchannel_call*>(batch->handler_private.extra_arg);
+  grpc_core::SubchannelCall* subchannel_call =
+      static_cast<grpc_core::SubchannelCall*>(batch->handler_private.extra_arg);
   // Note: This will release the call combiner.
-  grpc_subchannel_call_process_op(subchannel_call, batch);
+  subchannel_call->StartTransportStreamOpBatch(batch);
 }
 
 // Adds a closure to closures that will execute batch in the call combiner.
@@ -1804,7 +1783,7 @@ static void add_closure_for_subchannel_batch(
     grpc_core::CallCombinerClosureList* closures) {
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   call_data* calld = static_cast<call_data*>(elem->call_data);
-  batch->handler_private.extra_arg = calld->subchannel_call;
+  batch->handler_private.extra_arg = calld->subchannel_call.get();
   GRPC_CLOSURE_INIT(&batch->handler_private.closure,
                     start_batch_in_call_combiner, batch,
                     grpc_schedule_on_exec_ctx);
@@ -1978,8 +1957,7 @@ static void start_internal_recv_trailing_metadata(grpc_call_element* elem) {
   }
   subchannel_call_retry_state* retry_state =
       static_cast<subchannel_call_retry_state*>(
-          grpc_connected_subchannel_call_get_parent_data(
-              calld->subchannel_call));
+          calld->subchannel_call->GetParentData());
   // Create batch_data with 2 refs, since this batch will be unreffed twice:
   // once for the recv_trailing_metadata_ready callback when the subchannel
   // batch returns, and again when we actually get a recv_trailing_metadata
@@ -1989,7 +1967,7 @@ static void start_internal_recv_trailing_metadata(grpc_call_element* elem) {
   add_retriable_recv_trailing_metadata_op(calld, retry_state, batch_data);
   retry_state->recv_trailing_metadata_internal_batch = batch_data;
   // Note: This will release the call combiner.
-  grpc_subchannel_call_process_op(calld->subchannel_call, &batch_data->batch);
+  calld->subchannel_call->StartTransportStreamOpBatch(&batch_data->batch);
 }
 
 // If there are any cached send ops that need to be replayed on the
@@ -2196,8 +2174,7 @@ static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored) {
   }
   subchannel_call_retry_state* retry_state =
       static_cast<subchannel_call_retry_state*>(
-          grpc_connected_subchannel_call_get_parent_data(
-              calld->subchannel_call));
+          calld->subchannel_call->GetParentData());
   // Construct list of closures to execute, one for each pending batch.
   grpc_core::CallCombinerClosureList closures;
   // Replay previously-returned send_* ops if needed.
@@ -2220,7 +2197,7 @@ static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored) {
     gpr_log(GPR_INFO,
             "chand=%p calld=%p: starting %" PRIuPTR
             " retriable batches on subchannel_call=%p",
-            chand, calld, closures.size(), calld->subchannel_call);
+            chand, calld, closures.size(), calld->subchannel_call.get());
   }
   // Note: This will yield the call combiner.
   closures.RunClosures(calld->call_combiner);
@@ -2245,22 +2222,22 @@ static void create_subchannel_call(grpc_call_element* elem, grpc_error* error) {
       calld->call_combiner,                             // call_combiner
       parent_data_size                                  // parent_data_size
   };
-  grpc_error* new_error =
-      calld->request->pick()->connected_subchannel->CreateCall(
-          call_args, &calld->subchannel_call);
+  grpc_error* new_error = GRPC_ERROR_NONE;
+  calld->subchannel_call =
+      calld->request->pick()->connected_subchannel->CreateCall(call_args,
+                                                               &new_error);
   if (grpc_client_channel_trace.enabled()) {
     gpr_log(GPR_INFO, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
-            chand, calld, calld->subchannel_call, grpc_error_string(new_error));
+            chand, calld, calld->subchannel_call.get(),
+            grpc_error_string(new_error));
   }
   if (GPR_UNLIKELY(new_error != GRPC_ERROR_NONE)) {
     new_error = grpc_error_add_child(new_error, error);
     pending_batches_fail(elem, new_error, true /* yield_call_combiner */);
   } else {
     if (parent_data_size > 0) {
-      new (grpc_connected_subchannel_call_get_parent_data(
-          calld->subchannel_call))
-          subchannel_call_retry_state(
-              calld->request->pick()->subchannel_call_context);
+      new (calld->subchannel_call->GetParentData()) subchannel_call_retry_state(
+          calld->request->pick()->subchannel_call_context);
     }
     pending_batches_resume(elem);
   }
@@ -2488,7 +2465,7 @@ static void cc_start_transport_stream_op_batch(
           batch, GRPC_ERROR_REF(calld->cancel_error), calld->call_combiner);
     } else {
       // Note: This will release the call combiner.
-      grpc_subchannel_call_process_op(calld->subchannel_call, batch);
+      calld->subchannel_call->StartTransportStreamOpBatch(batch);
     }
     return;
   }
@@ -2502,7 +2479,7 @@ static void cc_start_transport_stream_op_batch(
     if (grpc_client_channel_trace.enabled()) {
       gpr_log(GPR_INFO,
               "chand=%p calld=%p: starting batch on subchannel_call=%p", chand,
-              calld, calld->subchannel_call);
+              calld, calld->subchannel_call.get());
     }
     pending_batches_resume(elem);
     return;
@@ -2545,8 +2522,7 @@ static void cc_destroy_call_elem(grpc_call_element* elem,
                                  grpc_closure* then_schedule_closure) {
   call_data* calld = static_cast<call_data*>(elem->call_data);
   if (GPR_LIKELY(calld->subchannel_call != nullptr)) {
-    grpc_subchannel_call_set_cleanup_closure(calld->subchannel_call,
-                                             then_schedule_closure);
+    calld->subchannel_call->SetAfterCallStackDestroy(then_schedule_closure);
     then_schedule_closure = nullptr;
   }
   calld->~call_data();
@@ -2752,8 +2728,8 @@ void grpc_client_channel_watch_connectivity_state(
       GRPC_ERROR_NONE);
 }
 
-grpc_subchannel_call* grpc_client_channel_get_subchannel_call(
-    grpc_call_element* elem) {
+grpc_core::RefCountedPtr<grpc_core::SubchannelCall>
+grpc_client_channel_get_subchannel_call(grpc_call_element* elem) {
   call_data* calld = static_cast<call_data*>(elem->call_data);
   return calld->subchannel_call;
 }
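
For orientation, here is a minimal sketch of the new call-creation shape used throughout this file: ConnectedSubchannel::CreateCall() now returns a RefCountedPtr<SubchannelCall> and reports failure through a grpc_error** out-parameter, parent data and batch dispatch are methods on SubchannelCall, and the old GRPC_SUBCHANNEL_CALL_REF/UNREF bookkeeping is replaced by the smart pointer. The helper below is hypothetical (not part of the commit); CallArgs is assumed to be the existing in-tree args struct populated as in create_subchannel_call() above.

// Hypothetical helper, for illustration only.
static grpc_core::RefCountedPtr<grpc_core::SubchannelCall> CreateCallSketch(
    grpc_core::ConnectedSubchannel* connected_subchannel,
    const grpc_core::ConnectedSubchannel::CallArgs& call_args) {
  grpc_error* error = GRPC_ERROR_NONE;
  // CreateCall() now returns the call and reports failure via an out-param
  // instead of returning grpc_error* and filling a grpc_subchannel_call**.
  grpc_core::RefCountedPtr<grpc_core::SubchannelCall> call =
      connected_subchannel->CreateCall(call_args, &error);
  if (error != GRPC_ERROR_NONE) {
    gpr_log(GPR_ERROR, "subchannel call creation failed: %s",
            grpc_error_string(error));
    GRPC_ERROR_UNREF(error);
    return nullptr;
  }
  // Retry state still lives in the call's parent data, now reached via a
  // method:  new (call->GetParentData()) subchannel_call_retry_state(...);
  // Batches are started with call->StartTransportStreamOpBatch(batch), and
  // the ref is released by dropping the RefCountedPtr (e.g. call.reset()).
  return call;  // Caller keeps this alive for the lifetime of the call.
}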

+ 2 - 2
src/core/ext/filters/client_channel/client_channel.h

@@ -60,7 +60,7 @@ void grpc_client_channel_watch_connectivity_state(
     grpc_closure* watcher_timer_init);
 
 /* Debug helper: pull the subchannel call from a call stack element */
-grpc_subchannel_call* grpc_client_channel_get_subchannel_call(
-    grpc_call_element* elem);
+grpc_core::RefCountedPtr<grpc_core::SubchannelCall>
+grpc_client_channel_get_subchannel_call(grpc_call_element* elem);
 
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CLIENT_CHANNEL_H */

+ 5 - 6
src/core/ext/filters/client_channel/client_channel_channelz.cc

@@ -113,12 +113,11 @@ RefCountedPtr<ChannelNode> ClientChannelNode::MakeClientChannelNode(
                                            is_top_level_channel);
 }
 
-SubchannelNode::SubchannelNode(grpc_subchannel* subchannel,
+SubchannelNode::SubchannelNode(Subchannel* subchannel,
                                size_t channel_tracer_max_nodes)
     : BaseNode(EntityType::kSubchannel),
       subchannel_(subchannel),
-      target_(
-          UniquePtr<char>(gpr_strdup(grpc_subchannel_get_target(subchannel_)))),
+      target_(UniquePtr<char>(gpr_strdup(subchannel_->GetTargetAddress()))),
       trace_(channel_tracer_max_nodes) {}
 
 SubchannelNode::~SubchannelNode() {}
@@ -128,8 +127,8 @@ void SubchannelNode::PopulateConnectivityState(grpc_json* json) {
   if (subchannel_ == nullptr) {
     state = GRPC_CHANNEL_SHUTDOWN;
   } else {
-    state = grpc_subchannel_check_connectivity(
-        subchannel_, nullptr, true /* inhibit_health_checking */);
+    state = subchannel_->CheckConnectivity(nullptr,
+                                           true /* inhibit_health_checking */);
   }
   json = grpc_json_create_child(nullptr, json, "state", nullptr,
                                 GRPC_JSON_OBJECT, false);
@@ -170,7 +169,7 @@ grpc_json* SubchannelNode::RenderJson() {
   call_counter_.PopulateCallCounts(json);
   json = top_level_json;
   // populate the child socket.
-  intptr_t socket_uuid = grpc_subchannel_get_child_socket_uuid(subchannel_);
+  intptr_t socket_uuid = subchannel_->GetChildSocketUuid();
   if (socket_uuid != 0) {
     grpc_json* array_parent = grpc_json_create_child(
         nullptr, json, "socketRef", nullptr, GRPC_JSON_ARRAY, false);

+ 5 - 4
src/core/ext/filters/client_channel/client_channel_channelz.h

@@ -26,9 +26,10 @@
 #include "src/core/lib/channel/channel_trace.h"
 #include "src/core/lib/channel/channelz.h"
 
-typedef struct grpc_subchannel grpc_subchannel;
-
 namespace grpc_core {
+
+class Subchannel;
+
 namespace channelz {
 
 // Subtype of ChannelNode that overrides and provides client_channel specific
@@ -59,7 +60,7 @@ class ClientChannelNode : public ChannelNode {
 // Handles channelz bookkeeping for sockets
 class SubchannelNode : public BaseNode {
  public:
-  SubchannelNode(grpc_subchannel* subchannel, size_t channel_tracer_max_nodes);
+  SubchannelNode(Subchannel* subchannel, size_t channel_tracer_max_nodes);
   ~SubchannelNode() override;
 
   void MarkSubchannelDestroyed() {
@@ -84,7 +85,7 @@ class SubchannelNode : public BaseNode {
   void RecordCallSucceeded() { call_counter_.RecordCallSucceeded(); }
 
  private:
-  grpc_subchannel* subchannel_;
+  Subchannel* subchannel_;
   UniquePtr<char> target_;
   CallCountingHelper call_counter_;
   ChannelTrace trace_;

+ 1 - 1
src/core/ext/filters/client_channel/client_channel_factory.cc

@@ -29,7 +29,7 @@ void grpc_client_channel_factory_unref(grpc_client_channel_factory* factory) {
   factory->vtable->unref(factory);
 }
 
-grpc_subchannel* grpc_client_channel_factory_create_subchannel(
+grpc_core::Subchannel* grpc_client_channel_factory_create_subchannel(
     grpc_client_channel_factory* factory, const grpc_channel_args* args) {
   return factory->vtable->create_subchannel(factory, args);
 }

+ 3 - 3
src/core/ext/filters/client_channel/client_channel_factory.h

@@ -48,8 +48,8 @@ struct grpc_client_channel_factory {
 struct grpc_client_channel_factory_vtable {
   void (*ref)(grpc_client_channel_factory* factory);
   void (*unref)(grpc_client_channel_factory* factory);
-  grpc_subchannel* (*create_subchannel)(grpc_client_channel_factory* factory,
-                                        const grpc_channel_args* args);
+  grpc_core::Subchannel* (*create_subchannel)(
+      grpc_client_channel_factory* factory, const grpc_channel_args* args);
   grpc_channel* (*create_client_channel)(grpc_client_channel_factory* factory,
                                          const char* target,
                                          grpc_client_channel_type type,
@@ -60,7 +60,7 @@ void grpc_client_channel_factory_ref(grpc_client_channel_factory* factory);
 void grpc_client_channel_factory_unref(grpc_client_channel_factory* factory);
 
 /** Create a new grpc_subchannel */
-grpc_subchannel* grpc_client_channel_factory_create_subchannel(
+grpc_core::Subchannel* grpc_client_channel_factory_create_subchannel(
     grpc_client_channel_factory* factory, const grpc_channel_args* args);
 
 /** Create a new grpc_channel */

+ 9 - 10
src/core/ext/filters/client_channel/global_subchannel_pool.cc

@@ -54,9 +54,9 @@ RefCountedPtr<GlobalSubchannelPool> GlobalSubchannelPool::instance() {
   return *instance_;
 }
 
-grpc_subchannel* GlobalSubchannelPool::RegisterSubchannel(
-    SubchannelKey* key, grpc_subchannel* constructed) {
-  grpc_subchannel* c = nullptr;
+Subchannel* GlobalSubchannelPool::RegisterSubchannel(SubchannelKey* key,
+                                                     Subchannel* constructed) {
+  Subchannel* c = nullptr;
   // Compare and swap (CAS) loop:
   while (c == nullptr) {
     // Ref the shared map to have a local copy.
@@ -64,7 +64,7 @@ grpc_subchannel* GlobalSubchannelPool::RegisterSubchannel(
     grpc_avl old_map = grpc_avl_ref(subchannel_map_, nullptr);
     gpr_mu_unlock(&mu_);
     // Check to see if a subchannel already exists.
-    c = static_cast<grpc_subchannel*>(grpc_avl_get(old_map, key, nullptr));
+    c = static_cast<Subchannel*>(grpc_avl_get(old_map, key, nullptr));
     if (c != nullptr) {
       // The subchannel already exists. Reuse it.
       c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(c, "subchannel_register+reuse");
@@ -121,15 +121,14 @@ void GlobalSubchannelPool::UnregisterSubchannel(SubchannelKey* key) {
   }
 }
 
-grpc_subchannel* GlobalSubchannelPool::FindSubchannel(SubchannelKey* key) {
+Subchannel* GlobalSubchannelPool::FindSubchannel(SubchannelKey* key) {
   // Lock, and take a reference to the subchannel map.
   // We don't need to do the search under a lock as AVL's are immutable.
   gpr_mu_lock(&mu_);
   grpc_avl index = grpc_avl_ref(subchannel_map_, nullptr);
   gpr_mu_unlock(&mu_);
-  grpc_subchannel* c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(
-      static_cast<grpc_subchannel*>(grpc_avl_get(index, key, nullptr)),
-      "found_from_pool");
+  Subchannel* c = static_cast<Subchannel*>(grpc_avl_get(index, key, nullptr));
+  if (c != nullptr) GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(c, "found_from_pool");
   grpc_avl_unref(index, nullptr);
   return c;
 }
@@ -156,11 +155,11 @@ long sck_avl_compare(void* a, void* b, void* unused) {
 }
 
 void scv_avl_destroy(void* p, void* user_data) {
-  GRPC_SUBCHANNEL_WEAK_UNREF((grpc_subchannel*)p, "global_subchannel_pool");
+  GRPC_SUBCHANNEL_WEAK_UNREF((Subchannel*)p, "global_subchannel_pool");
 }
 
 void* scv_avl_copy(void* p, void* unused) {
-  GRPC_SUBCHANNEL_WEAK_REF((grpc_subchannel*)p, "global_subchannel_pool");
+  GRPC_SUBCHANNEL_WEAK_REF((Subchannel*)p, "global_subchannel_pool");
   return p;
 }
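
As a reading aid, a hedged sketch of the find-or-register pattern against the renamed type (Subchannel instead of grpc_subchannel). The helper is hypothetical; note that FindSubchannel() now only promotes the weak ref when the lookup actually hits, which is the null check added above.

// Hypothetical helper, not part of the commit.
static grpc_core::Subchannel* FindOrRegisterSketch(
    grpc_core::SubchannelKey* key, grpc_core::Subchannel* constructed) {
  grpc_core::RefCountedPtr<grpc_core::GlobalSubchannelPool> pool =
      grpc_core::GlobalSubchannelPool::instance();
  // Returns nullptr on a miss; on a hit the pool takes a strong ref for us.
  grpc_core::Subchannel* c = pool->FindSubchannel(key);
  if (c != nullptr) return c;
  // Register the freshly constructed subchannel. The CAS loop inside the
  // pool may hand back an equivalent subchannel registered concurrently.
  return pool->RegisterSubchannel(key, constructed);
}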
 

+ 3 - 3
src/core/ext/filters/client_channel/global_subchannel_pool.h

@@ -45,10 +45,10 @@ class GlobalSubchannelPool final : public SubchannelPoolInterface {
   static RefCountedPtr<GlobalSubchannelPool> instance();
 
   // Implements interface methods.
-  grpc_subchannel* RegisterSubchannel(SubchannelKey* key,
-                                      grpc_subchannel* constructed) override;
+  Subchannel* RegisterSubchannel(SubchannelKey* key,
+                                 Subchannel* constructed) override;
   void UnregisterSubchannel(SubchannelKey* key) override;
-  grpc_subchannel* FindSubchannel(SubchannelKey* key) override;
+  Subchannel* FindSubchannel(SubchannelKey* key) override;
 
  private:
   // The singleton instance. (It's a pointer to RefCountedPtr so that this

+ 10 - 8
src/core/ext/filters/client_channel/health/health_check_client.cc

@@ -295,7 +295,9 @@ HealthCheckClient::CallState::~CallState() {
     gpr_log(GPR_INFO, "HealthCheckClient %p: destroying CallState %p",
             health_check_client_.get(), this);
   }
-  if (call_ != nullptr) GRPC_SUBCHANNEL_CALL_UNREF(call_, "call_ended");
+  // The subchannel call is in the arena, so reset the pointer before we destroy
+  // the arena.
+  call_.reset();
   for (size_t i = 0; i < GRPC_CONTEXT_COUNT; i++) {
     if (context_[i].destroy != nullptr) {
       context_[i].destroy(context_[i].value);
@@ -329,8 +331,8 @@ void HealthCheckClient::CallState::StartCall() {
       &call_combiner_,
       0,  // parent_data_size
   };
-  grpc_error* error =
-      health_check_client_->connected_subchannel_->CreateCall(args, &call_);
+  grpc_error* error = GRPC_ERROR_NONE;
+  call_ = health_check_client_->connected_subchannel_->CreateCall(args, &error);
   if (error != GRPC_ERROR_NONE) {
     gpr_log(GPR_ERROR,
             "HealthCheckClient %p CallState %p: error creating health "
@@ -423,14 +425,14 @@ void HealthCheckClient::CallState::StartBatchInCallCombiner(void* arg,
                                                             grpc_error* error) {
   grpc_transport_stream_op_batch* batch =
       static_cast<grpc_transport_stream_op_batch*>(arg);
-  grpc_subchannel_call* call =
-      static_cast<grpc_subchannel_call*>(batch->handler_private.extra_arg);
-  grpc_subchannel_call_process_op(call, batch);
+  SubchannelCall* call =
+      static_cast<SubchannelCall*>(batch->handler_private.extra_arg);
+  call->StartTransportStreamOpBatch(batch);
 }
 
 void HealthCheckClient::CallState::StartBatch(
     grpc_transport_stream_op_batch* batch) {
-  batch->handler_private.extra_arg = call_;
+  batch->handler_private.extra_arg = call_.get();
   GRPC_CLOSURE_INIT(&batch->handler_private.closure, StartBatchInCallCombiner,
                     batch, grpc_schedule_on_exec_ctx);
   GRPC_CALL_COMBINER_START(&call_combiner_, &batch->handler_private.closure,
@@ -452,7 +454,7 @@ void HealthCheckClient::CallState::StartCancel(void* arg, grpc_error* error) {
       GRPC_CLOSURE_CREATE(OnCancelComplete, self, grpc_schedule_on_exec_ctx));
   batch->cancel_stream = true;
   batch->payload->cancel_stream.cancel_error = GRPC_ERROR_CANCELLED;
-  grpc_subchannel_call_process_op(self->call_, batch);
+  self->call_->StartTransportStreamOpBatch(batch);
 }
 
 void HealthCheckClient::CallState::Cancel() {

+ 1 - 1
src/core/ext/filters/client_channel/health/health_check_client.h

@@ -99,7 +99,7 @@ class HealthCheckClient : public InternallyRefCounted<HealthCheckClient> {
     grpc_call_context_element context_[GRPC_CONTEXT_COUNT] = {};
 
     // The streaming call to the backend. Always non-NULL.
-    grpc_subchannel_call* call_;
+    RefCountedPtr<SubchannelCall> call_;
 
     grpc_transport_stream_op_batch_payload payload_;
     grpc_transport_stream_op_batch batch_;

+ 29 - 2
src/core/ext/filters/client_channel/lb_policy.cc

@@ -20,6 +20,7 @@
 
 #include "src/core/ext/filters/client_channel/lb_policy.h"
 
+#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
 #include "src/core/lib/iomgr/combiner.h"
 
 grpc_core::DebugOnlyTraceFlag grpc_trace_lb_policy_refcount(
@@ -27,11 +28,37 @@ grpc_core::DebugOnlyTraceFlag grpc_trace_lb_policy_refcount(
 
 namespace grpc_core {
 
-LoadBalancingPolicy::LoadBalancingPolicy(const Args& args)
+grpc_json* LoadBalancingPolicy::ParseLoadBalancingConfig(
+    const grpc_json* lb_config_array) {
+  if (lb_config_array == nullptr || lb_config_array->type != GRPC_JSON_ARRAY) {
+    return nullptr;
+  }
+  // Find the first LB policy that this client supports.
+  for (const grpc_json* lb_config = lb_config_array->child;
+       lb_config != nullptr; lb_config = lb_config->next) {
+    if (lb_config->type != GRPC_JSON_OBJECT) return nullptr;
+    grpc_json* policy = nullptr;
+    for (grpc_json* field = lb_config->child; field != nullptr;
+         field = field->next) {
+      if (field->key == nullptr || field->type != GRPC_JSON_OBJECT)
+        return nullptr;
+      if (policy != nullptr) return nullptr;  // Violates "oneof" type.
+      policy = field;
+    }
+    if (policy == nullptr) return nullptr;
+    // If we support this policy, then select it.
+    if (LoadBalancingPolicyRegistry::LoadBalancingPolicyExists(policy->key)) {
+      return policy;
+    }
+  }
+  return nullptr;
+}
+
+LoadBalancingPolicy::LoadBalancingPolicy(Args args)
     : InternallyRefCounted(&grpc_trace_lb_policy_refcount),
       combiner_(GRPC_COMBINER_REF(args.combiner, "lb_policy")),
       client_channel_factory_(args.client_channel_factory),
-      subchannel_pool_(*args.subchannel_pool),
+      subchannel_pool_(std::move(args.subchannel_pool)),
       interested_parties_(grpc_pollset_set_create()),
       request_reresolution_(nullptr) {}
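
To make the new ParseLoadBalancingConfig() helper concrete, here is a hedged usage sketch. It assumes grpc_init() has already registered the built-in policies and uses the in-tree grpc_json API; the JSON literal is invented. The helper walks a LoadBalancingConfig array (each element an object with exactly one key naming a policy) and returns the first entry whose policy name is registered, or nullptr on malformed input.

#include <string.h>

#include <grpc/support/alloc.h>        // gpr_free
#include <grpc/support/log.h>          // GPR_ASSERT
#include <grpc/support/string_util.h>  // gpr_strdup

#include "src/core/ext/filters/client_channel/lb_policy.h"
#include "src/core/lib/json/json.h"

void ParseLoadBalancingConfigSketch() {
  // Equivalent to the service-config fragment:
  //   "loadBalancingConfig": [ { "no_such_policy": {} }, { "round_robin": {} } ]
  char* scratch =
      gpr_strdup("[{\"no_such_policy\":{}},{\"round_robin\":{}}]");
  grpc_json* array = grpc_json_parse_string(scratch);  // parses in place
  // "no_such_policy" is skipped (not a registered policy); the
  // {"round_robin": {}} node is returned. The result points into `array`, so
  // use it (or dump it to a string) before destroying the tree.
  grpc_json* policy =
      grpc_core::LoadBalancingPolicy::ParseLoadBalancingConfig(array);
  GPR_ASSERT(policy != nullptr && strcmp(policy->key, "round_robin") == 0);
  grpc_json_destroy(array);
  gpr_free(scratch);
}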
 

+ 10 - 6
src/core/ext/filters/client_channel/lb_policy.h

@@ -55,7 +55,7 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
     /// Used to create channels and subchannels.
     grpc_client_channel_factory* client_channel_factory = nullptr;
     /// Subchannel pool.
-    RefCountedPtr<SubchannelPoolInterface>* subchannel_pool;
+    RefCountedPtr<SubchannelPoolInterface> subchannel_pool;
     /// Channel args from the resolver.
     /// Note that the LB policy gets the set of addresses from the
     /// GRPC_ARG_SERVER_ADDRESS_LIST channel arg.
@@ -179,6 +179,10 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
         GRPC_ERROR_NONE);
   }
 
+  /// Returns the JSON node of the selected policy (with both policy name and
+  /// config content), given the JSON node of a LoadBalancingConfig array.
+  static grpc_json* ParseLoadBalancingConfig(const grpc_json* lb_config_array);
+
   /// Sets the re-resolution closure to \a request_reresolution.
   void SetReresolutionClosureLocked(grpc_closure* request_reresolution) {
     GPR_ASSERT(request_reresolution_ == nullptr);
@@ -187,10 +191,10 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
 
   grpc_pollset_set* interested_parties() const { return interested_parties_; }
 
-  /// Returns a pointer to the subchannel pool of type
-  /// RefCountedPtr<SubchannelPoolInterface>.
-  RefCountedPtr<SubchannelPoolInterface>* subchannel_pool() {
-    return &subchannel_pool_;
+  // Callers that need their own reference can call the returned
+  // object's Ref() method.
+  SubchannelPoolInterface* subchannel_pool() const {
+    return subchannel_pool_.get();
   }
 
   GRPC_ABSTRACT_BASE_CLASS
@@ -198,7 +202,7 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
  protected:
   GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_DELETE
 
-  explicit LoadBalancingPolicy(const Args& args);
+  explicit LoadBalancingPolicy(Args args);
   virtual ~LoadBalancingPolicy();
 
   grpc_combiner* combiner() const { return combiner_; }
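
Since Args is now taken by value and the subchannel pool is held as a strong RefCountedPtr (rather than a pointer to the owner's RefCountedPtr), call sites build the struct, take their own ref on the pool, and hand the whole thing off with std::move. A minimal fragment mirroring the grpclb/xds call sites in this commit; combiner, factory and channel_args are placeholders for whatever the caller already holds.

  grpc_core::LoadBalancingPolicy::Args lb_policy_args;
  lb_policy_args.combiner = combiner;                // placeholder
  lb_policy_args.client_channel_factory = factory;   // placeholder
  lb_policy_args.args = channel_args;                // placeholder
  // Take a new strong ref for the child policy; the parent keeps its own.
  lb_policy_args.subchannel_pool = subchannel_pool()->Ref();
  grpc_core::OrphanablePtr<grpc_core::LoadBalancingPolicy> policy =
      grpc_core::LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
          "round_robin", std::move(lb_policy_args));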

+ 10 - 10
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc

@@ -125,7 +125,7 @@ constexpr char kGrpclb[] = "grpclb";
 
 class GrpcLb : public LoadBalancingPolicy {
  public:
-  explicit GrpcLb(const Args& args);
+  explicit GrpcLb(Args args);
 
   const char* name() const override { return kGrpclb; }
 
@@ -273,7 +273,7 @@ class GrpcLb : public LoadBalancingPolicy {
   // Methods for dealing with the RR policy.
   void CreateOrUpdateRoundRobinPolicyLocked();
   grpc_channel_args* CreateRoundRobinPolicyArgsLocked();
-  void CreateRoundRobinPolicyLocked(const Args& args);
+  void CreateRoundRobinPolicyLocked(Args args);
   bool PickFromRoundRobinPolicyLocked(bool force_async, PendingPick* pp,
                                       grpc_error** error);
   void UpdateConnectivityStateFromRoundRobinPolicyLocked(
@@ -973,8 +973,8 @@ grpc_channel_args* BuildBalancerChannelArgs(
 // ctor and dtor
 //
 
-GrpcLb::GrpcLb(const LoadBalancingPolicy::Args& args)
-    : LoadBalancingPolicy(args),
+GrpcLb::GrpcLb(LoadBalancingPolicy::Args args)
+    : LoadBalancingPolicy(std::move(args)),
       response_generator_(MakeRefCounted<FakeResolverResponseGenerator>()),
       lb_call_backoff_(
           BackOff::Options()
@@ -1588,10 +1588,10 @@ bool GrpcLb::PickFromRoundRobinPolicyLocked(bool force_async, PendingPick* pp,
   return pick_done;
 }
 
-void GrpcLb::CreateRoundRobinPolicyLocked(const Args& args) {
+void GrpcLb::CreateRoundRobinPolicyLocked(Args args) {
   GPR_ASSERT(rr_policy_ == nullptr);
   rr_policy_ = LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
-      "round_robin", args);
+      "round_robin", std::move(args));
   if (GPR_UNLIKELY(rr_policy_ == nullptr)) {
     gpr_log(GPR_ERROR, "[grpclb %p] Failure creating a RoundRobin policy",
             this);
@@ -1693,8 +1693,8 @@ void GrpcLb::CreateOrUpdateRoundRobinPolicyLocked() {
     lb_policy_args.combiner = combiner();
     lb_policy_args.client_channel_factory = client_channel_factory();
     lb_policy_args.args = args;
-    lb_policy_args.subchannel_pool = subchannel_pool();
-    CreateRoundRobinPolicyLocked(lb_policy_args);
+    lb_policy_args.subchannel_pool = subchannel_pool()->Ref();
+    CreateRoundRobinPolicyLocked(std::move(lb_policy_args));
   }
   grpc_channel_args_destroy(args);
 }
@@ -1802,7 +1802,7 @@ void GrpcLb::OnRoundRobinConnectivityChangedLocked(void* arg,
 class GrpcLbFactory : public LoadBalancingPolicyFactory {
  public:
   OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
-      const LoadBalancingPolicy::Args& args) const override {
+      LoadBalancingPolicy::Args args) const override {
     /* Count the number of gRPC-LB addresses. There must be at least one. */
     const ServerAddressList* addresses =
         FindServerAddressListChannelArg(args.args);
@@ -1815,7 +1815,7 @@ class GrpcLbFactory : public LoadBalancingPolicyFactory {
       }
     }
     if (!found_balancer) return nullptr;
-    return OrphanablePtr<LoadBalancingPolicy>(New<GrpcLb>(args));
+    return OrphanablePtr<LoadBalancingPolicy>(New<GrpcLb>(std::move(args)));
   }
 
   const char* name() const override { return kGrpclb; }

+ 5 - 5
src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc

@@ -46,7 +46,7 @@ constexpr char kPickFirst[] = "pick_first";
 
 class PickFirst : public LoadBalancingPolicy {
  public:
-  explicit PickFirst(const Args& args);
+  explicit PickFirst(Args args);
 
   const char* name() const override { return kPickFirst; }
 
@@ -79,7 +79,7 @@ class PickFirst : public LoadBalancingPolicy {
     PickFirstSubchannelData(
         SubchannelList<PickFirstSubchannelList, PickFirstSubchannelData>*
             subchannel_list,
-        const ServerAddress& address, grpc_subchannel* subchannel,
+        const ServerAddress& address, Subchannel* subchannel,
         grpc_combiner* combiner)
         : SubchannelData(subchannel_list, address, subchannel, combiner) {}
 
@@ -154,7 +154,7 @@ class PickFirst : public LoadBalancingPolicy {
   channelz::ChildRefsList child_channels_;
 };
 
-PickFirst::PickFirst(const Args& args) : LoadBalancingPolicy(args) {
+PickFirst::PickFirst(Args args) : LoadBalancingPolicy(std::move(args)) {
   GPR_ASSERT(args.client_channel_factory != nullptr);
   gpr_mu_init(&child_refs_mu_);
   grpc_connectivity_state_init(&state_tracker_, GRPC_CHANNEL_IDLE,
@@ -619,8 +619,8 @@ void PickFirst::PickFirstSubchannelData::
 class PickFirstFactory : public LoadBalancingPolicyFactory {
  public:
   OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
-      const LoadBalancingPolicy::Args& args) const override {
-    return OrphanablePtr<LoadBalancingPolicy>(New<PickFirst>(args));
+      LoadBalancingPolicy::Args args) const override {
+    return OrphanablePtr<LoadBalancingPolicy>(New<PickFirst>(std::move(args)));
   }
 
   const char* name() const override { return kPickFirst; }

+ 5 - 5
src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc

@@ -56,7 +56,7 @@ constexpr char kRoundRobin[] = "round_robin";
 
 class RoundRobin : public LoadBalancingPolicy {
  public:
-  explicit RoundRobin(const Args& args);
+  explicit RoundRobin(Args args);
 
   const char* name() const override { return kRoundRobin; }
 
@@ -94,7 +94,7 @@ class RoundRobin : public LoadBalancingPolicy {
     RoundRobinSubchannelData(
         SubchannelList<RoundRobinSubchannelList, RoundRobinSubchannelData>*
             subchannel_list,
-        const ServerAddress& address, grpc_subchannel* subchannel,
+        const ServerAddress& address, Subchannel* subchannel,
         grpc_combiner* combiner)
         : SubchannelData(subchannel_list, address, subchannel, combiner) {}
 
@@ -210,7 +210,7 @@ class RoundRobin : public LoadBalancingPolicy {
   channelz::ChildRefsList child_channels_;
 };
 
-RoundRobin::RoundRobin(const Args& args) : LoadBalancingPolicy(args) {
+RoundRobin::RoundRobin(Args args) : LoadBalancingPolicy(std::move(args)) {
   GPR_ASSERT(args.client_channel_factory != nullptr);
   gpr_mu_init(&child_refs_mu_);
   grpc_connectivity_state_init(&state_tracker_, GRPC_CHANNEL_IDLE,
@@ -697,8 +697,8 @@ void RoundRobin::UpdateLocked(const grpc_channel_args& args,
 class RoundRobinFactory : public LoadBalancingPolicyFactory {
  public:
   OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
-      const LoadBalancingPolicy::Args& args) const override {
-    return OrphanablePtr<LoadBalancingPolicy>(New<RoundRobin>(args));
+      LoadBalancingPolicy::Args args) const override {
+    return OrphanablePtr<LoadBalancingPolicy>(New<RoundRobin>(std::move(args)));
   }
 
   const char* name() const override { return kRoundRobin; }

+ 20 - 21
src/core/ext/filters/client_channel/lb_policy/subchannel_list.h

@@ -88,7 +88,7 @@ class SubchannelData {
   }
 
   // Returns a pointer to the subchannel.
-  grpc_subchannel* subchannel() const { return subchannel_; }
+  Subchannel* subchannel() const { return subchannel_; }
 
   // Returns the connected subchannel.  Will be null if the subchannel
   // is not connected.
@@ -103,8 +103,8 @@ class SubchannelData {
   // ProcessConnectivityChangeLocked()).
   grpc_connectivity_state CheckConnectivityStateLocked(grpc_error** error) {
     GPR_ASSERT(!connectivity_notification_pending_);
-    pending_connectivity_state_unsafe_ = grpc_subchannel_check_connectivity(
-        subchannel(), error, subchannel_list_->inhibit_health_checking());
+    pending_connectivity_state_unsafe_ = subchannel()->CheckConnectivity(
+        error, subchannel_list_->inhibit_health_checking());
     UpdateConnectedSubchannelLocked();
     return pending_connectivity_state_unsafe_;
   }
@@ -142,7 +142,7 @@ class SubchannelData {
  protected:
   SubchannelData(
       SubchannelList<SubchannelListType, SubchannelDataType>* subchannel_list,
-      const ServerAddress& address, grpc_subchannel* subchannel,
+      const ServerAddress& address, Subchannel* subchannel,
       grpc_combiner* combiner);
 
   virtual ~SubchannelData();
@@ -170,7 +170,7 @@ class SubchannelData {
   SubchannelList<SubchannelListType, SubchannelDataType>* subchannel_list_;
 
   // The subchannel and connected subchannel.
-  grpc_subchannel* subchannel_;
+  Subchannel* subchannel_;
   RefCountedPtr<ConnectedSubchannel> connected_subchannel_;
 
   // Notification that connectivity has changed on subchannel.
@@ -203,7 +203,7 @@ class SubchannelList : public InternallyRefCounted<SubchannelListType> {
     for (size_t i = 0; i < subchannels_.size(); ++i) {
       if (subchannels_[i].subchannel() != nullptr) {
         grpc_core::channelz::SubchannelNode* subchannel_node =
-            grpc_subchannel_get_channelz_node(subchannels_[i].subchannel());
+            subchannels_[i].subchannel()->channelz_node();
         if (subchannel_node != nullptr) {
           refs_list->push_back(subchannel_node->uuid());
         }
@@ -276,7 +276,7 @@ class SubchannelList : public InternallyRefCounted<SubchannelListType> {
 template <typename SubchannelListType, typename SubchannelDataType>
 SubchannelData<SubchannelListType, SubchannelDataType>::SubchannelData(
     SubchannelList<SubchannelListType, SubchannelDataType>* subchannel_list,
-    const ServerAddress& address, grpc_subchannel* subchannel,
+    const ServerAddress& address, Subchannel* subchannel,
     grpc_combiner* combiner)
     : subchannel_list_(subchannel_list),
       subchannel_(subchannel),
@@ -317,7 +317,7 @@ template <typename SubchannelListType, typename SubchannelDataType>
 void SubchannelData<SubchannelListType,
                     SubchannelDataType>::ResetBackoffLocked() {
   if (subchannel_ != nullptr) {
-    grpc_subchannel_reset_backoff(subchannel_);
+    subchannel_->ResetBackoff();
   }
 }
 
@@ -337,8 +337,8 @@ void SubchannelData<SubchannelListType,
   GPR_ASSERT(!connectivity_notification_pending_);
   connectivity_notification_pending_ = true;
   subchannel_list()->Ref(DEBUG_LOCATION, "connectivity_watch").release();
-  grpc_subchannel_notify_on_state_change(
-      subchannel_, subchannel_list_->policy()->interested_parties(),
+  subchannel_->NotifyOnStateChange(
+      subchannel_list_->policy()->interested_parties(),
       &pending_connectivity_state_unsafe_, &connectivity_changed_closure_,
       subchannel_list_->inhibit_health_checking());
 }
@@ -357,8 +357,8 @@ void SubchannelData<SubchannelListType,
             grpc_connectivity_state_name(pending_connectivity_state_unsafe_));
   }
   GPR_ASSERT(connectivity_notification_pending_);
-  grpc_subchannel_notify_on_state_change(
-      subchannel_, subchannel_list_->policy()->interested_parties(),
+  subchannel_->NotifyOnStateChange(
+      subchannel_list_->policy()->interested_parties(),
       &pending_connectivity_state_unsafe_, &connectivity_changed_closure_,
       subchannel_list_->inhibit_health_checking());
 }
@@ -391,9 +391,9 @@ void SubchannelData<SubchannelListType, SubchannelDataType>::
             subchannel_, reason);
   }
   GPR_ASSERT(connectivity_notification_pending_);
-  grpc_subchannel_notify_on_state_change(
-      subchannel_, nullptr, nullptr, &connectivity_changed_closure_,
-      subchannel_list_->inhibit_health_checking());
+  subchannel_->NotifyOnStateChange(nullptr, nullptr,
+                                   &connectivity_changed_closure_,
+                                   subchannel_list_->inhibit_health_checking());
 }
 
 template <typename SubchannelListType, typename SubchannelDataType>
@@ -401,8 +401,7 @@ bool SubchannelData<SubchannelListType,
                     SubchannelDataType>::UpdateConnectedSubchannelLocked() {
   // If the subchannel is READY, take a ref to the connected subchannel.
   if (pending_connectivity_state_unsafe_ == GRPC_CHANNEL_READY) {
-    connected_subchannel_ =
-        grpc_subchannel_get_connected_subchannel(subchannel_);
+    connected_subchannel_ = subchannel_->connected_subchannel();
     // If the subchannel became disconnected between the time that READY
     // was reported and the time we got here (e.g., between when a
     // notification callback is scheduled and when it was actually run in
@@ -514,11 +513,11 @@ SubchannelList<SubchannelListType, SubchannelDataType>::SubchannelList(
     // policy, which does not use a SubchannelList.
     GPR_ASSERT(!addresses[i].IsBalancer());
     InlinedVector<grpc_arg, 4> args_to_add;
-    args_to_add.emplace_back(SubchannelPoolInterface::CreateChannelArg(
-        policy_->subchannel_pool()->get()));
+    args_to_add.emplace_back(
+        SubchannelPoolInterface::CreateChannelArg(policy_->subchannel_pool()));
     const size_t subchannel_address_arg_index = args_to_add.size();
     args_to_add.emplace_back(
-        grpc_create_subchannel_address_arg(&addresses[i].address()));
+        Subchannel::CreateSubchannelAddressArg(&addresses[i].address()));
     if (addresses[i].args() != nullptr) {
       for (size_t j = 0; j < addresses[i].args()->num_args; ++j) {
         args_to_add.emplace_back(addresses[i].args()->args[j]);
@@ -528,7 +527,7 @@ SubchannelList<SubchannelListType, SubchannelDataType>::SubchannelList(
         &args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove),
         args_to_add.data(), args_to_add.size());
     gpr_free(args_to_add[subchannel_address_arg_index].value.string);
-    grpc_subchannel* subchannel = grpc_client_channel_factory_create_subchannel(
+    Subchannel* subchannel = grpc_client_channel_factory_create_subchannel(
         client_channel_factory, new_args);
     grpc_channel_args_destroy(new_args);
     if (subchannel == nullptr) {

+ 80 - 13
src/core/ext/filters/client_channel/lb_policy/xds/xds.cc

@@ -100,6 +100,7 @@
 #include "src/core/lib/surface/call.h"
 #include "src/core/lib/surface/channel.h"
 #include "src/core/lib/surface/channel_init.h"
+#include "src/core/lib/transport/service_config.h"
 #include "src/core/lib/transport/static_metadata.h"
 
 #define GRPC_XDS_INITIAL_CONNECT_BACKOFF_SECONDS 1
@@ -118,7 +119,7 @@ constexpr char kXds[] = "xds_experimental";
 
 class XdsLb : public LoadBalancingPolicy {
  public:
-  explicit XdsLb(const Args& args);
+  explicit XdsLb(Args args);
 
   const char* name() const override { return kXds; }
 
@@ -247,6 +248,12 @@ class XdsLb : public LoadBalancingPolicy {
   // Helper function used in ctor and UpdateLocked().
   void ProcessChannelArgsLocked(const grpc_channel_args& args);
 
+  // Parses the xds config given the JSON node of the first child of XdsConfig.
+  // If parsing succeeds, updates \a balancer_name_, and updates \a
+  // child_policy_json_string_ and \a fallback_policy_json_string_ if they are
+  // also found. Does nothing upon failure.
+  void ParseLbConfig(grpc_json* xds_config_json);
+
   // Methods for dealing with the balancer channel and call.
   void StartPickingLocked();
   void StartBalancerCallLocked();
@@ -265,7 +272,7 @@ class XdsLb : public LoadBalancingPolicy {
   // Methods for dealing with the child policy.
   void CreateOrUpdateChildPolicyLocked();
   grpc_channel_args* CreateChildPolicyArgsLocked();
-  void CreateChildPolicyLocked(const Args& args);
+  void CreateChildPolicyLocked(const char* name, Args args);
   bool PickFromChildPolicyLocked(bool force_async, PendingPick* pp,
                                  grpc_error** error);
   void UpdateConnectivityStateFromChildPolicyLocked(
@@ -278,6 +285,9 @@ class XdsLb : public LoadBalancingPolicy {
   // Who the client is trying to communicate with.
   const char* server_name_ = nullptr;
 
+  // Name of the balancer to connect to.
+  UniquePtr<char> balancer_name_;
+
   // Current channel args from the resolver.
   grpc_channel_args* args_ = nullptr;
 
@@ -318,6 +328,7 @@ class XdsLb : public LoadBalancingPolicy {
 
   // Timeout in milliseconds for before using fallback backend addresses.
   // 0 means not using fallback.
+  UniquePtr<char> fallback_policy_json_string_;
   int lb_fallback_timeout_ms_ = 0;
   // The backend addresses from the resolver.
   UniquePtr<ServerAddressList> fallback_backend_addresses_;
@@ -331,6 +342,7 @@ class XdsLb : public LoadBalancingPolicy {
 
   // The policy to use for the backends.
   OrphanablePtr<LoadBalancingPolicy> child_policy_;
+  UniquePtr<char> child_policy_json_string_;
   grpc_connectivity_state child_connectivity_state_;
   grpc_closure on_child_connectivity_changed_;
   grpc_closure on_child_request_reresolution_;
@@ -892,8 +904,8 @@ grpc_channel_args* BuildBalancerChannelArgs(
 //
 
 // TODO(vishalpowar): Use lb_config in args to configure LB policy.
-XdsLb::XdsLb(const LoadBalancingPolicy::Args& args)
-    : LoadBalancingPolicy(args),
+XdsLb::XdsLb(LoadBalancingPolicy::Args args)
+    : LoadBalancingPolicy(std::move(args)),
       response_generator_(MakeRefCounted<FakeResolverResponseGenerator>()),
       lb_call_backoff_(
           BackOff::Options()
@@ -934,6 +946,8 @@ XdsLb::XdsLb(const LoadBalancingPolicy::Args& args)
   arg = grpc_channel_args_find(args.args, GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS);
   lb_fallback_timeout_ms_ = grpc_channel_arg_get_integer(
       arg, {GRPC_XDS_DEFAULT_FALLBACK_TIMEOUT_MS, 0, INT_MAX});
+  // Parse the LB config.
+  ParseLbConfig(args.lb_config);
   // Process channel args.
   ProcessChannelArgsLocked(*args.args);
 }
@@ -1184,8 +1198,44 @@ void XdsLb::ProcessChannelArgsLocked(const grpc_channel_args& args) {
   grpc_channel_args_destroy(lb_channel_args);
 }
 
-// TODO(vishalpowar): Use lb_config to configure LB policy.
+void XdsLb::ParseLbConfig(grpc_json* xds_config_json) {
+  const char* balancer_name = nullptr;
+  grpc_json* child_policy = nullptr;
+  grpc_json* fallback_policy = nullptr;
+  for (grpc_json* field = xds_config_json; field != nullptr;
+       field = field->next) {
+    if (field->key == nullptr) return;
+    if (strcmp(field->key, "balancerName") == 0) {
+      if (balancer_name != nullptr) return;  // Duplicate.
+      if (field->type != GRPC_JSON_STRING) return;
+      balancer_name = field->value;
+    } else if (strcmp(field->key, "childPolicy") == 0) {
+      if (child_policy != nullptr) return;  // Duplicate.
+      child_policy = ParseLoadBalancingConfig(field);
+    } else if (strcmp(field->key, "fallbackPolicy") == 0) {
+      if (fallback_policy != nullptr) return;  // Duplicate.
+      fallback_policy = ParseLoadBalancingConfig(field);
+    }
+  }
+  if (balancer_name == nullptr) return;  // Required field.
+  if (child_policy != nullptr) {
+    child_policy_json_string_ =
+        UniquePtr<char>(grpc_json_dump_to_string(child_policy, 0 /* indent */));
+  }
+  if (fallback_policy != nullptr) {
+    fallback_policy_json_string_ = UniquePtr<char>(
+        grpc_json_dump_to_string(fallback_policy, 0 /* indent */));
+  }
+  balancer_name_ = UniquePtr<char>(gpr_strdup(balancer_name));
+}
+
 void XdsLb::UpdateLocked(const grpc_channel_args& args, grpc_json* lb_config) {
+  ParseLbConfig(lb_config);
+  // TODO(juanlishen): Pass fallback policy config update after fallback policy
+  // is added.
+  if (balancer_name_ == nullptr) {
+    gpr_log(GPR_ERROR, "[xdslb %p] LB config parsing failed.", this);
+  }
   ProcessChannelArgsLocked(args);
   // Update the existing child policy.
   // Note: We have disabled fallback mode in the code, so this child policy must
@@ -1436,10 +1486,10 @@ bool XdsLb::PickFromChildPolicyLocked(bool force_async, PendingPick* pp,
   return pick_done;
 }
 
-void XdsLb::CreateChildPolicyLocked(const Args& args) {
+void XdsLb::CreateChildPolicyLocked(const char* name, Args args) {
   GPR_ASSERT(child_policy_ == nullptr);
   child_policy_ = LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
-      "round_robin", args);
+      name, std::move(args));
   if (GPR_UNLIKELY(child_policy_ == nullptr)) {
     gpr_log(GPR_ERROR, "[xdslb %p] Failure creating a child policy", this);
     return;
@@ -1512,26 +1562,43 @@ void XdsLb::CreateOrUpdateChildPolicyLocked() {
   if (shutting_down_) return;
   grpc_channel_args* args = CreateChildPolicyArgsLocked();
   GPR_ASSERT(args != nullptr);
+  const char* child_policy_name = nullptr;
+  grpc_json* child_policy_config = nullptr;
+  grpc_json* child_policy_json =
+      grpc_json_parse_string(child_policy_json_string_.get());
+  // TODO(juanlishen): If the child policy is not configured via service config,
+  // use whatever algorithm is specified by the balancer.
+  if (child_policy_json != nullptr) {
+    child_policy_name = child_policy_json->key;
+    child_policy_config = child_policy_json->child;
+  } else {
+    if (grpc_lb_xds_trace.enabled()) {
+      gpr_log(GPR_INFO, "[xdslb %p] No valid child policy LB config", this);
+    }
+    child_policy_name = "round_robin";
+  }
+  // TODO(juanlishen): Switch policy according to child_policy_config->key.
   if (child_policy_ != nullptr) {
     if (grpc_lb_xds_trace.enabled()) {
       gpr_log(GPR_INFO, "[xdslb %p] Updating the child policy %p", this,
               child_policy_.get());
     }
-    // TODO(vishalpowar): Pass the correct LB config.
-    child_policy_->UpdateLocked(*args, nullptr);
+    child_policy_->UpdateLocked(*args, child_policy_config);
   } else {
     LoadBalancingPolicy::Args lb_policy_args;
     lb_policy_args.combiner = combiner();
     lb_policy_args.client_channel_factory = client_channel_factory();
-    lb_policy_args.subchannel_pool = subchannel_pool();
+    lb_policy_args.subchannel_pool = subchannel_pool()->Ref();
     lb_policy_args.args = args;
-    CreateChildPolicyLocked(lb_policy_args);
+    lb_policy_args.lb_config = child_policy_config;
+    CreateChildPolicyLocked(child_policy_name, std::move(lb_policy_args));
     if (grpc_lb_xds_trace.enabled()) {
       gpr_log(GPR_INFO, "[xdslb %p] Created a new child policy %p", this,
               child_policy_.get());
     }
   }
   grpc_channel_args_destroy(args);
+  grpc_json_destroy(child_policy_json);
 }
 
 void XdsLb::OnChildPolicyRequestReresolutionLocked(void* arg,
@@ -1637,7 +1704,7 @@ void XdsLb::OnChildPolicyConnectivityChangedLocked(void* arg,
 class XdsFactory : public LoadBalancingPolicyFactory {
  public:
   OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
-      const LoadBalancingPolicy::Args& args) const override {
+      LoadBalancingPolicy::Args args) const override {
     /* Count the number of gRPC-LB addresses. There must be at least one. */
     const ServerAddressList* addresses =
         FindServerAddressListChannelArg(args.args);
@@ -1650,7 +1717,7 @@ class XdsFactory : public LoadBalancingPolicyFactory {
       }
     }
     if (!found_balancer_address) return nullptr;
-    return OrphanablePtr<LoadBalancingPolicy>(New<XdsLb>(args));
+    return OrphanablePtr<LoadBalancingPolicy>(New<XdsLb>(std::move(args)));
   }
 
   const char* name() const override { return kXds; }
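
For reference, a hedged example of the configuration shape ParseLbConfig() accepts for xds_experimental. The field names ("balancerName", "childPolicy", "fallbackPolicy") come straight from the parsing code above; the concrete values are invented, and the two policy fields follow the one-key-per-object LoadBalancingConfig convention consumed by ParseLoadBalancingConfig(). balancerName is required; the other two are optional.

// Illustrative xds_experimental config (invented values), as it would appear
// inside a service config's "loadBalancingConfig" entry:
//   { "xds_experimental": { ...the object below... } }
const char* kXdsLbConfigExample =
    "{"
    "  \"balancerName\": \"dns:///balancer.example.com:8080\","
    "  \"childPolicy\": [ { \"round_robin\": {} } ],"
    "  \"fallbackPolicy\": [ { \"pick_first\": {} } ]"
    "}";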

+ 6 - 1
src/core/ext/filters/client_channel/lb_policy_factory.h

@@ -31,7 +31,12 @@ class LoadBalancingPolicyFactory {
  public:
   /// Returns a new LB policy instance.
   virtual OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
-      const LoadBalancingPolicy::Args& args) const GRPC_ABSTRACT;
+      LoadBalancingPolicy::Args args) const {
+    std::move(args);  // Suppress clang-tidy complaint.
+    // The rest of this is copied from the GRPC_ABSTRACT macro.
+    gpr_log(GPR_ERROR, "Function marked GRPC_ABSTRACT was not implemented");
+    GPR_ASSERT(false);
+  }
 
   /// Returns the LB policy name that this factory provides.
   /// Caller does NOT take ownership of result.

+ 2 - 2
src/core/ext/filters/client_channel/lb_policy_registry.cc

@@ -84,14 +84,14 @@ void LoadBalancingPolicyRegistry::Builder::RegisterLoadBalancingPolicyFactory(
 
 OrphanablePtr<LoadBalancingPolicy>
 LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
-    const char* name, const LoadBalancingPolicy::Args& args) {
+    const char* name, LoadBalancingPolicy::Args args) {
   GPR_ASSERT(g_state != nullptr);
   // Find factory.
   LoadBalancingPolicyFactory* factory =
       g_state->GetLoadBalancingPolicyFactory(name);
   if (factory == nullptr) return nullptr;  // Specified name not found.
   // Create policy via factory.
-  return factory->CreateLoadBalancingPolicy(args);
+  return factory->CreateLoadBalancingPolicy(std::move(args));
 }
 
 bool LoadBalancingPolicyRegistry::LoadBalancingPolicyExists(const char* name) {

+ 1 - 1
src/core/ext/filters/client_channel/lb_policy_registry.h

@@ -46,7 +46,7 @@ class LoadBalancingPolicyRegistry {
 
   /// Creates an LB policy of the type specified by \a name.
   static OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
-      const char* name, const LoadBalancingPolicy::Args& args);
+      const char* name, LoadBalancingPolicy::Args args);
 
   /// Returns true if the LB policy factory specified by \a name exists in this
   /// registry.

+ 7 - 7
src/core/ext/filters/client_channel/local_subchannel_pool.cc

@@ -32,11 +32,11 @@ LocalSubchannelPool::~LocalSubchannelPool() {
   grpc_avl_unref(subchannel_map_, nullptr);
 }
 
-grpc_subchannel* LocalSubchannelPool::RegisterSubchannel(
-    SubchannelKey* key, grpc_subchannel* constructed) {
+Subchannel* LocalSubchannelPool::RegisterSubchannel(SubchannelKey* key,
+                                                    Subchannel* constructed) {
   // Check to see if a subchannel already exists.
-  grpc_subchannel* c = static_cast<grpc_subchannel*>(
-      grpc_avl_get(subchannel_map_, key, nullptr));
+  Subchannel* c =
+      static_cast<Subchannel*>(grpc_avl_get(subchannel_map_, key, nullptr));
   if (c != nullptr) {
     // The subchannel already exists. Reuse it.
     c = GRPC_SUBCHANNEL_REF(c, "subchannel_register+reuse");
@@ -54,9 +54,9 @@ void LocalSubchannelPool::UnregisterSubchannel(SubchannelKey* key) {
   subchannel_map_ = grpc_avl_remove(subchannel_map_, key, nullptr);
 }
 
-grpc_subchannel* LocalSubchannelPool::FindSubchannel(SubchannelKey* key) {
-  grpc_subchannel* c = static_cast<grpc_subchannel*>(
-      grpc_avl_get(subchannel_map_, key, nullptr));
+Subchannel* LocalSubchannelPool::FindSubchannel(SubchannelKey* key) {
+  Subchannel* c =
+      static_cast<Subchannel*>(grpc_avl_get(subchannel_map_, key, nullptr));
   return c == nullptr ? c : GRPC_SUBCHANNEL_REF(c, "found_from_pool");
 }
 

+ 3 - 3
src/core/ext/filters/client_channel/local_subchannel_pool.h

@@ -39,10 +39,10 @@ class LocalSubchannelPool final : public SubchannelPoolInterface {
 
   // Implements interface methods.
   // Thread-unsafe. Intended to be invoked within the client_channel combiner.
-  grpc_subchannel* RegisterSubchannel(SubchannelKey* key,
-                                      grpc_subchannel* constructed) override;
+  Subchannel* RegisterSubchannel(SubchannelKey* key,
+                                 Subchannel* constructed) override;
   void UnregisterSubchannel(SubchannelKey* key) override;
-  grpc_subchannel* FindSubchannel(SubchannelKey* key) override;
+  Subchannel* FindSubchannel(SubchannelKey* key) override;
 
  private:
   // The vtable for subchannel operations in an AVL tree.

+ 1 - 1
src/core/ext/filters/client_channel/request_routing.cc

@@ -676,7 +676,7 @@ void RequestRouter::CreateNewLbPolicyLocked(
   LoadBalancingPolicy::Args lb_policy_args;
   lb_policy_args.combiner = combiner_;
   lb_policy_args.client_channel_factory = client_channel_factory_;
-  lb_policy_args.subchannel_pool = &subchannel_pool_;
+  lb_policy_args.subchannel_pool = subchannel_pool_;
   lb_policy_args.args = resolver_result_;
   lb_policy_args.lb_config = lb_config;
   OrphanablePtr<LoadBalancingPolicy> new_lb_policy =

+ 1 - 2
src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc

@@ -478,8 +478,7 @@ static grpc_address_resolver_vtable ares_resolver = {
     grpc_resolve_address_ares, blocking_resolve_address_ares};
 
 static bool should_use_ares(const char* resolver_env) {
-  return resolver_env == nullptr || strlen(resolver_env) == 0 ||
-         gpr_stricmp(resolver_env, "ares") == 0;
+  return resolver_env != nullptr && gpr_stricmp(resolver_env, "ares") == 0;
 }
 
 void grpc_resolver_dns_ares_init() {

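Note the semantic flip in should_use_ares() above: before this change an unset or empty resolver env var (the resolver_env argument) selected the c-ares resolver by default; afterwards only an explicit, case-insensitive "ares" does, and everything else falls back to the native resolver. A tiny standalone check of the new predicate, with POSIX strcasecmp standing in for the internal gpr_stricmp:

    #include <cstdio>
    #include <strings.h>  // strcasecmp

    // Mirrors the rewritten should_use_ares(): pick c-ares only when the
    // resolver env var is explicitly "ares" (case-insensitive).
    static bool should_use_ares(const char* resolver_env) {
      return resolver_env != nullptr && strcasecmp(resolver_env, "ares") == 0;
    }

    int main() {
      std::printf("unset -> %d\n", should_use_ares(nullptr));  // 0: native
      std::printf("empty -> %d\n", should_use_ares(""));       // 0: native
      std::printf("Ares  -> %d\n", should_use_ares("Ares"));   // 1: c-ares
      return 0;
    }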
+ 2 - 2
src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc

@@ -548,13 +548,13 @@ static grpc_ares_request* grpc_dns_lookup_ares_locked_impl(
       r, name, default_port);
   // Early out if the target is an ipv4 or ipv6 literal.
   if (resolve_as_ip_literal_locked(name, default_port, addrs)) {
-    GRPC_CLOSURE_SCHED(on_done, GRPC_ERROR_NONE);
+    grpc_ares_complete_request_locked(r);
     return r;
   }
   // Early out if the target is localhost and we're on Windows.
   if (grpc_ares_maybe_resolve_localhost_manually_locked(name, default_port,
                                                         addrs)) {
-    GRPC_CLOSURE_SCHED(on_done, GRPC_ERROR_NONE);
+    grpc_ares_complete_request_locked(r);
     return r;
   }
   // Don't query for SRV and TXT records if the target is "localhost", so

+ 7 - 35
src/core/ext/filters/client_channel/resolver_result_parsing.cc

@@ -141,42 +141,14 @@ void ProcessedResolverResult::ParseServiceConfig(
 void ProcessedResolverResult::ParseLbConfigFromServiceConfig(
     const grpc_json* field) {
   if (lb_policy_config_ != nullptr) return;  // Already found.
-  // Find the LB config global parameter.
-  if (field->key == nullptr || strcmp(field->key, "loadBalancingConfig") != 0 ||
-      field->type != GRPC_JSON_ARRAY) {
-    return;  // Not valid lb config array.
+  if (field->key == nullptr || strcmp(field->key, "loadBalancingConfig") != 0) {
+    return;  // Not the LB config global parameter.
   }
-  // Find the first LB policy that this client supports.
-  for (grpc_json* lb_config = field->child; lb_config != nullptr;
-       lb_config = lb_config->next) {
-    if (lb_config->type != GRPC_JSON_OBJECT) return;
-    // Find the policy object.
-    grpc_json* policy = nullptr;
-    for (grpc_json* field = lb_config->child; field != nullptr;
-         field = field->next) {
-      if (field->key == nullptr || strcmp(field->key, "policy") != 0 ||
-          field->type != GRPC_JSON_OBJECT) {
-        return;
-      }
-      if (policy != nullptr) return;  // Duplicate.
-      policy = field;
-    }
-    // Find the specific policy content since the policy object is of type
-    // "oneof".
-    grpc_json* policy_content = nullptr;
-    for (grpc_json* field = policy->child; field != nullptr;
-         field = field->next) {
-      if (field->key == nullptr || field->type != GRPC_JSON_OBJECT) return;
-      if (policy_content != nullptr) return;  // Violate "oneof" type.
-      policy_content = field;
-    }
-    // If we support this policy, then select it.
-    if (grpc_core::LoadBalancingPolicyRegistry::LoadBalancingPolicyExists(
-            policy_content->key)) {
-      lb_policy_name_.reset(gpr_strdup(policy_content->key));
-      lb_policy_config_ = policy_content->child;
-      return;
-    }
+  const grpc_json* policy =
+      LoadBalancingPolicy::ParseLoadBalancingConfig(field);
+  if (policy != nullptr) {
+    lb_policy_name_.reset(gpr_strdup(policy->key));
+    lb_policy_config_ = policy->child;
   }
 }
 

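The inline walk deleted above is still a useful reference for what LoadBalancingPolicy::ParseLoadBalancingConfig() has to accomplish: scan the loadBalancingConfig array and pick the first entry naming a policy this client actually supports. A condensed standalone sketch of that selection logic, with a simplified node type instead of grpc_json and a hard-coded set standing in for the LB policy registry (the old code also unwrapped an intermediate "policy" field, omitted here for brevity):

    #include <set>
    #include <string>

    // Simplified stand-in for grpc_json: a tree of keyed nodes.
    struct Node {
      const char* key;
      Node* child;  // first field of an object / first element of an array
      Node* next;   // next sibling
    };

    // Stand-in for LoadBalancingPolicyRegistry::LoadBalancingPolicyExists().
    static const std::set<std::string> kKnownPolicies = {"round_robin",
                                                         "xds_experimental"};

    // Returns the first entry of the loadBalancingConfig array whose single
    // field names a known policy; nullptr if none is found or the config is
    // malformed. The caller would then use policy->key as the LB policy name
    // and policy->child as its config, as ParseLbConfigFromServiceConfig does.
    const Node* PickLbConfig(const Node* lb_config_array) {
      for (const Node* entry = lb_config_array->child; entry != nullptr;
           entry = entry->next) {
        const Node* policy = entry->child;  // the "oneof" field
        if (policy == nullptr || policy->key == nullptr ||
            policy->next != nullptr) {
          return nullptr;  // malformed entry
        }
        if (kKnownPolicies.count(policy->key) > 0) return policy;
      }
      return nullptr;
    }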
+ 686 - 770
src/core/ext/filters/client_channel/subchannel.cc

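One structural note before the hunks: the rewritten file keeps the old trick of carving the per-call object and its grpc_call_stack out of a single arena allocation, with the stack placed right after the call object at an alignment-rounded offset (see the SUBCHANNEL_CALL_TO_CALL_STACK macro and GetInitialCallSizeEstimate() below). A self-contained sketch of that layout arithmetic with made-up types; the real code uses GPR_ROUND_UP_TO_ALIGNMENT_SIZE and gpr_arena_alloc rather than malloc:

    #include <cstddef>
    #include <cstdlib>
    #include <new>

    // Stand-in for GPR_ROUND_UP_TO_ALIGNMENT_SIZE: round n up to the
    // platform's maximum alignment.
    constexpr size_t RoundUp(size_t n) {
      constexpr size_t a = alignof(std::max_align_t);
      return (n + a - 1) & ~(a - 1);
    }

    struct CallHeader { int refs = 1; };     // stands in for SubchannelCall
    struct CallStack { char data[256]; };    // stands in for grpc_call_stack

    // One block holds the header followed by the stack, so either pointer can
    // be recovered from the other with constant arithmetic.
    CallHeader* AllocateCall() {
      void* block = std::malloc(RoundUp(sizeof(CallHeader)) + sizeof(CallStack));
      CallHeader* call = new (block) CallHeader();
      new (static_cast<char*>(block) + RoundUp(sizeof(CallHeader))) CallStack();
      return call;
    }

    CallStack* CallToStack(CallHeader* call) {   // SUBCHANNEL_CALL_TO_CALL_STACK
      return reinterpret_cast<CallStack*>(
          reinterpret_cast<char*>(call) + RoundUp(sizeof(CallHeader)));
    }

    CallHeader* StackToCall(CallStack* stack) {  // CALL_STACK_TO_SUBCHANNEL_CALL
      return reinterpret_cast<CallHeader*>(
          reinterpret_cast<char*>(stack) - RoundUp(sizeof(CallHeader)));
    }

    int main() {
      CallHeader* call = AllocateCall();
      bool ok = StackToCall(CallToStack(call)) == call;  // round-trip check
      std::free(call);
      return ok ? 0 : 1;
    }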
@@ -44,7 +44,6 @@
 #include "src/core/lib/gprpp/mutex_lock.h"
 #include "src/core/lib/gprpp/ref_counted_ptr.h"
 #include "src/core/lib/iomgr/sockaddr_utils.h"
-#include "src/core/lib/iomgr/timer.h"
 #include "src/core/lib/profiling/timers.h"
 #include "src/core/lib/slice/slice_internal.h"
 #include "src/core/lib/surface/channel.h"
@@ -55,153 +54,256 @@
 #include "src/core/lib/transport/status_metadata.h"
 #include "src/core/lib/uri/uri_parser.h"
 
+// Strong and weak refs.
 #define INTERNAL_REF_BITS 16
 #define STRONG_REF_MASK (~(gpr_atm)((1 << INTERNAL_REF_BITS) - 1))
 
+// Backoff parameters.
 #define GRPC_SUBCHANNEL_INITIAL_CONNECT_BACKOFF_SECONDS 1
 #define GRPC_SUBCHANNEL_RECONNECT_BACKOFF_MULTIPLIER 1.6
 #define GRPC_SUBCHANNEL_RECONNECT_MIN_TIMEOUT_SECONDS 20
 #define GRPC_SUBCHANNEL_RECONNECT_MAX_BACKOFF_SECONDS 120
 #define GRPC_SUBCHANNEL_RECONNECT_JITTER 0.2
 
-typedef struct external_state_watcher {
-  grpc_subchannel* subchannel;
-  grpc_pollset_set* pollset_set;
-  grpc_closure* notify;
-  grpc_closure closure;
-  struct external_state_watcher* next;
-  struct external_state_watcher* prev;
-} external_state_watcher;
+// Conversion between subchannel call and call stack.
+#define SUBCHANNEL_CALL_TO_CALL_STACK(call) \
+  (grpc_call_stack*)((char*)(call) +        \
+                     GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(SubchannelCall)))
+#define CALL_STACK_TO_SUBCHANNEL_CALL(call_stack) \
+  (SubchannelCall*)(((char*)(call_stack)) -      \
+                    GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(SubchannelCall)))
 
 namespace grpc_core {
 
-class ConnectedSubchannelStateWatcher;
-
-}  // namespace grpc_core
+//
+// ConnectedSubchannel
+//
 
-struct grpc_subchannel {
-  /** The subchannel pool this subchannel is in */
-  grpc_core::RefCountedPtr<grpc_core::SubchannelPoolInterface> subchannel_pool;
+ConnectedSubchannel::ConnectedSubchannel(
+    grpc_channel_stack* channel_stack, const grpc_channel_args* args,
+    RefCountedPtr<channelz::SubchannelNode> channelz_subchannel,
+    intptr_t socket_uuid)
+    : RefCounted<ConnectedSubchannel>(&grpc_trace_stream_refcount),
+      channel_stack_(channel_stack),
+      args_(grpc_channel_args_copy(args)),
+      channelz_subchannel_(std::move(channelz_subchannel)),
+      socket_uuid_(socket_uuid) {}
 
-  grpc_connector* connector;
+ConnectedSubchannel::~ConnectedSubchannel() {
+  grpc_channel_args_destroy(args_);
+  GRPC_CHANNEL_STACK_UNREF(channel_stack_, "connected_subchannel_dtor");
+}
 
-  /** refcount
-      - lower INTERNAL_REF_BITS bits are for internal references:
-        these do not keep the subchannel open.
-      - upper remaining bits are for public references: these do
-        keep the subchannel open */
-  gpr_atm ref_pair;
+void ConnectedSubchannel::NotifyOnStateChange(
+    grpc_pollset_set* interested_parties, grpc_connectivity_state* state,
+    grpc_closure* closure) {
+  grpc_transport_op* op = grpc_make_transport_op(nullptr);
+  grpc_channel_element* elem;
+  op->connectivity_state = state;
+  op->on_connectivity_state_change = closure;
+  op->bind_pollset_set = interested_parties;
+  elem = grpc_channel_stack_element(channel_stack_, 0);
+  elem->filter->start_transport_op(elem, op);
+}
 
-  /** channel arguments */
-  grpc_channel_args* args;
+void ConnectedSubchannel::Ping(grpc_closure* on_initiate,
+                               grpc_closure* on_ack) {
+  grpc_transport_op* op = grpc_make_transport_op(nullptr);
+  grpc_channel_element* elem;
+  op->send_ping.on_initiate = on_initiate;
+  op->send_ping.on_ack = on_ack;
+  elem = grpc_channel_stack_element(channel_stack_, 0);
+  elem->filter->start_transport_op(elem, op);
+}
 
-  grpc_core::SubchannelKey* key;
+namespace {
 
-  /** set during connection */
-  grpc_connect_out_args connecting_result;
+void SubchannelCallDestroy(void* arg, grpc_error* error) {
+  GPR_TIMER_SCOPE("subchannel_call_destroy", 0);
+  SubchannelCall* call = static_cast<SubchannelCall*>(arg);
+  grpc_closure* after_call_stack_destroy = call->after_call_stack_destroy();
+  call->~SubchannelCall();
+  // This should be the last step to destroy the subchannel call, because
+  // call->after_call_stack_destroy(), if not null, will free the call arena.
+  grpc_call_stack_destroy(SUBCHANNEL_CALL_TO_CALL_STACK(call), nullptr,
+                          after_call_stack_destroy);
+}
 
-  /** callback for connection finishing */
-  grpc_closure on_connected;
+}  // namespace
 
-  /** callback for our alarm */
-  grpc_closure on_alarm;
+RefCountedPtr<SubchannelCall> ConnectedSubchannel::CreateCall(
+    const CallArgs& args, grpc_error** error) {
+  const size_t allocation_size =
+      GetInitialCallSizeEstimate(args.parent_data_size);
+  RefCountedPtr<SubchannelCall> call(
+      new (gpr_arena_alloc(args.arena, allocation_size))
+          SubchannelCall(Ref(DEBUG_LOCATION, "subchannel_call"), args));
+  grpc_call_stack* callstk = SUBCHANNEL_CALL_TO_CALL_STACK(call.get());
+  const grpc_call_element_args call_args = {
+      callstk,           /* call_stack */
+      nullptr,           /* server_transport_data */
+      args.context,      /* context */
+      args.path,         /* path */
+      args.start_time,   /* start_time */
+      args.deadline,     /* deadline */
+      args.arena,        /* arena */
+      args.call_combiner /* call_combiner */
+  };
+  *error = grpc_call_stack_init(channel_stack_, 1, SubchannelCallDestroy,
+                                call.get(), &call_args);
+  if (GPR_UNLIKELY(*error != GRPC_ERROR_NONE)) {
+    const char* error_string = grpc_error_string(*error);
+    gpr_log(GPR_ERROR, "error: %s", error_string);
+    return call;
+  }
+  grpc_call_stack_set_pollset_or_pollset_set(callstk, args.pollent);
+  if (channelz_subchannel_ != nullptr) {
+    channelz_subchannel_->RecordCallStarted();
+  }
+  return call;
+}
 
-  /** pollset_set tracking who's interested in a connection
-      being setup */
-  grpc_pollset_set* pollset_set;
+size_t ConnectedSubchannel::GetInitialCallSizeEstimate(
+    size_t parent_data_size) const {
+  size_t allocation_size =
+      GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(SubchannelCall));
+  if (parent_data_size > 0) {
+    allocation_size +=
+        GPR_ROUND_UP_TO_ALIGNMENT_SIZE(channel_stack_->call_stack_size) +
+        parent_data_size;
+  } else {
+    allocation_size += channel_stack_->call_stack_size;
+  }
+  return allocation_size;
+}
 
-  grpc_core::UniquePtr<char> health_check_service_name;
+//
+// SubchannelCall
+//
 
-  /** mutex protecting remaining elements */
-  gpr_mu mu;
+void SubchannelCall::StartTransportStreamOpBatch(
+    grpc_transport_stream_op_batch* batch) {
+  GPR_TIMER_SCOPE("subchannel_call_process_op", 0);
+  MaybeInterceptRecvTrailingMetadata(batch);
+  grpc_call_stack* call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(this);
+  grpc_call_element* top_elem = grpc_call_stack_element(call_stack, 0);
+  GRPC_CALL_LOG_OP(GPR_INFO, top_elem, batch);
+  top_elem->filter->start_transport_stream_op_batch(top_elem, batch);
+}
 
-  /** active connection, or null */
-  grpc_core::RefCountedPtr<grpc_core::ConnectedSubchannel> connected_subchannel;
-  grpc_core::OrphanablePtr<grpc_core::ConnectedSubchannelStateWatcher>
-      connected_subchannel_watcher;
+void* SubchannelCall::GetParentData() {
+  grpc_channel_stack* chanstk = connected_subchannel_->channel_stack();
+  return (char*)this + GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(SubchannelCall)) +
+         GPR_ROUND_UP_TO_ALIGNMENT_SIZE(chanstk->call_stack_size);
+}
 
-  /** have we seen a disconnection? */
-  bool disconnected;
-  /** are we connecting */
-  bool connecting;
+grpc_call_stack* SubchannelCall::GetCallStack() {
+  return SUBCHANNEL_CALL_TO_CALL_STACK(this);
+}
 
-  /** connectivity state tracking */
-  grpc_connectivity_state_tracker state_tracker;
-  grpc_connectivity_state_tracker state_and_health_tracker;
+void SubchannelCall::SetAfterCallStackDestroy(grpc_closure* closure) {
+  GPR_ASSERT(after_call_stack_destroy_ == nullptr);
+  GPR_ASSERT(closure != nullptr);
+  after_call_stack_destroy_ = closure;
+}
 
-  external_state_watcher root_external_state_watcher;
+RefCountedPtr<SubchannelCall> SubchannelCall::Ref() {
+  IncrementRefCount();
+  return RefCountedPtr<SubchannelCall>(this);
+}
 
-  /** backoff state */
-  grpc_core::ManualConstructor<grpc_core::BackOff> backoff;
-  grpc_millis next_attempt_deadline;
-  grpc_millis min_connect_timeout_ms;
+RefCountedPtr<SubchannelCall> SubchannelCall::Ref(
+    const grpc_core::DebugLocation& location, const char* reason) {
+  IncrementRefCount(location, reason);
+  return RefCountedPtr<SubchannelCall>(this);
+}
 
-  /** do we have an active alarm? */
-  bool have_alarm;
-  /** have we started the backoff loop */
-  bool backoff_begun;
-  // reset_backoff() was called while alarm was pending
-  bool retry_immediately;
-  /** our alarm */
-  grpc_timer alarm;
+void SubchannelCall::Unref() {
+  GRPC_CALL_STACK_UNREF(SUBCHANNEL_CALL_TO_CALL_STACK(this), "");
+}
 
-  grpc_core::RefCountedPtr<grpc_core::channelz::SubchannelNode>
-      channelz_subchannel;
-};
+void SubchannelCall::Unref(const DebugLocation& location, const char* reason) {
+  GRPC_CALL_STACK_UNREF(SUBCHANNEL_CALL_TO_CALL_STACK(this), reason);
+}
 
-struct grpc_subchannel_call {
-  grpc_subchannel_call(grpc_core::ConnectedSubchannel* connection,
-                       const grpc_core::ConnectedSubchannel::CallArgs& args)
-      : connection(connection), deadline(args.deadline) {}
-
-  grpc_core::ConnectedSubchannel* connection;
-  grpc_closure* schedule_closure_after_destroy = nullptr;
-  // state needed to support channelz interception of recv trailing metadata.
-  grpc_closure recv_trailing_metadata_ready;
-  grpc_closure* original_recv_trailing_metadata;
-  grpc_metadata_batch* recv_trailing_metadata = nullptr;
-  grpc_millis deadline;
-};
+void SubchannelCall::MaybeInterceptRecvTrailingMetadata(
+    grpc_transport_stream_op_batch* batch) {
+  // only intercept payloads with recv trailing.
+  if (!batch->recv_trailing_metadata) {
+    return;
+  }
+  // Only add the interceptor if channelz is enabled.
+  if (connected_subchannel_->channelz_subchannel() == nullptr) {
+    return;
+  }
+  GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready_, RecvTrailingMetadataReady,
+                    this, grpc_schedule_on_exec_ctx);
+  // save some state needed for the interception callback.
+  GPR_ASSERT(recv_trailing_metadata_ == nullptr);
+  recv_trailing_metadata_ =
+      batch->payload->recv_trailing_metadata.recv_trailing_metadata;
+  original_recv_trailing_metadata_ =
+      batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready;
+  batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready =
+      &recv_trailing_metadata_ready_;
+}
 
-static void maybe_start_connecting_locked(grpc_subchannel* c);
+namespace {
 
-static const char* subchannel_connectivity_state_change_string(
-    grpc_connectivity_state state) {
-  switch (state) {
-    case GRPC_CHANNEL_IDLE:
-      return "Subchannel state change to IDLE";
-    case GRPC_CHANNEL_CONNECTING:
-      return "Subchannel state change to CONNECTING";
-    case GRPC_CHANNEL_READY:
-      return "Subchannel state change to READY";
-    case GRPC_CHANNEL_TRANSIENT_FAILURE:
-      return "Subchannel state change to TRANSIENT_FAILURE";
-    case GRPC_CHANNEL_SHUTDOWN:
-      return "Subchannel state change to SHUTDOWN";
+// Sets *status based on the rest of the parameters.
+void GetCallStatus(grpc_status_code* status, grpc_millis deadline,
+                   grpc_metadata_batch* md_batch, grpc_error* error) {
+  if (error != GRPC_ERROR_NONE) {
+    grpc_error_get_status(error, deadline, status, nullptr, nullptr, nullptr);
+  } else {
+    if (md_batch->idx.named.grpc_status != nullptr) {
+      *status = grpc_get_status_code_from_metadata(
+          md_batch->idx.named.grpc_status->md);
+    } else {
+      *status = GRPC_STATUS_UNKNOWN;
+    }
   }
-  GPR_UNREACHABLE_CODE(return "UNKNOWN");
+  GRPC_ERROR_UNREF(error);
 }
 
-static void set_subchannel_connectivity_state_locked(
-    grpc_subchannel* c, grpc_connectivity_state state, grpc_error* error,
-    const char* reason) {
-  if (c->channelz_subchannel != nullptr) {
-    c->channelz_subchannel->AddTraceEvent(
-        grpc_core::channelz::ChannelTrace::Severity::Info,
-        grpc_slice_from_static_string(
-            subchannel_connectivity_state_change_string(state)));
+}  // namespace
+
+void SubchannelCall::RecvTrailingMetadataReady(void* arg, grpc_error* error) {
+  SubchannelCall* call = static_cast<SubchannelCall*>(arg);
+  GPR_ASSERT(call->recv_trailing_metadata_ != nullptr);
+  grpc_status_code status = GRPC_STATUS_OK;
+  GetCallStatus(&status, call->deadline_, call->recv_trailing_metadata_,
+                GRPC_ERROR_REF(error));
+  channelz::SubchannelNode* channelz_subchannel =
+      call->connected_subchannel_->channelz_subchannel();
+  GPR_ASSERT(channelz_subchannel != nullptr);
+  if (status == GRPC_STATUS_OK) {
+    channelz_subchannel->RecordCallSucceeded();
+  } else {
+    channelz_subchannel->RecordCallFailed();
   }
-  grpc_connectivity_state_set(&c->state_tracker, state, error, reason);
+  GRPC_CLOSURE_RUN(call->original_recv_trailing_metadata_,
+                   GRPC_ERROR_REF(error));
 }
 
-namespace grpc_core {
+void SubchannelCall::IncrementRefCount() {
+  GRPC_CALL_STACK_REF(SUBCHANNEL_CALL_TO_CALL_STACK(this), "");
+}
 
-class ConnectedSubchannelStateWatcher
+void SubchannelCall::IncrementRefCount(const grpc_core::DebugLocation& location,
+                                       const char* reason) {
+  GRPC_CALL_STACK_REF(SUBCHANNEL_CALL_TO_CALL_STACK(this), reason);
+}
+
+//
+// Subchannel::ConnectedSubchannelStateWatcher
+//
+
+class Subchannel::ConnectedSubchannelStateWatcher
     : public InternallyRefCounted<ConnectedSubchannelStateWatcher> {
  public:
   // Must be instantiated while holding c->mu.
-  explicit ConnectedSubchannelStateWatcher(grpc_subchannel* c)
-      : subchannel_(c) {
+  explicit ConnectedSubchannelStateWatcher(Subchannel* c) : subchannel_(c) {
     // Steal subchannel ref for connecting.
     GRPC_SUBCHANNEL_WEAK_REF(subchannel_, "state_watcher");
     GRPC_SUBCHANNEL_WEAK_UNREF(subchannel_, "connecting");
@@ -209,15 +311,15 @@ class ConnectedSubchannelStateWatcher
     // Callback uses initial ref to this.
     GRPC_CLOSURE_INIT(&on_connectivity_changed_, OnConnectivityChanged, this,
                       grpc_schedule_on_exec_ctx);
-    c->connected_subchannel->NotifyOnStateChange(c->pollset_set,
-                                                 &pending_connectivity_state_,
-                                                 &on_connectivity_changed_);
+    c->connected_subchannel_->NotifyOnStateChange(c->pollset_set_,
+                                                  &pending_connectivity_state_,
+                                                  &on_connectivity_changed_);
     // Start health check if needed.
     grpc_connectivity_state health_state = GRPC_CHANNEL_READY;
-    if (c->health_check_service_name != nullptr) {
-      health_check_client_ = grpc_core::MakeOrphanable<HealthCheckClient>(
-          c->health_check_service_name.get(), c->connected_subchannel,
-          c->pollset_set, c->channelz_subchannel);
+    if (c->health_check_service_name_ != nullptr) {
+      health_check_client_ = MakeOrphanable<HealthCheckClient>(
+          c->health_check_service_name_.get(), c->connected_subchannel_,
+          c->pollset_set_, c->channelz_node_);
       GRPC_CLOSURE_INIT(&on_health_changed_, OnHealthChanged, this,
                         grpc_schedule_on_exec_ctx);
       Ref().release();  // Ref for health callback tracked manually.
@@ -226,9 +328,9 @@ class ConnectedSubchannelStateWatcher
       health_state = GRPC_CHANNEL_CONNECTING;
     }
     // Report initial state.
-    set_subchannel_connectivity_state_locked(
-        c, GRPC_CHANNEL_READY, GRPC_ERROR_NONE, "subchannel_connected");
-    grpc_connectivity_state_set(&c->state_and_health_tracker, health_state,
+    c->SetConnectivityStateLocked(GRPC_CHANNEL_READY, GRPC_ERROR_NONE,
+                                  "subchannel_connected");
+    grpc_connectivity_state_set(&c->state_and_health_tracker_, health_state,
                                 GRPC_ERROR_NONE, "subchannel_connected");
   }
 
@@ -236,38 +338,39 @@ class ConnectedSubchannelStateWatcher
     GRPC_SUBCHANNEL_WEAK_UNREF(subchannel_, "state_watcher");
   }
 
+  // Must be called while holding subchannel_->mu.
   void Orphan() override { health_check_client_.reset(); }
 
  private:
   static void OnConnectivityChanged(void* arg, grpc_error* error) {
     auto* self = static_cast<ConnectedSubchannelStateWatcher*>(arg);
-    grpc_subchannel* c = self->subchannel_;
+    Subchannel* c = self->subchannel_;
     {
-      MutexLock lock(&c->mu);
+      MutexLock lock(&c->mu_);
       switch (self->pending_connectivity_state_) {
         case GRPC_CHANNEL_TRANSIENT_FAILURE:
         case GRPC_CHANNEL_SHUTDOWN: {
-          if (!c->disconnected && c->connected_subchannel != nullptr) {
+          if (!c->disconnected_ && c->connected_subchannel_ != nullptr) {
             if (grpc_trace_stream_refcount.enabled()) {
               gpr_log(GPR_INFO,
                       "Connected subchannel %p of subchannel %p has gone into "
                       "%s. Attempting to reconnect.",
-                      c->connected_subchannel.get(), c,
+                      c->connected_subchannel_.get(), c,
                       grpc_connectivity_state_name(
                           self->pending_connectivity_state_));
             }
-            c->connected_subchannel.reset();
-            c->connected_subchannel_watcher.reset();
+            c->connected_subchannel_.reset();
+            c->connected_subchannel_watcher_.reset();
             self->last_connectivity_state_ = GRPC_CHANNEL_TRANSIENT_FAILURE;
-            set_subchannel_connectivity_state_locked(
-                c, GRPC_CHANNEL_TRANSIENT_FAILURE, GRPC_ERROR_REF(error),
-                "reflect_child");
-            grpc_connectivity_state_set(&c->state_and_health_tracker,
+            c->SetConnectivityStateLocked(GRPC_CHANNEL_TRANSIENT_FAILURE,
+                                          GRPC_ERROR_REF(error),
+                                          "reflect_child");
+            grpc_connectivity_state_set(&c->state_and_health_tracker_,
                                         GRPC_CHANNEL_TRANSIENT_FAILURE,
                                         GRPC_ERROR_REF(error), "reflect_child");
-            c->backoff_begun = false;
-            c->backoff->Reset();
-            maybe_start_connecting_locked(c);
+            c->backoff_begun_ = false;
+            c->backoff_.Reset();
+            c->MaybeStartConnectingLocked();
           } else {
             self->last_connectivity_state_ = GRPC_CHANNEL_SHUTDOWN;
           }
@@ -280,15 +383,14 @@ class ConnectedSubchannelStateWatcher
           // this watch from.  And a connected subchannel should never go
           // from READY to CONNECTING or IDLE.
           self->last_connectivity_state_ = self->pending_connectivity_state_;
-          set_subchannel_connectivity_state_locked(
-              c, self->pending_connectivity_state_, GRPC_ERROR_REF(error),
-              "reflect_child");
+          c->SetConnectivityStateLocked(self->pending_connectivity_state_,
+                                        GRPC_ERROR_REF(error), "reflect_child");
           if (self->pending_connectivity_state_ != GRPC_CHANNEL_READY) {
-            grpc_connectivity_state_set(&c->state_and_health_tracker,
+            grpc_connectivity_state_set(&c->state_and_health_tracker_,
                                         self->pending_connectivity_state_,
                                         GRPC_ERROR_REF(error), "reflect_child");
           }
-          c->connected_subchannel->NotifyOnStateChange(
+          c->connected_subchannel_->NotifyOnStateChange(
               nullptr, &self->pending_connectivity_state_,
               &self->on_connectivity_changed_);
           self = nullptr;  // So we don't unref below.
@@ -302,178 +404,82 @@ class ConnectedSubchannelStateWatcher
 
   static void OnHealthChanged(void* arg, grpc_error* error) {
     auto* self = static_cast<ConnectedSubchannelStateWatcher*>(arg);
-    if (self->health_state_ == GRPC_CHANNEL_SHUTDOWN) {
-      self->Unref();
-      return;
-    }
-    grpc_subchannel* c = self->subchannel_;
-    MutexLock lock(&c->mu);
-    if (self->last_connectivity_state_ == GRPC_CHANNEL_READY) {
-      grpc_connectivity_state_set(&c->state_and_health_tracker,
-                                  self->health_state_, GRPC_ERROR_REF(error),
-                                  "health_changed");
+    Subchannel* c = self->subchannel_;
+    {
+      MutexLock lock(&c->mu_);
+      if (self->health_state_ != GRPC_CHANNEL_SHUTDOWN) {
+        if (self->last_connectivity_state_ == GRPC_CHANNEL_READY) {
+          grpc_connectivity_state_set(&c->state_and_health_tracker_,
+                                      self->health_state_,
+                                      GRPC_ERROR_REF(error), "health_changed");
+        }
+        self->health_check_client_->NotifyOnHealthChange(
+            &self->health_state_, &self->on_health_changed_);
+        self = nullptr;  // So we don't unref below.
+      }
     }
-    self->health_check_client_->NotifyOnHealthChange(&self->health_state_,
-                                                     &self->on_health_changed_);
+    // Don't unref until we've released the lock, because this might
+    // cause the subchannel (which contains the lock) to be destroyed.
+    if (self != nullptr) self->Unref();
   }
 
-  grpc_subchannel* subchannel_;
+  Subchannel* subchannel_;
   grpc_closure on_connectivity_changed_;
   grpc_connectivity_state pending_connectivity_state_ = GRPC_CHANNEL_READY;
   grpc_connectivity_state last_connectivity_state_ = GRPC_CHANNEL_READY;
-  grpc_core::OrphanablePtr<grpc_core::HealthCheckClient> health_check_client_;
+  OrphanablePtr<HealthCheckClient> health_check_client_;
   grpc_closure on_health_changed_;
   grpc_connectivity_state health_state_ = GRPC_CHANNEL_CONNECTING;
 };
 
-}  // namespace grpc_core
-
-#define SUBCHANNEL_CALL_TO_CALL_STACK(call)                          \
-  (grpc_call_stack*)((char*)(call) + GPR_ROUND_UP_TO_ALIGNMENT_SIZE( \
-                                         sizeof(grpc_subchannel_call)))
-#define CALLSTACK_TO_SUBCHANNEL_CALL(callstack)           \
-  (grpc_subchannel_call*)(((char*)(call_stack)) -         \
-                          GPR_ROUND_UP_TO_ALIGNMENT_SIZE( \
-                              sizeof(grpc_subchannel_call)))
-
-static void on_subchannel_connected(void* subchannel, grpc_error* error);
-
-#ifndef NDEBUG
-#define REF_REASON reason
-#define REF_MUTATE_EXTRA_ARGS \
-  GRPC_SUBCHANNEL_REF_EXTRA_ARGS, const char* purpose
-#define REF_MUTATE_PURPOSE(x) , file, line, reason, x
-#else
-#define REF_REASON ""
-#define REF_MUTATE_EXTRA_ARGS
-#define REF_MUTATE_PURPOSE(x)
-#endif
-
-/*
- * connection implementation
- */
-
-static void connection_destroy(void* arg, grpc_error* error) {
-  grpc_channel_stack* stk = static_cast<grpc_channel_stack*>(arg);
-  grpc_channel_stack_destroy(stk);
-  gpr_free(stk);
-}
-
-/*
- * grpc_subchannel implementation
- */
-
-static void subchannel_destroy(void* arg, grpc_error* error) {
-  grpc_subchannel* c = static_cast<grpc_subchannel*>(arg);
-  if (c->channelz_subchannel != nullptr) {
-    c->channelz_subchannel->AddTraceEvent(
-        grpc_core::channelz::ChannelTrace::Severity::Info,
-        grpc_slice_from_static_string("Subchannel destroyed"));
-    c->channelz_subchannel->MarkSubchannelDestroyed();
-    c->channelz_subchannel.reset();
-  }
-  c->health_check_service_name.reset();
-  grpc_channel_args_destroy(c->args);
-  grpc_connectivity_state_destroy(&c->state_tracker);
-  grpc_connectivity_state_destroy(&c->state_and_health_tracker);
-  grpc_connector_unref(c->connector);
-  grpc_pollset_set_destroy(c->pollset_set);
-  grpc_core::Delete(c->key);
-  gpr_mu_destroy(&c->mu);
-  gpr_free(c);
-}
+//
+// Subchannel::ExternalStateWatcher
+//
 
-static gpr_atm ref_mutate(grpc_subchannel* c, gpr_atm delta,
-                          int barrier REF_MUTATE_EXTRA_ARGS) {
-  gpr_atm old_val = barrier ? gpr_atm_full_fetch_add(&c->ref_pair, delta)
-                            : gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta);
-#ifndef NDEBUG
-  if (grpc_trace_stream_refcount.enabled()) {
-    gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
-            "SUBCHANNEL: %p %12s 0x%" PRIxPTR " -> 0x%" PRIxPTR " [%s]", c,
-            purpose, old_val, old_val + delta, reason);
+struct Subchannel::ExternalStateWatcher {
+  ExternalStateWatcher(Subchannel* subchannel, grpc_pollset_set* pollset_set,
+                       grpc_closure* notify)
+      : subchannel(subchannel), pollset_set(pollset_set), notify(notify) {
+    GRPC_SUBCHANNEL_WEAK_REF(subchannel, "external_state_watcher+init");
+    GRPC_CLOSURE_INIT(&on_state_changed, OnStateChanged, this,
+                      grpc_schedule_on_exec_ctx);
   }
-#endif
-  return old_val;
-}
-
-grpc_subchannel* grpc_subchannel_ref(
-    grpc_subchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
-  gpr_atm old_refs;
-  old_refs = ref_mutate(c, (1 << INTERNAL_REF_BITS),
-                        0 REF_MUTATE_PURPOSE("STRONG_REF"));
-  GPR_ASSERT((old_refs & STRONG_REF_MASK) != 0);
-  return c;
-}
 
-grpc_subchannel* grpc_subchannel_weak_ref(
-    grpc_subchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
-  gpr_atm old_refs;
-  old_refs = ref_mutate(c, 1, 0 REF_MUTATE_PURPOSE("WEAK_REF"));
-  GPR_ASSERT(old_refs != 0);
-  return c;
-}
-
-grpc_subchannel* grpc_subchannel_ref_from_weak_ref(
-    grpc_subchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
-  if (!c) return nullptr;
-  for (;;) {
-    gpr_atm old_refs = gpr_atm_acq_load(&c->ref_pair);
-    if (old_refs >= (1 << INTERNAL_REF_BITS)) {
-      gpr_atm new_refs = old_refs + (1 << INTERNAL_REF_BITS);
-      if (gpr_atm_rel_cas(&c->ref_pair, old_refs, new_refs)) {
-        return c;
-      }
-    } else {
-      return nullptr;
+  static void OnStateChanged(void* arg, grpc_error* error) {
+    ExternalStateWatcher* w = static_cast<ExternalStateWatcher*>(arg);
+    grpc_closure* follow_up = w->notify;
+    if (w->pollset_set != nullptr) {
+      grpc_pollset_set_del_pollset_set(w->subchannel->pollset_set_,
+                                       w->pollset_set);
     }
+    gpr_mu_lock(&w->subchannel->mu_);
+    if (w->subchannel->external_state_watcher_list_ == w) {
+      w->subchannel->external_state_watcher_list_ = w->next;
+    }
+    if (w->next != nullptr) w->next->prev = w->prev;
+    if (w->prev != nullptr) w->prev->next = w->next;
+    gpr_mu_unlock(&w->subchannel->mu_);
+    GRPC_SUBCHANNEL_WEAK_UNREF(w->subchannel, "external_state_watcher+done");
+    Delete(w);
+    GRPC_CLOSURE_SCHED(follow_up, GRPC_ERROR_REF(error));
   }
-}
 
-static void disconnect(grpc_subchannel* c) {
-  // The subchannel_pool is only used once here in this subchannel, so the
-  // access can be outside of the lock.
-  if (c->subchannel_pool != nullptr) {
-    c->subchannel_pool->UnregisterSubchannel(c->key);
-    c->subchannel_pool.reset();
-  }
-  gpr_mu_lock(&c->mu);
-  GPR_ASSERT(!c->disconnected);
-  c->disconnected = true;
-  grpc_connector_shutdown(c->connector, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
-                                            "Subchannel disconnected"));
-  c->connected_subchannel.reset();
-  c->connected_subchannel_watcher.reset();
-  gpr_mu_unlock(&c->mu);
-}
+  Subchannel* subchannel;
+  grpc_pollset_set* pollset_set;
+  grpc_closure* notify;
+  grpc_closure on_state_changed;
+  ExternalStateWatcher* next = nullptr;
+  ExternalStateWatcher* prev = nullptr;
+};
 
-void grpc_subchannel_unref(grpc_subchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
-  gpr_atm old_refs;
-  // add a weak ref and subtract a strong ref (atomically)
-  old_refs = ref_mutate(
-      c, static_cast<gpr_atm>(1) - static_cast<gpr_atm>(1 << INTERNAL_REF_BITS),
-      1 REF_MUTATE_PURPOSE("STRONG_UNREF"));
-  if ((old_refs & STRONG_REF_MASK) == (1 << INTERNAL_REF_BITS)) {
-    disconnect(c);
-  }
-  GRPC_SUBCHANNEL_WEAK_UNREF(c, "strong-unref");
-}
+//
+// Subchannel
+//
 
-void grpc_subchannel_weak_unref(
-    grpc_subchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
-  gpr_atm old_refs;
-  old_refs = ref_mutate(c, -static_cast<gpr_atm>(1),
-                        1 REF_MUTATE_PURPOSE("WEAK_UNREF"));
-  if (old_refs == 1) {
-    GRPC_CLOSURE_SCHED(
-        GRPC_CLOSURE_CREATE(subchannel_destroy, c, grpc_schedule_on_exec_ctx),
-        GRPC_ERROR_NONE);
-  }
-}
+namespace {
 
-static void parse_args_for_backoff_values(
-    const grpc_channel_args* args, grpc_core::BackOff::Options* backoff_options,
-    grpc_millis* min_connect_timeout_ms) {
+BackOff::Options ParseArgsForBackoffValues(
+    const grpc_channel_args* args, grpc_millis* min_connect_timeout_ms) {
   grpc_millis initial_backoff_ms =
       GRPC_SUBCHANNEL_INITIAL_CONNECT_BACKOFF_SECONDS * 1000;
   *min_connect_timeout_ms =
@@ -510,7 +516,8 @@ static void parse_args_for_backoff_values(
       }
     }
   }
-  backoff_options->set_initial_backoff(initial_backoff_ms)
+  return BackOff::Options()
+      .set_initial_backoff(initial_backoff_ms)
       .set_multiplier(fixed_reconnect_backoff
                           ? 1.0
                           : GRPC_SUBCHANNEL_RECONNECT_BACKOFF_MULTIPLIER)
@@ -519,9 +526,6 @@ static void parse_args_for_backoff_values(
       .set_max_backoff(max_backoff_ms);
 }
 
-namespace grpc_core {
-namespace {
-
 struct HealthCheckParams {
   UniquePtr<char> service_name;
 
@@ -542,31 +546,19 @@ struct HealthCheckParams {
 };
 
 }  // namespace
-}  // namespace grpc_core
 
-grpc_subchannel* grpc_subchannel_create(grpc_connector* connector,
-                                        const grpc_channel_args* args) {
-  grpc_core::SubchannelKey* key =
-      grpc_core::New<grpc_core::SubchannelKey>(args);
-  grpc_core::SubchannelPoolInterface* subchannel_pool =
-      grpc_core::SubchannelPoolInterface::GetSubchannelPoolFromChannelArgs(
-          args);
-  GPR_ASSERT(subchannel_pool != nullptr);
-  grpc_subchannel* c = subchannel_pool->FindSubchannel(key);
-  if (c != nullptr) {
-    grpc_core::Delete(key);
-    return c;
-  }
+Subchannel::Subchannel(SubchannelKey* key, grpc_connector* connector,
+                       const grpc_channel_args* args)
+    : key_(key),
+      connector_(connector),
+      backoff_(ParseArgsForBackoffValues(args, &min_connect_timeout_ms_)) {
   GRPC_STATS_INC_CLIENT_SUBCHANNELS_CREATED();
-  c = static_cast<grpc_subchannel*>(gpr_zalloc(sizeof(*c)));
-  c->key = key;
-  gpr_atm_no_barrier_store(&c->ref_pair, 1 << INTERNAL_REF_BITS);
-  c->connector = connector;
-  grpc_connector_ref(c->connector);
-  c->pollset_set = grpc_pollset_set_create();
+  gpr_atm_no_barrier_store(&ref_pair_, 1 << INTERNAL_REF_BITS);
+  grpc_connector_ref(connector_);
+  pollset_set_ = grpc_pollset_set_create();
   grpc_resolved_address* addr =
       static_cast<grpc_resolved_address*>(gpr_malloc(sizeof(*addr)));
-  grpc_get_subchannel_address_arg(args, addr);
+  GetAddressFromSubchannelAddressArg(args, addr);
   grpc_resolved_address* new_address = nullptr;
   grpc_channel_args* new_args = nullptr;
   if (grpc_proxy_mappers_map_address(addr, args, &new_address, &new_args)) {
@@ -575,568 +567,492 @@ grpc_subchannel* grpc_subchannel_create(grpc_connector* connector,
     addr = new_address;
   }
   static const char* keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS};
-  grpc_arg new_arg = grpc_create_subchannel_address_arg(addr);
+  grpc_arg new_arg = CreateSubchannelAddressArg(addr);
   gpr_free(addr);
-  c->args = grpc_channel_args_copy_and_add_and_remove(
+  args_ = grpc_channel_args_copy_and_add_and_remove(
       new_args != nullptr ? new_args : args, keys_to_remove,
       GPR_ARRAY_SIZE(keys_to_remove), &new_arg, 1);
   gpr_free(new_arg.value.string);
   if (new_args != nullptr) grpc_channel_args_destroy(new_args);
-  c->root_external_state_watcher.next = c->root_external_state_watcher.prev =
-      &c->root_external_state_watcher;
-  GRPC_CLOSURE_INIT(&c->on_connected, on_subchannel_connected, c,
+  GRPC_CLOSURE_INIT(&on_connecting_finished_, OnConnectingFinished, this,
                     grpc_schedule_on_exec_ctx);
-  grpc_connectivity_state_init(&c->state_tracker, GRPC_CHANNEL_IDLE,
+  grpc_connectivity_state_init(&state_tracker_, GRPC_CHANNEL_IDLE,
                                "subchannel");
-  grpc_connectivity_state_init(&c->state_and_health_tracker, GRPC_CHANNEL_IDLE,
+  grpc_connectivity_state_init(&state_and_health_tracker_, GRPC_CHANNEL_IDLE,
                                "subchannel");
-  grpc_core::BackOff::Options backoff_options;
-  parse_args_for_backoff_values(args, &backoff_options,
-                                &c->min_connect_timeout_ms);
-  c->backoff.Init(backoff_options);
-  gpr_mu_init(&c->mu);
-
+  gpr_mu_init(&mu_);
   // Check whether we should enable health checking.
   const char* service_config_json = grpc_channel_arg_get_string(
-      grpc_channel_args_find(c->args, GRPC_ARG_SERVICE_CONFIG));
+      grpc_channel_args_find(args_, GRPC_ARG_SERVICE_CONFIG));
   if (service_config_json != nullptr) {
-    grpc_core::UniquePtr<grpc_core::ServiceConfig> service_config =
-        grpc_core::ServiceConfig::Create(service_config_json);
+    UniquePtr<ServiceConfig> service_config =
+        ServiceConfig::Create(service_config_json);
     if (service_config != nullptr) {
-      grpc_core::HealthCheckParams params;
-      service_config->ParseGlobalParams(grpc_core::HealthCheckParams::Parse,
-                                        &params);
-      c->health_check_service_name = std::move(params.service_name);
+      HealthCheckParams params;
+      service_config->ParseGlobalParams(HealthCheckParams::Parse, &params);
+      health_check_service_name_ = std::move(params.service_name);
     }
   }
-
-  const grpc_arg* arg =
-      grpc_channel_args_find(c->args, GRPC_ARG_ENABLE_CHANNELZ);
-  bool channelz_enabled =
+  const grpc_arg* arg = grpc_channel_args_find(args_, GRPC_ARG_ENABLE_CHANNELZ);
+  const bool channelz_enabled =
       grpc_channel_arg_get_bool(arg, GRPC_ENABLE_CHANNELZ_DEFAULT);
   arg = grpc_channel_args_find(
-      c->args, GRPC_ARG_MAX_CHANNEL_TRACE_EVENT_MEMORY_PER_NODE);
+      args_, GRPC_ARG_MAX_CHANNEL_TRACE_EVENT_MEMORY_PER_NODE);
   const grpc_integer_options options = {
       GRPC_MAX_CHANNEL_TRACE_EVENT_MEMORY_PER_NODE_DEFAULT, 0, INT_MAX};
   size_t channel_tracer_max_memory =
       (size_t)grpc_channel_arg_get_integer(arg, options);
   if (channelz_enabled) {
-    c->channelz_subchannel =
-        grpc_core::MakeRefCounted<grpc_core::channelz::SubchannelNode>(
-            c, channel_tracer_max_memory);
-    c->channelz_subchannel->AddTraceEvent(
-        grpc_core::channelz::ChannelTrace::Severity::Info,
-        grpc_slice_from_static_string("Subchannel created"));
+    channelz_node_ = MakeRefCounted<channelz::SubchannelNode>(
+        this, channel_tracer_max_memory);
+    channelz_node_->AddTraceEvent(
+        channelz::ChannelTrace::Severity::Info,
+        grpc_slice_from_static_string("subchannel created"));
+  }
+}
+
+Subchannel::~Subchannel() {
+  if (channelz_node_ != nullptr) {
+    channelz_node_->AddTraceEvent(
+        channelz::ChannelTrace::Severity::Info,
+        grpc_slice_from_static_string("Subchannel destroyed"));
+    channelz_node_->MarkSubchannelDestroyed();
+  }
+  grpc_channel_args_destroy(args_);
+  grpc_connectivity_state_destroy(&state_tracker_);
+  grpc_connectivity_state_destroy(&state_and_health_tracker_);
+  grpc_connector_unref(connector_);
+  grpc_pollset_set_destroy(pollset_set_);
+  Delete(key_);
+  gpr_mu_destroy(&mu_);
+}
+
+Subchannel* Subchannel::Create(grpc_connector* connector,
+                               const grpc_channel_args* args) {
+  SubchannelKey* key = New<SubchannelKey>(args);
+  SubchannelPoolInterface* subchannel_pool =
+      SubchannelPoolInterface::GetSubchannelPoolFromChannelArgs(args);
+  GPR_ASSERT(subchannel_pool != nullptr);
+  Subchannel* c = subchannel_pool->FindSubchannel(key);
+  if (c != nullptr) {
+    Delete(key);
+    return c;
   }
+  c = New<Subchannel>(key, connector, args);
   // Try to register the subchannel before setting the subchannel pool.
   // Otherwise, in case of a registration race, unreffing c in
-  // RegisterSubchannel() will cause c to be tried to be unregistered, while its
-  // key maps to a different subchannel.
-  grpc_subchannel* registered = subchannel_pool->RegisterSubchannel(key, c);
-  if (registered == c) c->subchannel_pool = subchannel_pool->Ref();
+  // RegisterSubchannel() will trigger an attempt to unregister c while its
+  // key maps to a different subchannel.
+  Subchannel* registered = subchannel_pool->RegisterSubchannel(key, c);
+  if (registered == c) c->subchannel_pool_ = subchannel_pool->Ref();
   return registered;
 }
 
-grpc_core::channelz::SubchannelNode* grpc_subchannel_get_channelz_node(
-    grpc_subchannel* subchannel) {
-  return subchannel->channelz_subchannel.get();
+Subchannel* Subchannel::Ref(GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+  gpr_atm old_refs;
+  old_refs = RefMutate((1 << INTERNAL_REF_BITS),
+                       0 GRPC_SUBCHANNEL_REF_MUTATE_PURPOSE("STRONG_REF"));
+  GPR_ASSERT((old_refs & STRONG_REF_MASK) != 0);
+  return this;
 }
 
-intptr_t grpc_subchannel_get_child_socket_uuid(grpc_subchannel* subchannel) {
-  if (subchannel->connected_subchannel != nullptr) {
-    return subchannel->connected_subchannel->socket_uuid();
-  } else {
-    return 0;
+void Subchannel::Unref(GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+  gpr_atm old_refs;
+  // add a weak ref and subtract a strong ref (atomically)
+  old_refs = RefMutate(
+      static_cast<gpr_atm>(1) - static_cast<gpr_atm>(1 << INTERNAL_REF_BITS),
+      1 GRPC_SUBCHANNEL_REF_MUTATE_PURPOSE("STRONG_UNREF"));
+  if ((old_refs & STRONG_REF_MASK) == (1 << INTERNAL_REF_BITS)) {
+    Disconnect();
   }
+  GRPC_SUBCHANNEL_WEAK_UNREF(this, "strong-unref");
 }
 
-static void continue_connect_locked(grpc_subchannel* c) {
-  grpc_connect_in_args args;
-  args.interested_parties = c->pollset_set;
-  const grpc_millis min_deadline =
-      c->min_connect_timeout_ms + grpc_core::ExecCtx::Get()->Now();
-  c->next_attempt_deadline = c->backoff->NextAttemptTime();
-  args.deadline = std::max(c->next_attempt_deadline, min_deadline);
-  args.channel_args = c->args;
-  set_subchannel_connectivity_state_locked(c, GRPC_CHANNEL_CONNECTING,
-                                           GRPC_ERROR_NONE, "connecting");
-  grpc_connectivity_state_set(&c->state_and_health_tracker,
-                              GRPC_CHANNEL_CONNECTING, GRPC_ERROR_NONE,
-                              "connecting");
-  grpc_connector_connect(c->connector, &args, &c->connecting_result,
-                         &c->on_connected);
+Subchannel* Subchannel::WeakRef(GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+  gpr_atm old_refs;
+  old_refs = RefMutate(1, 0 GRPC_SUBCHANNEL_REF_MUTATE_PURPOSE("WEAK_REF"));
+  GPR_ASSERT(old_refs != 0);
+  return this;
 }
 
-grpc_connectivity_state grpc_subchannel_check_connectivity(
-    grpc_subchannel* c, grpc_error** error, bool inhibit_health_checks) {
-  gpr_mu_lock(&c->mu);
-  grpc_connectivity_state_tracker* tracker =
-      inhibit_health_checks ? &c->state_tracker : &c->state_and_health_tracker;
-  grpc_connectivity_state state = grpc_connectivity_state_get(tracker, error);
-  gpr_mu_unlock(&c->mu);
-  return state;
+namespace {
+
+void subchannel_destroy(void* arg, grpc_error* error) {
+  Subchannel* self = static_cast<Subchannel*>(arg);
+  Delete(self);
 }
 
-static void on_external_state_watcher_done(void* arg, grpc_error* error) {
-  external_state_watcher* w = static_cast<external_state_watcher*>(arg);
-  grpc_closure* follow_up = w->notify;
-  if (w->pollset_set != nullptr) {
-    grpc_pollset_set_del_pollset_set(w->subchannel->pollset_set,
-                                     w->pollset_set);
+}  // namespace
+
+void Subchannel::WeakUnref(GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+  gpr_atm old_refs;
+  old_refs = RefMutate(-static_cast<gpr_atm>(1),
+                       1 GRPC_SUBCHANNEL_REF_MUTATE_PURPOSE("WEAK_UNREF"));
+  if (old_refs == 1) {
+    GRPC_CLOSURE_SCHED(GRPC_CLOSURE_CREATE(subchannel_destroy, this,
+                                           grpc_schedule_on_exec_ctx),
+                       GRPC_ERROR_NONE);
   }
-  gpr_mu_lock(&w->subchannel->mu);
-  w->next->prev = w->prev;
-  w->prev->next = w->next;
-  gpr_mu_unlock(&w->subchannel->mu);
-  GRPC_SUBCHANNEL_WEAK_UNREF(w->subchannel, "external_state_watcher");
-  gpr_free(w);
-  GRPC_CLOSURE_SCHED(follow_up, GRPC_ERROR_REF(error));
 }
 
-static void on_alarm(void* arg, grpc_error* error) {
-  grpc_subchannel* c = static_cast<grpc_subchannel*>(arg);
-  gpr_mu_lock(&c->mu);
-  c->have_alarm = false;
-  if (c->disconnected) {
-    error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING("Disconnected",
-                                                             &error, 1);
-  } else if (c->retry_immediately) {
-    c->retry_immediately = false;
-    error = GRPC_ERROR_NONE;
-  } else {
-    GRPC_ERROR_REF(error);
-  }
-  if (error == GRPC_ERROR_NONE) {
-    gpr_log(GPR_INFO, "Failed to connect to channel, retrying");
-    continue_connect_locked(c);
-    gpr_mu_unlock(&c->mu);
-  } else {
-    gpr_mu_unlock(&c->mu);
-    GRPC_SUBCHANNEL_WEAK_UNREF(c, "connecting");
+Subchannel* Subchannel::RefFromWeakRef(GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+  for (;;) {
+    gpr_atm old_refs = gpr_atm_acq_load(&ref_pair_);
+    if (old_refs >= (1 << INTERNAL_REF_BITS)) {
+      gpr_atm new_refs = old_refs + (1 << INTERNAL_REF_BITS);
+      if (gpr_atm_rel_cas(&ref_pair_, old_refs, new_refs)) {
+        return this;
+      }
+    } else {
+      return nullptr;
+    }
   }
-  GRPC_ERROR_UNREF(error);
 }
 
-static void maybe_start_connecting_locked(grpc_subchannel* c) {
-  if (c->disconnected) {
-    /* Don't try to connect if we're already disconnected */
-    return;
-  }
-  if (c->connecting) {
-    /* Already connecting: don't restart */
-    return;
-  }
-  if (c->connected_subchannel != nullptr) {
-    /* Already connected: don't restart */
-    return;
-  }
-  if (!grpc_connectivity_state_has_watchers(&c->state_tracker) &&
-      !grpc_connectivity_state_has_watchers(&c->state_and_health_tracker)) {
-    /* Nobody is interested in connecting: so don't just yet */
-    return;
-  }
-  c->connecting = true;
-  GRPC_SUBCHANNEL_WEAK_REF(c, "connecting");
-  if (!c->backoff_begun) {
-    c->backoff_begun = true;
-    continue_connect_locked(c);
+intptr_t Subchannel::GetChildSocketUuid() {
+  if (connected_subchannel_ != nullptr) {
+    return connected_subchannel_->socket_uuid();
   } else {
-    GPR_ASSERT(!c->have_alarm);
-    c->have_alarm = true;
-    const grpc_millis time_til_next =
-        c->next_attempt_deadline - grpc_core::ExecCtx::Get()->Now();
-    if (time_til_next <= 0) {
-      gpr_log(GPR_INFO, "Subchannel %p: Retry immediately", c);
-    } else {
-      gpr_log(GPR_INFO, "Subchannel %p: Retry in %" PRId64 " milliseconds", c,
-              time_til_next);
-    }
-    GRPC_CLOSURE_INIT(&c->on_alarm, on_alarm, c, grpc_schedule_on_exec_ctx);
-    grpc_timer_init(&c->alarm, c->next_attempt_deadline, &c->on_alarm);
+    return 0;
   }
 }
 
-void grpc_subchannel_notify_on_state_change(
-    grpc_subchannel* c, grpc_pollset_set* interested_parties,
-    grpc_connectivity_state* state, grpc_closure* notify,
-    bool inhibit_health_checks) {
+const char* Subchannel::GetTargetAddress() {
+  const grpc_arg* addr_arg =
+      grpc_channel_args_find(args_, GRPC_ARG_SUBCHANNEL_ADDRESS);
+  const char* addr_str = grpc_channel_arg_get_string(addr_arg);
+  GPR_ASSERT(addr_str != nullptr);  // Should have been set by LB policy.
+  return addr_str;
+}
+
+RefCountedPtr<ConnectedSubchannel> Subchannel::connected_subchannel() {
+  MutexLock lock(&mu_);
+  return connected_subchannel_;
+}
+
+channelz::SubchannelNode* Subchannel::channelz_node() {
+  return channelz_node_.get();
+}
+
+grpc_connectivity_state Subchannel::CheckConnectivity(
+    grpc_error** error, bool inhibit_health_checking) {
+  MutexLock lock(&mu_);
+  grpc_connectivity_state_tracker* tracker =
+      inhibit_health_checking ? &state_tracker_ : &state_and_health_tracker_;
+  grpc_connectivity_state state = grpc_connectivity_state_get(tracker, error);
+  return state;
+}
+
+void Subchannel::NotifyOnStateChange(grpc_pollset_set* interested_parties,
+                                     grpc_connectivity_state* state,
+                                     grpc_closure* notify,
+                                     bool inhibit_health_checking) {
   grpc_connectivity_state_tracker* tracker =
-      inhibit_health_checks ? &c->state_tracker : &c->state_and_health_tracker;
-  external_state_watcher* w;
+      inhibit_health_checking ? &state_tracker_ : &state_and_health_tracker_;
+  ExternalStateWatcher* w;
   if (state == nullptr) {
-    gpr_mu_lock(&c->mu);
-    for (w = c->root_external_state_watcher.next;
-         w != &c->root_external_state_watcher; w = w->next) {
+    MutexLock lock(&mu_);
+    for (w = external_state_watcher_list_; w != nullptr; w = w->next) {
       if (w->notify == notify) {
         grpc_connectivity_state_notify_on_state_change(tracker, nullptr,
-                                                       &w->closure);
+                                                       &w->on_state_changed);
       }
     }
-    gpr_mu_unlock(&c->mu);
   } else {
-    w = static_cast<external_state_watcher*>(gpr_malloc(sizeof(*w)));
-    w->subchannel = c;
-    w->pollset_set = interested_parties;
-    w->notify = notify;
-    GRPC_CLOSURE_INIT(&w->closure, on_external_state_watcher_done, w,
-                      grpc_schedule_on_exec_ctx);
+    w = New<ExternalStateWatcher>(this, interested_parties, notify);
     if (interested_parties != nullptr) {
-      grpc_pollset_set_add_pollset_set(c->pollset_set, interested_parties);
+      grpc_pollset_set_add_pollset_set(pollset_set_, interested_parties);
+    }
+    MutexLock lock(&mu_);
+    if (external_state_watcher_list_ != nullptr) {
+      w->next = external_state_watcher_list_;
+      w->next->prev = w;
     }
-    GRPC_SUBCHANNEL_WEAK_REF(c, "external_state_watcher");
-    gpr_mu_lock(&c->mu);
-    w->next = &c->root_external_state_watcher;
-    w->prev = w->next->prev;
-    w->next->prev = w->prev->next = w;
-    grpc_connectivity_state_notify_on_state_change(tracker, state, &w->closure);
-    maybe_start_connecting_locked(c);
-    gpr_mu_unlock(&c->mu);
+    external_state_watcher_list_ = w;
+    grpc_connectivity_state_notify_on_state_change(tracker, state,
+                                                   &w->on_state_changed);
+    MaybeStartConnectingLocked();
   }
 }
 
-static bool publish_transport_locked(grpc_subchannel* c) {
-  /* construct channel stack */
-  grpc_channel_stack_builder* builder = grpc_channel_stack_builder_create();
-  grpc_channel_stack_builder_set_channel_arguments(
-      builder, c->connecting_result.channel_args);
-  grpc_channel_stack_builder_set_transport(builder,
-                                           c->connecting_result.transport);
-
-  if (!grpc_channel_init_create_stack(builder, GRPC_CLIENT_SUBCHANNEL)) {
-    grpc_channel_stack_builder_destroy(builder);
-    return false;
-  }
-  grpc_channel_stack* stk;
-  grpc_error* error = grpc_channel_stack_builder_finish(
-      builder, 0, 1, connection_destroy, nullptr,
-      reinterpret_cast<void**>(&stk));
-  if (error != GRPC_ERROR_NONE) {
-    grpc_transport_destroy(c->connecting_result.transport);
-    gpr_log(GPR_ERROR, "error initializing subchannel stack: %s",
-            grpc_error_string(error));
-    GRPC_ERROR_UNREF(error);
-    return false;
-  }
-  intptr_t socket_uuid = c->connecting_result.socket_uuid;
-  memset(&c->connecting_result, 0, sizeof(c->connecting_result));
-
-  if (c->disconnected) {
-    grpc_channel_stack_destroy(stk);
-    gpr_free(stk);
-    return false;
+void Subchannel::ResetBackoff() {
+  MutexLock lock(&mu_);
+  backoff_.Reset();
+  if (have_retry_alarm_) {
+    retry_immediately_ = true;
+    grpc_timer_cancel(&retry_alarm_);
+  } else {
+    backoff_begun_ = false;
+    MaybeStartConnectingLocked();
   }
-
-  /* publish */
-  c->connected_subchannel.reset(grpc_core::New<grpc_core::ConnectedSubchannel>(
-      stk, c->args, c->channelz_subchannel, socket_uuid));
-  gpr_log(GPR_INFO, "New connected subchannel at %p for subchannel %p",
-          c->connected_subchannel.get(), c);
-
-  // Instantiate state watcher.  Will clean itself up.
-  c->connected_subchannel_watcher =
-      grpc_core::MakeOrphanable<grpc_core::ConnectedSubchannelStateWatcher>(c);
-
-  return true;
 }
 
-static void on_subchannel_connected(void* arg, grpc_error* error) {
-  grpc_subchannel* c = static_cast<grpc_subchannel*>(arg);
-  grpc_channel_args* delete_channel_args = c->connecting_result.channel_args;
-
-  GRPC_SUBCHANNEL_WEAK_REF(c, "on_subchannel_connected");
-  gpr_mu_lock(&c->mu);
-  c->connecting = false;
-  if (c->connecting_result.transport != nullptr &&
-      publish_transport_locked(c)) {
-    /* do nothing, transport was published */
-  } else if (c->disconnected) {
-    GRPC_SUBCHANNEL_WEAK_UNREF(c, "connecting");
-  } else {
-    set_subchannel_connectivity_state_locked(
-        c, GRPC_CHANNEL_TRANSIENT_FAILURE,
-        grpc_error_set_int(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
-                               "Connect Failed", &error, 1),
-                           GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE),
-        "connect_failed");
-    grpc_connectivity_state_set(
-        &c->state_and_health_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
-        grpc_error_set_int(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
-                               "Connect Failed", &error, 1),
-                           GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE),
-        "connect_failed");
-
-    const char* errmsg = grpc_error_string(error);
-    gpr_log(GPR_INFO, "Connect failed: %s", errmsg);
-
-    maybe_start_connecting_locked(c);
-    GRPC_SUBCHANNEL_WEAK_UNREF(c, "connecting");
-  }
-  gpr_mu_unlock(&c->mu);
-  GRPC_SUBCHANNEL_WEAK_UNREF(c, "connected");
-  grpc_channel_args_destroy(delete_channel_args);
+grpc_arg Subchannel::CreateSubchannelAddressArg(
+    const grpc_resolved_address* addr) {
+  return grpc_channel_arg_string_create(
+      (char*)GRPC_ARG_SUBCHANNEL_ADDRESS,
+      addr->len > 0 ? grpc_sockaddr_to_uri(addr) : gpr_strdup(""));
 }
 
-void grpc_subchannel_reset_backoff(grpc_subchannel* subchannel) {
-  gpr_mu_lock(&subchannel->mu);
-  subchannel->backoff->Reset();
-  if (subchannel->have_alarm) {
-    subchannel->retry_immediately = true;
-    grpc_timer_cancel(&subchannel->alarm);
-  } else {
-    subchannel->backoff_begun = false;
-    maybe_start_connecting_locked(subchannel);
-  }
-  gpr_mu_unlock(&subchannel->mu);
+const char* Subchannel::GetUriFromSubchannelAddressArg(
+    const grpc_channel_args* args) {
+  const grpc_arg* addr_arg =
+      grpc_channel_args_find(args, GRPC_ARG_SUBCHANNEL_ADDRESS);
+  const char* addr_str = grpc_channel_arg_get_string(addr_arg);
+  GPR_ASSERT(addr_str != nullptr);  // Should have been set by LB policy.
+  return addr_str;
 }
 
-/*
- * grpc_subchannel_call implementation
- */
+namespace {
 
-static void subchannel_call_destroy(void* call, grpc_error* error) {
-  GPR_TIMER_SCOPE("grpc_subchannel_call_unref.destroy", 0);
-  grpc_subchannel_call* c = static_cast<grpc_subchannel_call*>(call);
-  grpc_core::ConnectedSubchannel* connection = c->connection;
-  grpc_call_stack_destroy(SUBCHANNEL_CALL_TO_CALL_STACK(c), nullptr,
-                          c->schedule_closure_after_destroy);
-  connection->Unref(DEBUG_LOCATION, "subchannel_call");
-  c->~grpc_subchannel_call();
+void UriToSockaddr(const char* uri_str, grpc_resolved_address* addr) {
+  grpc_uri* uri = grpc_uri_parse(uri_str, 0 /* suppress_errors */);
+  GPR_ASSERT(uri != nullptr);
+  if (!grpc_parse_uri(uri, addr)) memset(addr, 0, sizeof(*addr));
+  grpc_uri_destroy(uri);
 }
 
-void grpc_subchannel_call_set_cleanup_closure(grpc_subchannel_call* call,
-                                              grpc_closure* closure) {
-  GPR_ASSERT(call->schedule_closure_after_destroy == nullptr);
-  GPR_ASSERT(closure != nullptr);
-  call->schedule_closure_after_destroy = closure;
-}
+}  // namespace
 
-grpc_subchannel_call* grpc_subchannel_call_ref(
-    grpc_subchannel_call* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
-  GRPC_CALL_STACK_REF(SUBCHANNEL_CALL_TO_CALL_STACK(c), REF_REASON);
-  return c;
+void Subchannel::GetAddressFromSubchannelAddressArg(
+    const grpc_channel_args* args, grpc_resolved_address* addr) {
+  const char* addr_uri_str = GetUriFromSubchannelAddressArg(args);
+  memset(addr, 0, sizeof(*addr));
+  if (*addr_uri_str != '\0') {
+    UriToSockaddr(addr_uri_str, addr);
+  }
 }
 
-void grpc_subchannel_call_unref(
-    grpc_subchannel_call* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
-  GRPC_CALL_STACK_UNREF(SUBCHANNEL_CALL_TO_CALL_STACK(c), REF_REASON);
-}
+namespace {
 
-// Sets *status based on md_batch and error.
-static void get_call_status(grpc_subchannel_call* call,
-                            grpc_metadata_batch* md_batch, grpc_error* error,
-                            grpc_status_code* status) {
-  if (error != GRPC_ERROR_NONE) {
-    grpc_error_get_status(error, call->deadline, status, nullptr, nullptr,
-                          nullptr);
-  } else {
-    if (md_batch->idx.named.grpc_status != nullptr) {
-      *status = grpc_get_status_code_from_metadata(
-          md_batch->idx.named.grpc_status->md);
-    } else {
-      *status = GRPC_STATUS_UNKNOWN;
-    }
+// Returns a string indicating the subchannel's connectivity state change to
+// \a state.
+const char* SubchannelConnectivityStateChangeString(
+    grpc_connectivity_state state) {
+  switch (state) {
+    case GRPC_CHANNEL_IDLE:
+      return "Subchannel state change to IDLE";
+    case GRPC_CHANNEL_CONNECTING:
+      return "Subchannel state change to CONNECTING";
+    case GRPC_CHANNEL_READY:
+      return "Subchannel state change to READY";
+    case GRPC_CHANNEL_TRANSIENT_FAILURE:
+      return "Subchannel state change to TRANSIENT_FAILURE";
+    case GRPC_CHANNEL_SHUTDOWN:
+      return "Subchannel state change to SHUTDOWN";
   }
-  GRPC_ERROR_UNREF(error);
+  GPR_UNREACHABLE_CODE(return "UNKNOWN");
 }
 
-static void recv_trailing_metadata_ready(void* arg, grpc_error* error) {
-  grpc_subchannel_call* call = static_cast<grpc_subchannel_call*>(arg);
-  GPR_ASSERT(call->recv_trailing_metadata != nullptr);
-  grpc_status_code status = GRPC_STATUS_OK;
-  grpc_metadata_batch* md_batch = call->recv_trailing_metadata;
-  get_call_status(call, md_batch, GRPC_ERROR_REF(error), &status);
-  grpc_core::channelz::SubchannelNode* channelz_subchannel =
-      call->connection->channelz_subchannel();
-  GPR_ASSERT(channelz_subchannel != nullptr);
-  if (status == GRPC_STATUS_OK) {
-    channelz_subchannel->RecordCallSucceeded();
-  } else {
-    channelz_subchannel->RecordCallFailed();
+}  // namespace
+
+void Subchannel::SetConnectivityStateLocked(grpc_connectivity_state state,
+                                            grpc_error* error,
+                                            const char* reason) {
+  if (channelz_node_ != nullptr) {
+    channelz_node_->AddTraceEvent(
+        channelz::ChannelTrace::Severity::Info,
+        grpc_slice_from_static_string(
+            SubchannelConnectivityStateChangeString(state)));
   }
-  GRPC_CLOSURE_RUN(call->original_recv_trailing_metadata,
-                   GRPC_ERROR_REF(error));
+  grpc_connectivity_state_set(&state_tracker_, state, error, reason);
 }
 
-// If channelz is enabled, intercept recv_trailing so that we may check the
-// status and associate it to a subchannel.
-static void maybe_intercept_recv_trailing_metadata(
-    grpc_subchannel_call* call, grpc_transport_stream_op_batch* batch) {
-  // only intercept payloads with recv trailing.
-  if (!batch->recv_trailing_metadata) {
+void Subchannel::MaybeStartConnectingLocked() {
+  if (disconnected_) {
+    // Don't try to connect if we're already disconnected.
     return;
   }
-  // only add interceptor is channelz is enabled.
-  if (call->connection->channelz_subchannel() == nullptr) {
+  if (connecting_) {
+    // Already connecting: don't restart.
     return;
   }
-  GRPC_CLOSURE_INIT(&call->recv_trailing_metadata_ready,
-                    recv_trailing_metadata_ready, call,
-                    grpc_schedule_on_exec_ctx);
-  // save some state needed for the interception callback.
-  GPR_ASSERT(call->recv_trailing_metadata == nullptr);
-  call->recv_trailing_metadata =
-      batch->payload->recv_trailing_metadata.recv_trailing_metadata;
-  call->original_recv_trailing_metadata =
-      batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready;
-  batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready =
-      &call->recv_trailing_metadata_ready;
-}
-
-void grpc_subchannel_call_process_op(grpc_subchannel_call* call,
-                                     grpc_transport_stream_op_batch* batch) {
-  GPR_TIMER_SCOPE("grpc_subchannel_call_process_op", 0);
-  maybe_intercept_recv_trailing_metadata(call, batch);
-  grpc_call_stack* call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(call);
-  grpc_call_element* top_elem = grpc_call_stack_element(call_stack, 0);
-  GRPC_CALL_LOG_OP(GPR_INFO, top_elem, batch);
-  top_elem->filter->start_transport_stream_op_batch(top_elem, batch);
+  if (connected_subchannel_ != nullptr) {
+    // Already connected: don't restart.
+    return;
+  }
+  if (!grpc_connectivity_state_has_watchers(&state_tracker_) &&
+      !grpc_connectivity_state_has_watchers(&state_and_health_tracker_)) {
+    // Nobody is interested in connecting, so don't start just yet.
+    return;
+  }
+  connecting_ = true;
+  GRPC_SUBCHANNEL_WEAK_REF(this, "connecting");
+  if (!backoff_begun_) {
+    backoff_begun_ = true;
+    ContinueConnectingLocked();
+  } else {
+    GPR_ASSERT(!have_retry_alarm_);
+    have_retry_alarm_ = true;
+    const grpc_millis time_til_next =
+        next_attempt_deadline_ - ExecCtx::Get()->Now();
+    if (time_til_next <= 0) {
+      gpr_log(GPR_INFO, "Subchannel %p: Retry immediately", this);
+    } else {
+      gpr_log(GPR_INFO, "Subchannel %p: Retry in %" PRId64 " milliseconds",
+              this, time_til_next);
+    }
+    GRPC_CLOSURE_INIT(&on_retry_alarm_, OnRetryAlarm, this,
+                      grpc_schedule_on_exec_ctx);
+    grpc_timer_init(&retry_alarm_, next_attempt_deadline_, &on_retry_alarm_);
+  }
 }
 
-grpc_core::RefCountedPtr<grpc_core::ConnectedSubchannel>
-grpc_subchannel_get_connected_subchannel(grpc_subchannel* c) {
-  gpr_mu_lock(&c->mu);
-  auto copy = c->connected_subchannel;
-  gpr_mu_unlock(&c->mu);
-  return copy;
+void Subchannel::OnRetryAlarm(void* arg, grpc_error* error) {
+  Subchannel* c = static_cast<Subchannel*>(arg);
+  gpr_mu_lock(&c->mu_);
+  c->have_retry_alarm_ = false;
+  if (c->disconnected_) {
+    error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING("Disconnected",
+                                                             &error, 1);
+  } else if (c->retry_immediately_) {
+    c->retry_immediately_ = false;
+    error = GRPC_ERROR_NONE;
+  } else {
+    GRPC_ERROR_REF(error);
+  }
+  if (error == GRPC_ERROR_NONE) {
+    gpr_log(GPR_INFO, "Failed to connect to channel, retrying");
+    c->ContinueConnectingLocked();
+    gpr_mu_unlock(&c->mu_);
+  } else {
+    gpr_mu_unlock(&c->mu_);
+    GRPC_SUBCHANNEL_WEAK_UNREF(c, "connecting");
+  }
+  GRPC_ERROR_UNREF(error);
 }
 
-void* grpc_connected_subchannel_call_get_parent_data(
-    grpc_subchannel_call* subchannel_call) {
-  grpc_channel_stack* chanstk = subchannel_call->connection->channel_stack();
-  return (char*)subchannel_call + sizeof(grpc_subchannel_call) +
-         chanstk->call_stack_size;
+void Subchannel::ContinueConnectingLocked() {
+  grpc_connect_in_args args;
+  args.interested_parties = pollset_set_;
+  const grpc_millis min_deadline =
+      min_connect_timeout_ms_ + ExecCtx::Get()->Now();
+  next_attempt_deadline_ = backoff_.NextAttemptTime();
+  args.deadline = std::max(next_attempt_deadline_, min_deadline);
+  args.channel_args = args_;
+  SetConnectivityStateLocked(GRPC_CHANNEL_CONNECTING, GRPC_ERROR_NONE,
+                             "connecting");
+  grpc_connectivity_state_set(&state_and_health_tracker_,
+                              GRPC_CHANNEL_CONNECTING, GRPC_ERROR_NONE,
+                              "connecting");
+  grpc_connector_connect(connector_, &args, &connecting_result_,
+                         &on_connecting_finished_);
 }
 
-grpc_call_stack* grpc_subchannel_call_get_call_stack(
-    grpc_subchannel_call* subchannel_call) {
-  return SUBCHANNEL_CALL_TO_CALL_STACK(subchannel_call);
-}
+void Subchannel::OnConnectingFinished(void* arg, grpc_error* error) {
+  auto* c = static_cast<Subchannel*>(arg);
+  grpc_channel_args* delete_channel_args = c->connecting_result_.channel_args;
+  GRPC_SUBCHANNEL_WEAK_REF(c, "on_connecting_finished");
+  gpr_mu_lock(&c->mu_);
+  c->connecting_ = false;
+  if (c->connecting_result_.transport != nullptr &&
+      c->PublishTransportLocked()) {
+    // Do nothing, transport was published.
+  } else if (c->disconnected_) {
+    GRPC_SUBCHANNEL_WEAK_UNREF(c, "connecting");
+  } else {
+    c->SetConnectivityStateLocked(
+        GRPC_CHANNEL_TRANSIENT_FAILURE,
+        grpc_error_set_int(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+                               "Connect Failed", &error, 1),
+                           GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE),
+        "connect_failed");
+    grpc_connectivity_state_set(
+        &c->state_and_health_tracker_, GRPC_CHANNEL_TRANSIENT_FAILURE,
+        grpc_error_set_int(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+                               "Connect Failed", &error, 1),
+                           GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE),
+        "connect_failed");
 
-static void grpc_uri_to_sockaddr(const char* uri_str,
-                                 grpc_resolved_address* addr) {
-  grpc_uri* uri = grpc_uri_parse(uri_str, 0 /* suppress_errors */);
-  GPR_ASSERT(uri != nullptr);
-  if (!grpc_parse_uri(uri, addr)) memset(addr, 0, sizeof(*addr));
-  grpc_uri_destroy(uri);
-}
+    const char* errmsg = grpc_error_string(error);
+    gpr_log(GPR_INFO, "Connect failed: %s", errmsg);
 
-void grpc_get_subchannel_address_arg(const grpc_channel_args* args,
-                                     grpc_resolved_address* addr) {
-  const char* addr_uri_str = grpc_get_subchannel_address_uri_arg(args);
-  memset(addr, 0, sizeof(*addr));
-  if (*addr_uri_str != '\0') {
-    grpc_uri_to_sockaddr(addr_uri_str, addr);
+    c->MaybeStartConnectingLocked();
+    GRPC_SUBCHANNEL_WEAK_UNREF(c, "connecting");
   }
+  gpr_mu_unlock(&c->mu_);
+  GRPC_SUBCHANNEL_WEAK_UNREF(c, "on_connecting_finished");
+  grpc_channel_args_destroy(delete_channel_args);
 }
 
-const char* grpc_subchannel_get_target(grpc_subchannel* subchannel) {
-  const grpc_arg* addr_arg =
-      grpc_channel_args_find(subchannel->args, GRPC_ARG_SUBCHANNEL_ADDRESS);
-  const char* addr_str = grpc_channel_arg_get_string(addr_arg);
-  GPR_ASSERT(addr_str != nullptr);  // Should have been set by LB policy.
-  return addr_str;
-}
-
-const char* grpc_get_subchannel_address_uri_arg(const grpc_channel_args* args) {
-  const grpc_arg* addr_arg =
-      grpc_channel_args_find(args, GRPC_ARG_SUBCHANNEL_ADDRESS);
-  const char* addr_str = grpc_channel_arg_get_string(addr_arg);
-  GPR_ASSERT(addr_str != nullptr);  // Should have been set by LB policy.
-  return addr_str;
-}
-
-grpc_arg grpc_create_subchannel_address_arg(const grpc_resolved_address* addr) {
-  return grpc_channel_arg_string_create(
-      (char*)GRPC_ARG_SUBCHANNEL_ADDRESS,
-      addr->len > 0 ? grpc_sockaddr_to_uri(addr) : gpr_strdup(""));
-}
-
-namespace grpc_core {
-
-ConnectedSubchannel::ConnectedSubchannel(
-    grpc_channel_stack* channel_stack, const grpc_channel_args* args,
-    grpc_core::RefCountedPtr<grpc_core::channelz::SubchannelNode>
-        channelz_subchannel,
-    intptr_t socket_uuid)
-    : RefCounted<ConnectedSubchannel>(&grpc_trace_stream_refcount),
-      channel_stack_(channel_stack),
-      args_(grpc_channel_args_copy(args)),
-      channelz_subchannel_(std::move(channelz_subchannel)),
-      socket_uuid_(socket_uuid) {}
+namespace {
 
-ConnectedSubchannel::~ConnectedSubchannel() {
-  grpc_channel_args_destroy(args_);
-  GRPC_CHANNEL_STACK_UNREF(channel_stack_, "connected_subchannel_dtor");
+void ConnectionDestroy(void* arg, grpc_error* error) {
+  grpc_channel_stack* stk = static_cast<grpc_channel_stack*>(arg);
+  grpc_channel_stack_destroy(stk);
+  gpr_free(stk);
 }
 
-void ConnectedSubchannel::NotifyOnStateChange(
-    grpc_pollset_set* interested_parties, grpc_connectivity_state* state,
-    grpc_closure* closure) {
-  grpc_transport_op* op = grpc_make_transport_op(nullptr);
-  grpc_channel_element* elem;
-  op->connectivity_state = state;
-  op->on_connectivity_state_change = closure;
-  op->bind_pollset_set = interested_parties;
-  elem = grpc_channel_stack_element(channel_stack_, 0);
-  elem->filter->start_transport_op(elem, op);
-}
+}  // namespace
 
-void ConnectedSubchannel::Ping(grpc_closure* on_initiate,
-                               grpc_closure* on_ack) {
-  grpc_transport_op* op = grpc_make_transport_op(nullptr);
-  grpc_channel_element* elem;
-  op->send_ping.on_initiate = on_initiate;
-  op->send_ping.on_ack = on_ack;
-  elem = grpc_channel_stack_element(channel_stack_, 0);
-  elem->filter->start_transport_op(elem, op);
+bool Subchannel::PublishTransportLocked() {
+  // Construct channel stack.
+  grpc_channel_stack_builder* builder = grpc_channel_stack_builder_create();
+  grpc_channel_stack_builder_set_channel_arguments(
+      builder, connecting_result_.channel_args);
+  grpc_channel_stack_builder_set_transport(builder,
+                                           connecting_result_.transport);
+  if (!grpc_channel_init_create_stack(builder, GRPC_CLIENT_SUBCHANNEL)) {
+    grpc_channel_stack_builder_destroy(builder);
+    return false;
+  }
+  grpc_channel_stack* stk;
+  grpc_error* error = grpc_channel_stack_builder_finish(
+      builder, 0, 1, ConnectionDestroy, nullptr,
+      reinterpret_cast<void**>(&stk));
+  if (error != GRPC_ERROR_NONE) {
+    grpc_transport_destroy(connecting_result_.transport);
+    gpr_log(GPR_ERROR, "error initializing subchannel stack: %s",
+            grpc_error_string(error));
+    GRPC_ERROR_UNREF(error);
+    return false;
+  }
+  intptr_t socket_uuid = connecting_result_.socket_uuid;
+  memset(&connecting_result_, 0, sizeof(connecting_result_));
+  if (disconnected_) {
+    grpc_channel_stack_destroy(stk);
+    gpr_free(stk);
+    return false;
+  }
+  // Publish.
+  connected_subchannel_.reset(
+      New<ConnectedSubchannel>(stk, args_, channelz_node_, socket_uuid));
+  gpr_log(GPR_INFO, "New connected subchannel at %p for subchannel %p",
+          connected_subchannel_.get(), this);
+  // Instantiate state watcher.  Will clean itself up.
+  connected_subchannel_watcher_ =
+      MakeOrphanable<ConnectedSubchannelStateWatcher>(this);
+  return true;
 }
 
-grpc_error* ConnectedSubchannel::CreateCall(const CallArgs& args,
-                                            grpc_subchannel_call** call) {
-  const size_t allocation_size =
-      GetInitialCallSizeEstimate(args.parent_data_size);
-  *call = new (gpr_arena_alloc(args.arena, allocation_size))
-      grpc_subchannel_call(this, args);
-  grpc_call_stack* callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call);
-  RefCountedPtr<ConnectedSubchannel> connection =
-      Ref(DEBUG_LOCATION, "subchannel_call");
-  connection.release();  // Ref is passed to the grpc_subchannel_call object.
-  const grpc_call_element_args call_args = {
-      callstk,           /* call_stack */
-      nullptr,           /* server_transport_data */
-      args.context,      /* context */
-      args.path,         /* path */
-      args.start_time,   /* start_time */
-      args.deadline,     /* deadline */
-      args.arena,        /* arena */
-      args.call_combiner /* call_combiner */
-  };
-  grpc_error* error = grpc_call_stack_init(
-      channel_stack_, 1, subchannel_call_destroy, *call, &call_args);
-  if (GPR_UNLIKELY(error != GRPC_ERROR_NONE)) {
-    const char* error_string = grpc_error_string(error);
-    gpr_log(GPR_ERROR, "error: %s", error_string);
-    return error;
-  }
-  grpc_call_stack_set_pollset_or_pollset_set(callstk, args.pollent);
-  if (channelz_subchannel_ != nullptr) {
-    channelz_subchannel_->RecordCallStarted();
+void Subchannel::Disconnect() {
+  // The subchannel_pool is accessed only once here in this subchannel, so the
+  // access can happen outside of the lock.
+  if (subchannel_pool_ != nullptr) {
+    subchannel_pool_->UnregisterSubchannel(key_);
+    subchannel_pool_.reset();
   }
-  return GRPC_ERROR_NONE;
+  MutexLock lock(&mu_);
+  GPR_ASSERT(!disconnected_);
+  disconnected_ = true;
+  grpc_connector_shutdown(connector_, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+                                          "Subchannel disconnected"));
+  connected_subchannel_.reset();
+  connected_subchannel_watcher_.reset();
 }
 
-size_t ConnectedSubchannel::GetInitialCallSizeEstimate(
-    size_t parent_data_size) const {
-  size_t allocation_size =
-      GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_subchannel_call));
-  if (parent_data_size > 0) {
-    allocation_size +=
-        GPR_ROUND_UP_TO_ALIGNMENT_SIZE(channel_stack_->call_stack_size) +
-        parent_data_size;
-  } else {
-    allocation_size += channel_stack_->call_stack_size;
+gpr_atm Subchannel::RefMutate(
+    gpr_atm delta, int barrier GRPC_SUBCHANNEL_REF_MUTATE_EXTRA_ARGS) {
+  gpr_atm old_val = barrier ? gpr_atm_full_fetch_add(&ref_pair_, delta)
+                            : gpr_atm_no_barrier_fetch_add(&ref_pair_, delta);
+#ifndef NDEBUG
+  if (grpc_trace_stream_refcount.enabled()) {
+    gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
+            "SUBCHANNEL: %p %12s 0x%" PRIxPTR " -> 0x%" PRIxPTR " [%s]", this,
+            purpose, old_val, old_val + delta, reason);
   }
-  return allocation_size;
+#endif
+  return old_val;
 }
 
 }  // namespace grpc_core
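
Note: RefMutate() above maintains both strong and weak reference counts in a
single atomic word. A minimal standalone sketch of that technique follows; the
bit split and the initial count are assumptions chosen for illustration, not
the values Subchannel actually uses.

    #include <atomic>
    #include <cstdint>

    class PackedRefCount {
     public:
      // Assumed split: low 16 bits hold weak (internal) refs, the remaining
      // high bits hold strong refs.
      static constexpr int kWeakBits = 16;
      static constexpr uintptr_t kStrongStep = uintptr_t(1) << kWeakBits;

      void StrongRef() { pair_.fetch_add(kStrongStep, std::memory_order_relaxed); }
      void WeakRef() { pair_.fetch_add(1, std::memory_order_relaxed); }

      // Returns true when the last strong ref is dropped.
      bool StrongUnref() {
        uintptr_t old = pair_.fetch_sub(kStrongStep, std::memory_order_acq_rel);
        return (old >> kWeakBits) == 1;
      }

      // Returns true when the last reference of any kind is dropped.
      bool WeakUnref() {
        uintptr_t old = pair_.fetch_sub(1, std::memory_order_acq_rel);
        return old == 1;
      }

     private:
      std::atomic<uintptr_t> pair_{1};  // One weak ref at construction (illustrative).
    };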

+ 220 - 111
src/core/ext/filters/client_channel/subchannel.h

@@ -24,53 +24,49 @@
 #include "src/core/ext/filters/client_channel/client_channel_channelz.h"
 #include "src/core/ext/filters/client_channel/connector.h"
 #include "src/core/ext/filters/client_channel/subchannel_pool_interface.h"
+#include "src/core/lib/backoff/backoff.h"
 #include "src/core/lib/channel/channel_stack.h"
 #include "src/core/lib/gpr/arena.h"
 #include "src/core/lib/gprpp/ref_counted.h"
 #include "src/core/lib/gprpp/ref_counted_ptr.h"
 #include "src/core/lib/iomgr/polling_entity.h"
+#include "src/core/lib/iomgr/timer.h"
 #include "src/core/lib/transport/connectivity_state.h"
 #include "src/core/lib/transport/metadata.h"
 
 // Channel arg containing a grpc_resolved_address to connect to.
 #define GRPC_ARG_SUBCHANNEL_ADDRESS "grpc.subchannel_address"
 
-/** A (sub-)channel that knows how to connect to exactly one target
-    address. Provides a target for load balancing. */
-typedef struct grpc_subchannel grpc_subchannel;
-typedef struct grpc_subchannel_call grpc_subchannel_call;
-
+// For debugging refcounting.
 #ifndef NDEBUG
-#define GRPC_SUBCHANNEL_REF(p, r) \
-  grpc_subchannel_ref((p), __FILE__, __LINE__, (r))
+#define GRPC_SUBCHANNEL_REF(p, r) (p)->Ref(__FILE__, __LINE__, (r))
 #define GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(p, r) \
-  grpc_subchannel_ref_from_weak_ref((p), __FILE__, __LINE__, (r))
-#define GRPC_SUBCHANNEL_UNREF(p, r) \
-  grpc_subchannel_unref((p), __FILE__, __LINE__, (r))
-#define GRPC_SUBCHANNEL_WEAK_REF(p, r) \
-  grpc_subchannel_weak_ref((p), __FILE__, __LINE__, (r))
-#define GRPC_SUBCHANNEL_WEAK_UNREF(p, r) \
-  grpc_subchannel_weak_unref((p), __FILE__, __LINE__, (r))
-#define GRPC_SUBCHANNEL_CALL_REF(p, r) \
-  grpc_subchannel_call_ref((p), __FILE__, __LINE__, (r))
-#define GRPC_SUBCHANNEL_CALL_UNREF(p, r) \
-  grpc_subchannel_call_unref((p), __FILE__, __LINE__, (r))
+  (p)->RefFromWeakRef(__FILE__, __LINE__, (r))
+#define GRPC_SUBCHANNEL_UNREF(p, r) (p)->Unref(__FILE__, __LINE__, (r))
+#define GRPC_SUBCHANNEL_WEAK_REF(p, r) (p)->WeakRef(__FILE__, __LINE__, (r))
+#define GRPC_SUBCHANNEL_WEAK_UNREF(p, r) (p)->WeakUnref(__FILE__, __LINE__, (r))
 #define GRPC_SUBCHANNEL_REF_EXTRA_ARGS \
-  , const char *file, int line, const char *reason
+  const char *file, int line, const char *reason
+#define GRPC_SUBCHANNEL_REF_REASON reason
+#define GRPC_SUBCHANNEL_REF_MUTATE_EXTRA_ARGS \
+  , GRPC_SUBCHANNEL_REF_EXTRA_ARGS, const char* purpose
+#define GRPC_SUBCHANNEL_REF_MUTATE_PURPOSE(x) , file, line, reason, x
 #else
-#define GRPC_SUBCHANNEL_REF(p, r) grpc_subchannel_ref((p))
-#define GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(p, r) \
-  grpc_subchannel_ref_from_weak_ref((p))
-#define GRPC_SUBCHANNEL_UNREF(p, r) grpc_subchannel_unref((p))
-#define GRPC_SUBCHANNEL_WEAK_REF(p, r) grpc_subchannel_weak_ref((p))
-#define GRPC_SUBCHANNEL_WEAK_UNREF(p, r) grpc_subchannel_weak_unref((p))
-#define GRPC_SUBCHANNEL_CALL_REF(p, r) grpc_subchannel_call_ref((p))
-#define GRPC_SUBCHANNEL_CALL_UNREF(p, r) grpc_subchannel_call_unref((p))
+#define GRPC_SUBCHANNEL_REF(p, r) (p)->Ref()
+#define GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(p, r) (p)->RefFromWeakRef()
+#define GRPC_SUBCHANNEL_UNREF(p, r) (p)->Unref()
+#define GRPC_SUBCHANNEL_WEAK_REF(p, r) (p)->WeakRef()
+#define GRPC_SUBCHANNEL_WEAK_UNREF(p, r) (p)->WeakUnref()
 #define GRPC_SUBCHANNEL_REF_EXTRA_ARGS
+#define GRPC_SUBCHANNEL_REF_REASON ""
+#define GRPC_SUBCHANNEL_REF_MUTATE_EXTRA_ARGS
+#define GRPC_SUBCHANNEL_REF_MUTATE_PURPOSE(x)
 #endif
 
 namespace grpc_core {
 
+class SubchannelCall;
+
 class ConnectedSubchannel : public RefCounted<ConnectedSubchannel> {
  public:
   struct CallArgs {
@@ -86,8 +82,7 @@ class ConnectedSubchannel : public RefCounted<ConnectedSubchannel> {
 
   ConnectedSubchannel(
       grpc_channel_stack* channel_stack, const grpc_channel_args* args,
-      grpc_core::RefCountedPtr<grpc_core::channelz::SubchannelNode>
-          channelz_subchannel,
+      RefCountedPtr<channelz::SubchannelNode> channelz_subchannel,
       intptr_t socket_uuid);
   ~ConnectedSubchannel();
 
@@ -95,7 +90,8 @@ class ConnectedSubchannel : public RefCounted<ConnectedSubchannel> {
                            grpc_connectivity_state* state,
                            grpc_closure* closure);
   void Ping(grpc_closure* on_initiate, grpc_closure* on_ack);
-  grpc_error* CreateCall(const CallArgs& args, grpc_subchannel_call** call);
+  RefCountedPtr<SubchannelCall> CreateCall(const CallArgs& args,
+                                           grpc_error** error);
 
   grpc_channel_stack* channel_stack() const { return channel_stack_; }
   const grpc_channel_args* args() const { return args_; }
@@ -111,91 +107,204 @@ class ConnectedSubchannel : public RefCounted<ConnectedSubchannel> {
   grpc_channel_args* args_;
   // ref counted pointer to the channelz node in this connected subchannel's
   // owning subchannel.
-  grpc_core::RefCountedPtr<grpc_core::channelz::SubchannelNode>
-      channelz_subchannel_;
+  RefCountedPtr<channelz::SubchannelNode> channelz_subchannel_;
   // uuid of this subchannel's socket. 0 if this subchannel is not connected.
   const intptr_t socket_uuid_;
 };
 
-}  // namespace grpc_core
+// Implements the interface of RefCounted<>.
+class SubchannelCall {
+ public:
+  SubchannelCall(RefCountedPtr<ConnectedSubchannel> connected_subchannel,
+                 const ConnectedSubchannel::CallArgs& args)
+      : connected_subchannel_(std::move(connected_subchannel)),
+        deadline_(args.deadline) {}
+
+  // Continues processing a transport stream op batch.
+  void StartTransportStreamOpBatch(grpc_transport_stream_op_batch* batch);
+
+  // Returns a pointer to the parent data associated with the subchannel call.
+  // The data will be of the size specified in \a parent_data_size field of
+  // the args passed to \a ConnectedSubchannel::CreateCall().
+  void* GetParentData();
+
+  // Returns the call stack of the subchannel call.
+  grpc_call_stack* GetCallStack();
+
+  grpc_closure* after_call_stack_destroy() const {
+    return after_call_stack_destroy_;
+  }
+
+  // Sets the 'then_schedule_closure' argument for call stack destruction.
+  // Must be called once per call.
+  void SetAfterCallStackDestroy(grpc_closure* closure);
+
+  // Interface of RefCounted<>.
+  RefCountedPtr<SubchannelCall> Ref() GRPC_MUST_USE_RESULT;
+  RefCountedPtr<SubchannelCall> Ref(const DebugLocation& location,
+                                    const char* reason) GRPC_MUST_USE_RESULT;
+  // When refcount drops to 0, destroys itself and the associated call stack,
+  // but does NOT free the memory because it's in the call arena.
+  void Unref();
+  void Unref(const DebugLocation& location, const char* reason);
+
+ private:
+  // Allow RefCountedPtr<> to access IncrementRefCount().
+  template <typename T>
+  friend class RefCountedPtr;
+
+  // If channelz is enabled, intercepts recv_trailing so that we may check the
+  // status and associate it to a subchannel.
+  void MaybeInterceptRecvTrailingMetadata(
+      grpc_transport_stream_op_batch* batch);
+
+  static void RecvTrailingMetadataReady(void* arg, grpc_error* error);
+
+  // Interface of RefCounted<>.
+  void IncrementRefCount();
+  void IncrementRefCount(const DebugLocation& location, const char* reason);
+
+  RefCountedPtr<ConnectedSubchannel> connected_subchannel_;
+  grpc_closure* after_call_stack_destroy_ = nullptr;
+  // State needed to support channelz interception of recv trailing metadata.
+  grpc_closure recv_trailing_metadata_ready_;
+  grpc_closure* original_recv_trailing_metadata_ = nullptr;
+  grpc_metadata_batch* recv_trailing_metadata_ = nullptr;
+  grpc_millis deadline_;
+};
+
+// A subchannel that knows how to connect to exactly one target address. It
+// provides a target for load balancing.
+class Subchannel {
+ public:
+  // The ctor and dtor are not intended to be used directly.
+  Subchannel(SubchannelKey* key, grpc_connector* connector,
+             const grpc_channel_args* args);
+  ~Subchannel();
+
+  // Creates a subchannel given \a connector and \a args.
+  static Subchannel* Create(grpc_connector* connector,
+                            const grpc_channel_args* args);
+
+  // Strong and weak refcounting.
+  Subchannel* Ref(GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+  void Unref(GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+  Subchannel* WeakRef(GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+  void WeakUnref(GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+  Subchannel* RefFromWeakRef(GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+
+  intptr_t GetChildSocketUuid();
 
-grpc_subchannel* grpc_subchannel_ref(
-    grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-grpc_subchannel* grpc_subchannel_ref_from_weak_ref(
-    grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-void grpc_subchannel_unref(
-    grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-grpc_subchannel* grpc_subchannel_weak_ref(
-    grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-void grpc_subchannel_weak_unref(
-    grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-grpc_subchannel_call* grpc_subchannel_call_ref(
-    grpc_subchannel_call* call GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-void grpc_subchannel_call_unref(
-    grpc_subchannel_call* call GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-
-grpc_core::channelz::SubchannelNode* grpc_subchannel_get_channelz_node(
-    grpc_subchannel* subchannel);
-
-intptr_t grpc_subchannel_get_child_socket_uuid(grpc_subchannel* subchannel);
-
-/** Returns a pointer to the parent data associated with \a subchannel_call.
-    The data will be of the size specified in \a parent_data_size
-    field of the args passed to \a grpc_connected_subchannel_create_call(). */
-void* grpc_connected_subchannel_call_get_parent_data(
-    grpc_subchannel_call* subchannel_call);
-
-/** poll the current connectivity state of a channel */
-grpc_connectivity_state grpc_subchannel_check_connectivity(
-    grpc_subchannel* channel, grpc_error** error, bool inhibit_health_checking);
-
-/** Calls notify when the connectivity state of a channel becomes different
-    from *state.  Updates *state with the new state of the channel. */
-void grpc_subchannel_notify_on_state_change(
-    grpc_subchannel* channel, grpc_pollset_set* interested_parties,
-    grpc_connectivity_state* state, grpc_closure* notify,
-    bool inhibit_health_checks);
-
-/** retrieve the grpc_core::ConnectedSubchannel - or nullptr if not connected
- * (which may happen before it initially connects or during transient failures)
- * */
-grpc_core::RefCountedPtr<grpc_core::ConnectedSubchannel>
-grpc_subchannel_get_connected_subchannel(grpc_subchannel* c);
-
-// Resets the connection backoff of the subchannel.
-// TODO(roth): Move connection backoff out of subchannels and up into LB
-// policy code (probably by adding a SubchannelGroup between
-// SubchannelList and SubchannelData), at which point this method can
-// go away.
-void grpc_subchannel_reset_backoff(grpc_subchannel* subchannel);
-
-/** continue processing a transport op */
-void grpc_subchannel_call_process_op(grpc_subchannel_call* subchannel_call,
-                                     grpc_transport_stream_op_batch* op);
-
-/** Must be called once per call. Sets the 'then_schedule_closure' argument for
-    call stack destruction. */
-void grpc_subchannel_call_set_cleanup_closure(
-    grpc_subchannel_call* subchannel_call, grpc_closure* closure);
-
-grpc_call_stack* grpc_subchannel_call_get_call_stack(
-    grpc_subchannel_call* subchannel_call);
-
-/** create a subchannel given a connector */
-grpc_subchannel* grpc_subchannel_create(grpc_connector* connector,
-                                        const grpc_channel_args* args);
-
-/// Sets \a addr from \a args.
-void grpc_get_subchannel_address_arg(const grpc_channel_args* args,
-                                     grpc_resolved_address* addr);
-
-const char* grpc_subchannel_get_target(grpc_subchannel* subchannel);
-
-/// Returns the URI string for the address to connect to.
-const char* grpc_get_subchannel_address_uri_arg(const grpc_channel_args* args);
-
-/// Returns a new channel arg encoding the subchannel address as a string.
-/// Caller is responsible for freeing the string.
-grpc_arg grpc_create_subchannel_address_arg(const grpc_resolved_address* addr);
+  // Gets the string representing the subchannel address.
+  // Caller doesn't take ownership.
+  const char* GetTargetAddress();
+
+  // Gets the connected subchannel - or nullptr if not connected (which may
+  // happen before it initially connects or during transient failures).
+  RefCountedPtr<ConnectedSubchannel> connected_subchannel();
+
+  channelz::SubchannelNode* channelz_node();
+
+  // Polls the current connectivity state of the subchannel.
+  grpc_connectivity_state CheckConnectivity(grpc_error** error,
+                                            bool inhibit_health_checking);
+
+  // When the connectivity state of the subchannel changes from \a *state,
+  // invokes \a notify and updates \a *state with the new state.
+  void NotifyOnStateChange(grpc_pollset_set* interested_parties,
+                           grpc_connectivity_state* state, grpc_closure* notify,
+                           bool inhibit_health_checking);
+
+  // Resets the connection backoff of the subchannel.
+  // TODO(roth): Move connection backoff out of subchannels and up into LB
+  // policy code (probably by adding a SubchannelGroup between
+  // SubchannelList and SubchannelData), at which point this method can
+  // go away.
+  void ResetBackoff();
+
+  // Returns a new channel arg encoding the subchannel address as a URI
+  // string. Caller is responsible for freeing the string.
+  static grpc_arg CreateSubchannelAddressArg(const grpc_resolved_address* addr);
+
+  // Returns the URI string from the subchannel address arg in \a args.
+  static const char* GetUriFromSubchannelAddressArg(
+      const grpc_channel_args* args);
+
+  // Sets \a addr from the subchannel address arg in \a args.
+  static void GetAddressFromSubchannelAddressArg(const grpc_channel_args* args,
+                                                 grpc_resolved_address* addr);
+
+ private:
+  struct ExternalStateWatcher;
+  class ConnectedSubchannelStateWatcher;
+
+  // Sets the subchannel's connectivity state to \a state.
+  void SetConnectivityStateLocked(grpc_connectivity_state state,
+                                  grpc_error* error, const char* reason);
+
+  // Methods for connection.
+  void MaybeStartConnectingLocked();
+  static void OnRetryAlarm(void* arg, grpc_error* error);
+  void ContinueConnectingLocked();
+  static void OnConnectingFinished(void* arg, grpc_error* error);
+  bool PublishTransportLocked();
+  void Disconnect();
+
+  gpr_atm RefMutate(gpr_atm delta,
+                    int barrier GRPC_SUBCHANNEL_REF_MUTATE_EXTRA_ARGS);
+
+  // The subchannel pool this subchannel is in.
+  RefCountedPtr<SubchannelPoolInterface> subchannel_pool_;
+  // TODO(juanlishen): Consider using args_ as key_ directly.
+  // Subchannel key that identifies this subchannel in the subchannel pool.
+  SubchannelKey* key_;
+  // Channel args.
+  grpc_channel_args* args_;
+  // pollset_set tracking who's interested in a connection being set up.
+  grpc_pollset_set* pollset_set_;
+  // Protects the other members.
+  gpr_mu mu_;
+  // Refcount
+  //    - lower INTERNAL_REF_BITS bits are for internal references:
+  //      these do not keep the subchannel open.
+  //    - upper remaining bits are for public references: these do
+  //      keep the subchannel open.
+  gpr_atm ref_pair_;
+
+  // Connection states.
+  grpc_connector* connector_ = nullptr;
+  // Set during connection.
+  grpc_connect_out_args connecting_result_;
+  grpc_closure on_connecting_finished_;
+  // Active connection, or null.
+  RefCountedPtr<ConnectedSubchannel> connected_subchannel_;
+  OrphanablePtr<ConnectedSubchannelStateWatcher> connected_subchannel_watcher_;
+  bool connecting_ = false;
+  bool disconnected_ = false;
+
+  // Connectivity state tracking.
+  grpc_connectivity_state_tracker state_tracker_;
+  grpc_connectivity_state_tracker state_and_health_tracker_;
+  UniquePtr<char> health_check_service_name_;
+  ExternalStateWatcher* external_state_watcher_list_ = nullptr;
+
+  // Backoff state.
+  BackOff backoff_;
+  grpc_millis next_attempt_deadline_;
+  grpc_millis min_connect_timeout_ms_;
+  bool backoff_begun_ = false;
+
+  // Retry alarm.
+  grpc_timer retry_alarm_;
+  grpc_closure on_retry_alarm_;
+  bool have_retry_alarm_ = false;
+  // reset_backoff() was called while alarm was pending.
+  bool retry_immediately_ = false;
+
+  // Channelz tracking.
+  RefCountedPtr<channelz::SubchannelNode> channelz_node_;
+};
+
+}  // namespace grpc_core
 
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_SUBCHANNEL_H */
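
For reference, a minimal sketch of how the new static address-arg helpers
compose (the setup around them is hypothetical; the real callers are the LB
policies and the connectors updated elsewhere in this change):

    grpc_resolved_address addr;
    // ... assume addr was filled in, e.g. from a resolver result ...
    grpc_arg arg = grpc_core::Subchannel::CreateSubchannelAddressArg(&addr);
    grpc_channel_args* args = grpc_channel_args_copy_and_add(nullptr, &arg, 1);
    gpr_free(arg.value.string);  // CreateSubchannelAddressArg() allocates the URI string.
    // Later (e.g. inside a connector), recover the address from the args:
    grpc_resolved_address addr_out;
    grpc_core::Subchannel::GetAddressFromSubchannelAddressArg(args, &addr_out);
    grpc_channel_args_destroy(args);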

+ 5 - 5
src/core/ext/filters/client_channel/subchannel_pool_interface.h

@@ -26,10 +26,10 @@
 #include "src/core/lib/gprpp/abstract.h"
 #include "src/core/lib/gprpp/ref_counted.h"
 
-struct grpc_subchannel;
-
 namespace grpc_core {
 
+class Subchannel;
+
 extern TraceFlag grpc_subchannel_pool_trace;
 
 // A key that can uniquely identify a subchannel.
@@ -69,15 +69,15 @@ class SubchannelPoolInterface : public RefCounted<SubchannelPoolInterface> {
   // Registers a subchannel against a key. Returns the subchannel registered
   // with \a key, which may be different from \a constructed because we reuse
   // (instead of update) any existing subchannel already registered with \a key.
-  virtual grpc_subchannel* RegisterSubchannel(
-      SubchannelKey* key, grpc_subchannel* constructed) GRPC_ABSTRACT;
+  virtual Subchannel* RegisterSubchannel(SubchannelKey* key,
+                                         Subchannel* constructed) GRPC_ABSTRACT;
 
   // Removes the registered subchannel found by \a key.
   virtual void UnregisterSubchannel(SubchannelKey* key) GRPC_ABSTRACT;
 
   // Finds the subchannel registered for the given subchannel key. Returns NULL
   // if no such channel exists. Thread-safe.
-  virtual grpc_subchannel* FindSubchannel(SubchannelKey* key) GRPC_ABSTRACT;
+  virtual Subchannel* FindSubchannel(SubchannelKey* key) GRPC_ABSTRACT;
 
   // Creates a channel arg from \a subchannel pool.
   static grpc_arg CreateChannelArg(SubchannelPoolInterface* subchannel_pool);
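
To illustrate how these methods compose, a simplified lookup-or-register
sketch (the helper name is hypothetical, and the refcounting and locking
details of the real pool implementations are omitted):

    grpc_core::Subchannel* GetOrCreate(grpc_core::SubchannelPoolInterface* pool,
                                       grpc_core::SubchannelKey* key,
                                       grpc_connector* connector,
                                       const grpc_channel_args* args) {
      grpc_core::Subchannel* existing = pool->FindSubchannel(key);
      if (existing != nullptr) return existing;  // Reuse the registered subchannel.
      grpc_core::Subchannel* created =
          grpc_core::Subchannel::Create(connector, args);
      // RegisterSubchannel() may return a different subchannel if another
      // thread registered one under the same key first.
      return pool->RegisterSubchannel(key, created);
    }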

+ 2 - 1
src/core/ext/transport/chttp2/client/chttp2_connector.cc

@@ -202,7 +202,8 @@ static void chttp2_connector_connect(grpc_connector* con,
                                      grpc_closure* notify) {
   chttp2_connector* c = reinterpret_cast<chttp2_connector*>(con);
   grpc_resolved_address addr;
-  grpc_get_subchannel_address_arg(args->channel_args, &addr);
+  grpc_core::Subchannel::GetAddressFromSubchannelAddressArg(args->channel_args,
+                                                            &addr);
   gpr_mu_lock(&c->mu);
   GPR_ASSERT(c->notify == nullptr);
   c->notify = notify;

+ 2 - 2
src/core/ext/transport/chttp2/client/insecure/channel_create.cc

@@ -39,11 +39,11 @@ static void client_channel_factory_ref(
 static void client_channel_factory_unref(
     grpc_client_channel_factory* cc_factory) {}
 
-static grpc_subchannel* client_channel_factory_create_subchannel(
+static grpc_core::Subchannel* client_channel_factory_create_subchannel(
     grpc_client_channel_factory* cc_factory, const grpc_channel_args* args) {
   grpc_channel_args* new_args = grpc_default_authority_add_if_not_present(args);
   grpc_connector* connector = grpc_chttp2_connector_create();
-  grpc_subchannel* s = grpc_subchannel_create(connector, new_args);
+  grpc_core::Subchannel* s = grpc_core::Subchannel::Create(connector, new_args);
   grpc_connector_unref(connector);
   grpc_channel_args_destroy(new_args);
   return s;

+ 4 - 3
src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc

@@ -76,7 +76,8 @@ static grpc_channel_args* get_secure_naming_channel_args(
   grpc_core::UniquePtr<char> authority;
   if (target_authority_table != nullptr) {
     // Find the authority for the target.
-    const char* target_uri_str = grpc_get_subchannel_address_uri_arg(args);
+    const char* target_uri_str =
+        grpc_core::Subchannel::GetUriFromSubchannelAddressArg(args);
     grpc_uri* target_uri =
         grpc_uri_parse(target_uri_str, false /* suppress errors */);
     GPR_ASSERT(target_uri != nullptr);
@@ -138,7 +139,7 @@ static grpc_channel_args* get_secure_naming_channel_args(
   return new_args;
 }
 
-static grpc_subchannel* client_channel_factory_create_subchannel(
+static grpc_core::Subchannel* client_channel_factory_create_subchannel(
     grpc_client_channel_factory* cc_factory, const grpc_channel_args* args) {
   grpc_channel_args* new_args = get_secure_naming_channel_args(args);
   if (new_args == nullptr) {
@@ -147,7 +148,7 @@ static grpc_subchannel* client_channel_factory_create_subchannel(
     return nullptr;
   }
   grpc_connector* connector = grpc_chttp2_connector_create();
-  grpc_subchannel* s = grpc_subchannel_create(connector, new_args);
+  grpc_core::Subchannel* s = grpc_core::Subchannel::Create(connector, new_args);
   grpc_connector_unref(connector);
   grpc_channel_args_destroy(new_args);
   return s;

+ 5 - 0
src/core/ext/transport/chttp2/transport/chttp2_transport.cc

@@ -43,6 +43,7 @@
 #include "src/core/lib/gprpp/memory.h"
 #include "src/core/lib/http/parser.h"
 #include "src/core/lib/iomgr/executor.h"
+#include "src/core/lib/iomgr/iomgr.h"
 #include "src/core/lib/iomgr/timer.h"
 #include "src/core/lib/profiling/timers.h"
 #include "src/core/lib/slice/slice_internal.h"
@@ -963,6 +964,10 @@ void grpc_chttp2_mark_stream_writable(grpc_chttp2_transport* t,
 static grpc_closure_scheduler* write_scheduler(grpc_chttp2_transport* t,
                                                bool early_results_scheduled,
                                                bool partial_write) {
+  // If we're already in a background poller, don't offload this to an executor
+  if (grpc_iomgr_is_any_background_poller_thread()) {
+    return grpc_schedule_on_exec_ctx;
+  }
   /* if it's not the first write in a batch, always offload to the executor:
      we'll probably end up queuing against the kernel anyway, so we'll likely
      get better latency overall if we switch writing work elsewhere and continue

+ 1 - 1
src/core/ext/transport/chttp2/transport/hpack_encoder.cc

@@ -59,7 +59,7 @@
 static grpc_slice_refcount terminal_slice_refcount = {nullptr, nullptr};
 static const grpc_slice terminal_slice = {
     &terminal_slice_refcount, /* refcount */
-    {{nullptr, 0}}            /* data.refcounted */
+    {{0, nullptr}}            /* data.refcounted */
 };
 
 typedef struct {

+ 8 - 5
src/core/ext/transport/chttp2/transport/writing.cc

@@ -363,7 +363,6 @@ class DataSendContext {
     grpc_chttp2_encode_data(s_->id, &s_->compressed_data_buffer, send_bytes,
                             is_last_frame_, &s_->stats.outgoing, &t_->outbuf);
     s_->flow_control->SentData(send_bytes);
-    s_->byte_counter += send_bytes;
     if (s_->compressed_data_buffer.length == 0) {
       s_->sending_bytes += s_->uncompressed_data_size;
     }
@@ -498,9 +497,6 @@ class StreamWriteContext {
         data_send_context.CompressMoreBytes();
       }
     }
-    if (s_->traced && grpc_endpoint_can_track_err(t_->ep)) {
-      grpc_core::ContextList::Append(&t_->cl, s_);
-    }
     write_context_->ResetPingClock();
     if (data_send_context.is_last_frame()) {
       SentLastFrame();
@@ -610,11 +606,18 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
      (according to available window sizes) and add to the output buffer */
   while (grpc_chttp2_stream* s = ctx.NextStream()) {
     StreamWriteContext stream_ctx(&ctx, s);
+    size_t orig_len = t->outbuf.length;
     stream_ctx.FlushInitialMetadata();
     stream_ctx.FlushWindowUpdates();
     stream_ctx.FlushData();
     stream_ctx.FlushTrailingMetadata();
-
+    if (t->outbuf.length > orig_len) {
+      /* Add this stream to the list of contexts traced at the TCP layer */
+      s->byte_counter += t->outbuf.length - orig_len;
+      if (s->traced && grpc_endpoint_can_track_err(t->ep)) {
+        grpc_core::ContextList::Append(&t->cl, s);
+      }
+    }
     if (stream_ctx.stream_became_writable()) {
       if (!grpc_chttp2_list_add_writing_stream(t, s)) {
         /* already in writing list: drop ref */

+ 2 - 2
src/core/lib/gprpp/optional.h

@@ -31,11 +31,11 @@ class Optional {
     set_ = true;
   }
 
-  bool has_value() { return set_; }
+  bool has_value() const { return set_; }
 
   void reset() { set_ = false; }
 
-  T value() { return value_; }
+  T value() const { return value_; }
 
  private:
   T value_;
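
A small usage sketch showing what the added const qualifiers enable (the
function and field names are illustrative only):

    void LogRetries(const grpc_core::Optional<int>& retries) {
      // With the const overloads, has_value() and value() can be called
      // through a const reference.
      if (retries.has_value()) {
        gpr_log(GPR_INFO, "configured retries: %d", retries.value());
      }
    }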

+ 3 - 1
src/core/lib/iomgr/buffer_list.h

@@ -148,7 +148,9 @@ class TracedBuffer {
  public:
   /* Dummy shutdown function */
   static void Shutdown(grpc_core::TracedBuffer** head, void* remaining,
-                       grpc_error* shutdown_err) {}
+                       grpc_error* shutdown_err) {
+    GRPC_ERROR_UNREF(shutdown_err);
+  }
 };
 #endif /* GRPC_LINUX_ERRQUEUE */
 

+ 138 - 162
src/core/lib/iomgr/ev_epollex_linux.cc

@@ -45,6 +45,7 @@
 #include "src/core/lib/gpr/spinlock.h"
 #include "src/core/lib/gpr/tls.h"
 #include "src/core/lib/gpr/useful.h"
+#include "src/core/lib/gprpp/inlined_vector.h"
 #include "src/core/lib/gprpp/manual_constructor.h"
 #include "src/core/lib/gprpp/mutex_lock.h"
 #include "src/core/lib/iomgr/block_annotate.h"
@@ -78,18 +79,6 @@ typedef enum { PO_MULTI, PO_FD, PO_EMPTY } pollable_type;
 
 typedef struct pollable pollable;
 
-typedef struct cached_fd {
-  // Set to the grpc_fd's salt value. See 'salt' variable' in grpc_fd for more
-  // details
-  intptr_t salt;
-
-  // The underlying fd
-  int fd;
-
-  // A recency time counter that helps to determine the LRU fd in the cache
-  uint64_t last_used;
-} cached_fd;
-
 /// A pollable is something that can be polled: it has an epoll set to poll on,
 /// and a wakeup fd for kicks
 /// There are three broad types:
@@ -120,33 +109,6 @@ struct pollable {
   int event_cursor;
   int event_count;
   struct epoll_event events[MAX_EPOLL_EVENTS];
-
-  // We may be calling pollable_add_fd() on the same (pollable, fd) multiple
-  // times. To prevent pollable_add_fd() from making multiple sys calls to
-  // epoll_ctl() to add the fd, we maintain a cache of what fds are already
-  // present in the underlying epoll-set.
-  //
-  // Since this is not a correctness issue, we do not need to maintain all the
-  // fds in the cache. Hence we just use an LRU cache of size 'MAX_FDS_IN_CACHE'
-  //
-  // NOTE: An ideal implementation of this should do the following:
-  //  1) Add fds to the cache in pollable_add_fd() function (i.e whenever the fd
-  //     is added to the pollable's epoll set)
-  //  2) Remove the fd from the cache whenever the fd is removed from the
-  //     underlying epoll set (i.e whenever fd_orphan() is called).
-  //
-  // Implementing (2) above (i.e removing fds from cache on fd_orphan) adds a
-  // lot of complexity since an fd can be present in multiple pollables. So our
-  // implementation ONLY DOES (1) and NOT (2).
-  //
-  // The cache_fd.salt variable helps here to maintain correctness (it serves as
-  // an epoch that differentiates one grpc_fd from the other even though both of
-  // them may have the same fd number)
-  //
-  // The following implements LRU-eviction cache of fds in this pollable
-  cached_fd fd_cache[MAX_FDS_IN_CACHE];
-  int fd_cache_size;
-  uint64_t fd_cache_counter;  // Recency timer tick counter
 };
 
 static const char* pollable_type_string(pollable_type t) {
@@ -189,37 +151,86 @@ static void pollable_unref(pollable* p, int line, const char* reason);
  * Fd Declarations
  */
 
-// Monotonically increasing Epoch counter that is assinged to each grpc_fd. See
-// the description of 'salt' variable in 'grpc_fd' for more details
-// TODO: (sreek/kpayson) gpr_atm is intptr_t which may not be wide-enough on
-// 32-bit systems. Change this to int_64 - atleast on 32-bit systems
-static gpr_atm g_fd_salt;
-
 struct grpc_fd {
-  int fd;
+  grpc_fd(int fd, const char* name, bool track_err)
+      : fd(fd), track_err(track_err) {
+    gpr_mu_init(&orphan_mu);
+    gpr_mu_init(&pollable_mu);
+    read_closure.InitEvent();
+    write_closure.InitEvent();
+    error_closure.InitEvent();
+
+    char* fd_name;
+    gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
+    grpc_iomgr_register_object(&iomgr_object, fd_name);
+#ifndef NDEBUG
+    if (grpc_trace_fd_refcount.enabled()) {
+      gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, this, fd_name);
+    }
+#endif
+    gpr_free(fd_name);
+  }
+
+  // This is really the dtor, but the poller threads waking up from
+  // epoll_wait() may access the (read|write|error)_closure after destruction.
+  // Since the object will be added to the free pool, this behavior is
+  // not going to cause issues, except spurious events if the FD is reused
+  // while the race happens.
+  void destroy() {
+    grpc_iomgr_unregister_object(&iomgr_object);
+
+    POLLABLE_UNREF(pollable_obj, "fd_pollable");
+    pollsets.clear();
+    gpr_mu_destroy(&pollable_mu);
+    gpr_mu_destroy(&orphan_mu);
+
+    read_closure.DestroyEvent();
+    write_closure.DestroyEvent();
+    error_closure.DestroyEvent();
+
+    invalidate();
+  }
 
-  // Since fd numbers can be reused (after old fds are closed), this serves as
-  // an epoch that uniquely identifies this fd (i.e the pair (salt, fd) is
-  // unique (until the salt counter (i.e g_fd_salt) overflows)
-  intptr_t salt;
+#ifndef NDEBUG
+  /* Since an fd is never really destroyed (i.e. gpr_free() is not called), it
+   * is easy to hit hard-to-debug cases where fd fields are accessed even after
+   * calling fd_destroy(). The following invalidates fd fields to make catching
+   * such errors easier. */
+  void invalidate() {
+    fd = -1;
+    gpr_atm_no_barrier_store(&refst, -1);
+    memset(&orphan_mu, -1, sizeof(orphan_mu));
+    memset(&pollable_mu, -1, sizeof(pollable_mu));
+    pollable_obj = nullptr;
+    on_done_closure = nullptr;
+    memset(&iomgr_object, -1, sizeof(iomgr_object));
+    track_err = false;
+  }
+#else
+  void invalidate() {}
+#endif
+
+  int fd;
 
   // refst format:
   //     bit 0    : 1=Active / 0=Orphaned
   //     bits 1-n : refcount
   //  Ref/Unref by two to avoid altering the orphaned bit
-  gpr_atm refst;
+  gpr_atm refst = 1;
 
   gpr_mu orphan_mu;
 
+  // Protects pollable_obj and pollsets.
   gpr_mu pollable_mu;
-  pollable* pollable_obj;
+  grpc_core::InlinedVector<grpc_pollset*, 1> pollsets;  // Used in PO_MULTI.
+  pollable* pollable_obj = nullptr;                     // Used in PO_FD.
 
-  grpc_core::ManualConstructor<grpc_core::LockfreeEvent> read_closure;
-  grpc_core::ManualConstructor<grpc_core::LockfreeEvent> write_closure;
-  grpc_core::ManualConstructor<grpc_core::LockfreeEvent> error_closure;
+  grpc_core::LockfreeEvent read_closure;
+  grpc_core::LockfreeEvent write_closure;
+  grpc_core::LockfreeEvent error_closure;
 
-  struct grpc_fd* freelist_next;
-  grpc_closure* on_done_closure;
+  struct grpc_fd* freelist_next = nullptr;
+  grpc_closure* on_done_closure = nullptr;
 
   grpc_iomgr_object iomgr_object;
 
@@ -258,6 +269,7 @@ struct grpc_pollset_worker {
 struct grpc_pollset {
   gpr_mu mu;
   gpr_atm worker_count;
+  gpr_atm active_pollable_type;
   pollable* active_pollable;
   bool kicked_without_poller;
   grpc_closure* shutdown_closure;
@@ -337,39 +349,10 @@ static void ref_by(grpc_fd* fd, int n) {
   GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
 }
 
-#ifndef NDEBUG
-#define INVALIDATE_FD(fd) invalidate_fd(fd)
-/* Since an fd is never really destroyed (i.e gpr_free() is not called), it is
- * hard to cases where fd fields are accessed even after calling fd_destroy().
- * The following invalidates fd fields to make catching such errors easier */
-static void invalidate_fd(grpc_fd* fd) {
-  fd->fd = -1;
-  fd->salt = -1;
-  gpr_atm_no_barrier_store(&fd->refst, -1);
-  memset(&fd->orphan_mu, -1, sizeof(fd->orphan_mu));
-  memset(&fd->pollable_mu, -1, sizeof(fd->pollable_mu));
-  fd->pollable_obj = nullptr;
-  fd->on_done_closure = nullptr;
-  memset(&fd->iomgr_object, -1, sizeof(fd->iomgr_object));
-  fd->track_err = false;
-}
-#else
-#define INVALIDATE_FD(fd)
-#endif
-
 /* Uninitialize and add to the freelist */
 static void fd_destroy(void* arg, grpc_error* error) {
   grpc_fd* fd = static_cast<grpc_fd*>(arg);
-  grpc_iomgr_unregister_object(&fd->iomgr_object);
-  POLLABLE_UNREF(fd->pollable_obj, "fd_pollable");
-  gpr_mu_destroy(&fd->pollable_mu);
-  gpr_mu_destroy(&fd->orphan_mu);
-
-  fd->read_closure->DestroyEvent();
-  fd->write_closure->DestroyEvent();
-  fd->error_closure->DestroyEvent();
-
-  INVALIDATE_FD(fd);
+  fd->destroy();
 
   /* Add the fd to the freelist */
   gpr_mu_lock(&fd_freelist_mu);
@@ -429,35 +412,9 @@ static grpc_fd* fd_create(int fd, const char* name, bool track_err) {
 
   if (new_fd == nullptr) {
     new_fd = static_cast<grpc_fd*>(gpr_malloc(sizeof(grpc_fd)));
-    new_fd->read_closure.Init();
-    new_fd->write_closure.Init();
-    new_fd->error_closure.Init();
-  }
-
-  new_fd->fd = fd;
-  new_fd->salt = gpr_atm_no_barrier_fetch_add(&g_fd_salt, 1);
-  gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1);
-  gpr_mu_init(&new_fd->orphan_mu);
-  gpr_mu_init(&new_fd->pollable_mu);
-  new_fd->pollable_obj = nullptr;
-  new_fd->read_closure->InitEvent();
-  new_fd->write_closure->InitEvent();
-  new_fd->error_closure->InitEvent();
-  new_fd->freelist_next = nullptr;
-  new_fd->on_done_closure = nullptr;
-
-  char* fd_name;
-  gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
-  grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
-#ifndef NDEBUG
-  if (grpc_trace_fd_refcount.enabled()) {
-    gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name);
   }
-#endif
-  gpr_free(fd_name);
 
-  new_fd->track_err = track_err;
-  return new_fd;
+  return new (new_fd) grpc_fd(fd, name, track_err);
 }
 
 static int fd_wrapped_fd(grpc_fd* fd) {
@@ -465,6 +422,7 @@ static int fd_wrapped_fd(grpc_fd* fd) {
   return (gpr_atm_acq_load(&fd->refst) & 1) ? ret_fd : -1;
 }
 
+static int pollset_epoll_fd_locked(grpc_pollset* pollset);
 static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
                       const char* reason) {
   bool is_fd_closed = false;
@@ -475,7 +433,6 @@ static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
   // true so that the pollable will no longer access its owner_fd field.
   gpr_mu_lock(&fd->pollable_mu);
   pollable* pollable_obj = fd->pollable_obj;
-  gpr_mu_unlock(&fd->pollable_mu);
 
   if (pollable_obj) {
     gpr_mu_lock(&pollable_obj->owner_orphan_mu);
@@ -487,6 +444,20 @@ static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
   /* If release_fd is not NULL, we should be relinquishing control of the file
      descriptor fd->fd (but we still own the grpc_fd structure). */
   if (release_fd != nullptr) {
+    // Remove the FD from all epoll sets before releasing it.
+    // Otherwise, we will receive epoll events after we release the FD.
+    epoll_event ev_fd;
+    memset(&ev_fd, 0, sizeof(ev_fd));
+    if (release_fd != nullptr) {
+      if (pollable_obj != nullptr) {  // For PO_FD.
+        epoll_ctl(pollable_obj->epfd, EPOLL_CTL_DEL, fd->fd, &ev_fd);
+      }
+      for (size_t i = 0; i < fd->pollsets.size(); ++i) {  // For PO_MULTI.
+        grpc_pollset* pollset = fd->pollsets[i];
+        const int epfd = pollset_epoll_fd_locked(pollset);
+        epoll_ctl(epfd, EPOLL_CTL_DEL, fd->fd, &ev_fd);
+      }
+    }
     *release_fd = fd->fd;
   } else {
     close(fd->fd);
@@ -508,40 +479,56 @@ static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
     gpr_mu_unlock(&pollable_obj->owner_orphan_mu);
   }
 
+  gpr_mu_unlock(&fd->pollable_mu);
   gpr_mu_unlock(&fd->orphan_mu);
 
   UNREF_BY(fd, 2, reason); /* Drop the reference */
 }
 
 static bool fd_is_shutdown(grpc_fd* fd) {
-  return fd->read_closure->IsShutdown();
+  return fd->read_closure.IsShutdown();
 }
 
 /* Might be called multiple times */
 static void fd_shutdown(grpc_fd* fd, grpc_error* why) {
-  if (fd->read_closure->SetShutdown(GRPC_ERROR_REF(why))) {
+  if (fd->read_closure.SetShutdown(GRPC_ERROR_REF(why))) {
     if (shutdown(fd->fd, SHUT_RDWR)) {
       if (errno != ENOTCONN) {
         gpr_log(GPR_ERROR, "Error shutting down fd %d. errno: %d",
                 grpc_fd_wrapped_fd(fd), errno);
       }
     }
-    fd->write_closure->SetShutdown(GRPC_ERROR_REF(why));
-    fd->error_closure->SetShutdown(GRPC_ERROR_REF(why));
+    fd->write_closure.SetShutdown(GRPC_ERROR_REF(why));
+    fd->error_closure.SetShutdown(GRPC_ERROR_REF(why));
   }
   GRPC_ERROR_UNREF(why);
 }
 
 static void fd_notify_on_read(grpc_fd* fd, grpc_closure* closure) {
-  fd->read_closure->NotifyOn(closure);
+  fd->read_closure.NotifyOn(closure);
 }
 
 static void fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) {
-  fd->write_closure->NotifyOn(closure);
+  fd->write_closure.NotifyOn(closure);
 }
 
 static void fd_notify_on_error(grpc_fd* fd, grpc_closure* closure) {
-  fd->error_closure->NotifyOn(closure);
+  fd->error_closure.NotifyOn(closure);
+}
+
+static bool fd_has_pollset(grpc_fd* fd, grpc_pollset* pollset) {
+  grpc_core::MutexLock lock(&fd->pollable_mu);
+  for (size_t i = 0; i < fd->pollsets.size(); ++i) {
+    if (fd->pollsets[i] == pollset) {
+      return true;
+    }
+  }
+  return false;
+}
+
+static void fd_add_pollset(grpc_fd* fd, grpc_pollset* pollset) {
+  grpc_core::MutexLock lock(&fd->pollable_mu);
+  fd->pollsets.push_back(pollset);
 }
 
 /*******************************************************************************
@@ -594,8 +581,6 @@ static grpc_error* pollable_create(pollable_type type, pollable** p) {
   (*p)->root_worker = nullptr;
   (*p)->event_cursor = 0;
   (*p)->event_count = 0;
-  (*p)->fd_cache_size = 0;
-  (*p)->fd_cache_counter = 0;
   return GRPC_ERROR_NONE;
 }
 
@@ -637,39 +622,6 @@ static grpc_error* pollable_add_fd(pollable* p, grpc_fd* fd) {
   grpc_error* error = GRPC_ERROR_NONE;
   static const char* err_desc = "pollable_add_fd";
   const int epfd = p->epfd;
-  gpr_mu_lock(&p->mu);
-  p->fd_cache_counter++;
-
-  // Handle the case of overflow for our cache counter by
-  // reseting the recency-counter on all cache objects
-  if (p->fd_cache_counter == 0) {
-    for (int i = 0; i < p->fd_cache_size; i++) {
-      p->fd_cache[i].last_used = 0;
-    }
-  }
-
-  int lru_idx = 0;
-  for (int i = 0; i < p->fd_cache_size; i++) {
-    if (p->fd_cache[i].fd == fd->fd && p->fd_cache[i].salt == fd->salt) {
-      GRPC_STATS_INC_POLLSET_FD_CACHE_HITS();
-      p->fd_cache[i].last_used = p->fd_cache_counter;
-      gpr_mu_unlock(&p->mu);
-      return GRPC_ERROR_NONE;
-    } else if (p->fd_cache[i].last_used < p->fd_cache[lru_idx].last_used) {
-      lru_idx = i;
-    }
-  }
-
-  // Add to cache
-  if (p->fd_cache_size < MAX_FDS_IN_CACHE) {
-    lru_idx = p->fd_cache_size;
-    p->fd_cache_size++;
-  }
-  p->fd_cache[lru_idx].fd = fd->fd;
-  p->fd_cache[lru_idx].salt = fd->salt;
-  p->fd_cache[lru_idx].last_used = p->fd_cache_counter;
-  gpr_mu_unlock(&p->mu);
-
   if (grpc_polling_trace.enabled()) {
     gpr_log(GPR_INFO, "add fd %p (%d) to pollable %p", fd, fd->fd, p);
   }
@@ -849,6 +801,7 @@ static grpc_error* pollset_kick_all(grpc_pollset* pollset) {
 static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
   gpr_mu_init(&pollset->mu);
   gpr_atm_no_barrier_store(&pollset->worker_count, 0);
+  gpr_atm_no_barrier_store(&pollset->active_pollable_type, PO_EMPTY);
   pollset->active_pollable = POLLABLE_REF(g_empty_pollable, "pollset");
   pollset->kicked_without_poller = false;
   pollset->shutdown_closure = nullptr;
@@ -869,11 +822,11 @@ static int poll_deadline_to_millis_timeout(grpc_millis millis) {
     return static_cast<int>(delta);
 }
 
-static void fd_become_readable(grpc_fd* fd) { fd->read_closure->SetReady(); }
+static void fd_become_readable(grpc_fd* fd) { fd->read_closure.SetReady(); }
 
-static void fd_become_writable(grpc_fd* fd) { fd->write_closure->SetReady(); }
+static void fd_become_writable(grpc_fd* fd) { fd->write_closure.SetReady(); }
 
-static void fd_has_errors(grpc_fd* fd) { fd->error_closure->SetReady(); }
+static void fd_has_errors(grpc_fd* fd) { fd->error_closure.SetReady(); }
 
 /* Get the pollable_obj attached to this fd. If none is attached, create a new
  * pollable object (of type PO_FD), attach it to the fd and return it
@@ -1283,6 +1236,8 @@ static grpc_error* pollset_add_fd_locked(grpc_pollset* pollset, grpc_fd* fd) {
     POLLABLE_UNREF(pollset->active_pollable, "pollset");
     pollset->active_pollable = po_at_start;
   } else {
+    gpr_atm_rel_store(&pollset->active_pollable_type,
+                      pollset->active_pollable->type);
     POLLABLE_UNREF(po_at_start, "pollset_add_fd");
   }
   return error;
@@ -1329,17 +1284,38 @@ static grpc_error* pollset_as_multipollable_locked(grpc_pollset* pollset,
     pollset->active_pollable = po_at_start;
     *pollable_obj = nullptr;
   } else {
+    gpr_atm_rel_store(&pollset->active_pollable_type,
+                      pollset->active_pollable->type);
     *pollable_obj = POLLABLE_REF(pollset->active_pollable, "pollset_set");
     POLLABLE_UNREF(po_at_start, "pollset_as_multipollable");
   }
   return error;
 }
 
+// Caller must hold the lock for `pollset->mu`.
+static int pollset_epoll_fd_locked(grpc_pollset* pollset) {
+  return pollset->active_pollable->epfd;
+}
+
 static void pollset_add_fd(grpc_pollset* pollset, grpc_fd* fd) {
   GPR_TIMER_SCOPE("pollset_add_fd", 0);
-  gpr_mu_lock(&pollset->mu);
+
+  // We never transition from PO_MULTI to other modes (i.e., PO_FD or PO_EMPTY)
+  // and, thus, it is safe to simply store and check whether the FD has already
+  // been added to the active pollable previously.
+  if (gpr_atm_acq_load(&pollset->active_pollable_type) == PO_MULTI &&
+      fd_has_pollset(fd, pollset)) {
+    return;
+  }
+
+  grpc_core::MutexLock lock(&pollset->mu);
   grpc_error* error = pollset_add_fd_locked(pollset, fd);
-  gpr_mu_unlock(&pollset->mu);
+
+  // If we are in PO_MULTI mode, we should update the pollsets of the FD.
+  if (gpr_atm_no_barrier_load(&pollset->active_pollable_type) == PO_MULTI) {
+    fd_add_pollset(fd, pollset);
+  }
+
   GRPC_LOG_IF_ERROR("pollset_add_fd", error);
 }
 

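The fast path added to pollset_add_fd above rests on one invariant: a pollset that has become PO_MULTI never transitions back to PO_FD or PO_EMPTY, so an acquire-load of active_pollable_type plus the per-fd pollset list is enough to detect a duplicate add without taking pollset->mu. A minimal standalone sketch of that guard, using illustrative stand-in types rather than the real grpc_fd/grpc_pollset:

    #include <algorithm>
    #include <atomic>
    #include <mutex>
    #include <vector>

    enum PollableType { PO_EMPTY, PO_FD, PO_MULTI };
    struct Pollset;

    struct Fd {
      std::mutex mu;
      std::vector<Pollset*> pollsets;  // pollsets this fd has been added to
    };

    struct Pollset {
      std::atomic<PollableType> active_type{PO_EMPTY};
      std::mutex mu;
    };

    static bool FdHasPollset(Fd* fd, Pollset* ps) {
      std::lock_guard<std::mutex> lock(fd->mu);
      return std::find(fd->pollsets.begin(), fd->pollsets.end(), ps) !=
             fd->pollsets.end();
    }

    static void PollsetAddFd(Pollset* ps, Fd* fd) {
      // Fast path: PO_MULTI is a terminal mode, so if the fd already recorded
      // this pollset we can return without touching ps->mu at all.
      if (ps->active_type.load(std::memory_order_acquire) == PO_MULTI &&
          FdHasPollset(fd, ps)) {
        return;
      }
      std::lock_guard<std::mutex> lock(ps->mu);
      // ... the real code calls pollset_add_fd_locked() / epoll_ctl() here ...
      if (ps->active_type.load(std::memory_order_relaxed) == PO_MULTI) {
        std::lock_guard<std::mutex> fd_lock(fd->mu);
        fd->pollsets.push_back(ps);  // remember the add for future fast-path hits
      }
    }

The same per-fd bookkeeping is what lets fd_orphan walk fd->pollsets and EPOLL_CTL_DEL the descriptor from every multi-pollset epoll set before handing the raw fd back to the caller.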
+ 1 - 0
src/core/lib/iomgr/exec_ctx.cc

@@ -115,6 +115,7 @@ grpc_closure_scheduler* grpc_schedule_on_exec_ctx = &exec_ctx_scheduler;
 
 namespace grpc_core {
 GPR_TLS_CLASS_DEF(ExecCtx::exec_ctx_);
+GPR_TLS_CLASS_DEF(ApplicationCallbackExecCtx::callback_exec_ctx_);
 
 // WARNING: for testing purposes only!
 void ExecCtx::TestOnlyGlobalInit(gpr_timespec new_val) {

+ 59 - 3
src/core/lib/iomgr/exec_ctx.h

@@ -21,6 +21,7 @@
 
 #include <grpc/support/port_platform.h>
 
+#include <grpc/impl/codegen/grpc_types.h>
 #include <grpc/support/atm.h>
 #include <grpc/support/cpu.h>
 #include <grpc/support/log.h>
@@ -34,9 +35,8 @@ typedef int64_t grpc_millis;
 #define GRPC_MILLIS_INF_FUTURE INT64_MAX
 #define GRPC_MILLIS_INF_PAST INT64_MIN
 
-/** A workqueue represents a list of work to be executed asynchronously.
-    Forward declared here to avoid a circular dependency with workqueue.h. */
-typedef struct grpc_workqueue grpc_workqueue;
+/** A combiner represents a list of work to be executed later.
+    Forward declared here to avoid a circular dependency with combiner.h. */
 typedef struct grpc_combiner grpc_combiner;
 
 /* This exec_ctx is ready to return: either pre-populated, or cached as soon as
@@ -226,6 +226,62 @@ class ExecCtx {
   GPR_TLS_CLASS_DECL(exec_ctx_);
   ExecCtx* last_exec_ctx_ = Get();
 };
+
+class ApplicationCallbackExecCtx {
+ public:
+  ApplicationCallbackExecCtx() {
+    if (reinterpret_cast<ApplicationCallbackExecCtx*>(
+            gpr_tls_get(&callback_exec_ctx_)) == nullptr) {
+      grpc_core::Fork::IncExecCtxCount();
+      gpr_tls_set(&callback_exec_ctx_, reinterpret_cast<intptr_t>(this));
+    }
+  }
+  ~ApplicationCallbackExecCtx() {
+    if (reinterpret_cast<ApplicationCallbackExecCtx*>(
+            gpr_tls_get(&callback_exec_ctx_)) == this) {
+      while (head_ != nullptr) {
+        auto* f = head_;
+        head_ = f->internal_next;
+        if (f->internal_next == nullptr) {
+          tail_ = nullptr;
+        }
+        (*f->functor_run)(f, f->internal_success);
+      }
+      gpr_tls_set(&callback_exec_ctx_, reinterpret_cast<intptr_t>(nullptr));
+      grpc_core::Fork::DecExecCtxCount();
+    } else {
+      GPR_DEBUG_ASSERT(head_ == nullptr);
+      GPR_DEBUG_ASSERT(tail_ == nullptr);
+    }
+  }
+  static void Enqueue(grpc_experimental_completion_queue_functor* functor,
+                      int is_success) {
+    functor->internal_success = is_success;
+    functor->internal_next = nullptr;
+
+    auto* ctx = reinterpret_cast<ApplicationCallbackExecCtx*>(
+        gpr_tls_get(&callback_exec_ctx_));
+
+    if (ctx->head_ == nullptr) {
+      ctx->head_ = functor;
+    }
+    if (ctx->tail_ != nullptr) {
+      ctx->tail_->internal_next = functor;
+    }
+    ctx->tail_ = functor;
+  }
+
+  /** Global initialization for ApplicationCallbackExecCtx. Called by init. */
+  static void GlobalInit(void) { gpr_tls_init(&callback_exec_ctx_); }
+
+  /** Global shutdown for ApplicationCallbackExecCtx. Called by init. */
+  static void GlobalShutdown(void) { gpr_tls_destroy(&callback_exec_ctx_); }
+
+ private:
+  grpc_experimental_completion_queue_functor* head_{nullptr};
+  grpc_experimental_completion_queue_functor* tail_{nullptr};
+  GPR_TLS_CLASS_DECL(callback_exec_ctx_);
+};
 }  // namespace grpc_core
 
 #endif /* GRPC_CORE_LIB_IOMGR_EXEC_CTX_H */

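ApplicationCallbackExecCtx is a thread-local, stack-scoped queue: the outermost instance on a thread collects completion-queue functors handed to Enqueue() and drains them in its destructor, so application callbacks fire only after core has finished its work, which is why the surface entry points later in this diff (call.cc, server.cc, completion_queue.cc) now declare one before ExecCtx. A rough sketch of the intended layering; MyApiEntryPoint is an illustrative name, not a real gRPC function:

    void MyApiEntryPoint(grpc_experimental_completion_queue_functor* functor) {
      grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;  // outermost owner
      grpc_core::ExecCtx exec_ctx;
      // ... core work; at some point an operation completes ...
      // Rather than invoking the application's callback inline (possibly under
      // core locks), it is queued on the thread-local callback exec ctx:
      grpc_core::ApplicationCallbackExecCtx::Enqueue(functor, /*is_success=*/1);
      // When callback_exec_ctx goes out of scope, every queued functor is run
      // via (*functor->functor_run)(functor, internal_success).
    }

Nested API calls on the same thread construct further ApplicationCallbackExecCtx objects, but only the outermost one (the one that claimed the TLS slot) drains the queue; the inner ones are no-ops on destruction.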
+ 7 - 0
src/core/lib/iomgr/executor.cc

@@ -111,6 +111,13 @@ size_t Executor::RunClosures(const char* executor_name,
                              grpc_closure_list list) {
   size_t n = 0;
 
+  // In the executor, the ExecCtx for the thread is declared in the executor
+  // thread itself, but this is the point where we could start seeing
+  // application-level callbacks. No need to create a new ExecCtx, though,
+  // since there already is one and it is flushed (but not destructed) in this
+  // function itself.
+  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
+
   grpc_closure* c = list.head;
   while (c != nullptr) {
     grpc_closure* next = c->next_data.next;

+ 25 - 25
src/core/lib/iomgr/tcp_posix.cc

@@ -195,7 +195,7 @@ static void run_poller(void* bp, grpc_error* error_ignored) {
 static void drop_uncovered(grpc_tcp* tcp) {
   backup_poller* p = (backup_poller*)gpr_atm_acq_load(&g_backup_poller);
   gpr_atm old_count =
-      gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, -1);
+      gpr_atm_full_fetch_add(&g_uncovered_notifications_pending, -1);
   if (grpc_tcp_trace.enabled()) {
     gpr_log(GPR_INFO, "BACKUP_POLLER:%p uncover cnt %d->%d", p,
             static_cast<int>(old_count), static_cast<int>(old_count) - 1);
@@ -343,6 +343,13 @@ static void tcp_free(grpc_tcp* tcp) {
   grpc_slice_buffer_destroy_internal(&tcp->last_read_buffer);
   grpc_resource_user_unref(tcp->resource_user);
   gpr_free(tcp->peer_string);
+  /* The lock is not really necessary here, since all refs have been released */
+  gpr_mu_lock(&tcp->tb_mu);
+  grpc_core::TracedBuffer::Shutdown(
+      &tcp->tb_head, tcp->outgoing_buffer_arg,
+      GRPC_ERROR_CREATE_FROM_STATIC_STRING("endpoint destroyed"));
+  gpr_mu_unlock(&tcp->tb_mu);
+  tcp->outgoing_buffer_arg = nullptr;
   gpr_mu_destroy(&tcp->tb_mu);
   gpr_free(tcp);
 }
@@ -389,12 +396,6 @@ static void tcp_destroy(grpc_endpoint* ep) {
   grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
   grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
   if (grpc_event_engine_can_track_errors()) {
-    gpr_mu_lock(&tcp->tb_mu);
-    grpc_core::TracedBuffer::Shutdown(
-        &tcp->tb_head, tcp->outgoing_buffer_arg,
-        GRPC_ERROR_CREATE_FROM_STATIC_STRING("endpoint destroyed"));
-    gpr_mu_unlock(&tcp->tb_mu);
-    tcp->outgoing_buffer_arg = nullptr;
     gpr_atm_no_barrier_store(&tcp->stop_error_notification, true);
     grpc_fd_set_error(tcp->em_fd);
   }
@@ -408,13 +409,15 @@ static void call_read_cb(grpc_tcp* tcp, grpc_error* error) {
     gpr_log(GPR_INFO, "TCP:%p call_cb %p %p:%p", tcp, cb, cb->cb, cb->cb_arg);
     size_t i;
     const char* str = grpc_error_string(error);
-    gpr_log(GPR_INFO, "read: error=%s", str);
-
-    for (i = 0; i < tcp->incoming_buffer->count; i++) {
-      char* dump = grpc_dump_slice(tcp->incoming_buffer->slices[i],
-                                   GPR_DUMP_HEX | GPR_DUMP_ASCII);
-      gpr_log(GPR_INFO, "READ %p (peer=%s): %s", tcp, tcp->peer_string, dump);
-      gpr_free(dump);
+    gpr_log(GPR_INFO, "READ %p (peer=%s) error=%s", tcp, tcp->peer_string, str);
+
+    if (gpr_should_log(GPR_LOG_SEVERITY_DEBUG)) {
+      for (i = 0; i < tcp->incoming_buffer->count; i++) {
+        char* dump = grpc_dump_slice(tcp->incoming_buffer->slices[i],
+                                     GPR_DUMP_HEX | GPR_DUMP_ASCII);
+        gpr_log(GPR_DEBUG, "DATA: %s", dump);
+        gpr_free(dump);
+      }
     }
   }
 
@@ -714,7 +717,7 @@ static void process_errors(grpc_tcp* tcp) {
 
      // Allocate aligned space for cmsgs received along with timestamps
     union {
-      char rbuf[CMSG_SPACE(sizeof(scm_timestamping)) +
+      char rbuf[CMSG_SPACE(sizeof(grpc_core::scm_timestamping)) +
                 CMSG_SPACE(sizeof(sock_extended_err) + sizeof(sockaddr_in)) +
                 CMSG_SPACE(16 * NLA_ALIGN(NLA_HDRLEN + sizeof(uint64_t)))];
       struct cmsghdr align;
@@ -976,10 +979,13 @@ static void tcp_write(grpc_endpoint* ep, grpc_slice_buffer* buf,
     size_t i;
 
     for (i = 0; i < buf->count; i++) {
-      char* data =
-          grpc_dump_slice(buf->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
-      gpr_log(GPR_INFO, "WRITE %p (peer=%s): %s", tcp, tcp->peer_string, data);
-      gpr_free(data);
+      gpr_log(GPR_INFO, "WRITE %p (peer=%s)", tcp, tcp->peer_string);
+      if (gpr_should_log(GPR_LOG_SEVERITY_DEBUG)) {
+        char* data =
+            grpc_dump_slice(buf->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
+        gpr_log(GPR_DEBUG, "DATA: %s", data);
+        gpr_free(data);
+      }
     }
   }
 
@@ -1179,12 +1185,6 @@ void grpc_tcp_destroy_and_release_fd(grpc_endpoint* ep, int* fd,
   grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
   if (grpc_event_engine_can_track_errors()) {
     /* Stop errors notification. */
-    gpr_mu_lock(&tcp->tb_mu);
-    grpc_core::TracedBuffer::Shutdown(
-        &tcp->tb_head, tcp->outgoing_buffer_arg,
-        GRPC_ERROR_CREATE_FROM_STATIC_STRING("endpoint destroyed"));
-    gpr_mu_unlock(&tcp->tb_mu);
-    tcp->outgoing_buffer_arg = nullptr;
     gpr_atm_no_barrier_store(&tcp->stop_error_notification, true);
     grpc_fd_set_error(tcp->em_fd);
   }

+ 7 - 0
src/core/lib/iomgr/timer_manager.cc

@@ -105,6 +105,13 @@ void grpc_timer_manager_tick() {
 }
 
 static void run_some_timers() {
+  // In the case of timers, the ExecCtx for the thread is declared
+  // in the timer thread itself, but this is the point where we
+  // could start seeing application-level callbacks. No need to
+  // create a new ExecCtx, though, since there already is one and it is
+  // flushed (but not destructed) in this function itself.
+  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
+
   // if there's something to execute...
   gpr_mu_lock(&g_mu);
   // remove a waiter from the pool, and start another thread if necessary

+ 1 - 1
src/core/lib/security/credentials/alts/alts_credentials.cc

@@ -31,7 +31,7 @@
 #include "src/core/lib/security/security_connector/alts/alts_security_connector.h"
 
 #define GRPC_CREDENTIALS_TYPE_ALTS "Alts"
-#define GRPC_ALTS_HANDSHAKER_SERVICE_URL "metadata.google.internal:8080"
+#define GRPC_ALTS_HANDSHAKER_SERVICE_URL "metadata.google.internal.:8080"
 
 grpc_alts_credentials::grpc_alts_credentials(
     const grpc_alts_credentials_options* options,

+ 2 - 2
src/core/lib/security/credentials/alts/check_gcp_environment_no_op.cc

@@ -25,8 +25,8 @@
 #include <grpc/support/log.h>
 
 bool grpc_alts_is_running_on_gcp() {
-  gpr_log(GPR_ERROR,
-          "Platforms other than Linux and Windows are not supported");
+  gpr_log(GPR_INFO,
+          "ALTS: Platforms other than Linux and Windows are not supported");
   return false;
 }
 

+ 1 - 1
src/core/lib/security/credentials/credentials.h

@@ -60,7 +60,7 @@ typedef enum {
 
 #define GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS 60
 
-#define GRPC_COMPUTE_ENGINE_METADATA_HOST "metadata.google.internal"
+#define GRPC_COMPUTE_ENGINE_METADATA_HOST "metadata.google.internal."
 #define GRPC_COMPUTE_ENGINE_METADATA_TOKEN_PATH \
   "/computeMetadata/v1/instance/service-accounts/default/token"
 

+ 1 - 1
src/core/lib/security/credentials/google_default/google_default_credentials.cc

@@ -46,7 +46,7 @@
 
 /* -- Constants. -- */
 
-#define GRPC_COMPUTE_ENGINE_DETECTION_HOST "metadata.google.internal"
+#define GRPC_COMPUTE_ENGINE_DETECTION_HOST "metadata.google.internal."
 
 /* -- Default credentials. -- */
 

+ 192 - 0
src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc

@@ -0,0 +1,192 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+
+/** -- gRPC TLS key materials config API implementation. -- **/
+void grpc_tls_key_materials_config::set_key_materials(
+    grpc_core::UniquePtr<char> pem_root_certs,
+    PemKeyCertPairList pem_key_cert_pair_list) {
+  pem_key_cert_pair_list_ = std::move(pem_key_cert_pair_list);
+  pem_root_certs_ = std::move(pem_root_certs);
+}
+
+/** -- gRPC TLS credential reload config API implementation. -- **/
+grpc_tls_credential_reload_config::grpc_tls_credential_reload_config(
+    const void* config_user_data,
+    int (*schedule)(void* config_user_data,
+                    grpc_tls_credential_reload_arg* arg),
+    void (*cancel)(void* config_user_data, grpc_tls_credential_reload_arg* arg),
+    void (*destruct)(void* config_user_data))
+    : config_user_data_(const_cast<void*>(config_user_data)),
+      schedule_(schedule),
+      cancel_(cancel),
+      destruct_(destruct) {}
+
+grpc_tls_credential_reload_config::~grpc_tls_credential_reload_config() {
+  if (destruct_ != nullptr) {
+    destruct_((void*)config_user_data_);
+  }
+}
+
+/** -- gRPC TLS server authorization check API implementation. -- **/
+grpc_tls_server_authorization_check_config::
+    grpc_tls_server_authorization_check_config(
+        const void* config_user_data,
+        int (*schedule)(void* config_user_data,
+                        grpc_tls_server_authorization_check_arg* arg),
+        void (*cancel)(void* config_user_data,
+                       grpc_tls_server_authorization_check_arg* arg),
+        void (*destruct)(void* config_user_data))
+    : config_user_data_(const_cast<void*>(config_user_data)),
+      schedule_(schedule),
+      cancel_(cancel),
+      destruct_(destruct) {}
+
+grpc_tls_server_authorization_check_config::
+    ~grpc_tls_server_authorization_check_config() {
+  if (destruct_ != nullptr) {
+    destruct_((void*)config_user_data_);
+  }
+}
+
+/** -- Wrapper APIs declared in grpc_security.h -- **/
+grpc_tls_credentials_options* grpc_tls_credentials_options_create() {
+  return grpc_core::New<grpc_tls_credentials_options>();
+}
+
+int grpc_tls_credentials_options_set_cert_request_type(
+    grpc_tls_credentials_options* options,
+    grpc_ssl_client_certificate_request_type type) {
+  if (options == nullptr) {
+    gpr_log(GPR_ERROR,
+            "Invalid nullptr arguments to "
+            "grpc_tls_credentials_options_set_cert_request_type()");
+    return 0;
+  }
+  options->set_cert_request_type(type);
+  return 1;
+}
+
+int grpc_tls_credentials_options_set_key_materials_config(
+    grpc_tls_credentials_options* options,
+    grpc_tls_key_materials_config* config) {
+  if (options == nullptr || config == nullptr) {
+    gpr_log(GPR_ERROR,
+            "Invalid nullptr arguments to "
+            "grpc_tls_credentials_options_set_key_materials_config()");
+    return 0;
+  }
+  options->set_key_materials_config(config->Ref());
+  return 1;
+}
+
+int grpc_tls_credentials_options_set_credential_reload_config(
+    grpc_tls_credentials_options* options,
+    grpc_tls_credential_reload_config* config) {
+  if (options == nullptr || config == nullptr) {
+    gpr_log(GPR_ERROR,
+            "Invalid nullptr arguments to "
+            "grpc_tls_credentials_options_set_credential_reload_config()");
+    return 0;
+  }
+  options->set_credential_reload_config(config->Ref());
+  return 1;
+}
+
+int grpc_tls_credentials_options_set_server_authorization_check_config(
+    grpc_tls_credentials_options* options,
+    grpc_tls_server_authorization_check_config* config) {
+  if (options == nullptr || config == nullptr) {
+    gpr_log(
+        GPR_ERROR,
+        "Invalid nullptr arguments to "
+        "grpc_tls_credentials_options_set_server_authorization_check_config()");
+    return 0;
+  }
+  options->set_server_authorization_check_config(config->Ref());
+  return 1;
+}
+
+grpc_tls_key_materials_config* grpc_tls_key_materials_config_create() {
+  return grpc_core::New<grpc_tls_key_materials_config>();
+}
+
+int grpc_tls_key_materials_config_set_key_materials(
+    grpc_tls_key_materials_config* config, const char* root_certs,
+    const grpc_ssl_pem_key_cert_pair** key_cert_pairs, size_t num) {
+  if (config == nullptr || key_cert_pairs == nullptr || num == 0) {
+    gpr_log(GPR_ERROR,
+            "Invalid arguments to "
+            "grpc_tls_key_materials_config_set_key_materials()");
+    return 0;
+  }
+  grpc_core::UniquePtr<char> pem_root(const_cast<char*>(root_certs));
+  grpc_tls_key_materials_config::PemKeyCertPairList cert_pair_list;
+  for (size_t i = 0; i < num; i++) {
+    grpc_core::PemKeyCertPair key_cert_pair(
+        const_cast<grpc_ssl_pem_key_cert_pair*>(key_cert_pairs[i]));
+    cert_pair_list.emplace_back(std::move(key_cert_pair));
+  }
+  config->set_key_materials(std::move(pem_root), std::move(cert_pair_list));
+  gpr_free(key_cert_pairs);
+  return 1;
+}
+
+grpc_tls_credential_reload_config* grpc_tls_credential_reload_config_create(
+    const void* config_user_data,
+    int (*schedule)(void* config_user_data,
+                    grpc_tls_credential_reload_arg* arg),
+    void (*cancel)(void* config_user_data, grpc_tls_credential_reload_arg* arg),
+    void (*destruct)(void* config_user_data)) {
+  if (schedule == nullptr) {
+    gpr_log(
+        GPR_ERROR,
+        "Schedule API is nullptr in creating TLS credential reload config.");
+    return nullptr;
+  }
+  return grpc_core::New<grpc_tls_credential_reload_config>(
+      config_user_data, schedule, cancel, destruct);
+}
+
+grpc_tls_server_authorization_check_config*
+grpc_tls_server_authorization_check_config_create(
+    const void* config_user_data,
+    int (*schedule)(void* config_user_data,
+                    grpc_tls_server_authorization_check_arg* arg),
+    void (*cancel)(void* config_user_data,
+                   grpc_tls_server_authorization_check_arg* arg),
+    void (*destruct)(void* config_user_data)) {
+  if (schedule == nullptr) {
+    gpr_log(GPR_ERROR,
+            "Schedule API is nullptr in creating TLS server authorization "
+            "check config.");
+    return nullptr;
+  }
+  return grpc_core::New<grpc_tls_server_authorization_check_config>(
+      config_user_data, schedule, cancel, destruct);
+}

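The functions above are thin C wrappers over the ref-counted option objects declared in the matching header (next file). A hedged sketch of how an application could wire a synchronous credential-reload callback through this API; the callback bodies and names (my_reload_schedule, my_reload_cancel, configure_tls_options) are illustrative, and only the grpc_tls_* entry points defined in this file are assumed:

    // Illustrative schedule callback that handles the reload synchronously.
    static int my_reload_schedule(void* config_user_data,
                                  grpc_tls_credential_reload_arg* arg) {
      // A real implementation would refresh the key materials referenced by
      // *arg here and set its status/error fields.
      return 0;  // 0 => handled synchronously, arg->cb is not invoked
    }

    static void my_reload_cancel(void* config_user_data,
                                 grpc_tls_credential_reload_arg* arg) {
      // Nothing to cancel for a synchronous reload.
    }

    void configure_tls_options() {
      grpc_tls_credentials_options* opts = grpc_tls_credentials_options_create();
      grpc_tls_credential_reload_config* reload =
          grpc_tls_credential_reload_config_create(
              /*config_user_data=*/nullptr, my_reload_schedule, my_reload_cancel,
              /*destruct=*/nullptr);
      // The options take their own ref on the config via config->Ref().
      grpc_tls_credentials_options_set_credential_reload_config(opts, reload);
      grpc_tls_credentials_options_set_cert_request_type(
          opts, GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE);
    }

Each setter rejects nullptr arguments and returns 0 on failure and 1 on success, matching the convention used throughout this file.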
+ 213 - 0
src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h

@@ -0,0 +1,213 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_SECURITY_CREDENTIALS_TLS_GRPC_TLS_CREDENTIALS_OPTIONS_H
+#define GRPC_CORE_LIB_SECURITY_CREDENTIALS_TLS_GRPC_TLS_CREDENTIALS_OPTIONS_H
+
+#include <grpc/support/port_platform.h>
+
+#include <grpc/grpc_security.h>
+
+#include "src/core/lib/gprpp/inlined_vector.h"
+#include "src/core/lib/gprpp/ref_counted.h"
+#include "src/core/lib/security/security_connector/ssl_utils.h"
+
+/** TLS key materials config. **/
+struct grpc_tls_key_materials_config
+    : public grpc_core::RefCounted<grpc_tls_key_materials_config> {
+ public:
+  typedef grpc_core::InlinedVector<grpc_core::PemKeyCertPair, 1>
+      PemKeyCertPairList;
+
+  /** Getters for member fields. **/
+  const char* pem_root_certs() const { return pem_root_certs_.get(); }
+  const PemKeyCertPairList& pem_key_cert_pair_list() const {
+    return pem_key_cert_pair_list_;
+  }
+
+  /** Setters for member fields. **/
+  void set_key_materials(grpc_core::UniquePtr<char> pem_root_certs,
+                         PemKeyCertPairList pem_key_cert_pair_list);
+
+ private:
+  PemKeyCertPairList pem_key_cert_pair_list_;
+  grpc_core::UniquePtr<char> pem_root_certs_;
+};
+
+/** TLS credential reload config. **/
+struct grpc_tls_credential_reload_config
+    : public grpc_core::RefCounted<grpc_tls_credential_reload_config> {
+ public:
+  grpc_tls_credential_reload_config(
+      const void* config_user_data,
+      int (*schedule)(void* config_user_data,
+                      grpc_tls_credential_reload_arg* arg),
+      void (*cancel)(void* config_user_data,
+                     grpc_tls_credential_reload_arg* arg),
+      void (*destruct)(void* config_user_data));
+  ~grpc_tls_credential_reload_config();
+
+  int Schedule(grpc_tls_credential_reload_arg* arg) const {
+    return schedule_(config_user_data_, arg);
+  }
+  void Cancel(grpc_tls_credential_reload_arg* arg) const {
+    if (cancel_ == nullptr) {
+      gpr_log(GPR_ERROR, "cancel API is nullptr.");
+      return;
+    }
+    cancel_(config_user_data_, arg);
+  }
+
+ private:
+  /** config-specific, read-only user data that works for all channels created
+     with a credential using the config. */
+  void* config_user_data_;
+  /** callback function for invoking credential reload API. The implementation
+     of this method has to be non-blocking, but can be performed synchronously
+     or asynchronously.
+     If processing occurs synchronously, it populates \a arg->key_materials, \a
+     arg->status, and \a arg->error_details and returns zero.
+     If processing occurs asynchronously, it returns a non-zero value.
+     The application then invokes \a arg->cb when processing is completed. Note that
+     \a arg->cb cannot be invoked before \a schedule returns.
+  */
+  int (*schedule_)(void* config_user_data, grpc_tls_credential_reload_arg* arg);
+  /** callback function for cancelling a credential reload request scheduled via
+     an asynchronous \a schedule. \a arg is used to pinpoint an exact reloading
+     request to be cancelled, and the operation may not have any effect if the
+     request has already been processed. */
+  void (*cancel_)(void* config_user_data, grpc_tls_credential_reload_arg* arg);
+  /** callback function for cleaning up any data associated with credential
+     reload config. */
+  void (*destruct_)(void* config_user_data);
+};
+
+/** TLS server authorization check config. **/
+struct grpc_tls_server_authorization_check_config
+    : public grpc_core::RefCounted<grpc_tls_server_authorization_check_config> {
+ public:
+  grpc_tls_server_authorization_check_config(
+      const void* config_user_data,
+      int (*schedule)(void* config_user_data,
+                      grpc_tls_server_authorization_check_arg* arg),
+      void (*cancel)(void* config_user_data,
+                     grpc_tls_server_authorization_check_arg* arg),
+      void (*destruct)(void* config_user_data));
+  ~grpc_tls_server_authorization_check_config();
+
+  int Schedule(grpc_tls_server_authorization_check_arg* arg) const {
+    return schedule_(config_user_data_, arg);
+  }
+  void Cancel(grpc_tls_server_authorization_check_arg* arg) const {
+    if (cancel_ == nullptr) {
+      gpr_log(GPR_ERROR, "cancel API is nullptr.");
+      return;
+    }
+    cancel_(config_user_data_, arg);
+  }
+
+ private:
+  /** config-specific, read-only user data that works for all channels created
+     with a credential using the config. */
+  void* config_user_data_;
+
+  /** callback function for invoking server authorization check. The
+     implementation of this method has to be non-blocking, but can be performed
+     synchronously or asynchronously.
+     If processing occurs synchronously, it populates \a arg->result, \a
+     arg->status, and \a arg->error_details, and returns zero.
+     If processing occurs asynchronously, it returns a non-zero value.
+     The application then invokes \a arg->cb when processing is completed. Note that
+     \a arg->cb cannot be invoked before \a schedule() returns.
+  */
+  int (*schedule_)(void* config_user_data,
+                   grpc_tls_server_authorization_check_arg* arg);
+
+  /** callback function for canceling a server authorization check request. */
+  void (*cancel_)(void* config_user_data,
+                  grpc_tls_server_authorization_check_arg* arg);
+
+  /** callback function for cleaning up any data associated with server
+     authorization check config. */
+  void (*destruct_)(void* config_user_data);
+};
+
+/* TLS credentials options. */
+struct grpc_tls_credentials_options
+    : public grpc_core::RefCounted<grpc_tls_credentials_options> {
+ public:
+  ~grpc_tls_credentials_options() {
+    if (key_materials_config_.get() != nullptr) {
+      key_materials_config_.get()->Unref();
+    }
+    if (credential_reload_config_.get() != nullptr) {
+      credential_reload_config_.get()->Unref();
+    }
+    if (server_authorization_check_config_.get() != nullptr) {
+      server_authorization_check_config_.get()->Unref();
+    }
+  }
+
+  /* Getters for member fields. */
+  grpc_ssl_client_certificate_request_type cert_request_type() const {
+    return cert_request_type_;
+  }
+  const grpc_tls_key_materials_config* key_materials_config() const {
+    return key_materials_config_.get();
+  }
+  const grpc_tls_credential_reload_config* credential_reload_config() const {
+    return credential_reload_config_.get();
+  }
+  const grpc_tls_server_authorization_check_config*
+  server_authorization_check_config() const {
+    return server_authorization_check_config_.get();
+  }
+  grpc_tls_key_materials_config* mutable_key_materials_config() {
+    return key_materials_config_.get();
+  }
+
+  /* Setters for member fields. */
+  void set_cert_request_type(
+      const grpc_ssl_client_certificate_request_type type) {
+    cert_request_type_ = type;
+  }
+  void set_key_materials_config(
+      grpc_core::RefCountedPtr<grpc_tls_key_materials_config> config) {
+    key_materials_config_ = std::move(config);
+  }
+  void set_credential_reload_config(
+      grpc_core::RefCountedPtr<grpc_tls_credential_reload_config> config) {
+    credential_reload_config_ = std::move(config);
+  }
+  void set_server_authorization_check_config(
+      grpc_core::RefCountedPtr<grpc_tls_server_authorization_check_config>
+          config) {
+    server_authorization_check_config_ = std::move(config);
+  }
+
+ private:
+  grpc_ssl_client_certificate_request_type cert_request_type_;
+  grpc_core::RefCountedPtr<grpc_tls_key_materials_config> key_materials_config_;
+  grpc_core::RefCountedPtr<grpc_tls_credential_reload_config>
+      credential_reload_config_;
+  grpc_core::RefCountedPtr<grpc_tls_server_authorization_check_config>
+      server_authorization_check_config_;
+};
+
+#endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_TLS_GRPC_TLS_CREDENTIALS_OPTIONS_H \
+        */

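The Schedule()/Cancel() members on the reload and server-authorization configs are the hooks core will drive; per the comments above, a zero return from Schedule() means the callback already populated the arg synchronously, while a non-zero return means the application will invoke arg->cb later. A hedged sketch of that dispatch (run_reload is an illustrative name; only the methods and arg fields documented above are assumed):

    // Illustrative driver: returns 0 if the reload completed synchronously.
    int run_reload(grpc_tls_credential_reload_config* config,
                   grpc_tls_credential_reload_arg* arg) {
      if (config->Schedule(arg) == 0) {
        // Synchronous path: arg->status and arg->error_details are populated.
        return 0;
      }
      // Asynchronous path: the application invokes arg->cb when done; a caller
      // that loses interest in the result can call config->Cancel(arg).
      return 1;
    }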
+ 33 - 0
src/core/lib/security/security_connector/ssl_utils.h

@@ -89,6 +89,39 @@ class DefaultSslRootStore {
   static grpc_slice default_pem_root_certs_;
 };
 
+class PemKeyCertPair {
+ public:
+  // Construct from the C struct.  We steal its members and then immediately
+  // free it.
+  explicit PemKeyCertPair(grpc_ssl_pem_key_cert_pair* pair)
+      : private_key_(const_cast<char*>(pair->private_key)),
+        cert_chain_(const_cast<char*>(pair->cert_chain)) {
+    gpr_free(pair);
+  }
+
+  // Movable.
+  PemKeyCertPair(PemKeyCertPair&& other) {
+    private_key_ = std::move(other.private_key_);
+    cert_chain_ = std::move(other.cert_chain_);
+  }
+  PemKeyCertPair& operator=(PemKeyCertPair&& other) {
+    private_key_ = std::move(other.private_key_);
+    cert_chain_ = std::move(other.cert_chain_);
+    return *this;
+  }
+
+  // Not copyable.
+  PemKeyCertPair(const PemKeyCertPair&) = delete;
+  PemKeyCertPair& operator=(const PemKeyCertPair&) = delete;
+
+  char* private_key() const { return private_key_.get(); }
+  char* cert_chain() const { return cert_chain_.get(); }
+
+ private:
+  grpc_core::UniquePtr<char> private_key_;
+  grpc_core::UniquePtr<char> cert_chain_;
+};
+
 }  // namespace grpc_core
 
 #endif /* GRPC_CORE_LIB_SECURITY_SECURITY_CONNECTOR_SSL_UTILS_H \

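PemKeyCertPair adopts the C struct's members and immediately frees the struct, and it is move-only. A short hedged illustration; the PEM literals are placeholders, and since the adopted strings end up in grpc_core::UniquePtr<char> (released with gpr_free), gpr-allocated strings such as gpr_strdup output are the natural input:

    grpc_ssl_pem_key_cert_pair* raw = static_cast<grpc_ssl_pem_key_cert_pair*>(
        gpr_zalloc(sizeof(*raw)));
    raw->private_key = gpr_strdup("-----BEGIN PRIVATE KEY-----\n...");
    raw->cert_chain = gpr_strdup("-----BEGIN CERTIFICATE-----\n...");
    grpc_core::PemKeyCertPair pair(raw);               // steals members, frees raw
    grpc_core::PemKeyCertPair moved(std::move(pair));  // movable
    // grpc_core::PemKeyCertPair copy(moved);          // ill-formed: copying is deleted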
+ 5 - 1
src/core/lib/surface/call.cc

@@ -556,6 +556,7 @@ void grpc_call_unref(grpc_call* c) {
   GPR_TIMER_SCOPE("grpc_call_unref", 0);
 
   child_call* cc = c->child;
+  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
   grpc_core::ExecCtx exec_ctx;
 
   GRPC_API_TRACE("grpc_call_unref(c=%p)", 1, (c));
@@ -597,6 +598,7 @@ void grpc_call_unref(grpc_call* c) {
 grpc_call_error grpc_call_cancel(grpc_call* call, void* reserved) {
   GRPC_API_TRACE("grpc_call_cancel(call=%p, reserved=%p)", 2, (call, reserved));
   GPR_ASSERT(!reserved);
+  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
   grpc_core::ExecCtx exec_ctx;
   cancel_with_error(call, GRPC_ERROR_CANCELLED);
   return GRPC_CALL_OK;
@@ -646,6 +648,7 @@ grpc_call_error grpc_call_cancel_with_status(grpc_call* c,
                                              grpc_status_code status,
                                              const char* description,
                                              void* reserved) {
+  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
   grpc_core::ExecCtx exec_ctx;
   GRPC_API_TRACE(
       "grpc_call_cancel_with_status("
@@ -1894,7 +1897,6 @@ done_with_error:
 
 grpc_call_error grpc_call_start_batch(grpc_call* call, const grpc_op* ops,
                                       size_t nops, void* tag, void* reserved) {
-  grpc_core::ExecCtx exec_ctx;
   grpc_call_error err;
 
   GRPC_API_TRACE(
@@ -1905,6 +1907,8 @@ grpc_call_error grpc_call_start_batch(grpc_call* call, const grpc_op* ops,
   if (reserved != nullptr) {
     err = GRPC_CALL_ERROR;
   } else {
+    grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
+    grpc_core::ExecCtx exec_ctx;
     err = call_start_batch(call, ops, nops, tag, 0);
   }
 

+ 4 - 7
src/core/lib/surface/completion_queue.cc

@@ -854,21 +854,17 @@ static void cq_end_op_for_callback(
   // for reserved storage. Invoke the done callback right away to release it.
   done(done_arg, storage);
 
-  gpr_mu_lock(cq->mu);
-  cq_check_tag(cq, tag, false); /* Used in debug builds only */
+  cq_check_tag(cq, tag, true); /* Used in debug builds only */
 
   gpr_atm_no_barrier_fetch_add(&cqd->things_queued_ever, 1);
   if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) {
-    gpr_mu_unlock(cq->mu);
     cq_finish_shutdown_callback(cq);
-  } else {
-    gpr_mu_unlock(cq->mu);
   }
 
   GRPC_ERROR_UNREF(error);
 
   auto* functor = static_cast<grpc_experimental_completion_queue_functor*>(tag);
-  (*functor->functor_run)(functor, is_success);
+  grpc_core::ApplicationCallbackExecCtx::Enqueue(functor, is_success);
 }
 
 void grpc_cq_end_op(grpc_completion_queue* cq, void* tag, grpc_error* error,
@@ -1352,7 +1348,7 @@ static void cq_finish_shutdown_callback(grpc_completion_queue* cq) {
   GPR_ASSERT(cqd->shutdown_called);
 
   cq->poller_vtable->shutdown(POLLSET_FROM_CQ(cq), &cq->pollset_shutdown_done);
-  (*callback->functor_run)(callback, true);
+  grpc_core::ApplicationCallbackExecCtx::Enqueue(callback, true);
 }
 
 static void cq_shutdown_callback(grpc_completion_queue* cq) {
@@ -1385,6 +1381,7 @@ static void cq_shutdown_callback(grpc_completion_queue* cq) {
    to zero here, then enter shutdown mode and wake up any waiters */
 void grpc_completion_queue_shutdown(grpc_completion_queue* cq) {
   GPR_TIMER_SCOPE("grpc_completion_queue_shutdown", 0);
+  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
   grpc_core::ExecCtx exec_ctx;
   GRPC_API_TRACE("grpc_completion_queue_shutdown(cq=%p)", 1, (cq));
   cq->vtable->shutdown(cq);

+ 2 - 0
src/core/lib/surface/init.cc

@@ -130,6 +130,7 @@ void grpc_init(void) {
     grpc_channel_init_init();
     grpc_core::channelz::ChannelzRegistry::Init();
     grpc_security_pre_init();
+    grpc_core::ApplicationCallbackExecCtx::GlobalInit();
     grpc_core::ExecCtx::GlobalInit();
     grpc_iomgr_init();
     gpr_timers_global_init();
@@ -183,6 +184,7 @@ void grpc_shutdown(void) {
       grpc_core::Fork::GlobalShutdown();
     }
     grpc_core::ExecCtx::GlobalShutdown();
+    grpc_core::ApplicationCallbackExecCtx::GlobalShutdown();
   }
   gpr_mu_unlock(&g_init_mu);
 }

+ 16 - 15
src/core/lib/surface/server.cc

@@ -997,10 +997,12 @@ void grpc_server_register_completion_queue(grpc_server* server,
       "grpc_server_register_completion_queue(server=%p, cq=%p, reserved=%p)", 3,
       (server, cq, reserved));
 
-  if (grpc_get_cq_completion_type(cq) != GRPC_CQ_NEXT) {
+  auto cq_type = grpc_get_cq_completion_type(cq);
+  if (cq_type != GRPC_CQ_NEXT && cq_type != GRPC_CQ_CALLBACK) {
     gpr_log(GPR_INFO,
-            "Completion queue which is not of type GRPC_CQ_NEXT is being "
-            "registered as a server-completion-queue");
+            "Completion queue of type %d is being registered as a "
+            "server-completion-queue",
+            static_cast<int>(cq_type));
     /* Ideally we should log an error and abort but ruby-wrapped-language API
        calls grpc_completion_queue_pluck() on server completion queues */
   }
@@ -1302,6 +1304,7 @@ void grpc_server_shutdown_and_notify(grpc_server* server,
   listener* l;
   shutdown_tag* sdt;
   channel_broadcaster broadcaster;
+  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
   grpc_core::ExecCtx exec_ctx;
 
   GRPC_API_TRACE("grpc_server_shutdown_and_notify(server=%p, cq=%p, tag=%p)", 3,
@@ -1369,6 +1372,7 @@ void grpc_server_shutdown_and_notify(grpc_server* server,
 
 void grpc_server_cancel_all_calls(grpc_server* server) {
   channel_broadcaster broadcaster;
+  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
   grpc_core::ExecCtx exec_ctx;
 
   GRPC_API_TRACE("grpc_server_cancel_all_calls(server=%p)", 1, (server));
@@ -1384,6 +1388,7 @@ void grpc_server_cancel_all_calls(grpc_server* server) {
 
 void grpc_server_destroy(grpc_server* server) {
   listener* l;
+  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
   grpc_core::ExecCtx exec_ctx;
 
   GRPC_API_TRACE("grpc_server_destroy(server=%p)", 1, (server));
@@ -1469,6 +1474,7 @@ grpc_call_error grpc_server_request_call(
     grpc_completion_queue* cq_bound_to_call,
     grpc_completion_queue* cq_for_notification, void* tag) {
   grpc_call_error error;
+  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
   grpc_core::ExecCtx exec_ctx;
   requested_call* rc = static_cast<requested_call*>(gpr_malloc(sizeof(*rc)));
   GRPC_STATS_INC_SERVER_REQUESTED_CALLS();
@@ -1515,11 +1521,11 @@ grpc_call_error grpc_server_request_registered_call(
     grpc_metadata_array* initial_metadata, grpc_byte_buffer** optional_payload,
     grpc_completion_queue* cq_bound_to_call,
     grpc_completion_queue* cq_for_notification, void* tag) {
-  grpc_call_error error;
+  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
   grpc_core::ExecCtx exec_ctx;
+  GRPC_STATS_INC_SERVER_REQUESTED_CALLS();
   requested_call* rc = static_cast<requested_call*>(gpr_malloc(sizeof(*rc)));
   registered_method* rm = static_cast<registered_method*>(rmp);
-  GRPC_STATS_INC_SERVER_REQUESTED_CALLS();
   GRPC_API_TRACE(
       "grpc_server_request_registered_call("
       "server=%p, rmp=%p, call=%p, deadline=%p, initial_metadata=%p, "
@@ -1537,19 +1543,17 @@ grpc_call_error grpc_server_request_registered_call(
   }
   if (cq_idx == server->cq_count) {
     gpr_free(rc);
-    error = GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE;
-    goto done;
+    return GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE;
   }
   if ((optional_payload == nullptr) !=
       (rm->payload_handling == GRPC_SRM_PAYLOAD_NONE)) {
     gpr_free(rc);
-    error = GRPC_CALL_ERROR_PAYLOAD_TYPE_MISMATCH;
-    goto done;
+    return GRPC_CALL_ERROR_PAYLOAD_TYPE_MISMATCH;
   }
+
   if (grpc_cq_begin_op(cq_for_notification, tag) == false) {
     gpr_free(rc);
-    error = GRPC_CALL_ERROR_COMPLETION_QUEUE_SHUTDOWN;
-    goto done;
+    return GRPC_CALL_ERROR_COMPLETION_QUEUE_SHUTDOWN;
   }
   rc->cq_idx = cq_idx;
   rc->type = REGISTERED_CALL;
@@ -1561,10 +1565,7 @@ grpc_call_error grpc_server_request_registered_call(
   rc->data.registered.deadline = deadline;
   rc->initial_metadata = initial_metadata;
   rc->data.registered.optional_payload = optional_payload;
-  error = queue_call_request(server, cq_idx, rc);
-done:
-
-  return error;
+  return queue_call_request(server, cq_idx, rc);
 }
 
 static void fail_call(grpc_server* server, size_t cq_idx, requested_call* rc,

+ 279 - 279
src/core/lib/transport/static_metadata.cc

@@ -236,113 +236,113 @@ grpc_slice_refcount grpc_static_metadata_refcounts[GRPC_STATIC_MDSTR_COUNT] = {
 };
 
 const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT] = {
-    {&grpc_static_metadata_refcounts[0], {{g_bytes + 0, 5}}},
-    {&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}},
-    {&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
-    {&grpc_static_metadata_refcounts[3], {{g_bytes + 19, 10}}},
-    {&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}},
-    {&grpc_static_metadata_refcounts[5], {{g_bytes + 36, 2}}},
-    {&grpc_static_metadata_refcounts[6], {{g_bytes + 38, 12}}},
-    {&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}},
-    {&grpc_static_metadata_refcounts[8], {{g_bytes + 61, 16}}},
-    {&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}},
-    {&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
-    {&grpc_static_metadata_refcounts[11], {{g_bytes + 110, 21}}},
-    {&grpc_static_metadata_refcounts[12], {{g_bytes + 131, 13}}},
-    {&grpc_static_metadata_refcounts[13], {{g_bytes + 144, 14}}},
-    {&grpc_static_metadata_refcounts[14], {{g_bytes + 158, 12}}},
-    {&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}},
-    {&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
-    {&grpc_static_metadata_refcounts[17], {{g_bytes + 201, 30}}},
-    {&grpc_static_metadata_refcounts[18], {{g_bytes + 231, 37}}},
-    {&grpc_static_metadata_refcounts[19], {{g_bytes + 268, 10}}},
-    {&grpc_static_metadata_refcounts[20], {{g_bytes + 278, 4}}},
-    {&grpc_static_metadata_refcounts[21], {{g_bytes + 282, 8}}},
-    {&grpc_static_metadata_refcounts[22], {{g_bytes + 290, 26}}},
-    {&grpc_static_metadata_refcounts[23], {{g_bytes + 316, 22}}},
-    {&grpc_static_metadata_refcounts[24], {{g_bytes + 338, 12}}},
-    {&grpc_static_metadata_refcounts[25], {{g_bytes + 350, 1}}},
-    {&grpc_static_metadata_refcounts[26], {{g_bytes + 351, 1}}},
-    {&grpc_static_metadata_refcounts[27], {{g_bytes + 352, 1}}},
-    {&grpc_static_metadata_refcounts[28], {{g_bytes + 353, 1}}},
-    {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}},
-    {&grpc_static_metadata_refcounts[30], {{g_bytes + 354, 19}}},
-    {&grpc_static_metadata_refcounts[31], {{g_bytes + 373, 12}}},
-    {&grpc_static_metadata_refcounts[32], {{g_bytes + 385, 30}}},
-    {&grpc_static_metadata_refcounts[33], {{g_bytes + 415, 31}}},
-    {&grpc_static_metadata_refcounts[34], {{g_bytes + 446, 36}}},
-    {&grpc_static_metadata_refcounts[35], {{g_bytes + 482, 28}}},
-    {&grpc_static_metadata_refcounts[36], {{g_bytes + 510, 80}}},
-    {&grpc_static_metadata_refcounts[37], {{g_bytes + 590, 7}}},
-    {&grpc_static_metadata_refcounts[38], {{g_bytes + 597, 4}}},
-    {&grpc_static_metadata_refcounts[39], {{g_bytes + 601, 11}}},
-    {&grpc_static_metadata_refcounts[40], {{g_bytes + 612, 3}}},
-    {&grpc_static_metadata_refcounts[41], {{g_bytes + 615, 4}}},
-    {&grpc_static_metadata_refcounts[42], {{g_bytes + 619, 1}}},
-    {&grpc_static_metadata_refcounts[43], {{g_bytes + 620, 11}}},
-    {&grpc_static_metadata_refcounts[44], {{g_bytes + 631, 4}}},
-    {&grpc_static_metadata_refcounts[45], {{g_bytes + 635, 5}}},
-    {&grpc_static_metadata_refcounts[46], {{g_bytes + 640, 3}}},
-    {&grpc_static_metadata_refcounts[47], {{g_bytes + 643, 3}}},
-    {&grpc_static_metadata_refcounts[48], {{g_bytes + 646, 3}}},
-    {&grpc_static_metadata_refcounts[49], {{g_bytes + 649, 3}}},
-    {&grpc_static_metadata_refcounts[50], {{g_bytes + 652, 3}}},
-    {&grpc_static_metadata_refcounts[51], {{g_bytes + 655, 3}}},
-    {&grpc_static_metadata_refcounts[52], {{g_bytes + 658, 3}}},
-    {&grpc_static_metadata_refcounts[53], {{g_bytes + 661, 14}}},
-    {&grpc_static_metadata_refcounts[54], {{g_bytes + 675, 13}}},
-    {&grpc_static_metadata_refcounts[55], {{g_bytes + 688, 15}}},
-    {&grpc_static_metadata_refcounts[56], {{g_bytes + 703, 13}}},
-    {&grpc_static_metadata_refcounts[57], {{g_bytes + 716, 6}}},
-    {&grpc_static_metadata_refcounts[58], {{g_bytes + 722, 27}}},
-    {&grpc_static_metadata_refcounts[59], {{g_bytes + 749, 3}}},
-    {&grpc_static_metadata_refcounts[60], {{g_bytes + 752, 5}}},
-    {&grpc_static_metadata_refcounts[61], {{g_bytes + 757, 13}}},
-    {&grpc_static_metadata_refcounts[62], {{g_bytes + 770, 13}}},
-    {&grpc_static_metadata_refcounts[63], {{g_bytes + 783, 19}}},
-    {&grpc_static_metadata_refcounts[64], {{g_bytes + 802, 16}}},
-    {&grpc_static_metadata_refcounts[65], {{g_bytes + 818, 14}}},
-    {&grpc_static_metadata_refcounts[66], {{g_bytes + 832, 16}}},
-    {&grpc_static_metadata_refcounts[67], {{g_bytes + 848, 13}}},
-    {&grpc_static_metadata_refcounts[68], {{g_bytes + 861, 6}}},
-    {&grpc_static_metadata_refcounts[69], {{g_bytes + 867, 4}}},
-    {&grpc_static_metadata_refcounts[70], {{g_bytes + 871, 4}}},
-    {&grpc_static_metadata_refcounts[71], {{g_bytes + 875, 6}}},
-    {&grpc_static_metadata_refcounts[72], {{g_bytes + 881, 7}}},
-    {&grpc_static_metadata_refcounts[73], {{g_bytes + 888, 4}}},
-    {&grpc_static_metadata_refcounts[74], {{g_bytes + 892, 8}}},
-    {&grpc_static_metadata_refcounts[75], {{g_bytes + 900, 17}}},
-    {&grpc_static_metadata_refcounts[76], {{g_bytes + 917, 13}}},
-    {&grpc_static_metadata_refcounts[77], {{g_bytes + 930, 8}}},
-    {&grpc_static_metadata_refcounts[78], {{g_bytes + 938, 19}}},
-    {&grpc_static_metadata_refcounts[79], {{g_bytes + 957, 13}}},
-    {&grpc_static_metadata_refcounts[80], {{g_bytes + 970, 4}}},
-    {&grpc_static_metadata_refcounts[81], {{g_bytes + 974, 8}}},
-    {&grpc_static_metadata_refcounts[82], {{g_bytes + 982, 12}}},
-    {&grpc_static_metadata_refcounts[83], {{g_bytes + 994, 18}}},
-    {&grpc_static_metadata_refcounts[84], {{g_bytes + 1012, 19}}},
-    {&grpc_static_metadata_refcounts[85], {{g_bytes + 1031, 5}}},
-    {&grpc_static_metadata_refcounts[86], {{g_bytes + 1036, 7}}},
-    {&grpc_static_metadata_refcounts[87], {{g_bytes + 1043, 7}}},
-    {&grpc_static_metadata_refcounts[88], {{g_bytes + 1050, 11}}},
-    {&grpc_static_metadata_refcounts[89], {{g_bytes + 1061, 6}}},
-    {&grpc_static_metadata_refcounts[90], {{g_bytes + 1067, 10}}},
-    {&grpc_static_metadata_refcounts[91], {{g_bytes + 1077, 25}}},
-    {&grpc_static_metadata_refcounts[92], {{g_bytes + 1102, 17}}},
-    {&grpc_static_metadata_refcounts[93], {{g_bytes + 1119, 4}}},
-    {&grpc_static_metadata_refcounts[94], {{g_bytes + 1123, 3}}},
-    {&grpc_static_metadata_refcounts[95], {{g_bytes + 1126, 16}}},
-    {&grpc_static_metadata_refcounts[96], {{g_bytes + 1142, 1}}},
-    {&grpc_static_metadata_refcounts[97], {{g_bytes + 1143, 8}}},
-    {&grpc_static_metadata_refcounts[98], {{g_bytes + 1151, 8}}},
-    {&grpc_static_metadata_refcounts[99], {{g_bytes + 1159, 16}}},
-    {&grpc_static_metadata_refcounts[100], {{g_bytes + 1175, 4}}},
-    {&grpc_static_metadata_refcounts[101], {{g_bytes + 1179, 3}}},
-    {&grpc_static_metadata_refcounts[102], {{g_bytes + 1182, 11}}},
-    {&grpc_static_metadata_refcounts[103], {{g_bytes + 1193, 16}}},
-    {&grpc_static_metadata_refcounts[104], {{g_bytes + 1209, 13}}},
-    {&grpc_static_metadata_refcounts[105], {{g_bytes + 1222, 12}}},
-    {&grpc_static_metadata_refcounts[106], {{g_bytes + 1234, 21}}},
+    {&grpc_static_metadata_refcounts[0], {{5, g_bytes + 0}}},
+    {&grpc_static_metadata_refcounts[1], {{7, g_bytes + 5}}},
+    {&grpc_static_metadata_refcounts[2], {{7, g_bytes + 12}}},
+    {&grpc_static_metadata_refcounts[3], {{10, g_bytes + 19}}},
+    {&grpc_static_metadata_refcounts[4], {{7, g_bytes + 29}}},
+    {&grpc_static_metadata_refcounts[5], {{2, g_bytes + 36}}},
+    {&grpc_static_metadata_refcounts[6], {{12, g_bytes + 38}}},
+    {&grpc_static_metadata_refcounts[7], {{11, g_bytes + 50}}},
+    {&grpc_static_metadata_refcounts[8], {{16, g_bytes + 61}}},
+    {&grpc_static_metadata_refcounts[9], {{13, g_bytes + 77}}},
+    {&grpc_static_metadata_refcounts[10], {{20, g_bytes + 90}}},
+    {&grpc_static_metadata_refcounts[11], {{21, g_bytes + 110}}},
+    {&grpc_static_metadata_refcounts[12], {{13, g_bytes + 131}}},
+    {&grpc_static_metadata_refcounts[13], {{14, g_bytes + 144}}},
+    {&grpc_static_metadata_refcounts[14], {{12, g_bytes + 158}}},
+    {&grpc_static_metadata_refcounts[15], {{16, g_bytes + 170}}},
+    {&grpc_static_metadata_refcounts[16], {{15, g_bytes + 186}}},
+    {&grpc_static_metadata_refcounts[17], {{30, g_bytes + 201}}},
+    {&grpc_static_metadata_refcounts[18], {{37, g_bytes + 231}}},
+    {&grpc_static_metadata_refcounts[19], {{10, g_bytes + 268}}},
+    {&grpc_static_metadata_refcounts[20], {{4, g_bytes + 278}}},
+    {&grpc_static_metadata_refcounts[21], {{8, g_bytes + 282}}},
+    {&grpc_static_metadata_refcounts[22], {{26, g_bytes + 290}}},
+    {&grpc_static_metadata_refcounts[23], {{22, g_bytes + 316}}},
+    {&grpc_static_metadata_refcounts[24], {{12, g_bytes + 338}}},
+    {&grpc_static_metadata_refcounts[25], {{1, g_bytes + 350}}},
+    {&grpc_static_metadata_refcounts[26], {{1, g_bytes + 351}}},
+    {&grpc_static_metadata_refcounts[27], {{1, g_bytes + 352}}},
+    {&grpc_static_metadata_refcounts[28], {{1, g_bytes + 353}}},
+    {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}},
+    {&grpc_static_metadata_refcounts[30], {{19, g_bytes + 354}}},
+    {&grpc_static_metadata_refcounts[31], {{12, g_bytes + 373}}},
+    {&grpc_static_metadata_refcounts[32], {{30, g_bytes + 385}}},
+    {&grpc_static_metadata_refcounts[33], {{31, g_bytes + 415}}},
+    {&grpc_static_metadata_refcounts[34], {{36, g_bytes + 446}}},
+    {&grpc_static_metadata_refcounts[35], {{28, g_bytes + 482}}},
+    {&grpc_static_metadata_refcounts[36], {{80, g_bytes + 510}}},
+    {&grpc_static_metadata_refcounts[37], {{7, g_bytes + 590}}},
+    {&grpc_static_metadata_refcounts[38], {{4, g_bytes + 597}}},
+    {&grpc_static_metadata_refcounts[39], {{11, g_bytes + 601}}},
+    {&grpc_static_metadata_refcounts[40], {{3, g_bytes + 612}}},
+    {&grpc_static_metadata_refcounts[41], {{4, g_bytes + 615}}},
+    {&grpc_static_metadata_refcounts[42], {{1, g_bytes + 619}}},
+    {&grpc_static_metadata_refcounts[43], {{11, g_bytes + 620}}},
+    {&grpc_static_metadata_refcounts[44], {{4, g_bytes + 631}}},
+    {&grpc_static_metadata_refcounts[45], {{5, g_bytes + 635}}},
+    {&grpc_static_metadata_refcounts[46], {{3, g_bytes + 640}}},
+    {&grpc_static_metadata_refcounts[47], {{3, g_bytes + 643}}},
+    {&grpc_static_metadata_refcounts[48], {{3, g_bytes + 646}}},
+    {&grpc_static_metadata_refcounts[49], {{3, g_bytes + 649}}},
+    {&grpc_static_metadata_refcounts[50], {{3, g_bytes + 652}}},
+    {&grpc_static_metadata_refcounts[51], {{3, g_bytes + 655}}},
+    {&grpc_static_metadata_refcounts[52], {{3, g_bytes + 658}}},
+    {&grpc_static_metadata_refcounts[53], {{14, g_bytes + 661}}},
+    {&grpc_static_metadata_refcounts[54], {{13, g_bytes + 675}}},
+    {&grpc_static_metadata_refcounts[55], {{15, g_bytes + 688}}},
+    {&grpc_static_metadata_refcounts[56], {{13, g_bytes + 703}}},
+    {&grpc_static_metadata_refcounts[57], {{6, g_bytes + 716}}},
+    {&grpc_static_metadata_refcounts[58], {{27, g_bytes + 722}}},
+    {&grpc_static_metadata_refcounts[59], {{3, g_bytes + 749}}},
+    {&grpc_static_metadata_refcounts[60], {{5, g_bytes + 752}}},
+    {&grpc_static_metadata_refcounts[61], {{13, g_bytes + 757}}},
+    {&grpc_static_metadata_refcounts[62], {{13, g_bytes + 770}}},
+    {&grpc_static_metadata_refcounts[63], {{19, g_bytes + 783}}},
+    {&grpc_static_metadata_refcounts[64], {{16, g_bytes + 802}}},
+    {&grpc_static_metadata_refcounts[65], {{14, g_bytes + 818}}},
+    {&grpc_static_metadata_refcounts[66], {{16, g_bytes + 832}}},
+    {&grpc_static_metadata_refcounts[67], {{13, g_bytes + 848}}},
+    {&grpc_static_metadata_refcounts[68], {{6, g_bytes + 861}}},
+    {&grpc_static_metadata_refcounts[69], {{4, g_bytes + 867}}},
+    {&grpc_static_metadata_refcounts[70], {{4, g_bytes + 871}}},
+    {&grpc_static_metadata_refcounts[71], {{6, g_bytes + 875}}},
+    {&grpc_static_metadata_refcounts[72], {{7, g_bytes + 881}}},
+    {&grpc_static_metadata_refcounts[73], {{4, g_bytes + 888}}},
+    {&grpc_static_metadata_refcounts[74], {{8, g_bytes + 892}}},
+    {&grpc_static_metadata_refcounts[75], {{17, g_bytes + 900}}},
+    {&grpc_static_metadata_refcounts[76], {{13, g_bytes + 917}}},
+    {&grpc_static_metadata_refcounts[77], {{8, g_bytes + 930}}},
+    {&grpc_static_metadata_refcounts[78], {{19, g_bytes + 938}}},
+    {&grpc_static_metadata_refcounts[79], {{13, g_bytes + 957}}},
+    {&grpc_static_metadata_refcounts[80], {{4, g_bytes + 970}}},
+    {&grpc_static_metadata_refcounts[81], {{8, g_bytes + 974}}},
+    {&grpc_static_metadata_refcounts[82], {{12, g_bytes + 982}}},
+    {&grpc_static_metadata_refcounts[83], {{18, g_bytes + 994}}},
+    {&grpc_static_metadata_refcounts[84], {{19, g_bytes + 1012}}},
+    {&grpc_static_metadata_refcounts[85], {{5, g_bytes + 1031}}},
+    {&grpc_static_metadata_refcounts[86], {{7, g_bytes + 1036}}},
+    {&grpc_static_metadata_refcounts[87], {{7, g_bytes + 1043}}},
+    {&grpc_static_metadata_refcounts[88], {{11, g_bytes + 1050}}},
+    {&grpc_static_metadata_refcounts[89], {{6, g_bytes + 1061}}},
+    {&grpc_static_metadata_refcounts[90], {{10, g_bytes + 1067}}},
+    {&grpc_static_metadata_refcounts[91], {{25, g_bytes + 1077}}},
+    {&grpc_static_metadata_refcounts[92], {{17, g_bytes + 1102}}},
+    {&grpc_static_metadata_refcounts[93], {{4, g_bytes + 1119}}},
+    {&grpc_static_metadata_refcounts[94], {{3, g_bytes + 1123}}},
+    {&grpc_static_metadata_refcounts[95], {{16, g_bytes + 1126}}},
+    {&grpc_static_metadata_refcounts[96], {{1, g_bytes + 1142}}},
+    {&grpc_static_metadata_refcounts[97], {{8, g_bytes + 1143}}},
+    {&grpc_static_metadata_refcounts[98], {{8, g_bytes + 1151}}},
+    {&grpc_static_metadata_refcounts[99], {{16, g_bytes + 1159}}},
+    {&grpc_static_metadata_refcounts[100], {{4, g_bytes + 1175}}},
+    {&grpc_static_metadata_refcounts[101], {{3, g_bytes + 1179}}},
+    {&grpc_static_metadata_refcounts[102], {{11, g_bytes + 1182}}},
+    {&grpc_static_metadata_refcounts[103], {{16, g_bytes + 1193}}},
+    {&grpc_static_metadata_refcounts[104], {{13, g_bytes + 1209}}},
+    {&grpc_static_metadata_refcounts[105], {{12, g_bytes + 1222}}},
+    {&grpc_static_metadata_refcounts[106], {{21, g_bytes + 1234}}},
 };
 
 uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT] = {
@@ -404,178 +404,178 @@ grpc_mdelem grpc_static_mdelem_for_static_strings(int a, int b) {
 }
 
 grpc_mdelem_data grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT] = {
-    {{&grpc_static_metadata_refcounts[3], {{g_bytes + 19, 10}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}},
-     {&grpc_static_metadata_refcounts[40], {{g_bytes + 612, 3}}}},
-    {{&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}},
-     {&grpc_static_metadata_refcounts[41], {{g_bytes + 615, 4}}}},
-    {{&grpc_static_metadata_refcounts[0], {{g_bytes + 0, 5}}},
-     {&grpc_static_metadata_refcounts[42], {{g_bytes + 619, 1}}}},
-    {{&grpc_static_metadata_refcounts[0], {{g_bytes + 0, 5}}},
-     {&grpc_static_metadata_refcounts[43], {{g_bytes + 620, 11}}}},
-    {{&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}},
-     {&grpc_static_metadata_refcounts[44], {{g_bytes + 631, 4}}}},
-    {{&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}},
-     {&grpc_static_metadata_refcounts[45], {{g_bytes + 635, 5}}}},
-    {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
-     {&grpc_static_metadata_refcounts[46], {{g_bytes + 640, 3}}}},
-    {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
-     {&grpc_static_metadata_refcounts[47], {{g_bytes + 643, 3}}}},
-    {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
-     {&grpc_static_metadata_refcounts[48], {{g_bytes + 646, 3}}}},
-    {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
-     {&grpc_static_metadata_refcounts[49], {{g_bytes + 649, 3}}}},
-    {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
-     {&grpc_static_metadata_refcounts[50], {{g_bytes + 652, 3}}}},
-    {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
-     {&grpc_static_metadata_refcounts[51], {{g_bytes + 655, 3}}}},
-    {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}},
-     {&grpc_static_metadata_refcounts[52], {{g_bytes + 658, 3}}}},
-    {{&grpc_static_metadata_refcounts[53], {{g_bytes + 661, 14}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
-     {&grpc_static_metadata_refcounts[54], {{g_bytes + 675, 13}}}},
-    {{&grpc_static_metadata_refcounts[55], {{g_bytes + 688, 15}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[56], {{g_bytes + 703, 13}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[57], {{g_bytes + 716, 6}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[58], {{g_bytes + 722, 27}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[59], {{g_bytes + 749, 3}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[60], {{g_bytes + 752, 5}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[61], {{g_bytes + 757, 13}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[62], {{g_bytes + 770, 13}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[63], {{g_bytes + 783, 19}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[64], {{g_bytes + 802, 16}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[65], {{g_bytes + 818, 14}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[66], {{g_bytes + 832, 16}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[67], {{g_bytes + 848, 13}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[14], {{g_bytes + 158, 12}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[68], {{g_bytes + 861, 6}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[69], {{g_bytes + 867, 4}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[70], {{g_bytes + 871, 4}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[71], {{g_bytes + 875, 6}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[72], {{g_bytes + 881, 7}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[73], {{g_bytes + 888, 4}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[20], {{g_bytes + 278, 4}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[74], {{g_bytes + 892, 8}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[75], {{g_bytes + 900, 17}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[76], {{g_bytes + 917, 13}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[77], {{g_bytes + 930, 8}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[78], {{g_bytes + 938, 19}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[79], {{g_bytes + 957, 13}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[80], {{g_bytes + 970, 4}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[81], {{g_bytes + 974, 8}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[82], {{g_bytes + 982, 12}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[83], {{g_bytes + 994, 18}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[84], {{g_bytes + 1012, 19}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[85], {{g_bytes + 1031, 5}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[86], {{g_bytes + 1036, 7}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[87], {{g_bytes + 1043, 7}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[88], {{g_bytes + 1050, 11}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[89], {{g_bytes + 1061, 6}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[90], {{g_bytes + 1067, 10}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[91], {{g_bytes + 1077, 25}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[92], {{g_bytes + 1102, 17}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[19], {{g_bytes + 268, 10}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[93], {{g_bytes + 1119, 4}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[94], {{g_bytes + 1123, 3}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[95], {{g_bytes + 1126, 16}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}},
-     {&grpc_static_metadata_refcounts[96], {{g_bytes + 1142, 1}}}},
-    {{&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}},
-     {&grpc_static_metadata_refcounts[25], {{g_bytes + 350, 1}}}},
-    {{&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}},
-     {&grpc_static_metadata_refcounts[26], {{g_bytes + 351, 1}}}},
-    {{&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}},
-     {&grpc_static_metadata_refcounts[97], {{g_bytes + 1143, 8}}}},
-    {{&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}},
-     {&grpc_static_metadata_refcounts[38], {{g_bytes + 597, 4}}}},
-    {{&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}},
-     {&grpc_static_metadata_refcounts[37], {{g_bytes + 590, 7}}}},
-    {{&grpc_static_metadata_refcounts[5], {{g_bytes + 36, 2}}},
-     {&grpc_static_metadata_refcounts[98], {{g_bytes + 1151, 8}}}},
-    {{&grpc_static_metadata_refcounts[14], {{g_bytes + 158, 12}}},
-     {&grpc_static_metadata_refcounts[99], {{g_bytes + 1159, 16}}}},
-    {{&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}},
-     {&grpc_static_metadata_refcounts[100], {{g_bytes + 1175, 4}}}},
-    {{&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}},
-     {&grpc_static_metadata_refcounts[101], {{g_bytes + 1179, 3}}}},
-    {{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}},
-     {&grpc_static_metadata_refcounts[97], {{g_bytes + 1143, 8}}}},
-    {{&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}},
-     {&grpc_static_metadata_refcounts[38], {{g_bytes + 597, 4}}}},
-    {{&grpc_static_metadata_refcounts[21], {{g_bytes + 282, 8}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[102], {{g_bytes + 1182, 11}}},
-     {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}},
-    {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
-     {&grpc_static_metadata_refcounts[97], {{g_bytes + 1143, 8}}}},
-    {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
-     {&grpc_static_metadata_refcounts[37], {{g_bytes + 590, 7}}}},
-    {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
-     {&grpc_static_metadata_refcounts[103], {{g_bytes + 1193, 16}}}},
-    {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
-     {&grpc_static_metadata_refcounts[38], {{g_bytes + 597, 4}}}},
-    {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
-     {&grpc_static_metadata_refcounts[104], {{g_bytes + 1209, 13}}}},
-    {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
-     {&grpc_static_metadata_refcounts[105], {{g_bytes + 1222, 12}}}},
-    {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}},
-     {&grpc_static_metadata_refcounts[106], {{g_bytes + 1234, 21}}}},
-    {{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
-     {&grpc_static_metadata_refcounts[97], {{g_bytes + 1143, 8}}}},
-    {{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
-     {&grpc_static_metadata_refcounts[38], {{g_bytes + 597, 4}}}},
-    {{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}},
-     {&grpc_static_metadata_refcounts[104], {{g_bytes + 1209, 13}}}},
+    {{&grpc_static_metadata_refcounts[3], {{10, g_bytes + 19}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[1], {{7, g_bytes + 5}}},
+     {&grpc_static_metadata_refcounts[40], {{3, g_bytes + 612}}}},
+    {{&grpc_static_metadata_refcounts[1], {{7, g_bytes + 5}}},
+     {&grpc_static_metadata_refcounts[41], {{4, g_bytes + 615}}}},
+    {{&grpc_static_metadata_refcounts[0], {{5, g_bytes + 0}}},
+     {&grpc_static_metadata_refcounts[42], {{1, g_bytes + 619}}}},
+    {{&grpc_static_metadata_refcounts[0], {{5, g_bytes + 0}}},
+     {&grpc_static_metadata_refcounts[43], {{11, g_bytes + 620}}}},
+    {{&grpc_static_metadata_refcounts[4], {{7, g_bytes + 29}}},
+     {&grpc_static_metadata_refcounts[44], {{4, g_bytes + 631}}}},
+    {{&grpc_static_metadata_refcounts[4], {{7, g_bytes + 29}}},
+     {&grpc_static_metadata_refcounts[45], {{5, g_bytes + 635}}}},
+    {{&grpc_static_metadata_refcounts[2], {{7, g_bytes + 12}}},
+     {&grpc_static_metadata_refcounts[46], {{3, g_bytes + 640}}}},
+    {{&grpc_static_metadata_refcounts[2], {{7, g_bytes + 12}}},
+     {&grpc_static_metadata_refcounts[47], {{3, g_bytes + 643}}}},
+    {{&grpc_static_metadata_refcounts[2], {{7, g_bytes + 12}}},
+     {&grpc_static_metadata_refcounts[48], {{3, g_bytes + 646}}}},
+    {{&grpc_static_metadata_refcounts[2], {{7, g_bytes + 12}}},
+     {&grpc_static_metadata_refcounts[49], {{3, g_bytes + 649}}}},
+    {{&grpc_static_metadata_refcounts[2], {{7, g_bytes + 12}}},
+     {&grpc_static_metadata_refcounts[50], {{3, g_bytes + 652}}}},
+    {{&grpc_static_metadata_refcounts[2], {{7, g_bytes + 12}}},
+     {&grpc_static_metadata_refcounts[51], {{3, g_bytes + 655}}}},
+    {{&grpc_static_metadata_refcounts[2], {{7, g_bytes + 12}}},
+     {&grpc_static_metadata_refcounts[52], {{3, g_bytes + 658}}}},
+    {{&grpc_static_metadata_refcounts[53], {{14, g_bytes + 661}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[16], {{15, g_bytes + 186}}},
+     {&grpc_static_metadata_refcounts[54], {{13, g_bytes + 675}}}},
+    {{&grpc_static_metadata_refcounts[55], {{15, g_bytes + 688}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[56], {{13, g_bytes + 703}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[57], {{6, g_bytes + 716}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[58], {{27, g_bytes + 722}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[59], {{3, g_bytes + 749}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[60], {{5, g_bytes + 752}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[61], {{13, g_bytes + 757}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[62], {{13, g_bytes + 770}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[63], {{19, g_bytes + 783}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[15], {{16, g_bytes + 170}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[64], {{16, g_bytes + 802}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[65], {{14, g_bytes + 818}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[66], {{16, g_bytes + 832}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[67], {{13, g_bytes + 848}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[14], {{12, g_bytes + 158}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[68], {{6, g_bytes + 861}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[69], {{4, g_bytes + 867}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[70], {{4, g_bytes + 871}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[71], {{6, g_bytes + 875}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[72], {{7, g_bytes + 881}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[73], {{4, g_bytes + 888}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[20], {{4, g_bytes + 278}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[74], {{8, g_bytes + 892}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[75], {{17, g_bytes + 900}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[76], {{13, g_bytes + 917}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[77], {{8, g_bytes + 930}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[78], {{19, g_bytes + 938}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[79], {{13, g_bytes + 957}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[80], {{4, g_bytes + 970}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[81], {{8, g_bytes + 974}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[82], {{12, g_bytes + 982}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[83], {{18, g_bytes + 994}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[84], {{19, g_bytes + 1012}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[85], {{5, g_bytes + 1031}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[86], {{7, g_bytes + 1036}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[87], {{7, g_bytes + 1043}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[88], {{11, g_bytes + 1050}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[89], {{6, g_bytes + 1061}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[90], {{10, g_bytes + 1067}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[91], {{25, g_bytes + 1077}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[92], {{17, g_bytes + 1102}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[19], {{10, g_bytes + 268}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[93], {{4, g_bytes + 1119}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[94], {{3, g_bytes + 1123}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[95], {{16, g_bytes + 1126}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[7], {{11, g_bytes + 50}}},
+     {&grpc_static_metadata_refcounts[96], {{1, g_bytes + 1142}}}},
+    {{&grpc_static_metadata_refcounts[7], {{11, g_bytes + 50}}},
+     {&grpc_static_metadata_refcounts[25], {{1, g_bytes + 350}}}},
+    {{&grpc_static_metadata_refcounts[7], {{11, g_bytes + 50}}},
+     {&grpc_static_metadata_refcounts[26], {{1, g_bytes + 351}}}},
+    {{&grpc_static_metadata_refcounts[9], {{13, g_bytes + 77}}},
+     {&grpc_static_metadata_refcounts[97], {{8, g_bytes + 1143}}}},
+    {{&grpc_static_metadata_refcounts[9], {{13, g_bytes + 77}}},
+     {&grpc_static_metadata_refcounts[38], {{4, g_bytes + 597}}}},
+    {{&grpc_static_metadata_refcounts[9], {{13, g_bytes + 77}}},
+     {&grpc_static_metadata_refcounts[37], {{7, g_bytes + 590}}}},
+    {{&grpc_static_metadata_refcounts[5], {{2, g_bytes + 36}}},
+     {&grpc_static_metadata_refcounts[98], {{8, g_bytes + 1151}}}},
+    {{&grpc_static_metadata_refcounts[14], {{12, g_bytes + 158}}},
+     {&grpc_static_metadata_refcounts[99], {{16, g_bytes + 1159}}}},
+    {{&grpc_static_metadata_refcounts[4], {{7, g_bytes + 29}}},
+     {&grpc_static_metadata_refcounts[100], {{4, g_bytes + 1175}}}},
+    {{&grpc_static_metadata_refcounts[1], {{7, g_bytes + 5}}},
+     {&grpc_static_metadata_refcounts[101], {{3, g_bytes + 1179}}}},
+    {{&grpc_static_metadata_refcounts[16], {{15, g_bytes + 186}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[15], {{16, g_bytes + 170}}},
+     {&grpc_static_metadata_refcounts[97], {{8, g_bytes + 1143}}}},
+    {{&grpc_static_metadata_refcounts[15], {{16, g_bytes + 170}}},
+     {&grpc_static_metadata_refcounts[38], {{4, g_bytes + 597}}}},
+    {{&grpc_static_metadata_refcounts[21], {{8, g_bytes + 282}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[102], {{11, g_bytes + 1182}}},
+     {&grpc_static_metadata_refcounts[29], {{0, g_bytes + 354}}}},
+    {{&grpc_static_metadata_refcounts[10], {{20, g_bytes + 90}}},
+     {&grpc_static_metadata_refcounts[97], {{8, g_bytes + 1143}}}},
+    {{&grpc_static_metadata_refcounts[10], {{20, g_bytes + 90}}},
+     {&grpc_static_metadata_refcounts[37], {{7, g_bytes + 590}}}},
+    {{&grpc_static_metadata_refcounts[10], {{20, g_bytes + 90}}},
+     {&grpc_static_metadata_refcounts[103], {{16, g_bytes + 1193}}}},
+    {{&grpc_static_metadata_refcounts[10], {{20, g_bytes + 90}}},
+     {&grpc_static_metadata_refcounts[38], {{4, g_bytes + 597}}}},
+    {{&grpc_static_metadata_refcounts[10], {{20, g_bytes + 90}}},
+     {&grpc_static_metadata_refcounts[104], {{13, g_bytes + 1209}}}},
+    {{&grpc_static_metadata_refcounts[10], {{20, g_bytes + 90}}},
+     {&grpc_static_metadata_refcounts[105], {{12, g_bytes + 1222}}}},
+    {{&grpc_static_metadata_refcounts[10], {{20, g_bytes + 90}}},
+     {&grpc_static_metadata_refcounts[106], {{21, g_bytes + 1234}}}},
+    {{&grpc_static_metadata_refcounts[16], {{15, g_bytes + 186}}},
+     {&grpc_static_metadata_refcounts[97], {{8, g_bytes + 1143}}}},
+    {{&grpc_static_metadata_refcounts[16], {{15, g_bytes + 186}}},
+     {&grpc_static_metadata_refcounts[38], {{4, g_bytes + 597}}}},
+    {{&grpc_static_metadata_refcounts[16], {{15, g_bytes + 186}}},
+     {&grpc_static_metadata_refcounts[104], {{13, g_bytes + 1209}}}},
 };
 const uint8_t grpc_static_accept_encoding_metadata[8] = {0,  76, 77, 78,
                                                          79, 80, 81, 82};
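
The bulk of the hunk above is mechanical: every initializer in this generated static-metadata table flips from {bytes, length} to {length, bytes}. That lines up with the one-line change to include/grpc/impl/codegen/slice.h in this merge, which appears to reorder the fields of the refcounted slice representation; since this file is generated output, each entry simply follows the new field order. A minimal sketch of the assumed before/after layouts (hypothetical struct names, not the verbatim headers):

#include <cstddef>
#include <cstdint>

// Assumed layouts only; the real type is the refcounted member of grpc_slice.
struct RefcountedSliceBefore { uint8_t* bytes; size_t length; };  // old initializer order
struct RefcountedSliceAfter  { size_t length; uint8_t* bytes; };  // order used by the regenerated table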

+ 4 - 2
src/core/lib/transport/transport.cc

@@ -30,6 +30,7 @@
 #include "src/core/lib/gpr/alloc.h"
 #include "src/core/lib/gpr/string.h"
 #include "src/core/lib/iomgr/executor.h"
+#include "src/core/lib/iomgr/iomgr.h"
 #include "src/core/lib/slice/slice_internal.h"
 #include "src/core/lib/slice/slice_string_helpers.h"
 #include "src/core/lib/transport/transport_impl.h"
@@ -63,8 +64,9 @@ void grpc_stream_unref(grpc_stream_refcount* refcount, const char* reason) {
 void grpc_stream_unref(grpc_stream_refcount* refcount) {
 #endif
   if (gpr_unref(&refcount->refs)) {
-    if (grpc_core::ExecCtx::Get()->flags() &
-        GRPC_EXEC_CTX_FLAG_THREAD_RESOURCE_LOOP) {
+    if (!grpc_iomgr_is_any_background_poller_thread() &&
+        (grpc_core::ExecCtx::Get()->flags() &
+         GRPC_EXEC_CTX_FLAG_THREAD_RESOURCE_LOOP)) {
       /* Ick.
          The thread we're running on MAY be owned (indirectly) by a call-stack.
          If that's the case, destroying the call-stack MAY try to destroy the

+ 10 - 4
src/core/tsi/ssl_transport_security.cc

@@ -619,15 +619,19 @@ static tsi_result x509_store_load_certs(X509_STORE* cert_store,
       sk_X509_NAME_push(*root_names, root_name);
       root_name = nullptr;
     }
+    ERR_clear_error();
     if (!X509_STORE_add_cert(cert_store, root)) {
-      gpr_log(GPR_ERROR, "Could not add root certificate to ssl context.");
-      result = TSI_INTERNAL_ERROR;
-      break;
+      unsigned long error = ERR_get_error();
+      if (ERR_GET_LIB(error) != ERR_LIB_X509 ||
+          ERR_GET_REASON(error) != X509_R_CERT_ALREADY_IN_HASH_TABLE) {
+        gpr_log(GPR_ERROR, "Could not add root certificate to ssl context.");
+        result = TSI_INTERNAL_ERROR;
+        break;
+      }
     }
     X509_free(root);
     num_roots++;
   }
-
   if (num_roots == 0) {
     gpr_log(GPR_ERROR, "Could not load any root certificate.");
     result = TSI_INVALID_ARGUMENT;
@@ -651,6 +655,8 @@ static tsi_result ssl_ctx_load_verification_certs(SSL_CTX* context,
                                                   STACK_OF(X509_NAME) *
                                                       *root_name) {
   X509_STORE* cert_store = SSL_CTX_get_cert_store(context);
+  X509_STORE_set_flags(cert_store,
+                       X509_V_FLAG_PARTIAL_CHAIN | X509_V_FLAG_TRUSTED_FIRST);
   return x509_store_load_certs(cert_store, pem_roots, pem_roots_size,
                                root_name);
 }
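
The hunks above change root-certificate loading in two ways: a failure from X509_STORE_add_cert is now fatal only when it is not the "certificate already in hash table" case, so a PEM bundle that lists the same root twice no longer fails loading, and the certificate store gets X509_V_FLAG_PARTIAL_CHAIN | X509_V_FLAG_TRUSTED_FIRST, so verification can anchor on any certificate in the store (not just a self-signed root) and trusted certificates are preferred while building the chain. A minimal standalone sketch of the duplicate-tolerant add, using plain OpenSSL; the helper name is hypothetical and this is not the gRPC code itself:

#include <openssl/err.h>
#include <openssl/x509.h>
#include <openssl/x509_vfy.h>

// Returns true if the root was added or was already present in the store.
static bool AddRootToleratingDuplicates(X509_STORE* store, X509* root) {
  ERR_clear_error();  // make sure we inspect only the error raised by this call
  if (X509_STORE_add_cert(store, root) == 1) return true;
  unsigned long error = ERR_get_error();
  // Only the duplicate-certificate case is benign; anything else stays fatal.
  return ERR_GET_LIB(error) == ERR_LIB_X509 &&
         ERR_GET_REASON(error) == X509_R_CERT_ALREADY_IN_HASH_TABLE;
}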

+ 3 - 0
src/cpp/common/alarm.cc

@@ -52,6 +52,7 @@ class AlarmImpl : public ::grpc::internal::CompletionQueueTag {
     return true;
   }
   void Set(::grpc::CompletionQueue* cq, gpr_timespec deadline, void* tag) {
+    grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
     grpc_core::ExecCtx exec_ctx;
     GRPC_CQ_INTERNAL_REF(cq->cq(), "alarm");
     cq_ = cq->cq();
@@ -72,6 +73,7 @@ class AlarmImpl : public ::grpc::internal::CompletionQueueTag {
                     &on_alarm_);
   }
   void Set(gpr_timespec deadline, std::function<void(bool)> f) {
+    grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
     grpc_core::ExecCtx exec_ctx;
     // Don't use any CQ at all. Instead just use the timer to fire the function
     callback_ = std::move(f);
@@ -87,6 +89,7 @@ class AlarmImpl : public ::grpc::internal::CompletionQueueTag {
                     &on_alarm_);
   }
   void Cancel() {
+    grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
     grpc_core::ExecCtx exec_ctx;
     grpc_timer_cancel(&timer_);
   }

+ 0 - 8
src/cpp/common/completion_queue_cc.cc

@@ -42,14 +42,6 @@ void CompletionQueue::Shutdown() {
   CompleteAvalanching();
 }
 
-void CompletionQueue::CompleteAvalanching() {
-  // Check if this was the last avalanching operation
-  if (gpr_atm_no_barrier_fetch_add(&avalanches_in_flight_,
-                                   static_cast<gpr_atm>(-1)) == 1) {
-    grpc_completion_queue_shutdown(cq_);
-  }
-}
-
 CompletionQueue::NextStatus CompletionQueue::AsyncNextInternal(
     void** tag, bool* ok, gpr_timespec deadline) {
   for (;;) {

+ 4 - 0
src/cpp/common/core_codegen.cc

@@ -59,6 +59,10 @@ grpc_completion_queue* CoreCodegen::grpc_completion_queue_create_for_pluck(
   return ::grpc_completion_queue_create_for_pluck(reserved);
 }
 
+void CoreCodegen::grpc_completion_queue_shutdown(grpc_completion_queue* cq) {
+  ::grpc_completion_queue_shutdown(cq);
+}
+
 void CoreCodegen::grpc_completion_queue_destroy(grpc_completion_queue* cq) {
   ::grpc_completion_queue_destroy(cq);
 }

+ 2 - 2
src/cpp/server/load_reporter/load_reporter_async_service_impl.cc

@@ -211,8 +211,8 @@ void LoadReporterAsyncServiceImpl::ReportLoadHandler::OnReadDone(
                                           load_key_);
       const auto& load_report_interval = initial_request.load_report_interval();
       load_report_interval_ms_ =
-          static_cast<uint64_t>(load_report_interval.seconds() * 1000 +
-                                load_report_interval.nanos() / 1000);
+          static_cast<unsigned long>(load_report_interval.seconds() * 1000 +
+                                     load_report_interval.nanos() / 1000);
       gpr_log(
           GPR_INFO,
           "[LRS %p] Initial request received. Start load reporting (load "

+ 149 - 59
src/cpp/server/server_cc.cc

@@ -59,7 +59,15 @@ namespace {
 #define DEFAULT_MAX_SYNC_SERVER_THREADS INT_MAX
 
 // How many callback requests of each method should we pre-register at start
-#define DEFAULT_CALLBACK_REQS_PER_METHOD 32
+#define DEFAULT_CALLBACK_REQS_PER_METHOD 512
+
+// What is the (soft) limit for outstanding requests in the server
+#define SOFT_MAXIMUM_CALLBACK_REQS_OUTSTANDING 30000
+
+// If the number of unmatched requests for a method drops below this amount, try
+// to allocate extra unless it pushes the total number of callbacks above the
+// soft maximum
+#define SOFT_MINIMUM_SPARE_CALLBACK_REQS_PER_METHOD 128
 
 class DefaultGlobalCallbacks final : public Server::GlobalCallbacks {
  public:
@@ -177,11 +185,10 @@ class Server::SyncRequest final : public internal::CompletionQueueTag {
     GPR_ASSERT(cq_ && !in_flight_);
     in_flight_ = true;
     if (method_tag_) {
-      if (GRPC_CALL_OK !=
-          grpc_server_request_registered_call(
+      if (grpc_server_request_registered_call(
               server, method_tag_, &call_, &deadline_, &request_metadata_,
               has_request_payload_ ? &request_payload_ : nullptr, cq_,
-              notify_cq, this)) {
+              notify_cq, this) != GRPC_CALL_OK) {
         TeardownRequest();
         return;
       }
@@ -343,9 +350,10 @@ class Server::SyncRequest final : public internal::CompletionQueueTag {
 
 class Server::CallbackRequest final : public internal::CompletionQueueTag {
  public:
-  CallbackRequest(Server* server, internal::RpcServiceMethod* method,
-                  void* method_tag)
+  CallbackRequest(Server* server, size_t method_idx,
+                  internal::RpcServiceMethod* method, void* method_tag)
       : server_(server),
+        method_index_(method_idx),
         method_(method),
         method_tag_(method_tag),
         has_request_payload_(
@@ -353,12 +361,22 @@ class Server::CallbackRequest final : public internal::CompletionQueueTag {
             method->method_type() == internal::RpcMethod::SERVER_STREAMING),
         cq_(server->CallbackCQ()),
         tag_(this) {
+    server_->callback_reqs_outstanding_++;
     Setup();
   }
 
-  ~CallbackRequest() { Clear(); }
+  ~CallbackRequest() {
+    Clear();
+
+    // The counter of outstanding requests must be decremented
+    // under a lock in case it causes the server shutdown.
+    std::lock_guard<std::mutex> l(server_->callback_reqs_mu_);
+    if (--server_->callback_reqs_outstanding_ == 0) {
+      server_->callback_reqs_done_cv_.notify_one();
+    }
+  }
 
-  void Request() {
+  bool Request() {
     if (method_tag_) {
       if (GRPC_CALL_OK !=
           grpc_server_request_registered_call(
@@ -366,7 +384,7 @@ class Server::CallbackRequest final : public internal::CompletionQueueTag {
               &request_metadata_,
               has_request_payload_ ? &request_payload_ : nullptr, cq_->cq(),
               cq_->cq(), static_cast<void*>(&tag_))) {
-        return;
+        return false;
       }
     } else {
       if (!call_details_) {
@@ -376,9 +394,10 @@ class Server::CallbackRequest final : public internal::CompletionQueueTag {
       if (grpc_server_request_call(server_->c_server(), &call_, call_details_,
                                    &request_metadata_, cq_->cq(), cq_->cq(),
                                    static_cast<void*>(&tag_)) != GRPC_CALL_OK) {
-        return;
+        return false;
       }
     }
+    return true;
   }
 
   bool FinalizeResult(void** tag, bool* status) override { return false; }
@@ -409,12 +428,36 @@ class Server::CallbackRequest final : public internal::CompletionQueueTag {
       GPR_ASSERT(!req_->FinalizeResult(&ignored, &new_ok));
       GPR_ASSERT(ignored == req_);
 
+      int count =
+          static_cast<int>(gpr_atm_no_barrier_fetch_add(
+              &req_->server_
+                   ->callback_unmatched_reqs_count_[req_->method_index_],
+              -1)) -
+          1;
       if (!ok) {
-        // The call has been shutdown
-        req_->Clear();
+        // The call has been shutdown.
+        // Delete its contents to free up the request.
+        delete req_;
         return;
       }
 
+      // If this was the last request in the list or it is below the soft
+      // minimum and there are spare requests available, set up a new one.
+      if (count == 0 || (count < SOFT_MINIMUM_SPARE_CALLBACK_REQS_PER_METHOD &&
+                         req_->server_->callback_reqs_outstanding_ <
+                             SOFT_MAXIMUM_CALLBACK_REQS_OUTSTANDING)) {
+        auto* new_req = new CallbackRequest(req_->server_, req_->method_index_,
+                                            req_->method_, req_->method_tag_);
+        if (!new_req->Request()) {
+          // The server must have just decided to shutdown.
+          gpr_atm_no_barrier_fetch_add(
+              &new_req->server_
+                   ->callback_unmatched_reqs_count_[new_req->method_index_],
+              -1);
+          delete new_req;
+        }
+      }
+
       // Bind the call, deadline, and metadata from what we got
       req_->ctx_.set_call(req_->call_);
       req_->ctx_.cq_ = req_->cq_;
@@ -462,17 +505,30 @@ class Server::CallbackRequest final : public internal::CompletionQueueTag {
           internal::MethodHandler::HandlerParameter(
               call_, &req_->ctx_, req_->request_, req_->request_status_,
               [this] {
-                req_->Reset();
-                req_->Request();
+                // Recycle this request if there aren't too many outstanding.
+                // Note that we don't have to worry about a case where there
+                // are no requests waiting to match for this method since that
+                // is already taken care of when binding a request to a call.
+                // TODO(vjpai): Also don't recycle this request if the dynamic
+                //              load no longer justifies it. Consider measuring
+                //              dynamic load and setting a target accordingly.
+                if (req_->server_->callback_reqs_outstanding_ <
+                    SOFT_MAXIMUM_CALLBACK_REQS_OUTSTANDING) {
+                  req_->Clear();
+                  req_->Setup();
+                } else {
+                  // We can free up this request because there are too many
+                  delete req_;
+                  return;
+                }
+                if (!req_->Request()) {
+                  // The server must have just decided to shutdown.
+                  delete req_;
+                }
               }));
     }
   };
 
-  void Reset() {
-    Clear();
-    Setup();
-  }
-
   void Clear() {
     if (call_details_) {
       delete call_details_;
@@ -487,6 +543,8 @@ class Server::CallbackRequest final : public internal::CompletionQueueTag {
   }
 
   void Setup() {
+    gpr_atm_no_barrier_fetch_add(
+        &server_->callback_unmatched_reqs_count_[method_index_], 1);
     grpc_metadata_array_init(&request_metadata_);
     ctx_.Setup(gpr_inf_future(GPR_CLOCK_REALTIME));
     request_payload_ = nullptr;
@@ -495,6 +553,7 @@ class Server::CallbackRequest final : public internal::CompletionQueueTag {
   }
 
   Server* const server_;
+  size_t method_index_;
   internal::RpcServiceMethod* const method_;
   void* const method_tag_;
   const bool has_request_payload_;
@@ -715,6 +774,13 @@ Server::~Server() {
   }
 
   grpc_server_destroy(server_);
+  for (auto& per_method_count : callback_unmatched_reqs_count_) {
+    // There should be no more unmatched callbacks for any method
+    // as each request is failed by Shutdown. Check that this actually
+    // happened
+    GPR_ASSERT(static_cast<int>(gpr_atm_no_barrier_load(&per_method_count)) ==
+               0);
+  }
 }
 
 void Server::SetGlobalCallbacks(GlobalCallbacks* callbacks) {
@@ -769,6 +835,7 @@ bool Server::RegisterService(const grpc::string* host, Service* service) {
   }
 
   const char* method_name = nullptr;
+
   for (auto it = service->methods_.begin(); it != service->methods_.end();
        ++it) {
     if (it->get() == nullptr) {  // Handled by generic service if any.
@@ -794,13 +861,15 @@ bool Server::RegisterService(const grpc::string* host, Service* service) {
       }
     } else {
       // a callback method. Register at least some callback requests
+      callback_unmatched_reqs_count_.push_back(0);
+      auto method_index = callback_unmatched_reqs_count_.size() - 1;
       // TODO(vjpai): Register these dynamically based on need
       for (int i = 0; i < DEFAULT_CALLBACK_REQS_PER_METHOD; i++) {
-        auto* req = new CallbackRequest(this, method, method_registration_tag);
-        callback_reqs_.emplace_back(req);
+        callback_reqs_to_start_.push_back(new CallbackRequest(
+            this, method_index, method, method_registration_tag));
       }
-      // Enqueue it so that it will be Request'ed later once
-      // all request matchers are created at core server startup
+      // Enqueue it so that it will be Request'ed later after all request
+      // matchers are created at core server startup
     }
 
     method_name = method->name();
@@ -889,9 +958,10 @@ void Server::Start(ServerCompletionQueue** cqs, size_t num_cqs) {
     (*it)->Start();
   }
 
-  for (auto& cbreq : callback_reqs_) {
-    cbreq->Request();
+  for (auto* cbreq : callback_reqs_to_start_) {
+    GPR_ASSERT(cbreq->Request());
   }
+  callback_reqs_to_start_.clear();
 
   if (default_health_check_service_impl != nullptr) {
     default_health_check_service_impl->StartServingThread();
@@ -900,49 +970,69 @@ void Server::Start(ServerCompletionQueue** cqs, size_t num_cqs) {
 
 void Server::ShutdownInternal(gpr_timespec deadline) {
   std::unique_lock<std::mutex> lock(mu_);
-  if (!shutdown_) {
-    shutdown_ = true;
+  if (shutdown_) {
+    return;
+  }
 
-    /// The completion queue to use for server shutdown completion notification
-    CompletionQueue shutdown_cq;
-    ShutdownTag shutdown_tag;  // Dummy shutdown tag
-    grpc_server_shutdown_and_notify(server_, shutdown_cq.cq(), &shutdown_tag);
+  shutdown_ = true;
 
-    shutdown_cq.Shutdown();
+  /// The completion queue to use for server shutdown completion notification
+  CompletionQueue shutdown_cq;
+  ShutdownTag shutdown_tag;  // Dummy shutdown tag
+  grpc_server_shutdown_and_notify(server_, shutdown_cq.cq(), &shutdown_tag);
 
-    void* tag;
-    bool ok;
-    CompletionQueue::NextStatus status =
-        shutdown_cq.AsyncNext(&tag, &ok, deadline);
+  shutdown_cq.Shutdown();
 
-    // If this timed out, it means we are done with the grace period for a clean
-    // shutdown. We should force a shutdown now by cancelling all inflight calls
-    if (status == CompletionQueue::NextStatus::TIMEOUT) {
-      grpc_server_cancel_all_calls(server_);
-    }
-    // Else in case of SHUTDOWN or GOT_EVENT, it means that the server has
-    // successfully shutdown
+  void* tag;
+  bool ok;
+  CompletionQueue::NextStatus status =
+      shutdown_cq.AsyncNext(&tag, &ok, deadline);
 
-    // Shutdown all ThreadManagers. This will try to gracefully stop all the
-    // threads in the ThreadManagers (once they process any inflight requests)
-    for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) {
-      (*it)->Shutdown();  // ThreadManager's Shutdown()
-    }
+  // If this timed out, it means we are done with the grace period for a clean
+  // shutdown. We should force a shutdown now by cancelling all inflight calls
+  if (status == CompletionQueue::NextStatus::TIMEOUT) {
+    grpc_server_cancel_all_calls(server_);
+  }
+  // Else in case of SHUTDOWN or GOT_EVENT, it means that the server has
+  // successfully shutdown
 
-    // Wait for threads in all ThreadManagers to terminate
-    for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) {
-      (*it)->Wait();
-    }
+  // Shutdown all ThreadManagers. This will try to gracefully stop all the
+  // threads in the ThreadManagers (once they process any inflight requests)
+  for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) {
+    (*it)->Shutdown();  // ThreadManager's Shutdown()
+  }
 
-    // Drain the shutdown queue (if the previous call to AsyncNext() timed out
-    // and we didn't remove the tag from the queue yet)
-    while (shutdown_cq.Next(&tag, &ok)) {
-      // Nothing to be done here. Just ignore ok and tag values
-    }
+  // Wait for threads in all ThreadManagers to terminate
+  for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) {
+    (*it)->Wait();
+  }
 
-    shutdown_notified_ = true;
-    shutdown_cv_.notify_all();
+  // Wait for all outstanding callback requests to complete
+  // (whether waiting for a match or already active).
+  // We know that no new requests will be created after this point
+  // because they are only created at server startup time or when
+  // we have a successful match on a request. During the shutdown phase,
+  // requests that have not yet matched will be failed rather than
+  // allowed to succeed, which will cause the server to delete the
+  // request and decrement the count. Possibly a request will match before
+  // the shutdown but then find that shutdown has already started by the
+  // time it tries to register a new request. In that case, the registration
+  // will report a failure, indicating a shutdown and again we won't end
+  // up incrementing the counter.
+  {
+    std::unique_lock<std::mutex> cblock(callback_reqs_mu_);
+    callback_reqs_done_cv_.wait(
+        cblock, [this] { return callback_reqs_outstanding_ == 0; });
+  }
+
+  // Drain the shutdown queue (if the previous call to AsyncNext() timed out
+  // and we didn't remove the tag from the queue yet)
+  while (shutdown_cq.Next(&tag, &ok)) {
+    // Nothing to be done here. Just ignore ok and tag values
   }
+
+  shutdown_notified_ = true;
+  shutdown_cv_.notify_all();
 }
 
 void Server::Wait() {
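
Taken together, the server_cc.cc hunks replace the fixed pool of 32 pre-registered callback requests per method with an adaptive scheme: 512 are registered up front, a completed request is recycled only while the process-wide total stays under the soft maximum of 30000, and a per-method pool that drops below 128 spare requests is topped up on a successful match. ShutdownInternal then waits on callback_reqs_done_cv_ until every outstanding request has been destroyed. The limits are deliberately soft bounds rather than exact caps, which keeps the matching path cheap; only the final decrement in ~CallbackRequest takes callback_reqs_mu_, so the shutdown wait cannot miss the last notification. A standalone sketch of just the replenishment decision, with assumed names (the real logic lives in the hunks above):

// Constants mirror the #defines introduced above.
constexpr int kSoftMinimumSparePerMethod = 128;  // SOFT_MINIMUM_SPARE_CALLBACK_REQS_PER_METHOD
constexpr int kSoftMaximumOutstanding = 30000;   // SOFT_MAXIMUM_CALLBACK_REQS_OUTSTANDING

// unmatched_for_method: requests still waiting to match for this method,
//                       after the current one has been claimed.
// outstanding_total:    all callback requests alive in the server, matched or not.
bool ShouldAllocateSpareRequest(int unmatched_for_method, int outstanding_total) {
  return unmatched_for_method == 0 ||
         (unmatched_for_method < kSoftMinimumSparePerMethod &&
          outstanding_total < kSoftMaximumOutstanding);
}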

+ 2 - 0
src/csharp/Grpc.Core.Api/.gitignore

@@ -0,0 +1,2 @@
+bin
+obj

+ 0 - 1
src/csharp/Grpc.Core/AuthContext.cs → src/csharp/Grpc.Core.Api/AuthContext.cs

@@ -19,7 +19,6 @@
 using System;
 using System.Collections.Generic;
 using System.Linq;
-using Grpc.Core.Internal;
 using Grpc.Core.Utils;
 
 namespace Grpc.Core

+ 3 - 2
src/csharp/Grpc.Core/AuthProperty.cs → src/csharp/Grpc.Core.Api/AuthProperty.cs

@@ -19,7 +19,7 @@
 using System;
 using System.Collections.Generic;
 using System.Linq;
-using Grpc.Core.Internal;
+using System.Text;
 using Grpc.Core.Utils;
 
 namespace Grpc.Core
@@ -30,6 +30,7 @@ namespace Grpc.Core
     /// </summary>
     public class AuthProperty
     {
+        static readonly Encoding EncodingUTF8 = System.Text.Encoding.UTF8;
         string name;
         byte[] valueBytes;
         Lazy<string> value;
@@ -38,7 +39,7 @@ namespace Grpc.Core
         {
             this.name = GrpcPreconditions.CheckNotNull(name);
             this.valueBytes = GrpcPreconditions.CheckNotNull(valueBytes);
-            this.value = new Lazy<string>(() => MarshalUtils.GetStringUTF8(this.valueBytes));
+            this.value = new Lazy<string>(() => EncodingUTF8.GetString(this.valueBytes));
         }
 
         /// <summary>

+ 59 - 0
src/csharp/Grpc.Core.Api/ContextPropagationOptions.cs

@@ -0,0 +1,59 @@
+#region Copyright notice and license
+
+// Copyright 2019 The gRPC Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#endregion
+
+using System;
+
+namespace Grpc.Core
+{
+    /// <summary>
+    /// Options for <see cref="ContextPropagationToken"/>.
+    /// </summary>
+    public class ContextPropagationOptions
+    {
+        /// <summary>
+        /// The context propagation options that will be used by default.
+        /// </summary>
+        public static readonly ContextPropagationOptions Default = new ContextPropagationOptions();
+
+        bool propagateDeadline;
+        bool propagateCancellation;
+
+        /// <summary>
+        /// Creates new context propagation options.
+        /// </summary>
+        /// <param name="propagateDeadline">If set to <c>true</c> parent call's deadline will be propagated to the child call.</param>
+        /// <param name="propagateCancellation">If set to <c>true</c> parent call's cancellation token will be propagated to the child call.</param>
+        public ContextPropagationOptions(bool propagateDeadline = true, bool propagateCancellation = true)
+        {
+            this.propagateDeadline = propagateDeadline;
+            this.propagateCancellation = propagateCancellation;
+        }
+            
+        /// <summary><c>true</c> if parent call's deadline should be propagated to the child call.</summary>
+        public bool IsPropagateDeadline
+        {
+            get { return this.propagateDeadline; }
+        }
+
+        /// <summary><c>true</c> if parent call's cancellation token should be propagated to the child call.</summary>
+        public bool IsPropagateCancellation
+        {
+            get { return this.propagateCancellation; }
+        }
+    }
+}

+ 35 - 0
src/csharp/Grpc.Core.Api/ContextPropagationToken.cs

@@ -0,0 +1,35 @@
+#region Copyright notice and license
+
+// Copyright 2015 gRPC authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#endregion
+
+namespace Grpc.Core
+{
+    /// <summary>
+    /// Token for propagating context of server side handlers to child calls.
+    /// In situations when a backend is making calls to another backend,
+    /// it makes sense to propagate properties like deadline and cancellation 
+    /// token of the server call to the child call.
+    /// The underlying gRPC implementation may provide other "opaque" contexts (like a tracing context) that
+    /// are not explicitly accessible via the public C# API, but this token still allows propagating them.
+    /// </summary>
+    public abstract class ContextPropagationToken
+    {
+        internal ContextPropagationToken()
+        {
+        }
+    }
+}

+ 0 - 0
src/csharp/Grpc.Core/DeserializationContext.cs → src/csharp/Grpc.Core.Api/DeserializationContext.cs


+ 32 - 0
src/csharp/Grpc.Core.Api/Grpc.Core.Api.csproj

@@ -0,0 +1,32 @@
+<Project Sdk="Microsoft.NET.Sdk">
+
+  <Import Project="..\Grpc.Core\Version.csproj.include" />
+  <Import Project="..\Grpc.Core\Common.csproj.include" />
+
+  <PropertyGroup>
+    <Copyright>Copyright 2019, Google Inc.</Copyright>
+    <AssemblyTitle>gRPC C# Surface API</AssemblyTitle>
+    <VersionPrefix>$(GrpcCsharpVersion)</VersionPrefix>
+    <Authors>Google Inc.</Authors>
+    <TargetFrameworks>net45;netstandard1.5</TargetFrameworks>
+    <AssemblyName>Grpc.Core.Api</AssemblyName>
+    <PackageId>Grpc.Core.Api</PackageId>
+    <PackageTags>gRPC RPC Protocol HTTP/2</PackageTags>
+    <PackageProjectUrl>https://github.com/grpc/grpc</PackageProjectUrl>
+    <PackageLicenseUrl>https://github.com/grpc/grpc/blob/master/LICENSE</PackageLicenseUrl>
+    <GenerateDocumentationFile>true</GenerateDocumentationFile>
+    <TreatWarningsAsErrors>true</TreatWarningsAsErrors>
+  </PropertyGroup>
+
+  <Import Project="..\Grpc.Core\SourceLink.csproj.include" />
+
+  <ItemGroup>
+    <PackageReference Include="System.Interactive.Async" Version="3.1.1" />
+  </ItemGroup>
+
+  <ItemGroup Condition=" '$(TargetFramework)' == 'net45' ">
+    <Reference Include="System" />
+    <Reference Include="Microsoft.CSharp" />
+  </ItemGroup>
+
+</Project>

+ 0 - 0
src/csharp/Grpc.Core/IAsyncStreamReader.cs → src/csharp/Grpc.Core.Api/IAsyncStreamReader.cs


+ 0 - 0
src/csharp/Grpc.Core/IAsyncStreamWriter.cs → src/csharp/Grpc.Core.Api/IAsyncStreamWriter.cs


Some files were not shown because too many files have changed in this diff