/*
 * Copyright 2015 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include <grpc++/server.h>

#include <cstdlib>
#include <sstream>
#include <utility>

#include <grpc++/completion_queue.h>
#include <grpc++/generic/async_generic_service.h>
#include <grpc++/impl/codegen/async_unary_call.h>
#include <grpc++/impl/codegen/completion_queue_tag.h>
#include <grpc++/impl/grpc_library.h>
#include <grpc++/impl/method_handler_impl.h>
#include <grpc++/impl/rpc_service_method.h>
#include <grpc++/impl/server_initializer.h>
#include <grpc++/impl/service_type.h>
#include <grpc++/security/server_credentials.h>
#include <grpc++/server_context.h>
#include <grpc++/support/time.h>
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>

#include "src/core/ext/transport/inproc/inproc_transport.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/surface/call.h"
#include "src/cpp/client/create_channel_internal.h"
#include "src/cpp/server/health/default_health_check_service.h"
#include "src/cpp/thread_manager/thread_manager.h"

namespace grpc {

class DefaultGlobalCallbacks final : public Server::GlobalCallbacks {
 public:
  ~DefaultGlobalCallbacks() override {}
  void PreSynchronousRequest(ServerContext* context) override {}
  void PostSynchronousRequest(ServerContext* context) override {}
};

static std::shared_ptr<Server::GlobalCallbacks> g_callbacks = nullptr;
static gpr_once g_once_init_callbacks = GPR_ONCE_INIT;

static void InitGlobalCallbacks() {
  if (!g_callbacks) {
    g_callbacks.reset(new DefaultGlobalCallbacks());
  }
}
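
// Holds the generic context and stream used to answer a call that no
// registered service handles. UnimplementedAsyncRequest lists this class as
// its first base so that server_context_ and generic_stream_ are constructed
// before the GenericAsyncRequest base that receives pointers to them.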
class Server::UnimplementedAsyncRequestContext {
 protected:
  UnimplementedAsyncRequestContext() : generic_stream_(&server_context_) {}

  GenericServerContext server_context_;
  GenericServerAsyncReaderWriter generic_stream_;
};

class Server::UnimplementedAsyncRequest final
    : public UnimplementedAsyncRequestContext,
      public GenericAsyncRequest {
 public:
  UnimplementedAsyncRequest(Server* server, ServerCompletionQueue* cq)
      : GenericAsyncRequest(server, &server_context_, &generic_stream_, cq, cq,
                            NULL, false),
        server_(server),
        cq_(cq) {}

  bool FinalizeResult(void** tag, bool* status) override;

  ServerContext* context() { return &server_context_; }
  GenericServerAsyncReaderWriter* stream() { return &generic_stream_; }

 private:
  Server* const server_;
  ServerCompletionQueue* const cq_;
};

typedef SneakyCallOpSet<CallOpSendInitialMetadata, CallOpServerSendStatus>
    UnimplementedAsyncResponseOp;
class Server::UnimplementedAsyncResponse final
    : public UnimplementedAsyncResponseOp {
 public:
  UnimplementedAsyncResponse(UnimplementedAsyncRequest* request);
  ~UnimplementedAsyncResponse() { delete request_; }

  bool FinalizeResult(void** tag, bool* status) override {
    bool r = UnimplementedAsyncResponseOp::FinalizeResult(tag, status);
    delete this;
    return r;
  }

 private:
  UnimplementedAsyncRequest* const request_;
};

class ShutdownTag : public CompletionQueueTag {
 public:
  bool FinalizeResult(void** tag, bool* status) { return false; }
};

class DummyTag : public CompletionQueueTag {
 public:
  bool FinalizeResult(void** tag, bool* status) {
    *status = true;
    return true;
  }
};
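
// Completion queue tag used to request an incoming call for a synchronous
// method. Each SyncRequest owns a dedicated pluck completion queue (cq_),
// created in SetupRequest(), which is handed off to CallData once a call
// arrives.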
class Server::SyncRequest final : public CompletionQueueTag {
 public:
  SyncRequest(RpcServiceMethod* method, void* tag)
      : method_(method),
        tag_(tag),
        in_flight_(false),
        has_request_payload_(method->method_type() == RpcMethod::NORMAL_RPC ||
                             method->method_type() ==
                                 RpcMethod::SERVER_STREAMING),
        call_details_(nullptr),
        cq_(nullptr) {
    grpc_metadata_array_init(&request_metadata_);
  }

  ~SyncRequest() {
    if (call_details_) {
      delete call_details_;
    }
    grpc_metadata_array_destroy(&request_metadata_);
  }

  void SetupRequest() { cq_ = grpc_completion_queue_create_for_pluck(nullptr); }

  void TeardownRequest() {
    grpc_completion_queue_destroy(cq_);
    cq_ = nullptr;
  }

  void Request(grpc_server* server, grpc_completion_queue* notify_cq) {
    GPR_ASSERT(cq_ && !in_flight_);
    in_flight_ = true;
    if (tag_) {
      if (GRPC_CALL_OK !=
          grpc_server_request_registered_call(
              server, tag_, &call_, &deadline_, &request_metadata_,
              has_request_payload_ ? &request_payload_ : nullptr, cq_,
              notify_cq, this)) {
        TeardownRequest();
        return;
      }
    } else {
      if (!call_details_) {
        call_details_ = new grpc_call_details;
        grpc_call_details_init(call_details_);
      }
      if (grpc_server_request_call(server, &call_, call_details_,
                                   &request_metadata_, cq_, notify_cq,
                                   this) != GRPC_CALL_OK) {
        TeardownRequest();
        return;
      }
    }
  }

  bool FinalizeResult(void** tag, bool* status) override {
    if (!*status) {
      grpc_completion_queue_destroy(cq_);
    }
    if (call_details_) {
      deadline_ = call_details_->deadline;
      grpc_call_details_destroy(call_details_);
      grpc_call_details_init(call_details_);
    }
    return true;
  }

  class CallData final {
   public:
    explicit CallData(Server* server, SyncRequest* mrd)
        : cq_(mrd->cq_),
          call_(mrd->call_, server, &cq_, server->max_receive_message_size()),
          ctx_(mrd->deadline_, &mrd->request_metadata_),
          has_request_payload_(mrd->has_request_payload_),
          request_payload_(mrd->request_payload_),
          method_(mrd->method_) {
      ctx_.set_call(mrd->call_);
      ctx_.cq_ = &cq_;
      GPR_ASSERT(mrd->in_flight_);
      mrd->in_flight_ = false;
      mrd->request_metadata_.count = 0;
    }

    ~CallData() {
      if (has_request_payload_ && request_payload_) {
        grpc_byte_buffer_destroy(request_payload_);
      }
    }

    void Run(std::shared_ptr<GlobalCallbacks> global_callbacks) {
      ctx_.BeginCompletionOp(&call_);
      global_callbacks->PreSynchronousRequest(&ctx_);
      method_->handler()->RunHandler(
          MethodHandler::HandlerParameter(&call_, &ctx_, request_payload_));
      global_callbacks->PostSynchronousRequest(&ctx_);
      request_payload_ = nullptr;
      cq_.Shutdown();

      CompletionQueueTag* op_tag = ctx_.GetCompletionOpTag();
      cq_.TryPluck(op_tag, gpr_inf_future(GPR_CLOCK_REALTIME));

      /* Ensure the cq_ is shutdown */
      DummyTag ignored_tag;
      GPR_ASSERT(cq_.Pluck(&ignored_tag) == false);
    }

   private:
    CompletionQueue cq_;
    Call call_;
    ServerContext ctx_;
    const bool has_request_payload_;
    grpc_byte_buffer* request_payload_;
    RpcServiceMethod* const method_;
  };

 private:
  RpcServiceMethod* const method_;
  void* const tag_;
  bool in_flight_;
  const bool has_request_payload_;
  grpc_call* call_;
  grpc_call_details* call_details_;
  gpr_timespec deadline_;
  grpc_metadata_array request_metadata_;
  grpc_byte_buffer* request_payload_;
  grpc_completion_queue* cq_;
};

// Implementation of ThreadManager. Each instance of SyncRequestThreadManager
// manages a pool of threads that poll for incoming Sync RPCs and call the
// appropriate RPC handlers
class Server::SyncRequestThreadManager : public ThreadManager {
 public:
  SyncRequestThreadManager(Server* server, CompletionQueue* server_cq,
                           std::shared_ptr<GlobalCallbacks> global_callbacks,
                           int min_pollers, int max_pollers,
                           int cq_timeout_msec)
      : ThreadManager(min_pollers, max_pollers),
        server_(server),
        server_cq_(server_cq),
        cq_timeout_msec_(cq_timeout_msec),
        global_callbacks_(global_callbacks) {}

  WorkStatus PollForWork(void** tag, bool* ok) override {
    *tag = nullptr;
    // TODO(ctiller): workaround for GPR_TIMESPAN based deadlines not working
    // right now
    gpr_timespec deadline =
        gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                     gpr_time_from_millis(cq_timeout_msec_, GPR_TIMESPAN));

    switch (server_cq_->AsyncNext(tag, ok, deadline)) {
      case CompletionQueue::TIMEOUT:
        return TIMEOUT;
      case CompletionQueue::SHUTDOWN:
        return SHUTDOWN;
      case CompletionQueue::GOT_EVENT:
        return WORK_FOUND;
    }

    GPR_UNREACHABLE_CODE(return TIMEOUT);
  }

  void DoWork(void* tag, bool ok) override {
    SyncRequest* sync_req = static_cast<SyncRequest*>(tag);

    if (!sync_req) {
      // No tag. Nothing to work on. This is an unlikely scenario and possibly
      // a bug in the RPC Manager implementation.
      gpr_log(GPR_ERROR, "Sync server. DoWork() was called with NULL tag");
      return;
    }

    if (ok) {
      // Calldata takes ownership of the completion queue inside sync_req
      SyncRequest::CallData cd(server_, sync_req);
      // Prepare for the next request
      if (!IsShutdown()) {
        sync_req->SetupRequest();  // Create new completion queue for sync_req
        sync_req->Request(server_->c_server(), server_cq_->cq());
      }

      GPR_TIMER_SCOPE("cd.Run()", 0);
      cd.Run(global_callbacks_);
    }
    // TODO (sreek) If ok is false here (which it isn't in case of
    // grpc_request_registered_call), we should still re-queue the request
    // object
  }

  void AddSyncMethod(RpcServiceMethod* method, void* tag) {
    sync_requests_.emplace_back(new SyncRequest(method, tag));
  }

  void AddUnknownSyncMethod() {
    if (!sync_requests_.empty()) {
      unknown_method_.reset(new RpcServiceMethod(
          "unknown", RpcMethod::BIDI_STREAMING, new UnknownMethodHandler));
      sync_requests_.emplace_back(
          new SyncRequest(unknown_method_.get(), nullptr));
    }
  }

  void Shutdown() override {
    ThreadManager::Shutdown();
    server_cq_->Shutdown();
  }

  void Wait() override {
    ThreadManager::Wait();
    // Drain any pending items from the queue
    void* tag;
    bool ok;
    while (server_cq_->Next(&tag, &ok)) {
      // Do nothing
    }
  }

  void Start() {
    if (!sync_requests_.empty()) {
      for (auto m = sync_requests_.begin(); m != sync_requests_.end(); m++) {
        (*m)->SetupRequest();
        (*m)->Request(server_->c_server(), server_cq_->cq());
      }

      Initialize();  // ThreadManager's Initialize()
    }
  }

 private:
  Server* server_;
  CompletionQueue* server_cq_;
  int cq_timeout_msec_;
  std::vector<std::unique_ptr<SyncRequest>> sync_requests_;
  std::unique_ptr<RpcServiceMethod> unknown_method_;
  std::unique_ptr<RpcServiceMethod> health_check_;
  std::shared_ptr<Server::GlobalCallbacks> global_callbacks_;
};

static internal::GrpcLibraryInitializer g_gli_initializer;
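
// Sets up one SyncRequestThreadManager per synchronous server completion
// queue, honors the health-check-service channel argument, and creates the
// underlying grpc_server with the resulting channel args.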
Server::Server(
    int max_receive_message_size, ChannelArguments* args,
    std::shared_ptr<std::vector<std::unique_ptr<ServerCompletionQueue>>>
        sync_server_cqs,
    int min_pollers, int max_pollers, int sync_cq_timeout_msec)
    : max_receive_message_size_(max_receive_message_size),
      sync_server_cqs_(sync_server_cqs),
      started_(false),
      shutdown_(false),
      shutdown_notified_(false),
      has_generic_service_(false),
      server_(nullptr),
      server_initializer_(new ServerInitializer(this)),
      health_check_service_disabled_(false) {
  g_gli_initializer.summon();
  gpr_once_init(&g_once_init_callbacks, InitGlobalCallbacks);
  global_callbacks_ = g_callbacks;
  global_callbacks_->UpdateArguments(args);

  for (auto it = sync_server_cqs_->begin(); it != sync_server_cqs_->end();
       it++) {
    sync_req_mgrs_.emplace_back(new SyncRequestThreadManager(
        this, (*it).get(), global_callbacks_, min_pollers, max_pollers,
        sync_cq_timeout_msec));
  }

  grpc_channel_args channel_args;
  args->SetChannelArgs(&channel_args);

  for (size_t i = 0; i < channel_args.num_args; i++) {
    if (0 ==
        strcmp(channel_args.args[i].key, kHealthCheckServiceInterfaceArg)) {
      if (channel_args.args[i].value.pointer.p == nullptr) {
        health_check_service_disabled_ = true;
      } else {
        health_check_service_.reset(static_cast<HealthCheckServiceInterface*>(
            channel_args.args[i].value.pointer.p));
      }
      break;
    }
  }

  server_ = grpc_server_create(&channel_args, nullptr);
}
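
// If the server was started but never shut down, performs a blocking
// Shutdown() here; if it was never started, the sync completion queues still
// need to be shut down before the core server is destroyed.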
Server::~Server() {
  {
    std::unique_lock<std::mutex> lock(mu_);
    if (started_ && !shutdown_) {
      lock.unlock();
      Shutdown();
    } else if (!started_) {
      // Shutdown the completion queues
      for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) {
        (*it)->Shutdown();
      }
    }
  }

  grpc_server_destroy(server_);
}

void Server::SetGlobalCallbacks(GlobalCallbacks* callbacks) {
  GPR_ASSERT(!g_callbacks);
  GPR_ASSERT(callbacks);
  g_callbacks.reset(callbacks);
}

grpc_server* Server::c_server() { return server_; }

std::shared_ptr<Channel> Server::InProcessChannel(
    const ChannelArguments& args) {
  grpc_channel_args channel_args = args.c_channel_args();
  return CreateChannelInternal(
      "inproc", grpc_inproc_channel_create(server_, &channel_args, nullptr));
}
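
// Maps a method's RPC type to how the core should deliver the request
// payload: unary and server-streaming calls have the core read the initial
// byte buffer, while client- and bidi-streaming calls do not.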
static grpc_server_register_method_payload_handling PayloadHandlingForMethod(
    RpcServiceMethod* method) {
  switch (method->method_type()) {
    case RpcMethod::NORMAL_RPC:
    case RpcMethod::SERVER_STREAMING:
      return GRPC_SRM_PAYLOAD_READ_INITIAL_BYTE_BUFFER;
    case RpcMethod::CLIENT_STREAMING:
    case RpcMethod::BIDI_STREAMING:
      return GRPC_SRM_PAYLOAD_NONE;
  }
  GPR_UNREACHABLE_CODE(return GRPC_SRM_PAYLOAD_NONE;);
}
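
// Registers every concrete method of the service with the core server.
// Synchronous methods are also queued on each SyncRequestThreadManager, while
// asynchronous methods only record the tag returned by the core. Returns
// false if a method was already registered.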
bool Server::RegisterService(const grpc::string* host, Service* service) {
  bool has_async_methods = service->has_async_methods();
  if (has_async_methods) {
    GPR_ASSERT(service->server_ == nullptr &&
               "Can only register an asynchronous service against one server.");
    service->server_ = this;
  }

  const char* method_name = nullptr;
  for (auto it = service->methods_.begin(); it != service->methods_.end();
       ++it) {
    if (it->get() == nullptr) {  // Handled by generic service if any.
      continue;
    }

    RpcServiceMethod* method = it->get();
    void* tag = grpc_server_register_method(
        server_, method->name(), host ? host->c_str() : nullptr,
        PayloadHandlingForMethod(method), 0);
    if (tag == nullptr) {
      gpr_log(GPR_DEBUG, "Attempt to register %s multiple times",
              method->name());
      return false;
    }

    if (method->handler() == nullptr) {  // Async method
      method->set_server_tag(tag);
    } else {
      for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) {
        (*it)->AddSyncMethod(method, tag);
      }
    }
    method_name = method->name();
  }

  // Parse service name.
  if (method_name != nullptr) {
    std::stringstream ss(method_name);
    grpc::string service_name;
    if (std::getline(ss, service_name, '/') &&
        std::getline(ss, service_name, '/')) {
      services_.push_back(service_name);
    }
  }
  return true;
}

void Server::RegisterAsyncGenericService(AsyncGenericService* service) {
  GPR_ASSERT(service->server_ == nullptr &&
             "Can only register an async generic service against one server.");
  service->server_ = this;
  has_generic_service_ = true;
}

int Server::AddListeningPort(const grpc::string& addr,
                             ServerCredentials* creds) {
  GPR_ASSERT(!started_);
  int port = creds->AddPortToServer(addr, server_);
  global_callbacks_->AddPort(this, addr, creds, port);
  return port;
}
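
// Starts the server: installs the default health check service when enabled
// and not explicitly provided (sync completion queues are required for it),
// starts the core server, registers unknown-method fallbacks (sync and async)
// when no generic service is present, and finally starts every
// SyncRequestThreadManager.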
void Server::Start(ServerCompletionQueue** cqs, size_t num_cqs) {
  GPR_ASSERT(!started_);
  global_callbacks_->PreServerStart(this);
  started_ = true;

  // Only create default health check service when user did not provide an
  // explicit one.
  if (health_check_service_ == nullptr && !health_check_service_disabled_ &&
      DefaultHealthCheckServiceEnabled()) {
    if (sync_server_cqs_->empty()) {
      gpr_log(GPR_INFO,
              "Default health check service disabled at async-only server.");
    } else {
      auto* default_hc_service = new DefaultHealthCheckService;
      health_check_service_.reset(default_hc_service);
      RegisterService(nullptr, default_hc_service->GetHealthCheckService());
    }
  }

  grpc_server_start(server_);

  if (!has_generic_service_) {
    for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) {
      (*it)->AddUnknownSyncMethod();
    }

    for (size_t i = 0; i < num_cqs; i++) {
      if (cqs[i]->IsFrequentlyPolled()) {
        new UnimplementedAsyncRequest(this, cqs[i]);
      }
    }
  }

  for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) {
    (*it)->Start();
  }
}
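
// Gracefully shuts the server down, waiting up to `deadline` for the core
// shutdown notification; any calls still in flight after the deadline are
// cancelled. Then shuts down and drains all sync ThreadManagers before
// signalling Wait().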
void Server::ShutdownInternal(gpr_timespec deadline) {
  std::unique_lock<std::mutex> lock(mu_);
  if (!shutdown_) {
    shutdown_ = true;

    /// The completion queue to use for server shutdown completion notification
    CompletionQueue shutdown_cq;
    ShutdownTag shutdown_tag;  // Dummy shutdown tag
    grpc_server_shutdown_and_notify(server_, shutdown_cq.cq(), &shutdown_tag);

    shutdown_cq.Shutdown();

    void* tag;
    bool ok;
    CompletionQueue::NextStatus status =
        shutdown_cq.AsyncNext(&tag, &ok, deadline);

    // If this timed out, it means we are done with the grace period for a clean
    // shutdown. We should force a shutdown now by cancelling all inflight calls
    if (status == CompletionQueue::NextStatus::TIMEOUT) {
      grpc_server_cancel_all_calls(server_);
    }
    // Else in case of SHUTDOWN or GOT_EVENT, it means that the server has
    // successfully shutdown

    // Shutdown all ThreadManagers. This will try to gracefully stop all the
    // threads in the ThreadManagers (once they process any inflight requests)
    for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) {
      (*it)->Shutdown();  // ThreadManager's Shutdown()
    }

    // Wait for threads in all ThreadManagers to terminate
    for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) {
      (*it)->Wait();
    }

    // Drain the shutdown queue (if the previous call to AsyncNext() timed out
    // and we didn't remove the tag from the queue yet)
    while (shutdown_cq.Next(&tag, &ok)) {
      // Nothing to be done here. Just ignore ok and tag values
    }

    shutdown_notified_ = true;
    shutdown_cv_.notify_all();
  }
}

void Server::Wait() {
  std::unique_lock<std::mutex> lock(mu_);
  while (started_ && !shutdown_notified_) {
    shutdown_cv_.wait(lock);
  }
}
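
// Fills a grpc_op batch from the given op set and starts it on the core call.
// A failure from grpc_call_start_batch is unrecoverable here, so the batch is
// logged and the process aborts.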
void Server::PerformOpsOnCall(CallOpSetInterface* ops, Call* call) {
  static const size_t MAX_OPS = 8;
  size_t nops = 0;
  grpc_op cops[MAX_OPS];
  ops->FillOps(call->call(), cops, &nops);
  auto result = grpc_call_start_batch(call->call(), cops, nops, ops, nullptr);
  if (result != GRPC_CALL_OK) {
    gpr_log(GPR_ERROR, "Fatal: grpc_call_start_batch returned %d", result);
    grpc_call_log_batch(__FILE__, __LINE__, GPR_LOG_SEVERITY_ERROR,
                        call->call(), cops, nops, ops);
    abort();
  }
}
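
// Base class for the completion queue tags used to request incoming async
// calls. Requesting a call counts as an "avalanching" operation on the call
// completion queue, since completing it can trigger further operations.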
ServerInterface::BaseAsyncRequest::BaseAsyncRequest(
    ServerInterface* server, ServerContext* context,
    ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq, void* tag,
    bool delete_on_finalize)
    : server_(server),
      context_(context),
      stream_(stream),
      call_cq_(call_cq),
      tag_(tag),
      delete_on_finalize_(delete_on_finalize),
      call_(nullptr) {
  call_cq_->RegisterAvalanching();  // This op will trigger more ops
}

ServerInterface::BaseAsyncRequest::~BaseAsyncRequest() {
  call_cq_->CompleteAvalanching();
}

bool ServerInterface::BaseAsyncRequest::FinalizeResult(void** tag,
                                                       bool* status) {
  if (*status) {
    context_->client_metadata_.FillMap();
  }
  context_->set_call(call_);
  context_->cq_ = call_cq_;
  Call call(call_, server_, call_cq_, server_->max_receive_message_size());
  if (*status && call_) {
    context_->BeginCompletionOp(&call);
  }
  // just the pointers inside call are copied here
  stream_->BindCall(&call);
  *tag = tag_;
  if (delete_on_finalize_) {
    delete this;
  }
  return true;
}

ServerInterface::RegisteredAsyncRequest::RegisteredAsyncRequest(
    ServerInterface* server, ServerContext* context,
    ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq, void* tag)
    : BaseAsyncRequest(server, context, stream, call_cq, tag, true) {}

void ServerInterface::RegisteredAsyncRequest::IssueRequest(
    void* registered_method, grpc_byte_buffer** payload,
    ServerCompletionQueue* notification_cq) {
  GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_registered_call(
                                 server_->server(), registered_method, &call_,
                                 &context_->deadline_,
                                 context_->client_metadata_.arr(), payload,
                                 call_cq_->cq(), notification_cq->cq(), this));
}

ServerInterface::GenericAsyncRequest::GenericAsyncRequest(
    ServerInterface* server, GenericServerContext* context,
    ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
    ServerCompletionQueue* notification_cq, void* tag, bool delete_on_finalize)
    : BaseAsyncRequest(server, context, stream, call_cq, tag,
                       delete_on_finalize) {
  grpc_call_details_init(&call_details_);
  GPR_ASSERT(notification_cq);
  GPR_ASSERT(call_cq);
  GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call(
                                 server->server(), &call_, &call_details_,
                                 context->client_metadata_.arr(), call_cq->cq(),
                                 notification_cq->cq(), this));
}

bool ServerInterface::GenericAsyncRequest::FinalizeResult(void** tag,
                                                          bool* status) {
  // TODO(yangg) remove the copy here.
  if (*status) {
    static_cast<GenericServerContext*>(context_)->method_ =
        StringFromCopiedSlice(call_details_.method);
    static_cast<GenericServerContext*>(context_)->host_ =
        StringFromCopiedSlice(call_details_.host);
    context_->deadline_ = call_details_.deadline;
  }
  grpc_slice_unref(call_details_.method);
  grpc_slice_unref(call_details_.host);
  return BaseAsyncRequest::FinalizeResult(tag, status);
}
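
// When a call for an unknown method arrives, immediately re-arm a new
// UnimplementedAsyncRequest so further unknown calls can be absorbed, and
// respond to this one with UNIMPLEMENTED via UnimplementedAsyncResponse.
// Always returns false so the tag is never surfaced to the application.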
bool Server::UnimplementedAsyncRequest::FinalizeResult(void** tag,
                                                       bool* status) {
  if (GenericAsyncRequest::FinalizeResult(tag, status) && *status) {
    new UnimplementedAsyncRequest(server_, cq_);
    new UnimplementedAsyncResponse(this);
  } else {
    delete this;
  }
  return false;
}

Server::UnimplementedAsyncResponse::UnimplementedAsyncResponse(
    UnimplementedAsyncRequest* request)
    : request_(request) {
  Status status(StatusCode::UNIMPLEMENTED, "");
  UnknownMethodHandler::FillOps(request_->context(), this);
  request_->stream()->call_.PerformOps(this);
}

ServerInitializer* Server::initializer() { return server_initializer_.get(); }

}  // namespace grpc