server.cc

/*
 *
 * Copyright 2015, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <grpc++/server.h>

#include <utility>

#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc++/completion_queue.h>
#include <grpc++/generic/async_generic_service.h>
#include <grpc++/impl/rpc_service_method.h>
#include <grpc++/impl/service_type.h>
#include <grpc++/server_context.h>
#include <grpc++/security/server_credentials.h>
#include <grpc++/support/time.h>

#include "src/core/profiling/timers.h"
#include "src/cpp/server/thread_pool_interface.h"

namespace grpc {

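// Machinery for answering calls to methods the application never registered:
// each UnimplementedAsyncRequest posts a generic request to the server; when
// one is matched, UnimplementedAsyncResponse sends back an UNIMPLEMENTED
// status (filled in via UnknownMethodHandler::FillOps) and a replacement
// request is issued so later unknown calls are handled the same way.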
class Server::UnimplementedAsyncRequestContext {
 protected:
  UnimplementedAsyncRequestContext() : generic_stream_(&server_context_) {}

  GenericServerContext server_context_;
  GenericServerAsyncReaderWriter generic_stream_;
};

class Server::UnimplementedAsyncRequest GRPC_FINAL
    : public UnimplementedAsyncRequestContext,
      public GenericAsyncRequest {
 public:
  UnimplementedAsyncRequest(Server* server, ServerCompletionQueue* cq)
      : GenericAsyncRequest(server, &server_context_, &generic_stream_, cq, cq,
                            NULL, false),
        server_(server),
        cq_(cq) {}

  bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE;

  ServerContext* context() { return &server_context_; }
  GenericServerAsyncReaderWriter* stream() { return &generic_stream_; }

 private:
  Server* const server_;
  ServerCompletionQueue* const cq_;
};

typedef SneakyCallOpSet<CallOpSendInitialMetadata, CallOpServerSendStatus>
    UnimplementedAsyncResponseOp;
class Server::UnimplementedAsyncResponse GRPC_FINAL
    : public UnimplementedAsyncResponseOp {
 public:
  UnimplementedAsyncResponse(UnimplementedAsyncRequest* request);
  ~UnimplementedAsyncResponse() { delete request_; }

  bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE {
    bool r = UnimplementedAsyncResponseOp::FinalizeResult(tag, status);
    delete this;
    return r;
  }

 private:
  UnimplementedAsyncRequest* const request_;
};

class Server::ShutdownRequest GRPC_FINAL : public CompletionQueueTag {
 public:
  bool FinalizeResult(void** tag, bool* status) {
    delete this;
    return false;
  }
};

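// SyncRequest represents one registered synchronous method (or the catch-all
// "unknown" method) waiting for an incoming call.  Each instance owns a
// dedicated per-call completion queue (cq_); Request() asks the core server
// for the next matching call, and the nested CallData takes ownership of a
// matched call and runs the application's handler.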
class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag {
 public:
  SyncRequest(RpcServiceMethod* method, void* tag)
      : method_(method),
        tag_(tag),
        in_flight_(false),
        has_request_payload_(method->method_type() == RpcMethod::NORMAL_RPC ||
                             method->method_type() ==
                                 RpcMethod::SERVER_STREAMING),
        call_details_(nullptr),
        cq_(nullptr) {
    grpc_metadata_array_init(&request_metadata_);
  }

  ~SyncRequest() {
    if (call_details_) {
      delete call_details_;
    }
    grpc_metadata_array_destroy(&request_metadata_);
  }

  static SyncRequest* Wait(CompletionQueue* cq, bool* ok) {
    void* tag = nullptr;
    *ok = false;
    if (!cq->Next(&tag, ok)) {
      return nullptr;
    }
    auto* mrd = static_cast<SyncRequest*>(tag);
    GPR_ASSERT(mrd->in_flight_);
    return mrd;
  }

  static bool AsyncWait(CompletionQueue* cq, SyncRequest** req, bool* ok,
                        gpr_timespec deadline) {
    void* tag = nullptr;
    *ok = false;
    switch (cq->AsyncNext(&tag, ok, deadline)) {
      case CompletionQueue::TIMEOUT:
        *req = nullptr;
        return true;
      case CompletionQueue::SHUTDOWN:
        *req = nullptr;
        return false;
      case CompletionQueue::GOT_EVENT:
        *req = static_cast<SyncRequest*>(tag);
        GPR_ASSERT((*req)->in_flight_);
        return true;
    }
    gpr_log(GPR_ERROR, "Should never reach here");
    abort();
  }

  void SetupRequest() { cq_ = grpc_completion_queue_create(nullptr); }

  void TeardownRequest() {
    grpc_completion_queue_destroy(cq_);
    cq_ = nullptr;
  }

  void Request(grpc_server* server, grpc_completion_queue* notify_cq) {
    GPR_ASSERT(cq_ && !in_flight_);
    in_flight_ = true;
    if (tag_) {
      GPR_ASSERT(GRPC_CALL_OK ==
                 grpc_server_request_registered_call(
                     server, tag_, &call_, &deadline_, &request_metadata_,
                     has_request_payload_ ? &request_payload_ : nullptr, cq_,
                     notify_cq, this));
    } else {
      if (!call_details_) {
        call_details_ = new grpc_call_details;
        grpc_call_details_init(call_details_);
      }
      GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call(
                                     server, &call_, call_details_,
                                     &request_metadata_, cq_, notify_cq, this));
    }
  }

  bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE {
    if (!*status) {
      grpc_completion_queue_destroy(cq_);
    }
    if (call_details_) {
      deadline_ = call_details_->deadline;
      grpc_call_details_destroy(call_details_);
      grpc_call_details_init(call_details_);
    }
    return true;
  }

  class CallData GRPC_FINAL {
   public:
    explicit CallData(Server* server, SyncRequest* mrd)
        : cq_(mrd->cq_),
          call_(mrd->call_, server, &cq_, server->max_message_size_),
          ctx_(mrd->deadline_, mrd->request_metadata_.metadata,
               mrd->request_metadata_.count),
          has_request_payload_(mrd->has_request_payload_),
          request_payload_(mrd->request_payload_),
          method_(mrd->method_) {
      ctx_.set_call(mrd->call_);
      ctx_.cq_ = &cq_;
      GPR_ASSERT(mrd->in_flight_);
      mrd->in_flight_ = false;
      mrd->request_metadata_.count = 0;
    }

    ~CallData() {
      if (has_request_payload_ && request_payload_) {
        grpc_byte_buffer_destroy(request_payload_);
      }
    }

    void Run() {
      ctx_.BeginCompletionOp(&call_);
      method_->handler()->RunHandler(MethodHandler::HandlerParameter(
          &call_, &ctx_, request_payload_, call_.max_message_size()));
      request_payload_ = nullptr;
      void* ignored_tag;
      bool ignored_ok;
      cq_.Shutdown();
      GPR_ASSERT(cq_.Next(&ignored_tag, &ignored_ok) == false);
    }

   private:
    CompletionQueue cq_;
    Call call_;
    ServerContext ctx_;
    const bool has_request_payload_;
    grpc_byte_buffer* request_payload_;
    RpcServiceMethod* const method_;
  };

 private:
  RpcServiceMethod* const method_;
  void* const tag_;
  bool in_flight_;
  const bool has_request_payload_;
  grpc_call* call_;
  grpc_call_details* call_details_;
  gpr_timespec deadline_;
  grpc_metadata_array request_metadata_;
  grpc_byte_buffer* request_payload_;
  grpc_completion_queue* cq_;
};

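// Translates the C++ server options into core channel arguments (maximum
// message length and the enabled compression algorithms) and creates the
// underlying grpc_server.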
static grpc_server* CreateServer(
    int max_message_size,
    const grpc_compression_options& compression_options) {
  grpc_arg args[2];
  size_t args_idx = 0;
  if (max_message_size > 0) {
    args[args_idx].type = GRPC_ARG_INTEGER;
    args[args_idx].key = const_cast<char*>(GRPC_ARG_MAX_MESSAGE_LENGTH);
    args[args_idx].value.integer = max_message_size;
    args_idx++;
  }
  args[args_idx].type = GRPC_ARG_INTEGER;
  args[args_idx].key = const_cast<char*>(GRPC_COMPRESSION_ALGORITHM_STATE_ARG);
  args[args_idx].value.integer = compression_options.enabled_algorithms_bitset;
  args_idx++;

  grpc_channel_args channel_args = {args_idx, args};
  return grpc_server_create(&channel_args, nullptr);
}

Server::Server(ThreadPoolInterface* thread_pool, bool thread_pool_owned,
               int max_message_size,
               grpc_compression_options compression_options)
    : max_message_size_(max_message_size),
      started_(false),
      shutdown_(false),
      num_running_cb_(0),
      sync_methods_(new std::list<SyncRequest>),
      has_generic_service_(false),
      server_(CreateServer(max_message_size, compression_options)),
      thread_pool_(thread_pool),
      thread_pool_owned_(thread_pool_owned) {
  grpc_server_register_completion_queue(server_, cq_.cq(), nullptr);
}

Server::~Server() {
  {
    grpc::unique_lock<grpc::mutex> lock(mu_);
    if (started_ && !shutdown_) {
      lock.unlock();
      Shutdown();
    }
  }
  void* got_tag;
  bool ok;
  GPR_ASSERT(!cq_.Next(&got_tag, &ok));
  grpc_server_destroy(server_);
  if (thread_pool_owned_) {
    delete thread_pool_;
  }
  delete sync_methods_;
}

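// Registers every method of a synchronous service with the core server and
// records a SyncRequest for it; the actual call requests are issued later,
// in Start().  Returns false if a method name is registered twice.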
bool Server::RegisterService(const grpc::string* host, RpcService* service) {
  for (int i = 0; i < service->GetMethodCount(); ++i) {
    RpcServiceMethod* method = service->GetMethod(i);
    void* tag = grpc_server_register_method(server_, method->name(),
                                            host ? host->c_str() : nullptr);
    if (!tag) {
      gpr_log(GPR_DEBUG, "Attempt to register %s multiple times",
              method->name());
      return false;
    }
    sync_methods_->emplace_back(method, tag);
  }
  return true;
}

bool Server::RegisterAsyncService(const grpc::string* host,
                                  AsynchronousService* service) {
  GPR_ASSERT(service->server_ == nullptr &&
             "Can only register an asynchronous service against one server.");
  service->server_ = this;
  service->request_args_ = new void*[service->method_count_];
  for (size_t i = 0; i < service->method_count_; ++i) {
    void* tag = grpc_server_register_method(server_, service->method_names_[i],
                                            host ? host->c_str() : nullptr);
    if (!tag) {
      gpr_log(GPR_DEBUG, "Attempt to register %s multiple times",
              service->method_names_[i]);
      return false;
    }
    service->request_args_[i] = tag;
  }
  return true;
}

void Server::RegisterAsyncGenericService(AsyncGenericService* service) {
  GPR_ASSERT(service->server_ == nullptr &&
             "Can only register an async generic service against one server.");
  service->server_ = this;
  has_generic_service_ = true;
}

int Server::AddListeningPort(const grpc::string& addr,
                             ServerCredentials* creds) {
  GPR_ASSERT(!started_);
  return creds->AddPortToServer(addr, server_);
}

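// Starts the core server.  If no generic service is registered, a catch-all
// "unknown" method and one UnimplementedAsyncRequest per server completion
// queue are installed so that calls to unregistered methods receive an
// UNIMPLEMENTED reply.  Finally, call requests are issued for every
// synchronous method and the first worker callback is scheduled on the
// thread pool.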
bool Server::Start(ServerCompletionQueue** cqs, size_t num_cqs) {
  GPR_ASSERT(!started_);
  started_ = true;
  grpc_server_start(server_);

  if (!has_generic_service_) {
    if (!sync_methods_->empty()) {
      unknown_method_.reset(new RpcServiceMethod(
          "unknown", RpcMethod::BIDI_STREAMING, new UnknownMethodHandler));
      // Use of emplace_back with just constructor arguments is not accepted
      // here by gcc-4.4 because it can't match the anonymous nullptr with a
      // proper constructor implicitly. Construct the object and use push_back.
      sync_methods_->push_back(SyncRequest(unknown_method_.get(), nullptr));
    }
    for (size_t i = 0; i < num_cqs; i++) {
      new UnimplementedAsyncRequest(this, cqs[i]);
    }
  }

  // Start processing rpcs.
  if (!sync_methods_->empty()) {
    for (auto m = sync_methods_->begin(); m != sync_methods_->end(); m++) {
      m->SetupRequest();
      m->Request(server_, cq_.cq());
    }
    ScheduleCallback();
  }

  return true;
}

void Server::ShutdownInternal(gpr_timespec deadline) {
  grpc::unique_lock<grpc::mutex> lock(mu_);
  if (started_ && !shutdown_) {
    shutdown_ = true;
    grpc_server_shutdown_and_notify(server_, cq_.cq(), new ShutdownRequest());
    cq_.Shutdown();

    // Spin, eating requests until the completion queue is completely shutdown.
    // If the deadline expires then cancel anything that's pending and keep
    // spinning forever until the work is actually drained.
    // Since nothing else needs to touch state guarded by mu_, holding it
    // through this loop is fine.
    SyncRequest* request;
    bool ok;
    while (SyncRequest::AsyncWait(&cq_, &request, &ok, deadline)) {
      if (request == NULL) {  // deadline expired
        grpc_server_cancel_all_calls(server_);
        deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
      } else if (ok) {
        SyncRequest::CallData call_data(this, request);
      }
    }

    // Wait for running callbacks to finish.
    while (num_running_cb_ != 0) {
      callback_cv_.wait(lock);
    }
  }
}

void Server::Wait() {
  grpc::unique_lock<grpc::mutex> lock(mu_);
  while (num_running_cb_ != 0) {
    callback_cv_.wait(lock);
  }
}

void Server::PerformOpsOnCall(CallOpSetInterface* ops, Call* call) {
  static const size_t MAX_OPS = 8;
  size_t nops = 0;
  grpc_op cops[MAX_OPS];
  ops->FillOps(cops, &nops);
  auto result = grpc_call_start_batch(call->call(), cops, nops, ops, nullptr);
  GPR_ASSERT(GRPC_CALL_OK == result);
}

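// Base plumbing for the asynchronous request flavors below.  FinalizeResult
// runs when the requested call arrives: it copies the client's initial
// metadata into the ServerContext, binds the new call to the caller's stream
// object, and hands the user-supplied tag back through the completion queue.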
Server::BaseAsyncRequest::BaseAsyncRequest(
    Server* server, ServerContext* context,
    ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq, void* tag,
    bool delete_on_finalize)
    : server_(server),
      context_(context),
      stream_(stream),
      call_cq_(call_cq),
      tag_(tag),
      delete_on_finalize_(delete_on_finalize),
      call_(nullptr) {
  memset(&initial_metadata_array_, 0, sizeof(initial_metadata_array_));
}

Server::BaseAsyncRequest::~BaseAsyncRequest() {}

bool Server::BaseAsyncRequest::FinalizeResult(void** tag, bool* status) {
  if (*status) {
    for (size_t i = 0; i < initial_metadata_array_.count; i++) {
      context_->client_metadata_.insert(
          std::pair<grpc::string_ref, grpc::string_ref>(
              initial_metadata_array_.metadata[i].key,
              grpc::string_ref(
                  initial_metadata_array_.metadata[i].value,
                  initial_metadata_array_.metadata[i].value_length)));
    }
  }
  grpc_metadata_array_destroy(&initial_metadata_array_);
  context_->set_call(call_);
  context_->cq_ = call_cq_;
  Call call(call_, server_, call_cq_, server_->max_message_size_);
  if (*status && call_) {
    context_->BeginCompletionOp(&call);
  }
  // just the pointers inside call are copied here
  stream_->BindCall(&call);
  *tag = tag_;
  if (delete_on_finalize_) {
    delete this;
  }
  return true;
}

Server::RegisteredAsyncRequest::RegisteredAsyncRequest(
    Server* server, ServerContext* context,
    ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq, void* tag)
    : BaseAsyncRequest(server, context, stream, call_cq, tag, true) {}

void Server::RegisteredAsyncRequest::IssueRequest(
    void* registered_method, grpc_byte_buffer** payload,
    ServerCompletionQueue* notification_cq) {
  grpc_server_request_registered_call(
      server_->server_, registered_method, &call_, &context_->deadline_,
      &initial_metadata_array_, payload, call_cq_->cq(), notification_cq->cq(),
      this);
}

Server::GenericAsyncRequest::GenericAsyncRequest(
    Server* server, GenericServerContext* context,
    ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
    ServerCompletionQueue* notification_cq, void* tag, bool delete_on_finalize)
    : BaseAsyncRequest(server, context, stream, call_cq, tag,
                       delete_on_finalize) {
  grpc_call_details_init(&call_details_);
  GPR_ASSERT(notification_cq);
  GPR_ASSERT(call_cq);
  grpc_server_request_call(server->server_, &call_, &call_details_,
                           &initial_metadata_array_, call_cq->cq(),
                           notification_cq->cq(), this);
}

bool Server::GenericAsyncRequest::FinalizeResult(void** tag, bool* status) {
  // TODO(yangg) remove the copy here.
  if (*status) {
    static_cast<GenericServerContext*>(context_)->method_ =
        call_details_.method;
    static_cast<GenericServerContext*>(context_)->host_ = call_details_.host;
  }
  gpr_free(call_details_.method);
  gpr_free(call_details_.host);
  return BaseAsyncRequest::FinalizeResult(tag, status);
}

bool Server::UnimplementedAsyncRequest::FinalizeResult(void** tag,
                                                       bool* status) {
  if (GenericAsyncRequest::FinalizeResult(tag, status) && *status) {
    new UnimplementedAsyncRequest(server_, cq_);
    new UnimplementedAsyncResponse(this);
  } else {
    delete this;
  }
  return false;
}

Server::UnimplementedAsyncResponse::UnimplementedAsyncResponse(
    UnimplementedAsyncRequest* request)
    : request_(request) {
  Status status(StatusCode::UNIMPLEMENTED, "");
  UnknownMethodHandler::FillOps(request_->context(), this);
  request_->stream()->call_.PerformOps(this);
}

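// Synchronous dispatch loop.  ScheduleCallback hands RunRpc to the thread
// pool; each RunRpc waits for one matched call, immediately schedules a
// replacement worker, re-issues the method's request (unless shutting down),
// and then runs the handler.  num_running_cb_ and callback_cv_ let Wait() and
// shutdown block until all workers have drained.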
void Server::ScheduleCallback() {
  {
    grpc::unique_lock<grpc::mutex> lock(mu_);
    num_running_cb_++;
  }
  thread_pool_->Add(std::bind(&Server::RunRpc, this));
}

void Server::RunRpc() {
  // Wait for one more incoming rpc.
  bool ok;
  auto* mrd = SyncRequest::Wait(&cq_, &ok);
  if (mrd) {
    ScheduleCallback();
    if (ok) {
      SyncRequest::CallData cd(this, mrd);
      {
        mrd->SetupRequest();
        grpc::unique_lock<grpc::mutex> lock(mu_);
        if (!shutdown_) {
          mrd->Request(server_, cq_.cq());
        } else {
          // destroy the structure that was created
          mrd->TeardownRequest();
        }
      }
      cd.Run();
    }
  }

  {
    grpc::unique_lock<grpc::mutex> lock(mu_);
    num_running_cb_--;
    if (shutdown_) {
      callback_cv_.notify_all();
    }
  }
}

}  // namespace grpc