/*
 *
 * Copyright 2015 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include <grpc/support/port_platform.h>

#include "src/core/lib/iomgr/port.h"

#ifdef GRPC_POSIX_SOCKET_TCP

#include "src/core/lib/iomgr/network_status_tracker.h"
#include "src/core/lib/iomgr/tcp_posix.h"

#include <errno.h>
#include <limits.h>
#include <netinet/in.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>

#include <grpc/slice.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>

#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/iomgr/buffer_list.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"

#ifdef GRPC_HAVE_MSG_NOSIGNAL
#define SENDMSG_FLAGS MSG_NOSIGNAL
#else
#define SENDMSG_FLAGS 0
#endif

#ifdef GRPC_MSG_IOVLEN_TYPE
typedef GRPC_MSG_IOVLEN_TYPE msg_iovlen_type;
#else
typedef size_t msg_iovlen_type;
#endif

extern grpc_core::TraceFlag grpc_tcp_trace;

namespace {
struct grpc_tcp {
  grpc_endpoint base;
  grpc_fd* em_fd;
  int fd;
  /* Used by the endpoint read function to distinguish the very first read call
   * from the rest */
  bool is_first_read;
  double target_length;
  double bytes_read_this_round;
  gpr_refcount refcount;
  gpr_atm shutdown_count;

  int min_read_chunk_size;
  int max_read_chunk_size;

  /* garbage after the last read */
  grpc_slice_buffer last_read_buffer;

  grpc_slice_buffer* incoming_buffer;
  grpc_slice_buffer* outgoing_buffer;
  /** byte within outgoing_buffer->slices[0] to write next */
  size_t outgoing_byte_idx;

  grpc_closure* read_cb;
  grpc_closure* write_cb;
  grpc_closure* release_fd_cb;
  int* release_fd;

  grpc_closure read_done_closure;
  grpc_closure write_done_closure;
  grpc_closure error_closure;

  char* peer_string;

  grpc_resource_user* resource_user;
  grpc_resource_user_slice_allocator slice_allocator;

  grpc_core::TracedBuffer* tb_head; /* List of traced buffers */
  gpr_mu tb_mu;                     /* Lock for access to list of traced
                                       buffers */

  /* grpc_endpoint_write takes an argument which if non-null means that the
   * transport layer wants the TCP layer to collect timestamps for this write.
   * This arg is forwarded to the timestamps callback function when the ACK
   * timestamp is received from the kernel. This arg is a (void *) which allows
   * users of this API to pass in a pointer to any kind of structure. This
   * structure could actually be a tag or any book-keeping object that the user
   * can use to distinguish between different traced writes. The only
   * requirement from the TCP endpoint layer is that this arg should be
   * non-null if the user wants timestamps for the write. */
  void* outgoing_buffer_arg;
  /* A counter which starts at 0. It is initialized the first time the socket
   * options for collecting timestamps are set, and is incremented with each
   * byte sent. */
  int bytes_counter;
  bool socket_ts_enabled; /* True if timestamping options are set on the socket
                           */
  gpr_atm stop_error_notification; /* Set to 1 if we do not want to be notified
                                      on errors anymore */
};

struct backup_poller {
  gpr_mu* pollset_mu;
  grpc_closure run_poller;
};
}  // namespace
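
/* A backup_poller is allocated with enough trailing memory to hold a
 * grpc_pollset, so BACKUP_POLLER_POLLSET(b) simply points just past the
 * struct itself. */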
#define BACKUP_POLLER_POLLSET(b) ((grpc_pollset*)((b) + 1))

static gpr_atm g_uncovered_notifications_pending;
static gpr_atm g_backup_poller; /* backup_poller* */

static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error* error);
static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error* error);
static void tcp_drop_uncovered_then_handle_write(void* arg /* grpc_tcp */,
                                                 grpc_error* error);

static void done_poller(void* bp, grpc_error* error_ignored) {
  backup_poller* p = static_cast<backup_poller*>(bp);
  if (grpc_tcp_trace.enabled()) {
    gpr_log(GPR_INFO, "BACKUP_POLLER:%p destroy", p);
  }
  grpc_pollset_destroy(BACKUP_POLLER_POLLSET(p));
  gpr_free(p);
}

static void run_poller(void* bp, grpc_error* error_ignored) {
  backup_poller* p = static_cast<backup_poller*>(bp);
  if (grpc_tcp_trace.enabled()) {
    gpr_log(GPR_INFO, "BACKUP_POLLER:%p run", p);
  }
  gpr_mu_lock(p->pollset_mu);
  grpc_millis deadline = grpc_core::ExecCtx::Get()->Now() + 10 * GPR_MS_PER_SEC;
  GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS();
  GRPC_LOG_IF_ERROR(
      "backup_poller:pollset_work",
      grpc_pollset_work(BACKUP_POLLER_POLLSET(p), nullptr, deadline));
  gpr_mu_unlock(p->pollset_mu);
  /* last "uncovered" notification is the ref that keeps us polling, if we get
   * there try a cas to release it */
  if (gpr_atm_no_barrier_load(&g_uncovered_notifications_pending) == 1 &&
      gpr_atm_full_cas(&g_uncovered_notifications_pending, 1, 0)) {
    gpr_mu_lock(p->pollset_mu);
    bool cas_ok = gpr_atm_full_cas(&g_backup_poller, (gpr_atm)p, 0);
    if (grpc_tcp_trace.enabled()) {
      gpr_log(GPR_INFO, "BACKUP_POLLER:%p done cas_ok=%d", p, cas_ok);
    }
    gpr_mu_unlock(p->pollset_mu);
    if (grpc_tcp_trace.enabled()) {
      gpr_log(GPR_INFO, "BACKUP_POLLER:%p shutdown", p);
    }
    grpc_pollset_shutdown(BACKUP_POLLER_POLLSET(p),
                          GRPC_CLOSURE_INIT(&p->run_poller, done_poller, p,
                                            grpc_schedule_on_exec_ctx));
  } else {
    if (grpc_tcp_trace.enabled()) {
      gpr_log(GPR_INFO, "BACKUP_POLLER:%p reschedule", p);
    }
    GRPC_CLOSURE_SCHED(&p->run_poller, GRPC_ERROR_NONE);
  }
}
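
/* Each pending write "covers" the fd with an entry in
 * g_uncovered_notifications_pending (see cover_self below); drop_uncovered
 * releases one such entry once the write notification has fired. The final
 * entry belongs to the backup poller and is released by run_poller's CAS,
 * which is why old_count must never be 1 here. */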
static void drop_uncovered(grpc_tcp* tcp) {
  backup_poller* p = (backup_poller*)gpr_atm_acq_load(&g_backup_poller);
  gpr_atm old_count =
      gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, -1);
  if (grpc_tcp_trace.enabled()) {
    gpr_log(GPR_INFO, "BACKUP_POLLER:%p uncover cnt %d->%d", p,
            static_cast<int>(old_count), static_cast<int>(old_count) - 1);
  }
  GPR_ASSERT(old_count != 1);
}

// gRPC API considers a Write operation to be done the moment it clears ‘flow
// control’ i.e., not necessarily sent on the wire. This means that the
// application MIGHT not call `grpc_completion_queue_next/pluck` in a timely
// manner when its `Write()` API is acked.
//
// We need to ensure that the fd is 'covered' (i.e being monitored by some
// polling thread and progress is made) and hence add it to a backup poller
// here
static void cover_self(grpc_tcp* tcp) {
  backup_poller* p;
  gpr_atm old_count =
      gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, 2);
  if (grpc_tcp_trace.enabled()) {
    gpr_log(GPR_INFO, "BACKUP_POLLER: cover cnt %d->%d",
            static_cast<int>(old_count), 2 + static_cast<int>(old_count));
  }
  if (old_count == 0) {
    GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED();
    p = static_cast<backup_poller*>(
        gpr_zalloc(sizeof(*p) + grpc_pollset_size()));
    if (grpc_tcp_trace.enabled()) {
      gpr_log(GPR_INFO, "BACKUP_POLLER:%p create", p);
    }
    grpc_pollset_init(BACKUP_POLLER_POLLSET(p), &p->pollset_mu);
    gpr_atm_rel_store(&g_backup_poller, (gpr_atm)p);
    GRPC_CLOSURE_SCHED(
        GRPC_CLOSURE_INIT(&p->run_poller, run_poller, p,
                          grpc_executor_scheduler(GRPC_EXECUTOR_LONG)),
        GRPC_ERROR_NONE);
  } else {
    while ((p = (backup_poller*)gpr_atm_acq_load(&g_backup_poller)) ==
           nullptr) {
      // spin waiting for backup poller
    }
  }
  if (grpc_tcp_trace.enabled()) {
    gpr_log(GPR_INFO, "BACKUP_POLLER:%p add %p", p, tcp);
  }
  grpc_pollset_add_fd(BACKUP_POLLER_POLLSET(p), tcp->em_fd);
  if (old_count != 0) {
    drop_uncovered(tcp);
  }
}

static void notify_on_read(grpc_tcp* tcp) {
  if (grpc_tcp_trace.enabled()) {
    gpr_log(GPR_INFO, "TCP:%p notify_on_read", tcp);
  }
  GRPC_CLOSURE_INIT(&tcp->read_done_closure, tcp_handle_read, tcp,
                    grpc_schedule_on_exec_ctx);
  grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_done_closure);
}

static void notify_on_write(grpc_tcp* tcp) {
  if (grpc_tcp_trace.enabled()) {
    gpr_log(GPR_INFO, "TCP:%p notify_on_write", tcp);
  }
  cover_self(tcp);
  GRPC_CLOSURE_INIT(&tcp->write_done_closure,
                    tcp_drop_uncovered_then_handle_write, tcp,
                    grpc_schedule_on_exec_ctx);
  grpc_fd_notify_on_write(tcp->em_fd, &tcp->write_done_closure);
}

static void tcp_drop_uncovered_then_handle_write(void* arg, grpc_error* error) {
  if (grpc_tcp_trace.enabled()) {
    gpr_log(GPR_INFO, "TCP:%p got_write: %s", arg, grpc_error_string(error));
  }
  drop_uncovered(static_cast<grpc_tcp*>(arg));
  tcp_handle_write(arg, error);
}

static void add_to_estimate(grpc_tcp* tcp, size_t bytes) {
  tcp->bytes_read_this_round += static_cast<double>(bytes);
}

static void finish_estimate(grpc_tcp* tcp) {
  /* If we read >80% of the target buffer in one read loop, increase the size
     of the target buffer to either the amount read, or twice its previous
     value */
  if (tcp->bytes_read_this_round > tcp->target_length * 0.8) {
    tcp->target_length =
        GPR_MAX(2 * tcp->target_length, tcp->bytes_read_this_round);
  } else {
    tcp->target_length =
        0.99 * tcp->target_length + 0.01 * tcp->bytes_read_this_round;
  }
  tcp->bytes_read_this_round = 0;
}
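
/* Picks how many bytes to ask the resource allocator for on the next read:
 * the adaptive target_length, scaled down once resource-quota memory pressure
 * exceeds 80%, rounded up to a 256-byte boundary, and capped at 1/16th of the
 * overall quota. */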
static size_t get_target_read_size(grpc_tcp* tcp) {
  grpc_resource_quota* rq = grpc_resource_user_quota(tcp->resource_user);
  double pressure = grpc_resource_quota_get_memory_pressure(rq);
  double target =
      tcp->target_length * (pressure > 0.8 ? (1.0 - pressure) / 0.2 : 1.0);
  size_t sz = ((static_cast<size_t> GPR_CLAMP(target, tcp->min_read_chunk_size,
                                              tcp->max_read_chunk_size)) +
               255) &
              ~static_cast<size_t>(255);
  /* don't use more than 1/16th of the overall resource quota for a single read
   * alloc */
  size_t rqmax = grpc_resource_quota_peek_size(rq);
  if (sz > rqmax / 16 && rqmax > 1024) {
    sz = rqmax / 16;
  }
  return sz;
}

static grpc_error* tcp_annotate_error(grpc_error* src_error, grpc_tcp* tcp) {
  return grpc_error_set_str(
      grpc_error_set_int(
          grpc_error_set_int(src_error, GRPC_ERROR_INT_FD, tcp->fd),
          /* All tcp errors are marked with UNAVAILABLE so that application may
           * choose to retry. */
          GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE),
      GRPC_ERROR_STR_TARGET_ADDRESS,
      grpc_slice_from_copied_string(tcp->peer_string));
}

static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error* error);
static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error* error);

static void tcp_shutdown(grpc_endpoint* ep, grpc_error* why) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  grpc_fd_shutdown(tcp->em_fd, why);
  grpc_resource_user_shutdown(tcp->resource_user);
}

static void tcp_free(grpc_tcp* tcp) {
  grpc_fd_orphan(tcp->em_fd, tcp->release_fd_cb, tcp->release_fd,
                 "tcp_unref_orphan");
  grpc_slice_buffer_destroy_internal(&tcp->last_read_buffer);
  grpc_resource_user_unref(tcp->resource_user);
  gpr_free(tcp->peer_string);
  gpr_mu_destroy(&tcp->tb_mu);
  gpr_free(tcp);
}

#ifndef NDEBUG
#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__)
#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
static void tcp_unref(grpc_tcp* tcp, const char* reason, const char* file,
                      int line) {
  if (grpc_tcp_trace.enabled()) {
    gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
    gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
            "TCP unref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val,
            val - 1);
  }
  if (gpr_unref(&tcp->refcount)) {
    tcp_free(tcp);
  }
}

static void tcp_ref(grpc_tcp* tcp, const char* reason, const char* file,
                    int line) {
  if (grpc_tcp_trace.enabled()) {
    gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
    gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
            "TCP ref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val,
            val + 1);
  }
  gpr_ref(&tcp->refcount);
}
#else
#define TCP_UNREF(tcp, reason) tcp_unref((tcp))
#define TCP_REF(tcp, reason) tcp_ref((tcp))
static void tcp_unref(grpc_tcp* tcp) {
  if (gpr_unref(&tcp->refcount)) {
    tcp_free(tcp);
  }
}

static void tcp_ref(grpc_tcp* tcp) { gpr_ref(&tcp->refcount); }
#endif

static void tcp_destroy(grpc_endpoint* ep) {
  grpc_network_status_unregister_endpoint(ep);
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
  if (grpc_event_engine_can_track_errors()) {
    gpr_mu_lock(&tcp->tb_mu);
    grpc_core::TracedBuffer::Shutdown(
        &tcp->tb_head, tcp->outgoing_buffer_arg,
        GRPC_ERROR_CREATE_FROM_STATIC_STRING("endpoint destroyed"));
    gpr_mu_unlock(&tcp->tb_mu);
    tcp->outgoing_buffer_arg = nullptr;
    gpr_atm_no_barrier_store(&tcp->stop_error_notification, true);
    grpc_fd_set_error(tcp->em_fd);
  }
  TCP_UNREF(tcp, "destroy");
}

static void call_read_cb(grpc_tcp* tcp, grpc_error* error) {
  grpc_closure* cb = tcp->read_cb;
  if (grpc_tcp_trace.enabled()) {
    gpr_log(GPR_INFO, "TCP:%p call_cb %p %p:%p", tcp, cb, cb->cb, cb->cb_arg);
    size_t i;
    const char* str = grpc_error_string(error);
    gpr_log(GPR_INFO, "read: error=%s", str);
    for (i = 0; i < tcp->incoming_buffer->count; i++) {
      char* dump = grpc_dump_slice(tcp->incoming_buffer->slices[i],
                                   GPR_DUMP_HEX | GPR_DUMP_ASCII);
      gpr_log(GPR_INFO, "READ %p (peer=%s): %s", tcp, tcp->peer_string, dump);
      gpr_free(dump);
    }
  }
  tcp->read_cb = nullptr;
  tcp->incoming_buffer = nullptr;
  GRPC_CLOSURE_SCHED(cb, error);
}
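
/* MAX_READ_IOVEC caps how many incoming_buffer slices a single recvmsg can
 * fill. tcp_do_read below issues one recvmsg (retrying on EINTR): EAGAIN
 * re-arms notify_on_read, a 0-byte read is treated as the peer closing the
 * connection, and a short read trims the unused tail of incoming_buffer into
 * last_read_buffer before the read callback runs. */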
#define MAX_READ_IOVEC 4
static void tcp_do_read(grpc_tcp* tcp) {
  GPR_TIMER_SCOPE("tcp_do_read", 0);
  struct msghdr msg;
  struct iovec iov[MAX_READ_IOVEC];
  ssize_t read_bytes;
  size_t i;

  GPR_ASSERT(tcp->incoming_buffer->count <= MAX_READ_IOVEC);
  for (i = 0; i < tcp->incoming_buffer->count; i++) {
    iov[i].iov_base = GRPC_SLICE_START_PTR(tcp->incoming_buffer->slices[i]);
    iov[i].iov_len = GRPC_SLICE_LENGTH(tcp->incoming_buffer->slices[i]);
  }

  msg.msg_name = nullptr;
  msg.msg_namelen = 0;
  msg.msg_iov = iov;
  msg.msg_iovlen = static_cast<msg_iovlen_type>(tcp->incoming_buffer->count);
  msg.msg_control = nullptr;
  msg.msg_controllen = 0;
  msg.msg_flags = 0;

  GRPC_STATS_INC_TCP_READ_OFFER(tcp->incoming_buffer->length);
  GRPC_STATS_INC_TCP_READ_OFFER_IOV_SIZE(tcp->incoming_buffer->count);

  do {
    GPR_TIMER_SCOPE("recvmsg", 0);
    GRPC_STATS_INC_SYSCALL_READ();
    read_bytes = recvmsg(tcp->fd, &msg, 0);
  } while (read_bytes < 0 && errno == EINTR);

  if (read_bytes < 0) {
    /* NB: After calling call_read_cb a parallel call of the read handler may
     * be running. */
    if (errno == EAGAIN) {
      finish_estimate(tcp);
      /* We've consumed the edge, request a new one */
      notify_on_read(tcp);
    } else {
      grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer);
      call_read_cb(tcp,
                   tcp_annotate_error(GRPC_OS_ERROR(errno, "recvmsg"), tcp));
      TCP_UNREF(tcp, "read");
    }
  } else if (read_bytes == 0) {
    /* 0 read size ==> end of stream */
    grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer);
    call_read_cb(
        tcp, tcp_annotate_error(
                 GRPC_ERROR_CREATE_FROM_STATIC_STRING("Socket closed"), tcp));
    TCP_UNREF(tcp, "read");
  } else {
    GRPC_STATS_INC_TCP_READ_SIZE(read_bytes);
    add_to_estimate(tcp, static_cast<size_t>(read_bytes));
    GPR_ASSERT((size_t)read_bytes <= tcp->incoming_buffer->length);
    if (static_cast<size_t>(read_bytes) == tcp->incoming_buffer->length) {
      finish_estimate(tcp);
    } else if (static_cast<size_t>(read_bytes) < tcp->incoming_buffer->length) {
      grpc_slice_buffer_trim_end(
          tcp->incoming_buffer,
          tcp->incoming_buffer->length - static_cast<size_t>(read_bytes),
          &tcp->last_read_buffer);
    }
    GPR_ASSERT((size_t)read_bytes == tcp->incoming_buffer->length);
    call_read_cb(tcp, GRPC_ERROR_NONE);
    TCP_UNREF(tcp, "read");
  }
}

static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
  grpc_tcp* tcp = static_cast<grpc_tcp*>(tcpp);
  if (grpc_tcp_trace.enabled()) {
    gpr_log(GPR_INFO, "TCP:%p read_allocation_done: %s", tcp,
            grpc_error_string(error));
  }
  if (error != GRPC_ERROR_NONE) {
    grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer);
    grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
    call_read_cb(tcp, GRPC_ERROR_REF(error));
    TCP_UNREF(tcp, "read");
  } else {
    tcp_do_read(tcp);
  }
}

static void tcp_continue_read(grpc_tcp* tcp) {
  size_t target_read_size = get_target_read_size(tcp);
  if (tcp->incoming_buffer->length < target_read_size / 2 &&
      tcp->incoming_buffer->count < MAX_READ_IOVEC) {
    if (grpc_tcp_trace.enabled()) {
      gpr_log(GPR_INFO, "TCP:%p alloc_slices", tcp);
    }
    grpc_resource_user_alloc_slices(&tcp->slice_allocator, target_read_size, 1,
                                    tcp->incoming_buffer);
  } else {
    if (grpc_tcp_trace.enabled()) {
      gpr_log(GPR_INFO, "TCP:%p do_read", tcp);
    }
    tcp_do_read(tcp);
  }
}

static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error* error) {
  grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
  if (grpc_tcp_trace.enabled()) {
    gpr_log(GPR_INFO, "TCP:%p got_read: %s", tcp, grpc_error_string(error));
  }
  if (error != GRPC_ERROR_NONE) {
    grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer);
    grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
    call_read_cb(tcp, GRPC_ERROR_REF(error));
    TCP_UNREF(tcp, "read");
  } else {
    tcp_continue_read(tcp);
  }
}
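
/* Endpoint read entry point: stash the caller's buffer and closure, reclaim
 * any leftover slices from last_read_buffer, and either register the first
 * read with the polling engine or schedule tcp_handle_read directly. */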
static void tcp_read(grpc_endpoint* ep, grpc_slice_buffer* incoming_buffer,
                     grpc_closure* cb) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  GPR_ASSERT(tcp->read_cb == nullptr);
  tcp->read_cb = cb;
  tcp->incoming_buffer = incoming_buffer;
  grpc_slice_buffer_reset_and_unref_internal(incoming_buffer);
  grpc_slice_buffer_swap(incoming_buffer, &tcp->last_read_buffer);
  TCP_REF(tcp, "read");
  if (tcp->is_first_read) {
    /* Endpoint read called for the very first time. Register read callback
     * with the polling engine */
    tcp->is_first_read = false;
    notify_on_read(tcp);
  } else {
    /* Not the first time. We may or may not have more bytes available. In any
     * case call tcp->read_done_closure (i.e tcp_handle_read()) which does the
     * right thing (i.e calls tcp_do_read() which either reads the available
     * bytes or calls notify_on_read() to be notified when new bytes become
     * available */
    GRPC_CLOSURE_SCHED(&tcp->read_done_closure, GRPC_ERROR_NONE);
  }
}

/* A wrapper around sendmsg. It sends \a msg over \a fd and returns the number
 * of bytes sent. */
ssize_t tcp_send(int fd, const struct msghdr* msg) {
  GPR_TIMER_SCOPE("sendmsg", 1);
  ssize_t sent_length;
  do {
    /* TODO(klempner): Cork if this is a partial write */
    GRPC_STATS_INC_SYSCALL_WRITE();
    sent_length = sendmsg(fd, msg, SENDMSG_FLAGS);
  } while (sent_length < 0 && errno == EINTR);
  return sent_length;
}

/** This is to be called if outgoing_buffer_arg is not null. On linux
 * platforms, this will call sendmsg with socket options set to collect
 * timestamps inside the kernel. On return, sent_length is set to the return
 * value of the sendmsg call. Returns false if setting the socket options
 * failed. This is not implemented for non-linux platforms currently, and
 * crashes out.
 */
static bool tcp_write_with_timestamps(grpc_tcp* tcp, struct msghdr* msg,
                                      size_t sending_length,
                                      ssize_t* sent_length, grpc_error** error);

/** The callback function to be invoked when we get an error on the socket. */
static void tcp_handle_error(void* arg /* grpc_tcp */, grpc_error* error);

#ifdef GRPC_LINUX_ERRQUEUE
static bool tcp_write_with_timestamps(grpc_tcp* tcp, struct msghdr* msg,
                                      size_t sending_length,
                                      ssize_t* sent_length,
                                      grpc_error** error) {
  if (!tcp->socket_ts_enabled) {
    uint32_t opt = grpc_core::kTimestampingSocketOptions;
    if (setsockopt(tcp->fd, SOL_SOCKET, SO_TIMESTAMPING,
                   static_cast<void*>(&opt), sizeof(opt)) != 0) {
      *error = tcp_annotate_error(GRPC_OS_ERROR(errno, "setsockopt"), tcp);
      grpc_slice_buffer_reset_and_unref_internal(tcp->outgoing_buffer);
      if (grpc_tcp_trace.enabled()) {
        gpr_log(GPR_ERROR,
                "Failed to set timestamping options on the socket.");
      }
      return false;
    }
    tcp->bytes_counter = -1;
    tcp->socket_ts_enabled = true;
  }
  /* Set control message to indicate that you want timestamps. */
  union {
    char cmsg_buf[CMSG_SPACE(sizeof(uint32_t))];
    struct cmsghdr align;
  } u;
  cmsghdr* cmsg = reinterpret_cast<cmsghdr*>(u.cmsg_buf);
  cmsg->cmsg_level = SOL_SOCKET;
  cmsg->cmsg_type = SO_TIMESTAMPING;
  cmsg->cmsg_len = CMSG_LEN(sizeof(uint32_t));
  *reinterpret_cast<int*>(CMSG_DATA(cmsg)) =
      grpc_core::kTimestampingRecordingOptions;
  msg->msg_control = u.cmsg_buf;
  msg->msg_controllen = CMSG_SPACE(sizeof(uint32_t));

  /* If there was an error on sendmsg the logic in tcp_flush will handle it. */
  ssize_t length = tcp_send(tcp->fd, msg);
  *sent_length = length;
  /* Only save timestamps if all the bytes were taken by sendmsg. */
  if (sending_length == static_cast<size_t>(length)) {
    gpr_mu_lock(&tcp->tb_mu);
    grpc_core::TracedBuffer::AddNewEntry(
        &tcp->tb_head, static_cast<int>(tcp->bytes_counter + length),
        tcp->outgoing_buffer_arg);
    gpr_mu_unlock(&tcp->tb_mu);
    tcp->outgoing_buffer_arg = nullptr;
  }
  return true;
}

/** Reads \a cmsg to derive timestamps from the control messages. If a valid
 * timestamp is found, the traced buffer list is updated with this timestamp.
 * The caller of this function should be looping on the control messages found
 * in \a msg. \a cmsg should point to the control message that the caller wants
 * processed.
 * On return, a pointer to a control message is returned. On the next
 * iteration, CMSG_NXTHDR(msg, ret_val) should be passed as \a cmsg. */
struct cmsghdr* process_timestamp(grpc_tcp* tcp, msghdr* msg,
                                  struct cmsghdr* cmsg) {
  auto next_cmsg = CMSG_NXTHDR(msg, cmsg);
  if (next_cmsg == nullptr) {
    if (grpc_tcp_trace.enabled()) {
      gpr_log(GPR_ERROR, "Received timestamp without extended error");
    }
    return cmsg;
  }

  if (!(next_cmsg->cmsg_level == SOL_IP || next_cmsg->cmsg_level == SOL_IPV6) ||
      !(next_cmsg->cmsg_type == IP_RECVERR ||
        next_cmsg->cmsg_type == IPV6_RECVERR)) {
    if (grpc_tcp_trace.enabled()) {
      gpr_log(GPR_ERROR, "Unexpected control message");
    }
    return cmsg;
  }

  auto tss =
      reinterpret_cast<struct grpc_core::scm_timestamping*>(CMSG_DATA(cmsg));
  auto serr = reinterpret_cast<struct sock_extended_err*>(CMSG_DATA(next_cmsg));
  if (serr->ee_errno != ENOMSG ||
      serr->ee_origin != SO_EE_ORIGIN_TIMESTAMPING) {
    gpr_log(GPR_ERROR, "Unexpected control message");
    return cmsg;
  }
  /* The error handling can potentially be done on another thread so we need
   * to protect the traced buffer list. A lock free list might be better. Using
   * a simple mutex for now. */
  gpr_mu_lock(&tcp->tb_mu);
  grpc_core::TracedBuffer::ProcessTimestamp(&tcp->tb_head, serr, tss);
  gpr_mu_unlock(&tcp->tb_mu);
  return next_cmsg;
}

/** For linux platforms, reads the socket's error queue and processes error
 * messages from the queue. Returns true if all the errors processed were
 * timestamps. Returns false if any of the errors were not timestamps. For
 * non-linux platforms, error processing is not used/enabled currently.
 */
static bool process_errors(grpc_tcp* tcp) {
  while (true) {
    struct iovec iov;
    iov.iov_base = nullptr;
    iov.iov_len = 0;
    struct msghdr msg;
    msg.msg_name = nullptr;
    msg.msg_namelen = 0;
    msg.msg_iov = &iov;
    msg.msg_iovlen = 0;
    msg.msg_flags = 0;

    union {
      char rbuf[1024 /*CMSG_SPACE(sizeof(scm_timestamping)) +
                       CMSG_SPACE(sizeof(sock_extended_err) +
                       sizeof(sockaddr_in))*/];
      struct cmsghdr align;
    } aligned_buf;
    memset(&aligned_buf, 0, sizeof(aligned_buf));

    msg.msg_control = aligned_buf.rbuf;
    msg.msg_controllen = sizeof(aligned_buf.rbuf);

    int r, saved_errno;
    do {
      r = recvmsg(tcp->fd, &msg, MSG_ERRQUEUE);
      saved_errno = errno;
    } while (r < 0 && saved_errno == EINTR);

    if (r == -1 && saved_errno == EAGAIN) {
      return true; /* No more errors to process */
    }
    if (r == -1) {
      return false;
    }
    if (grpc_tcp_trace.enabled()) {
      if (msg.msg_flags & MSG_CTRUNC) {
        gpr_log(GPR_INFO, "Error message was truncated.");
      }
    }

    if (msg.msg_controllen == 0) {
      /* There was no control message found. It was probably spurious. */
      return true;
    }
    for (auto cmsg = CMSG_FIRSTHDR(&msg); cmsg && cmsg->cmsg_len;
         cmsg = CMSG_NXTHDR(&msg, cmsg)) {
      if (cmsg->cmsg_level != SOL_SOCKET ||
          cmsg->cmsg_type != SCM_TIMESTAMPING) {
        /* Got a control message that is not a timestamp. Don't know how to
         * handle this. */
        if (grpc_tcp_trace.enabled()) {
          gpr_log(GPR_INFO,
                  "unknown control message cmsg_level:%d cmsg_type:%d",
                  cmsg->cmsg_level, cmsg->cmsg_type);
        }
        return false;
      }
      process_timestamp(tcp, &msg, cmsg);
    }
  }
}
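
/* Error-notification callback: unless the endpoint is shutting down, drain
 * the error queue for timestamps, flag the fd readable/writable if a real
 * (non-timestamp) error was found, and re-arm grpc_fd_notify_on_error so this
 * callback keeps firing. */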
static void tcp_handle_error(void* arg /* grpc_tcp */, grpc_error* error) {
  grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
  if (grpc_tcp_trace.enabled()) {
    gpr_log(GPR_INFO, "TCP:%p got_error: %s", tcp, grpc_error_string(error));
  }

  if (error != GRPC_ERROR_NONE ||
      static_cast<bool>(gpr_atm_acq_load(&tcp->stop_error_notification))) {
    /* We aren't going to register to hear on error anymore, so it is safe to
     * unref. */
    TCP_UNREF(tcp, "error-tracking");
    return;
  }

  /* We are still interested in collecting timestamps, so let's try reading
   * them. */
  if (!process_errors(tcp)) {
    /* This was not a timestamps error. This was an actual error. Set the
     * read and write closures to be ready. */
    grpc_fd_set_readable(tcp->em_fd);
    grpc_fd_set_writable(tcp->em_fd);
  }
  GRPC_CLOSURE_INIT(&tcp->error_closure, tcp_handle_error, tcp,
                    grpc_schedule_on_exec_ctx);
  grpc_fd_notify_on_error(tcp->em_fd, &tcp->error_closure);
}

#else  /* GRPC_LINUX_ERRQUEUE */
static bool tcp_write_with_timestamps(grpc_tcp* tcp, struct msghdr* msg,
                                      size_t sending_length,
                                      ssize_t* sent_length,
                                      grpc_error** error) {
  gpr_log(GPR_ERROR, "Write with timestamps not supported for this platform");
  GPR_ASSERT(0);
  return false;
}

static void tcp_handle_error(void* arg /* grpc_tcp */, grpc_error* error) {
  gpr_log(GPR_ERROR, "Error handling is not supported for this platform");
  GPR_ASSERT(0);
}
#endif /* GRPC_LINUX_ERRQUEUE */

/* If outgoing_buffer_arg is filled, shuts down the list early, so that any
 * release operations needed can be performed on the arg */
void tcp_shutdown_buffer_list(grpc_tcp* tcp) {
  if (tcp->outgoing_buffer_arg) {
    gpr_mu_lock(&tcp->tb_mu);
    grpc_core::TracedBuffer::Shutdown(
        &tcp->tb_head, tcp->outgoing_buffer_arg,
        GRPC_ERROR_CREATE_FROM_STATIC_STRING("endpoint destroyed"));
    gpr_mu_unlock(&tcp->tb_mu);
    tcp->outgoing_buffer_arg = nullptr;
  }
}

/* returns true if done, false if pending; if returning true, *error is set */
#if defined(IOV_MAX) && IOV_MAX < 1000
#define MAX_WRITE_IOVEC IOV_MAX
#else
#define MAX_WRITE_IOVEC 1000
#endif
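
/* tcp_flush builds an iovec array over the unsent portion of outgoing_buffer
 * (starting at outgoing_byte_idx within the first remaining slice) and loops
 * on sendmsg. A partial write rewinds outgoing_slice_idx/outgoing_byte_idx to
 * the first unsent byte and retries; EAGAIN unrefs the slices already fully
 * written and returns false so the caller re-arms notify_on_write. */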
static bool tcp_flush(grpc_tcp* tcp, grpc_error** error) {
  struct msghdr msg;
  struct iovec iov[MAX_WRITE_IOVEC];
  msg_iovlen_type iov_size;
  ssize_t sent_length;
  size_t sending_length;
  size_t trailing;
  size_t unwind_slice_idx;
  size_t unwind_byte_idx;

  // We always start at zero, because we eagerly unref and trim the slice
  // buffer as we write
  size_t outgoing_slice_idx = 0;

  for (;;) {
    sending_length = 0;
    unwind_slice_idx = outgoing_slice_idx;
    unwind_byte_idx = tcp->outgoing_byte_idx;
    for (iov_size = 0; outgoing_slice_idx != tcp->outgoing_buffer->count &&
                       iov_size != MAX_WRITE_IOVEC;
         iov_size++) {
      iov[iov_size].iov_base =
          GRPC_SLICE_START_PTR(
              tcp->outgoing_buffer->slices[outgoing_slice_idx]) +
          tcp->outgoing_byte_idx;
      iov[iov_size].iov_len =
          GRPC_SLICE_LENGTH(tcp->outgoing_buffer->slices[outgoing_slice_idx]) -
          tcp->outgoing_byte_idx;
      sending_length += iov[iov_size].iov_len;
      outgoing_slice_idx++;
      tcp->outgoing_byte_idx = 0;
    }
    GPR_ASSERT(iov_size > 0);

    msg.msg_name = nullptr;
    msg.msg_namelen = 0;
    msg.msg_iov = iov;
    msg.msg_iovlen = iov_size;
    msg.msg_flags = 0;
    if (tcp->outgoing_buffer_arg != nullptr) {
      if (!tcp_write_with_timestamps(tcp, &msg, sending_length, &sent_length,
                                     error)) {
        tcp_shutdown_buffer_list(tcp);
        return true; /* something went wrong with timestamps */
      }
    } else {
      msg.msg_control = nullptr;
      msg.msg_controllen = 0;

      GRPC_STATS_INC_TCP_WRITE_SIZE(sending_length);
      GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(iov_size);

      sent_length = tcp_send(tcp->fd, &msg);
    }

    if (sent_length < 0) {
      if (errno == EAGAIN) {
        tcp->outgoing_byte_idx = unwind_byte_idx;
        // unref all and forget about all slices that have been written to this
        // point
        for (size_t idx = 0; idx < unwind_slice_idx; ++idx) {
          grpc_slice_unref_internal(
              grpc_slice_buffer_take_first(tcp->outgoing_buffer));
        }
        return false;
      } else if (errno == EPIPE) {
        *error = tcp_annotate_error(GRPC_OS_ERROR(errno, "sendmsg"), tcp);
        grpc_slice_buffer_reset_and_unref_internal(tcp->outgoing_buffer);
        tcp_shutdown_buffer_list(tcp);
        return true;
      } else {
        *error = tcp_annotate_error(GRPC_OS_ERROR(errno, "sendmsg"), tcp);
        grpc_slice_buffer_reset_and_unref_internal(tcp->outgoing_buffer);
        tcp_shutdown_buffer_list(tcp);
        return true;
      }
    }

    GPR_ASSERT(tcp->outgoing_byte_idx == 0);
    tcp->bytes_counter += sent_length;
    trailing = sending_length - static_cast<size_t>(sent_length);
    while (trailing > 0) {
      size_t slice_length;

      outgoing_slice_idx--;
      slice_length =
          GRPC_SLICE_LENGTH(tcp->outgoing_buffer->slices[outgoing_slice_idx]);
      if (slice_length > trailing) {
        tcp->outgoing_byte_idx = slice_length - trailing;
        break;
      } else {
        trailing -= slice_length;
      }
    }

    if (outgoing_slice_idx == tcp->outgoing_buffer->count) {
      *error = GRPC_ERROR_NONE;
      grpc_slice_buffer_reset_and_unref_internal(tcp->outgoing_buffer);
      return true;
    }
  }
}

static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error* error) {
  grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
  grpc_closure* cb;

  if (error != GRPC_ERROR_NONE) {
    cb = tcp->write_cb;
    tcp->write_cb = nullptr;
    cb->cb(cb->cb_arg, error);
    TCP_UNREF(tcp, "write");
    return;
  }

  if (!tcp_flush(tcp, &error)) {
    if (grpc_tcp_trace.enabled()) {
      gpr_log(GPR_INFO, "write: delayed");
    }
    notify_on_write(tcp);
  } else {
    cb = tcp->write_cb;
    tcp->write_cb = nullptr;
    if (grpc_tcp_trace.enabled()) {
      const char* str = grpc_error_string(error);
      gpr_log(GPR_INFO, "write: %s", str);
    }
    GRPC_CLOSURE_SCHED(cb, error);
    TCP_UNREF(tcp, "write");
  }
}

static void tcp_write(grpc_endpoint* ep, grpc_slice_buffer* buf,
                      grpc_closure* cb, void* arg) {
  GPR_TIMER_SCOPE("tcp_write", 0);
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  grpc_error* error = GRPC_ERROR_NONE;

  if (grpc_tcp_trace.enabled()) {
    size_t i;

    for (i = 0; i < buf->count; i++) {
      char* data =
          grpc_dump_slice(buf->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
      gpr_log(GPR_INFO, "WRITE %p (peer=%s): %s", tcp, tcp->peer_string, data);
      gpr_free(data);
    }
  }

  GPR_ASSERT(tcp->write_cb == nullptr);

  tcp->outgoing_buffer_arg = arg;
  if (buf->length == 0) {
    GRPC_CLOSURE_SCHED(
        cb, grpc_fd_is_shutdown(tcp->em_fd)
                ? tcp_annotate_error(
                      GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF"), tcp)
                : GRPC_ERROR_NONE);
    tcp_shutdown_buffer_list(tcp);
    return;
  }
  tcp->outgoing_buffer = buf;
  tcp->outgoing_byte_idx = 0;
  if (arg) {
    GPR_ASSERT(grpc_event_engine_can_track_errors());
  }

  if (!tcp_flush(tcp, &error)) {
    TCP_REF(tcp, "write");
    tcp->write_cb = cb;
    if (grpc_tcp_trace.enabled()) {
      gpr_log(GPR_INFO, "write: delayed");
    }
    notify_on_write(tcp);
  } else {
    if (grpc_tcp_trace.enabled()) {
      const char* str = grpc_error_string(error);
      gpr_log(GPR_INFO, "write: %s", str);
    }
    GRPC_CLOSURE_SCHED(cb, error);
  }
}

static void tcp_add_to_pollset(grpc_endpoint* ep, grpc_pollset* pollset) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  grpc_pollset_add_fd(pollset, tcp->em_fd);
}

static void tcp_add_to_pollset_set(grpc_endpoint* ep,
                                   grpc_pollset_set* pollset_set) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  grpc_pollset_set_add_fd(pollset_set, tcp->em_fd);
}

static void tcp_delete_from_pollset_set(grpc_endpoint* ep,
                                        grpc_pollset_set* pollset_set) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  grpc_pollset_set_del_fd(pollset_set, tcp->em_fd);
}

static char* tcp_get_peer(grpc_endpoint* ep) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  return gpr_strdup(tcp->peer_string);
}

static int tcp_get_fd(grpc_endpoint* ep) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  return tcp->fd;
}

static grpc_resource_user* tcp_get_resource_user(grpc_endpoint* ep) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  return tcp->resource_user;
}

static bool tcp_can_track_err(grpc_endpoint* ep) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  if (!grpc_event_engine_can_track_errors()) {
    return false;
  }
  struct sockaddr addr;
  socklen_t len = sizeof(addr);
  if (getsockname(tcp->fd, &addr, &len) < 0) {
    return false;
  }
  if (addr.sa_family == AF_INET || addr.sa_family == AF_INET6) {
    return true;
  }
  return false;
}

static const grpc_endpoint_vtable vtable = {tcp_read,
                                            tcp_write,
                                            tcp_add_to_pollset,
                                            tcp_add_to_pollset_set,
                                            tcp_delete_from_pollset_set,
                                            tcp_shutdown,
                                            tcp_destroy,
                                            tcp_get_resource_user,
                                            tcp_get_peer,
                                            tcp_get_fd,
                                            tcp_can_track_err};
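
/* grpc_tcp_create below wires a polled fd into a grpc_endpoint. The default,
 * minimum, and maximum read chunk sizes and the resource quota may be
 * overridden via channel args; each chunk-size arg is limited to
 * [1, MAX_CHUNK_SIZE], and the default is then clamped into [min, max]
 * before use. */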
#define MAX_CHUNK_SIZE 32 * 1024 * 1024

grpc_endpoint* grpc_tcp_create(grpc_fd* em_fd,
                               const grpc_channel_args* channel_args,
                               const char* peer_string) {
  int tcp_read_chunk_size = GRPC_TCP_DEFAULT_READ_SLICE_SIZE;
  int tcp_max_read_chunk_size = 4 * 1024 * 1024;
  int tcp_min_read_chunk_size = 256;
  grpc_resource_quota* resource_quota = grpc_resource_quota_create(nullptr);
  if (channel_args != nullptr) {
    for (size_t i = 0; i < channel_args->num_args; i++) {
      if (0 ==
          strcmp(channel_args->args[i].key, GRPC_ARG_TCP_READ_CHUNK_SIZE)) {
        grpc_integer_options options = {tcp_read_chunk_size, 1, MAX_CHUNK_SIZE};
        tcp_read_chunk_size =
            grpc_channel_arg_get_integer(&channel_args->args[i], options);
      } else if (0 == strcmp(channel_args->args[i].key,
                             GRPC_ARG_TCP_MIN_READ_CHUNK_SIZE)) {
        grpc_integer_options options = {tcp_read_chunk_size, 1, MAX_CHUNK_SIZE};
        tcp_min_read_chunk_size =
            grpc_channel_arg_get_integer(&channel_args->args[i], options);
      } else if (0 == strcmp(channel_args->args[i].key,
                             GRPC_ARG_TCP_MAX_READ_CHUNK_SIZE)) {
        grpc_integer_options options = {tcp_read_chunk_size, 1, MAX_CHUNK_SIZE};
        tcp_max_read_chunk_size =
            grpc_channel_arg_get_integer(&channel_args->args[i], options);
      } else if (0 ==
                 strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
        grpc_resource_quota_unref_internal(resource_quota);
        resource_quota =
            grpc_resource_quota_ref_internal(static_cast<grpc_resource_quota*>(
                channel_args->args[i].value.pointer.p));
      }
    }
  }

  if (tcp_min_read_chunk_size > tcp_max_read_chunk_size) {
    tcp_min_read_chunk_size = tcp_max_read_chunk_size;
  }
  tcp_read_chunk_size = GPR_CLAMP(tcp_read_chunk_size, tcp_min_read_chunk_size,
                                  tcp_max_read_chunk_size);

  grpc_tcp* tcp = static_cast<grpc_tcp*>(gpr_malloc(sizeof(grpc_tcp)));
  tcp->base.vtable = &vtable;
  tcp->peer_string = gpr_strdup(peer_string);
  tcp->fd = grpc_fd_wrapped_fd(em_fd);
  tcp->read_cb = nullptr;
  tcp->write_cb = nullptr;
  tcp->release_fd_cb = nullptr;
  tcp->release_fd = nullptr;
  tcp->incoming_buffer = nullptr;
  tcp->target_length = static_cast<double>(tcp_read_chunk_size);
  tcp->min_read_chunk_size = tcp_min_read_chunk_size;
  tcp->max_read_chunk_size = tcp_max_read_chunk_size;
  tcp->bytes_read_this_round = 0;
  /* Will be set to false by the very first endpoint read function */
  tcp->is_first_read = true;
  tcp->bytes_counter = -1;
  tcp->socket_ts_enabled = false;
  tcp->outgoing_buffer_arg = nullptr;
  /* paired with unref in grpc_tcp_destroy */
  gpr_ref_init(&tcp->refcount, 1);
  gpr_atm_no_barrier_store(&tcp->shutdown_count, 0);
  tcp->em_fd = em_fd;
  grpc_slice_buffer_init(&tcp->last_read_buffer);
  tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
  grpc_resource_user_slice_allocator_init(
      &tcp->slice_allocator, tcp->resource_user, tcp_read_allocation_done, tcp);
  /* Tell network status tracker about new endpoint */
  grpc_network_status_register_endpoint(&tcp->base);
  grpc_resource_quota_unref_internal(resource_quota);
  gpr_mu_init(&tcp->tb_mu);
  tcp->tb_head = nullptr;
  /* Start being notified on errors if event engine can track errors. */
  if (grpc_event_engine_can_track_errors()) {
    /* Grab a ref to tcp so that we can safely access the tcp struct when
     * processing errors. We unref when we no longer want to track errors
     * separately. */
    TCP_REF(tcp, "error-tracking");
    gpr_atm_rel_store(&tcp->stop_error_notification, 0);
    GRPC_CLOSURE_INIT(&tcp->error_closure, tcp_handle_error, tcp,
                      grpc_schedule_on_exec_ctx);
    grpc_fd_notify_on_error(tcp->em_fd, &tcp->error_closure);
  }
  return &tcp->base;
}

int grpc_tcp_fd(grpc_endpoint* ep) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  GPR_ASSERT(ep->vtable == &vtable);
  return grpc_fd_wrapped_fd(tcp->em_fd);
}

void grpc_tcp_destroy_and_release_fd(grpc_endpoint* ep, int* fd,
                                     grpc_closure* done) {
  grpc_network_status_unregister_endpoint(ep);
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  GPR_ASSERT(ep->vtable == &vtable);
  tcp->release_fd = fd;
  tcp->release_fd_cb = done;
  grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
  if (grpc_event_engine_can_track_errors()) {
    /* Stop errors notification. */
    gpr_mu_lock(&tcp->tb_mu);
    grpc_core::TracedBuffer::Shutdown(
        &tcp->tb_head, tcp->outgoing_buffer_arg,
        GRPC_ERROR_CREATE_FROM_STATIC_STRING("endpoint destroyed"));
    gpr_mu_unlock(&tcp->tb_mu);
    tcp->outgoing_buffer_arg = nullptr;
    gpr_atm_no_barrier_store(&tcp->stop_error_notification, true);
    grpc_fd_set_error(tcp->em_fd);
  }
  TCP_UNREF(tcp, "destroy");
}

#endif /* GRPC_POSIX_SOCKET_TCP */