The C and C++ Include Header Files
/usr/include/c++/13/experimental/io_context
$ cat /usr/include/c++/13/experimental/io_context

// <experimental/io_context> -*- C++ -*-

// Copyright (C) 2015-2023 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file experimental/io_context
 *  This is a TS C++ Library header.
 *  @ingroup networking-ts
 */

#ifndef _GLIBCXX_EXPERIMENTAL_IO_SERVICE
#define _GLIBCXX_EXPERIMENTAL_IO_SERVICE 1

#pragma GCC system_header

#include <bits/requires_hosted.h> // experimental is currently omitted

#if __cplusplus >= 201402L

#include <atomic>
#include <chrono>
#include <forward_list>
#include <functional>
#include <system_error>
#include <thread>
#include <vector>
#include <experimental/netfwd>
#include <experimental/executor>
#if _GLIBCXX_HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef _GLIBCXX_HAVE_POLL_H
# include <poll.h>
#endif
#ifdef _GLIBCXX_HAVE_FCNTL_H
# include <fcntl.h>
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
namespace experimental
{
namespace net
{
inline namespace v1
{

  /** @addtogroup networking-ts
   *  @{
   */

  class __socket_impl;

  /// An ExecutionContext for I/O operations.
  class io_context : public execution_context
  {
  public:
    // types:

    /// An executor for an io_context.
    class executor_type
    {
    public:
      // construct / copy / destroy:

      executor_type(const executor_type& __other) noexcept = default;
      executor_type(executor_type&& __other) noexcept = default;

      executor_type& operator=(const executor_type& __other) noexcept = default;
      executor_type& operator=(executor_type&& __other) noexcept = default;

      // executor operations:

      bool running_in_this_thread() const noexcept
      {
#ifdef _GLIBCXX_HAS_GTHREADS
        lock_guard<execution_context::mutex_type> __lock(_M_ctx->_M_mtx);
        auto __end = _M_ctx->_M_call_stack.end();
        return std::find(_M_ctx->_M_call_stack.begin(), __end,
                         this_thread::get_id()) != __end;
#else
        return _M_ctx->_M_run_count != 0;
#endif
      }

      io_context& context() const noexcept { return *_M_ctx; }

      void on_work_started() const noexcept { ++_M_ctx->_M_work_count; }
      void on_work_finished() const noexcept { --_M_ctx->_M_work_count; }

      template<typename _Func, typename _ProtoAllocator>
        void
        dispatch(_Func&& __f, const _ProtoAllocator& __a) const
        {
          if (running_in_this_thread())
            decay_t<_Func>{std::forward<_Func>(__f)}();
          else
            post(std::forward<_Func>(__f), __a);
        }

      template<typename _Func, typename _ProtoAllocator>
        void
        post(_Func&& __f, const _ProtoAllocator& __a) const
        {
          lock_guard<execution_context::mutex_type> __lock(_M_ctx->_M_mtx);
          // TODO (re-use functionality in system_context)
          _M_ctx->_M_reactor._M_notify();
        }

      template<typename _Func, typename _ProtoAllocator>
        void
        defer(_Func&& __f, const _ProtoAllocator& __a) const
        { post(std::forward<_Func>(__f), __a); }

    private:
      friend io_context;

      explicit
      executor_type(io_context& __ctx) : _M_ctx(std::addressof(__ctx)) { }

      io_context* _M_ctx;
    };

    using count_type = size_t;

    // construct / copy / destroy:

    io_context() : _M_work_count(0) { }

    explicit
    io_context(int __concurrency_hint) : _M_work_count(0) { }

    io_context(const io_context&) = delete;
    io_context& operator=(const io_context&) = delete;

    // io_context operations:

    executor_type get_executor() noexcept { return executor_type(*this); }

    count_type
    run()
    {
      count_type __n = 0;
      while (run_one())
        if (__n != numeric_limits<count_type>::max())
          ++__n;
      return __n;
    }

    template<typename _Rep, typename _Period>
      count_type
      run_for(const chrono::duration<_Rep, _Period>& __rel_time)
      { return run_until(chrono::steady_clock::now() + __rel_time); }

    template<typename _Clock, typename _Duration>
      count_type
      run_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
        count_type __n = 0;
        while (run_one_until(__abs_time))
          if (__n != numeric_limits<count_type>::max())
            ++__n;
        return __n;
      }

    count_type
    run_one()
    { return _M_do_one(chrono::milliseconds{-1}); }

    template<typename _Rep, typename _Period>
      count_type
      run_one_for(const chrono::duration<_Rep, _Period>& __rel_time)
      { return run_one_until(chrono::steady_clock::now() + __rel_time); }

    template<typename _Clock, typename _Duration>
      count_type
      run_one_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
        auto __now = _Clock::now();
        while (__now < __abs_time)
          {
            using namespace std::chrono;
            auto __ms = duration_cast<milliseconds>(__abs_time - __now);
            if (_M_do_one(__ms))
              return 1;
            __now = _Clock::now();
          }
        return 0;
      }

    count_type
    poll()
    {
      count_type __n = 0;
      while (poll_one())
        if (__n != numeric_limits<count_type>::max())
          ++__n;
      return __n;
    }

    count_type
    poll_one()
    { return _M_do_one(chrono::milliseconds{0}); }

    void stop()
    {
      lock_guard<execution_context::mutex_type> __lock(_M_mtx);
      _M_stopped = true;
      _M_reactor._M_notify();
    }

    bool stopped() const noexcept
    {
      lock_guard<execution_context::mutex_type> __lock(_M_mtx);
      return _M_stopped;
    }

    void restart()
    {
      _M_stopped = false;
    }

  private:

    template<typename _Clock, typename _WaitTraits>
      friend class basic_waitable_timer;

    friend __socket_impl;

    template<typename _Protocol>
      friend class __basic_socket_impl;

    template<typename _Protocol>
      friend class basic_socket;

    template<typename _Protocol>
      friend class basic_datagram_socket;

    template<typename _Protocol>
      friend class basic_stream_socket;

    template<typename _Protocol>
      friend class basic_socket_acceptor;

    count_type
    _M_outstanding_work() const
    { return _M_work_count + !_M_ops.empty(); }

    struct __timer_queue_base : execution_context::service
    {
      // return milliseconds until next timer expires, or milliseconds::max()
      virtual chrono::milliseconds _M_next() const = 0;
      virtual bool run_one() = 0;

    protected:
      explicit
      __timer_queue_base(execution_context& __ctx) : service(__ctx)
      {
        auto& __ioc = static_cast<io_context&>(__ctx);
        lock_guard<execution_context::mutex_type> __lock(__ioc._M_mtx);
        __ioc._M_timers.push_back(this);
      }

      mutable execution_context::mutex_type _M_qmtx;
    };

    template<typename _Timer, typename _Key = typename _Timer::_Key>
      struct __timer_queue : __timer_queue_base
      {
        using key_type = __timer_queue;

        explicit
        __timer_queue(execution_context& __ctx) : __timer_queue_base(__ctx)
        { }

        void shutdown() noexcept { }

        io_context& context() noexcept
        { return static_cast<io_context&>(service::context()); }

        // Start an asynchronous wait.
        void
        push(const _Timer& __t, function<void(error_code)> __h)
        {
          context().get_executor().on_work_started();
          lock_guard<execution_context::mutex_type> __lock(_M_qmtx);
          _M_queue.emplace(__t, _M_next_id++, std::move(__h));
          // no need to notify reactor unless this timer went to the front?
        }

        // Cancel all outstanding waits for __t
        size_t
        cancel(const _Timer& __t)
        {
          lock_guard<execution_context::mutex_type> __lock(_M_qmtx);
          size_t __count = 0;
          auto __last = _M_queue.end();
          for (auto __it = _M_queue.begin(), __end = __last; __it != __end;
               ++__it)
            {
              if (__it->_M_key == __t._M_key.get())
                {
                  __it->cancel();
                  __last = __it;
                  ++__count;
                }
            }
          if (__count)
            _M_queue._M_sort_to(__last);
          return __count;
        }

        // Cancel oldest outstanding wait for __t
        bool
        cancel_one(const _Timer& __t)
        {
          lock_guard<execution_context::mutex_type> __lock(_M_qmtx);
          const auto __end = _M_queue.end();
          auto __oldest = __end;
          for (auto __it = _M_queue.begin(); __it != __end; ++__it)
            if (__it->_M_key == __t._M_key.get())
              if (__oldest == __end || __it->_M_id < __oldest->_M_id)
                __oldest = __it;
          if (__oldest == __end)
            return false;
          __oldest->cancel();
          _M_queue._M_sort_to(__oldest);
          return true;
        }

        chrono::milliseconds
        _M_next() const override
        {
          typename _Timer::time_point __exp;
          {
            lock_guard<execution_context::mutex_type> __lock(_M_qmtx);
            if (_M_queue.empty())
              return chrono::milliseconds::max();  // no pending timers
            if (_M_queue.top()._M_key == nullptr)
              return chrono::milliseconds::zero(); // cancelled, run now
            __exp = _M_queue.top()._M_expiry;
          }
          auto __dur = _Timer::traits_type::to_wait_duration(__exp);
          if (__dur < __dur.zero())
            __dur = __dur.zero();
          return chrono::duration_cast<chrono::milliseconds>(__dur);
        }

      private:

        bool run_one() override
        {
          auto __now = _Timer::clock_type::now();
          function<void(error_code)> __h;
          error_code __ec;
          {
            lock_guard<execution_context::mutex_type> __lock(_M_qmtx);

            if (_M_queue.top()._M_key == nullptr) // cancelled
              {
                __h = std::move(_M_queue.top()._M_h);
                __ec = std::make_error_code(errc::operation_canceled);
                _M_queue.pop();
              }
            else if (_M_queue.top()._M_expiry <= _Timer::clock_type::now())
              {
                __h = std::move(_M_queue.top()._M_h);
                _M_queue.pop();
              }
          }
          if (__h)
            {
              __h(__ec);
              context().get_executor().on_work_finished();
              return true;
            }
          return false;
        }

        using __timer_id_type = uint64_t;

        struct __pending_timer
        {
          __pending_timer(const _Timer& __t, uint64_t __id,
                          function<void(error_code)> __h)
          : _M_expiry(__t.expiry()), _M_key(__t._M_key.get()), _M_id(__id),
            _M_h(std::move(__h))
          { }

          typename _Timer::time_point _M_expiry;
          _Key* _M_key;
          __timer_id_type _M_id;
          function<void(error_code)> _M_h;

          void cancel() { _M_expiry = _M_expiry.min(); _M_key = nullptr; }

          bool
          operator<(const __pending_timer& __rhs) const
          { return _M_expiry < __rhs._M_expiry; }
        };

        struct __queue : priority_queue<__pending_timer>
        {
          using iterator =
            typename priority_queue<__pending_timer>::container_type::iterator;

          // expose begin/end/erase for direct access to underlying container
          iterator begin() { return this->c.begin(); }
          iterator end() { return this->c.end(); }
          iterator erase(iterator __it) { return this->c.erase(__it); }

          void
          _M_sort_to(iterator __it)
          { std::stable_sort(this->c.begin(), ++__it); }
        };

        __queue _M_queue;
        __timer_id_type _M_next_id = 0;
      };

    template<typename _Timer, typename _CompletionHandler>
      void
      async_wait(const _Timer& __timer, _CompletionHandler&& __h)
      {
        auto& __queue = use_service<__timer_queue<_Timer>>(*this);
        __queue.push(__timer, std::move(__h));
        _M_reactor._M_notify();
      }

    // Cancel all wait operations initiated by __timer.
    template<typename _Timer>
      size_t
      cancel(const _Timer& __timer)
      {
        if (!has_service<__timer_queue<_Timer>>(*this))
          return 0;

        auto __c = use_service<__timer_queue<_Timer>>(*this).cancel(__timer);
        if (__c != 0)
          _M_reactor._M_notify();
        return __c;
      }

    // Cancel the oldest wait operation initiated by __timer.
    template<typename _Timer>
      size_t
      cancel_one(const _Timer& __timer)
      {
        if (!has_service<__timer_queue<_Timer>>(*this))
          return 0;

        if (use_service<__timer_queue<_Timer>>(*this).cancel_one(__timer))
          {
            _M_reactor._M_notify();
            return 1;
          }
        return 0;
      }

    // The caller must know what the wait-type __w will be interpreted.
    // In the current implementation the reactor is based on <poll.h>
    // so the parameter must be one of POLLIN, POLLOUT or POLLERR.
    template<typename _Op>
      void
      async_wait(int __fd, int __w, _Op&& __op)
      {
        lock_guard<execution_context::mutex_type> __lock(_M_mtx);
        // TODO need push_back, use std::list not std::forward_list
        auto __tail = _M_ops.before_begin(), __it = _M_ops.begin();
        while (__it != _M_ops.end())
          {
            ++__it;
            ++__tail;
          }
        using __type = __async_operation_impl<_Op>;
        _M_ops.emplace_after(__tail,
                             make_unique<__type>(std::move(__op), __fd, __w));
        _M_reactor._M_fd_interest(__fd, __w);
      }

    void _M_add_fd(int __fd) { _M_reactor._M_add_fd(__fd); }
    void _M_remove_fd(int __fd) { _M_reactor._M_remove_fd(__fd); }

    void cancel(int __fd, error_code&)
    {
      lock_guard<execution_context::mutex_type> __lock(_M_mtx);
      const auto __end = _M_ops.end();
      auto __it = _M_ops.begin();
      auto __prev = _M_ops.before_begin();
      while (__it != __end && (*__it)->_M_is_cancelled())
        {
          ++__it;
          ++__prev;
        }
      auto __cancelled = __prev;
      while (__it != __end)
        {
          if ((*__it)->_M_fd == __fd)
            {
              (*__it)->cancel();
              ++__it;
              _M_ops.splice_after(__cancelled, _M_ops, __prev);
              ++__cancelled;
            }
          else
            {
              ++__it;
              ++__prev;
            }
        }
      _M_reactor._M_not_interested(__fd);
    }

    struct __async_operation
    {
      __async_operation(int __fd, int __ev) : _M_fd(__fd), _M_ev(__ev) { }

      virtual ~__async_operation() = default;

      int _M_fd;
      short _M_ev;

      void cancel() { _M_fd = -1; }
      bool _M_is_cancelled() const { return _M_fd == -1; }
      virtual void run(io_context&) = 0;
    };

    template<typename _Op>
      struct __async_operation_impl : __async_operation
      {
        __async_operation_impl(_Op&& __op, int __fd, int __ev)
        : __async_operation{__fd, __ev}, _M_op(std::move(__op)) { }

        _Op _M_op;

        void run(io_context& __ctx)
        {
          if (_M_is_cancelled())
            _M_op(std::make_error_code(errc::operation_canceled));
          else
            _M_op(error_code{});
        }
      };

    atomic<count_type> _M_work_count;
    mutable execution_context::mutex_type _M_mtx;
    queue<function<void()>> _M_op;
    bool _M_stopped = false;

    struct __monitor
    {
      __monitor(io_context& __c) : _M_ctx(__c)
      {
#ifdef _GLIBCXX_HAS_GTHREADS
        lock_guard<execution_context::mutex_type> __lock(_M_ctx._M_mtx);
        _M_ctx._M_call_stack.push_back(this_thread::get_id());
#else
        _M_ctx._M_run_count++;
#endif
      }

      ~__monitor()
      {
#ifdef _GLIBCXX_HAS_GTHREADS
        lock_guard<execution_context::mutex_type> __lock(_M_ctx._M_mtx);
        _M_ctx._M_call_stack.pop_back();
#else
        _M_ctx._M_run_count--;
#endif
        if (_M_ctx._M_outstanding_work() == 0)
          {
            _M_ctx._M_stopped = true;
            _M_ctx._M_reactor._M_notify();
          }
      }

      __monitor(__monitor&&) = delete;

      io_context& _M_ctx;
    };

    bool
    _M_do_one(chrono::milliseconds __timeout)
    {
      const bool __block = __timeout != chrono::milliseconds::zero();

      __reactor::__fdvec __fds;

      __monitor __mon{*this};

      __timer_queue_base* __timerq = nullptr;
      unique_ptr<__async_operation> __async_op;

      while (true)
        {
          if (__timerq)
            {
              if (__timerq->run_one())
                return true;
              else
                __timerq = nullptr;
            }

          if (__async_op)
            {
              __async_op->run(*this);
              // TODO need to unregister __async_op
              return true;
            }

          chrono::milliseconds __ms{0};

          {
            lock_guard<execution_context::mutex_type> __lock(_M_mtx);

            if (_M_stopped)
              return false;

            // find first timer with something to do
            for (auto __q : _M_timers)
              {
                auto __next = __q->_M_next();
                if (__next == __next.zero())  // ready to run immediately
                  {
                    __timerq = __q;
                    __ms = __next;
                    break;
                  }
                else if (__next != __next.max() && __block
                    && (__next < __ms || __timerq == nullptr))
                  {
                    __timerq = __q;
                    __ms = __next;
                  }
              }

            if (__timerq && __ms == __ms.zero())
              continue;  // restart loop to run a timer immediately

            if (!_M_ops.empty() && _M_ops.front()->_M_is_cancelled())
              {
                _M_ops.front().swap(__async_op);
                _M_ops.pop_front();
                continue;
              }

            // TODO run any posted items

            if (__block)
              {
                if (__timerq == nullptr)
                  __ms = __timeout;
                else if (__ms.zero() <= __timeout && __timeout < __ms)
                  __ms = __timeout;
                else if (__ms.count() > numeric_limits<int>::max())
                  __ms = chrono::milliseconds{numeric_limits<int>::max()};
              }
            // else __ms == 0 and poll() will return immediately

          }

          auto __res = _M_reactor.wait(__fds, __ms);

          if (__res == __reactor::_S_retry)
            continue;

          if (__res == __reactor::_S_timeout)
            {
              if (__timerq == nullptr)
                return false;
              else
                continue;  // timed out, so restart loop and process the timer
            }

          __timerq = nullptr;

          if (__fds.empty()) // nothing to do
            return false;

          lock_guard<execution_context::mutex_type> __lock(_M_mtx);
          for (auto __it = _M_ops.begin(), __end = _M_ops.end(),
              __prev = _M_ops.before_begin(); __it != __end; ++__it, ++__prev)
            {
              auto& __op = **__it;
              auto __pos = std::lower_bound(__fds.begin(), __fds.end(),
                  __op._M_fd,
                  [](const auto& __p, int __fd) { return __p.fd < __fd; });
              if (__pos != __fds.end() && __pos->fd == __op._M_fd
                  && __pos->revents & __op._M_ev)
                {
                  __it->swap(__async_op);
                  _M_ops.erase_after(__prev);
                  break;  // restart loop and run op
                }
            }
        }
    }

    struct __reactor
    {
#ifdef _GLIBCXX_HAVE_POLL_H
      __reactor() : _M_fds(1)
      {
        int __pipe[2];
        if (::pipe(__pipe) == -1)
          __throw_system_error(errno);
        if (::fcntl(__pipe[0], F_SETFL, O_NONBLOCK) == -1
            || ::fcntl(__pipe[1], F_SETFL, O_NONBLOCK) == -1)
          {
            int __e = errno;
            ::close(__pipe[0]);
            ::close(__pipe[1]);
            __throw_system_error(__e);
          }
        _M_fds.back().events = POLLIN;
        _M_fds.back().fd = __pipe[0];
        _M_notify_wr = __pipe[1];
      }

      ~__reactor()
      {
        ::close(_M_fds.back().fd);
        ::close(_M_notify_wr);
      }
#endif

      // write a notification byte to the pipe (ignoring errors)
      void _M_notify()
      {
        int __n;
        do {
          __n = ::write(_M_notify_wr, "", 1);
        } while (__n == -1 && errno == EINTR);
      }

      // read all notification bytes from the pipe
      void _M_on_notify()
      {
        // Drain the pipe.
        char __buf[64];
        ssize_t __n;
        do {
          __n = ::read(_M_fds.back().fd, __buf, sizeof(__buf));
        } while (__n != -1 || errno == EINTR);
      }

      void
      _M_add_fd(int __fd)
      {
        auto __pos = _M_lower_bound(__fd);
        if (__pos->fd == __fd)
          __throw_system_error((int)errc::invalid_argument);
        _M_fds.insert(__pos, __fdvec::value_type{})->fd = __fd;
        _M_notify();
      }

      void
      _M_remove_fd(int __fd)
      {
        auto __pos = _M_lower_bound(__fd);
        if (__pos->fd == __fd)
          _M_fds.erase(__pos);
        // else bug!
        _M_notify();
      }

      void
      _M_fd_interest(int __fd, int __w)
      {
        auto __pos = _M_lower_bound(__fd);
        if (__pos->fd == __fd)
          __pos->events |= __w;
        // else bug!
        _M_notify();
      }

      void
      _M_not_interested(int __fd)
      {
        auto __pos = _M_lower_bound(__fd);
        if (__pos->fd == __fd)
          __pos->events = 0;
        _M_notify();
      }

#ifdef _GLIBCXX_HAVE_POLL_H
      using __fdvec = vector<::pollfd>;
#else
      struct dummy_pollfd { int fd = -1; short events = 0, revents = 0; };
      using __fdvec = vector<dummy_pollfd>;
#endif

      // Find first element p such that !(p.fd < __fd)
      // N.B. always returns a dereferencable iterator.
      __fdvec::iterator
      _M_lower_bound(int __fd)
      {
        return std::lower_bound(_M_fds.begin(), _M_fds.end() - 1,
            __fd, [](const auto& __p, int __fd) { return __p.fd < __fd; });
      }

      enum __status { _S_retry, _S_timeout, _S_ok, _S_error };

      __status
      wait(__fdvec& __fds, chrono::milliseconds __timeout)
      {
#ifdef _GLIBCXX_HAVE_POLL_H
        // XXX not thread-safe!
        __fds = _M_fds;  // take snapshot to pass to poll()

        int __res = ::poll(__fds.data(), __fds.size(), __timeout.count());

        if (__res == -1)
          {
            __fds.clear();
            if (errno == EINTR)
              return _S_retry;
            return _S_error; // XXX ???
          }
        else if (__res == 0)
          {
            __fds.clear();
            return _S_timeout;
          }
        else if (__fds.back().revents != 0)  // something changed, restart
          {
            __fds.clear();
            _M_on_notify();
            return _S_retry;
          }

        auto __part = std::stable_partition(__fds.begin(), __fds.end() - 1,
            [](const __fdvec::value_type& __p) { return __p.revents != 0; });
        __fds.erase(__part, __fds.end());

        return _S_ok;
#else
        (void) __timeout;
        __fds.clear();
        return _S_error;
#endif
      }

      __fdvec _M_fds;   // _M_fds.back() is the read end of the self-pipe
      int _M_notify_wr; // write end of the self-pipe
    };

    __reactor _M_reactor;

    vector<__timer_queue_base*> _M_timers;
    forward_list<unique_ptr<__async_operation>> _M_ops;

#ifdef _GLIBCXX_HAS_GTHREADS
    vector<thread::id> _M_call_stack;
#else
    int _M_run_count = 0;
#endif
  };

  inline bool
  operator==(const io_context::executor_type& __a,
             const io_context::executor_type& __b) noexcept
  {
    // https://github.com/chriskohlhoff/asio-tr2/issues/201
    using executor_type = io_context::executor_type;
    return std::addressof(executor_type(__a).context())
      == std::addressof(executor_type(__b).context());
  }

  inline bool
  operator!=(const io_context::executor_type& __a,
             const io_context::executor_type& __b) noexcept
  { return !(__a == __b); }

  template<> struct is_executor<io_context::executor_type> : true_type {};

  /// @}

} // namespace v1
} // namespace net
} // namespace experimental
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif // C++14

#endif // _GLIBCXX_EXPERIMENTAL_IO_SERVICE