The C and C++ Include Header Files
/usr/include/c++/13/experimental/executor
$ cat -n /usr/include/c++/13/experimental/executor

// <experimental/executor> -*- C++ -*-

// Copyright (C) 2015-2023 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file experimental/executor
 *  This is a TS C++ Library header.
 *  @ingroup networking-ts
 */

#ifndef _GLIBCXX_EXPERIMENTAL_EXECUTOR
#define _GLIBCXX_EXPERIMENTAL_EXECUTOR 1

#pragma GCC system_header

#include  // experimental is currently omitted

#if __cplusplus >= 201402L

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
51 52 namespace std _GLIBCXX_VISIBILITY(default) 53 { 54 _GLIBCXX_BEGIN_NAMESPACE_VERSION 55 namespace experimental 56 { 57 namespace net 58 { 59 inline namespace v1 60 { 61 62 /** @addtogroup networking-ts 63 * @{ 64 */ 65 66 /// Customization point for asynchronous operations. 67 template
68 class async_result; 69 70 /// Convenience utility to help implement asynchronous operations. 71 template
72 class async_completion; 73 74 template
> 75 struct __associated_allocator_impl 76 { 77 using type = _ProtoAlloc; 78 79 static type 80 _S_get(const _Tp&, const _ProtoAlloc& __a) noexcept { return __a; } 81 }; 82 83 template
84 struct __associated_allocator_impl<_Tp, _ProtoAlloc, 85 __void_t
> 86 { 87 using type = typename _Tp::allocator_type; 88 89 static type 90 _S_get(const _Tp& __t, const _ProtoAlloc&) noexcept 91 { return __t.get_allocator(); } 92 }; 93 94 /// Helper to associate an allocator with a type. 95 template
> 96 struct associated_allocator 97 : __associated_allocator_impl<_Tp, _ProtoAllocator> 98 { 99 static auto 100 get(const _Tp& __t, 101 const _ProtoAllocator& __a = _ProtoAllocator()) noexcept 102 { 103 using _Impl = __associated_allocator_impl<_Tp, _ProtoAllocator>; 104 return _Impl::_S_get(__t, __a); 105 } 106 }; 107 108 /// Alias template for associated_allocator. 109 template
> 110 using associated_allocator_t 111 = typename associated_allocator<_Tp, _ProtoAllocator>::type; 112 113 // get_associated_allocator: 114 115 template
116 inline associated_allocator_t<_Tp> 117 get_associated_allocator(const _Tp& __t) noexcept 118 { return associated_allocator<_Tp>::get(__t); } 119 120 template
121 inline associated_allocator_t<_Tp, _ProtoAllocator> 122 get_associated_allocator(const _Tp& __t, 123 const _ProtoAllocator& __a) noexcept 124 { return associated_allocator<_Tp, _ProtoAllocator>::get(__t, __a); } 125 126 enum class fork_event { prepare, parent, child }; 127 128 /// An extensible, type-safe, polymorphic set of services. 129 class execution_context; 130 131 class service_already_exists : public logic_error 132 { 133 public: 134 // _GLIBCXX_RESOLVE_LIB_DEFECTS 135 // 3414. service_already_exists has no usable constructors 136 service_already_exists() : logic_error("service already exists") { } 137 }; 138 139 template
struct is_executor; 140 141 struct executor_arg_t { }; 142 143 constexpr executor_arg_t executor_arg = executor_arg_t(); 144 145 /// Trait for determining whether to construct an object with an executor. 146 template
struct uses_executor; 147 148 template
> 149 struct __associated_executor_impl 150 { 151 using type = _Executor; 152 153 static type 154 _S_get(const _Tp&, const _Executor& __e) noexcept { return __e; } 155 }; 156 157 template
158 struct __associated_executor_impl<_Tp, _Executor, 159 __void_t
> 160 { 161 using type = typename _Tp::executor_type; 162 163 static type 164 _S_get(const _Tp& __t, const _Executor&) noexcept 165 { return __t.get_executor(); } 166 }; 167 168 /// Helper to associate an executor with a type. 169 template
170 struct associated_executor 171 : __associated_executor_impl<_Tp, _Executor> 172 { 173 static auto 174 get(const _Tp& __t, const _Executor& __e = _Executor()) noexcept 175 { return __associated_executor_impl<_Tp, _Executor>::_S_get(__t, __e); } 176 }; 177 178 179 template
180 using associated_executor_t 181 = typename associated_executor<_Tp, _Executor>::type; 182 183 template
184 using __is_exec_context 185 = is_convertible<_ExecutionContext&, execution_context&>; 186 187 template
188 using __executor_t = typename _Tp::executor_type; 189 190 // get_associated_executor: 191 192 template
193 inline associated_executor_t<_Tp> 194 get_associated_executor(const _Tp& __t) noexcept 195 { return associated_executor<_Tp>::get(__t); } 196 197 template
198 inline 199 enable_if_t
::value, 200 associated_executor_t<_Tp, _Executor>> 201 get_associated_executor(const _Tp& __t, const _Executor& __ex) 202 { return associated_executor<_Tp, _Executor>::get(__t, __ex); } 203 204 template
205 inline 206 enable_if_t<__is_exec_context<_ExecutionContext>::value, 207 associated_executor_t<_Tp, __executor_t<_ExecutionContext>>> 208 get_associated_executor(const _Tp& __t, _ExecutionContext& __ctx) noexcept 209 { return net::get_associated_executor(__t, __ctx.get_executor()); } 210 211 212 /// Helper to bind an executor to an object or function. 213 template
214 class executor_binder; 215 216 template
217 class async_result
, _Signature>; 218 219 template
220 struct associated_allocator
, 221 _ProtoAllocator>; 222 223 template
224 struct associated_executor
, _Executor1>; 225 226 // bind_executor: 227 228 template
229 inline 230 enable_if_t
::value, 231 executor_binder
, _Executor>> 232 bind_executor(const _Executor& __ex, _Tp&& __t) 233 { return { std::forward<_Tp>(__t), __ex }; } 234 235 template
236 inline 237 enable_if_t<__is_exec_context<_ExecutionContext>::value, 238 executor_binder
, __executor_t<_ExecutionContext>>> 239 bind_executor(_ExecutionContext& __ctx, _Tp&& __t) 240 { return { __ctx.get_executor(), forward<_Tp>(__t) }; } 241 242 243 /// A scope-guard type to record when work is started and finished. 244 template
245 class executor_work_guard; 246 247 // make_work_guard: 248 249 template
250 inline 251 enable_if_t
::value, executor_work_guard<_Executor>> 252 make_work_guard(const _Executor& __ex) 253 { return executor_work_guard<_Executor>(__ex); } 254 255 template
256 inline 257 enable_if_t<__is_exec_context<_ExecutionContext>::value, 258 executor_work_guard<__executor_t<_ExecutionContext>>> 259 make_work_guard(_ExecutionContext& __ctx) 260 { return net::make_work_guard(__ctx.get_executor()); } 261 262 template
263 inline 264 enable_if_t<__not_<__or_
, __is_exec_context<_Tp>>>::value, 265 executor_work_guard
>> 266 make_work_guard(const _Tp& __t) 267 { return net::get_associated_executor(__t); } 268 269 template
270 auto 271 make_work_guard(const _Tp& __t, _Up&& __u) 272 -> decltype(net::make_work_guard( 273 net::get_associated_executor(__t, forward<_Up>(__u)))) 274 { 275 return net::make_work_guard( 276 net::get_associated_executor(__t, forward<_Up>(__u))); 277 } 278 279 /// Allows function objects to execute on any thread. 280 class system_executor; 281 282 /// The execution context associated with system_executor objects. 283 class system_context; 284 285 inline bool 286 operator==(const system_executor&, const system_executor&) { return true; } 287 288 inline bool 289 operator!=(const system_executor&, const system_executor&) { return false; } 290 291 /// Exception thrown by empty executors. 292 class bad_executor; 293 294 /// Polymorphic wrapper for types satisfying the Executor requirements. 295 class executor; 296 297 bool 298 operator==(const executor&, const executor&) noexcept; 299 300 bool 301 operator==(const executor&, nullptr_t) noexcept; 302 303 bool 304 operator==(nullptr_t, const executor&) noexcept; 305 306 bool 307 operator!=(const executor&, const executor&) noexcept; 308 309 bool 310 operator!=(const executor&, nullptr_t) noexcept; 311 312 bool 313 operator!=(nullptr_t, const executor&) noexcept; 314 315 void swap(executor&, executor&) noexcept; 316 317 // dispatch: 318 319 template
320 __deduced_t<_CompletionToken, void()> 321 dispatch(_CompletionToken&& __token); 322 323 template
324 __deduced_t<_CompletionToken, void()> 325 dispatch(const _Executor& __ex, _CompletionToken&& __token); 326 327 template
328 __deduced_t<_CompletionToken, void()> 329 dispatch(_ExecutionContext& __ctx, _CompletionToken&& __token); 330 331 // post: 332 333 template
334 __deduced_t<_CompletionToken, void()> 335 post(_CompletionToken&& __token); 336 template
337 enable_if_t
::value, 338 __deduced_t<_CompletionToken, void()>> 339 post(const _Executor& __ex, _CompletionToken&& __token); 340 template
341 enable_if_t<__is_exec_context<_ExecutionContext>::value, 342 __deduced_t<_CompletionToken, void()>> 343 post(_ExecutionContext& __ctx, _CompletionToken&& __token); 344 345 // defer: 346 347 template
348 __deduced_t<_CompletionToken, void()> 349 defer(_CompletionToken&& __token); 350 template
351 __deduced_t<_CompletionToken, void()> 352 defer(const _Executor& __ex, _CompletionToken&& __token); 353 template
354 __deduced_t<_CompletionToken, void()> 355 defer(_ExecutionContext& __ctx, _CompletionToken&& __token); 356 357 template
358 class strand; 359 360 template
361 bool 362 operator==(const strand<_Executor>& __a, const strand<_Executor>& __b); 363 364 template
365 bool 366 operator!=(const strand<_Executor>& __a, const strand<_Executor>& __b) 367 { return !(__a == __b); } 368 369 template
370 class async_result 371 { 372 public: 373 using completion_handler_type = _CompletionToken; 374 using return_type = void; 375 376 explicit async_result(completion_handler_type&) {} 377 async_result(const async_result&) = delete; 378 async_result& operator=(const async_result&) = delete; 379 380 return_type get() {} 381 }; 382 383 template
384 class async_completion 385 { 386 using __result_type 387 = async_result
, _Signature>; 388 389 public: 390 using completion_handler_type 391 = typename __result_type::completion_handler_type; 392 393 private: 394 using __handler_type = __conditional_t< 395 is_same<_CompletionToken, completion_handler_type>::value, 396 completion_handler_type&, 397 completion_handler_type>; 398 399 public: 400 explicit 401 async_completion(_CompletionToken& __t) 402 : completion_handler(std::forward<__handler_type>(__t)), 403 result(completion_handler) 404 { } 405 406 async_completion(const async_completion&) = delete; 407 async_completion& operator=(const async_completion&) = delete; 408 409 __handler_type completion_handler; 410 __result_type result; 411 }; 412 413 414 class execution_context 415 { 416 public: 417 class service 418 { 419 protected: 420 // construct / copy / destroy: 421 422 explicit 423 service(execution_context& __owner) : _M_context(__owner) { } 424 425 service(const service&) = delete; 426 service& operator=(const service&) = delete; 427 428 virtual ~service() { } // TODO should not be inline 429 430 // service observers: 431 432 execution_context& context() const noexcept { return _M_context; } 433 434 private: 435 // service operations: 436 437 virtual void shutdown() noexcept = 0; 438 virtual void notify_fork(fork_event) { } 439 440 friend class execution_context; 441 execution_context& _M_context; 442 }; 443 444 // construct / copy / destroy: 445 446 execution_context() { } 447 448 execution_context(const execution_context&) = delete; 449 execution_context& operator=(const execution_context&) = delete; 450 451 virtual ~execution_context() 452 { 453 shutdown(); 454 destroy(); 455 } 456 457 // execution context operations: 458 459 void 460 notify_fork(fork_event __e) 461 { 462 auto __l = [=](auto& __svc) { __svc._M_ptr->notify_fork(__e); }; 463 if (__e == fork_event::prepare) 464 std::for_each(_M_services.rbegin(), _M_services.rend(), __l); 465 else 466 std::for_each(_M_services.begin(), _M_services.end(), __l); 467 } 468 469 protected: 470 // execution context protected operations: 471 472 void 473 shutdown() 474 { 475 std::for_each(_M_services.rbegin(), _M_services.rend(), 476 [=](auto& __svc) { 477 if (__svc._M_active) 478 { 479 __svc._M_ptr->shutdown(); 480 __svc._M_active = false; 481 } 482 }); 483 } 484 485 void 486 destroy() 487 { 488 while (_M_services.size()) 489 _M_services.pop_back(); 490 _M_keys.clear(); 491 } 492 493 protected: 494 495 template
496 static void 497 _S_deleter(service* __svc) { delete static_cast<_Service*>(__svc); } 498 499 struct _ServicePtr 500 { 501 template
502 explicit 503 _ServicePtr(_Service* __svc) 504 : _M_ptr(__svc, &_S_deleter<_Service>), _M_active(true) { } 505 506 std::unique_ptr
_M_ptr; 507 bool _M_active; 508 }; 509 510 #if defined(_GLIBCXX_HAS_GTHREADS) 511 using mutex_type = std::mutex; 512 #else 513 struct mutex_type 514 { 515 void lock() const { } 516 void unlock() const { } 517 }; 518 #endif 519 mutable mutex_type _M_mutex; 520 521 // Sorted in order of beginning of service object lifetime. 522 std::list<_ServicePtr> _M_services; 523 524 template
525 service* 526 _M_add_svc(_Args&&... __args) 527 { 528 _M_services.push_back( 529 _ServicePtr{new _Service{*this, std::forward<_Args>(__args)...}} ); 530 return _M_services.back()._M_ptr.get(); 531 } 532 533 using __key_type = void(*)(); 534 535 template
536 static __key_type 537 _S_key() { return reinterpret_cast<__key_type>(&_S_key<_Key>); } 538 539 std::unordered_map<__key_type, service*> _M_keys; 540 541 template
542 friend typename _Service::key_type& 543 use_service(execution_context&); 544 545 template
546 friend _Service& 547 make_service(execution_context&, _Args&&...); 548 549 template
550 friend bool 551 has_service(const execution_context&) noexcept; 552 }; 553 554 // service access: 555 556 template
557 typename _Service::key_type& 558 use_service(execution_context& __ctx) 559 { 560 using _Key = typename _Service::key_type; 561 static_assert(is_base_of
::value, 562 "a service type must derive from execution_context::service"); 563 static_assert(is_base_of<_Key, _Service>::value, 564 "a service type must match or derive from its key_type"); 565 auto __key = execution_context::_S_key<_Key>(); 566 lock_guard
__lock(__ctx._M_mutex); 567 auto& __svc = __ctx._M_keys[__key]; 568 if (__svc == nullptr) 569 { 570 __try { 571 __svc = __ctx._M_add_svc<_Service>(); 572 } __catch(...) { 573 __ctx._M_keys.erase(__key); 574 __throw_exception_again; 575 } 576 } 577 return static_cast<_Key&>(*__svc); 578 } 579 580 template
581 _Service& 582 make_service(execution_context& __ctx, _Args&&... __args) 583 { 584 using _Key = typename _Service::key_type; 585 static_assert(is_base_of
::value, 586 "a service type must derive from execution_context::service"); 587 static_assert(is_base_of<_Key, _Service>::value, 588 "a service type must match or derive from its key_type"); 589 auto __key = execution_context::_S_key<_Key>(); 590 lock_guard
__lock(__ctx._M_mutex); 591 auto& __svc = __ctx._M_keys[__key]; 592 if (__svc != nullptr) 593 throw service_already_exists(); 594 __try { 595 __svc = __ctx._M_add_svc<_Service>(std::forward<_Args>(__args)...); 596 } __catch(...) { 597 __ctx._M_keys.erase(__key); 598 __throw_exception_again; 599 } 600 return static_cast<_Service&>(*__svc); 601 } 602 603 template
604 inline bool 605 has_service(const execution_context& __ctx) noexcept 606 { 607 using _Key = typename _Service::key_type; 608 static_assert(is_base_of
::value, 609 "a service type must derive from execution_context::service"); 610 static_assert(is_base_of<_Key, _Service>::value, 611 "a service type must match or derive from its key_type"); 612 lock_guard
__lock(__ctx._M_mutex); 613 return __ctx._M_keys.count(execution_context::_S_key<_Key>()); 614 } 615 616 template
> 617 struct __is_executor_impl : false_type 618 { }; 619 620 // Check Executor requirements. 621 template
> 622 auto 623 __executor_reqs(_Up* __x = 0, const _Up* __cx = 0, void(*__f)() = 0, 624 const allocator
& __a = {}) 625 -> enable_if_t<__is_value_constructible<_Tp>::value, __void_t< 626 decltype(*__cx == *__cx), 627 decltype(*__cx != *__cx), 628 decltype(__x->context()), 629 decltype(__x->on_work_started()), 630 decltype(__x->on_work_finished()), 631 decltype(__x->dispatch(std::move(__f), __a)), 632 decltype(__x->post(std::move(__f), __a)), 633 decltype(__x->defer(std::move(__f), __a)) 634 >>; 635 636 template
637 struct __is_executor_impl<_Tp, decltype(__executor_reqs<_Tp>())> 638 : true_type 639 { }; 640 641 template
642 struct is_executor : __is_executor_impl<_Tp> 643 { }; 644 645 template
646 constexpr bool is_executor_v = is_executor<_Tp>::value; 647 648 template
> 649 struct __uses_executor_impl : false_type 650 { }; 651 652 template
653 struct __uses_executor_impl<_Tp, _Executor, 654 __void_t
> 655 : is_convertible<_Executor, typename _Tp::executor_type> 656 { }; 657 658 template
659 struct uses_executor : __uses_executor_impl<_Tp, _Executor>::type 660 { }; 661 662 template
663 constexpr bool uses_executor_v = uses_executor<_Tp, _Executor>::value; 664 665 template
666 class executor_binder 667 { 668 struct __use_exec { }; 669 670 public: 671 // types: 672 673 using target_type = _Tp; 674 using executor_type = _Executor; 675 676 // construct / copy / destroy: 677 678 executor_binder(_Tp __t, const _Executor& __ex) 679 : executor_binder(__use_exec{}, std::move(__t), __ex) 680 { } 681 682 executor_binder(const executor_binder&) = default; 683 executor_binder(executor_binder&&) = default; 684 685 template
686 executor_binder(const executor_binder<_Up, _OtherExecutor>& __other) 687 : executor_binder(__use_exec{}, __other.get(), __other.get_executor()) 688 { } 689 690 template
691 executor_binder(executor_binder<_Up, _OtherExecutor>&& __other) 692 : executor_binder(__use_exec{}, std::move(__other.get()), 693 __other.get_executor()) 694 { } 695 696 template
697 executor_binder(executor_arg_t, const _Executor& __ex, 698 const executor_binder<_Up, _OtherExecutor>& __other) 699 : executor_binder(__use_exec{}, __other.get(), __ex) 700 { } 701 702 template
703 executor_binder(executor_arg_t, const _Executor& __ex, 704 executor_binder<_Up, _OtherExecutor>&& __other) 705 : executor_binder(__use_exec{}, std::move(__other.get()), __ex) 706 { } 707 708 ~executor_binder(); 709 710 // executor binder access: 711 712 _Tp& get() noexcept { return _M_target; } 713 const _Tp& get() const noexcept { return _M_target; } 714 executor_type get_executor() const noexcept { return _M_ex; } 715 716 // executor binder invocation: 717 718 template
719 result_of_t<_Tp&(_Args&&...)> 720 operator()(_Args&&... __args) 721 { return std::__invoke(get(), std::forward<_Args>(__args)...); } 722 723 template
724 result_of_t
725 operator()(_Args&&... __args) const 726 { return std::__invoke(get(), std::forward<_Args>(__args)...); } 727 728 private: 729 template
730 using __use_exec_cond 731 = __and_
, 732 is_constructible<_Tp, executor_arg_t, _Executor, _Up>>; 733 734 template
::value>> 736 executor_binder(__use_exec, _Up&& __u, _Exec&& __ex) 737 : _M_ex(std::forward<_Exec>(__ex)), 738 _M_target(executor_arg, _M_ex, std::forward<_Up>(__u)) 739 { } 740 741 template
::value>> 743 executor_binder(__use_exec, _Up&& __u, const _Exec& __ex) 744 : _M_ex(std::forward<_Exec>(__ex)), 745 _M_target(std::forward<_Up>(__u)) 746 { } 747 748 _Executor _M_ex; 749 _Tp _M_target; 750 }; 751 752 template
753 class async_result
, _Signature> 754 { 755 using __inner = async_result<_Tp, _Signature>; 756 757 public: 758 using completion_handler_type = 759 executor_binder
; 760 761 using return_type = typename __inner::return_type; 762 763 explicit 764 async_result(completion_handler_type& __h) 765 : _M_target(__h.get()) { } 766 767 async_result(const async_result&) = delete; 768 async_result& operator=(const async_result&) = delete; 769 770 return_type get() { return _M_target.get(); } 771 772 private: 773 __inner _M_target; 774 }; 775 776 template
777 struct associated_allocator
, _ProtoAlloc> 778 { 779 using type = associated_allocator_t<_Tp, _ProtoAlloc>; 780 781 static type 782 get(const executor_binder<_Tp, _Executor>& __b, 783 const _ProtoAlloc& __a = _ProtoAlloc()) noexcept 784 { return associated_allocator<_Tp, _ProtoAlloc>::get(__b.get(), __a); } 785 }; 786 787 template
788 struct associated_executor
, _Executor1> 789 { 790 using type = _Executor; 791 792 static type 793 get(const executor_binder<_Tp, _Executor>& __b, 794 const _Executor1& = _Executor1()) noexcept 795 { return __b.get_executor(); } 796 }; 797 798 template
799 class executor_work_guard 800 { 801 public: 802 // types: 803 804 using executor_type = _Executor; 805 806 // construct / copy / destroy: 807 808 explicit 809 executor_work_guard(const executor_type& __ex) noexcept 810 : _M_ex(__ex), _M_owns(true) 811 { _M_ex.on_work_started(); } 812 813 executor_work_guard(const executor_work_guard& __other) noexcept 814 : _M_ex(__other._M_ex), _M_owns(__other._M_owns) 815 { 816 if (_M_owns) 817 _M_ex.on_work_started(); 818 } 819 820 executor_work_guard(executor_work_guard&& __other) noexcept 821 : _M_ex(__other._M_ex), _M_owns(__other._M_owns) 822 { __other._M_owns = false; } 823 824 executor_work_guard& operator=(const executor_work_guard&) = delete; 825 826 ~executor_work_guard() 827 { 828 if (_M_owns) 829 _M_ex.on_work_finished(); 830 } 831 832 // executor work guard observers: 833 834 executor_type get_executor() const noexcept { return _M_ex; } 835 836 bool owns_work() const noexcept { return _M_owns; } 837 838 // executor work guard modifiers: 839 840 void reset() noexcept 841 { 842 if (_M_owns) 843 _M_ex.on_work_finished(); 844 _M_owns = false; 845 } 846 847 private: 848 _Executor _M_ex; 849 bool _M_owns; 850 }; 851 852 853 class system_context : public execution_context 854 { 855 public: 856 // types: 857 858 using executor_type = system_executor; 859 860 // construct / copy / destroy: 861 862 system_context() = delete; 863 system_context(const system_context&) = delete; 864 system_context& operator=(const system_context&) = delete; 865 866 ~system_context() 867 { 868 stop(); 869 join(); 870 } 871 872 // system_context operations: 873 874 executor_type get_executor() noexcept; 875 876 void stop() 877 { 878 lock_guard
__lock(_M_mtx); 879 _M_stopped = true; 880 _M_cv.notify_all(); 881 } 882 883 bool stopped() const noexcept 884 { 885 lock_guard
__lock(_M_mtx); 886 return _M_stopped; 887 } 888 889 void join() 890 { 891 if (_M_thread.joinable()) 892 _M_thread.join(); 893 } 894 895 private: 896 friend system_executor; 897 898 struct __tag { explicit __tag() = default; }; 899 system_context(__tag) { } 900 901 #ifndef _GLIBCXX_HAS_GTHREADS 902 struct thread 903 { 904 bool joinable() const { return false; } 905 void join() { } 906 }; 907 struct condition_variable 908 { 909 void notify_all() { } 910 }; 911 #endif 912 913 thread _M_thread; 914 mutable mutex_type _M_mtx; // XXX can we reuse base's _M_mutex? 915 condition_variable _M_cv; 916 queue
> _M_tasks; 917 bool _M_stopped = false; 918 919 #ifdef _GLIBCXX_HAS_GTHREADS 920 void 921 _M_run() 922 { 923 while (true) 924 { 925 function
__f; 926 { 927 unique_lock
__lock(_M_mtx); 928 _M_cv.wait(__lock, 929 [this]{ return _M_stopped || !_M_tasks.empty(); }); 930 if (_M_stopped) 931 return; 932 __f = std::move(_M_tasks.front()); 933 _M_tasks.pop(); 934 } 935 __f(); 936 } 937 } 938 #endif 939 940 void 941 _M_post(std::function
__f __attribute__((__unused__))) 942 { 943 lock_guard
__lock(_M_mtx); 944 if (_M_stopped) 945 return; 946 #ifdef _GLIBCXX_HAS_GTHREADS 947 if (!_M_thread.joinable()) 948 _M_thread = std::thread(&system_context::_M_run, this); 949 _M_tasks.push(std::move(__f)); // XXX allocator not used 950 _M_cv.notify_one(); 951 #else 952 __throw_system_error(EOPNOTSUPP); 953 #endif 954 } 955 956 static system_context& 957 _S_get() noexcept 958 { 959 static system_context __sc(__tag{}); 960 return __sc; 961 } 962 }; 963 964 class system_executor 965 { 966 public: 967 // executor operations: 968 969 system_executor() { } 970 971 system_context& 972 context() const noexcept { return system_context::_S_get(); } 973 974 void on_work_started() const noexcept { } 975 void on_work_finished() const noexcept { } 976 977 template
978 void 979 dispatch(_Func&& __f, const _ProtoAlloc& __a) const 980 { decay_t<_Func>{std::forward<_Func>(__f)}(); } 981 982 template
983 void 984 post(_Func&& __f, const _ProtoAlloc&) const // XXX allocator not used 985 { 986 system_context::_S_get()._M_post(std::forward<_Func>(__f)); 987 } 988 989 template
990 void 991 defer(_Func&& __f, const _ProtoAlloc& __a) const 992 { post(std::forward<_Func>(__f), __a); } 993 }; 994 995 inline system_executor 996 system_context::get_executor() noexcept 997 { return {}; } 998 999 class bad_executor : public std::exception 1000 { 1001 virtual const char* what() const noexcept { return "bad executor"; } 1002 }; 1003 1004 inline void __throw_bad_executor() // TODO make non-inline 1005 { 1006 #if __cpp_exceptions 1007 throw bad_executor(); 1008 #else 1009 __builtin_abort(); 1010 #endif 1011 } 1012 1013 class executor 1014 { 1015 template
1016 using _Context_t = decltype(std::declval<_Executor&>().context()); 1017 1018 public: 1019 // construct / copy / destroy: 1020 1021 executor() noexcept = default; 1022 1023 executor(nullptr_t) noexcept { } 1024 executor(const executor&) noexcept = default; 1025 executor(executor&&) noexcept = default; 1026 1027 template
>>> 1029 executor(_Executor __e) 1030 : _M_target(make_shared<_Tgt1<_Executor>>(std::move(__e))) 1031 { } 1032 1033 template
>>> 1035 executor(allocator_arg_t, const _ProtoAlloc& __a, _Executor __e) 1036 : _M_target(allocate_shared<_Tgt2<_Executor, _ProtoAlloc>>(__a, 1037 std::move(__e), __a)) 1038 { } 1039 1040 executor& operator=(const executor&) noexcept = default; 1041 executor& operator=(executor&&) noexcept = default; 1042 1043 executor& 1044 operator=(nullptr_t) noexcept 1045 { 1046 _M_target = nullptr; 1047 return *this; 1048 } 1049 1050 template
1051 executor& 1052 operator=(_Executor __e) 1053 { 1054 executor(std::move(__e)).swap(*this); 1055 return *this; 1056 } 1057 1058 ~executor() = default; 1059 1060 // executor modifiers: 1061 1062 void 1063 swap(executor& __other) noexcept 1064 { _M_target.swap(__other._M_target); } 1065 1066 template
1067 void 1068 assign(_Executor __e, const _Alloc& __a) 1069 { executor(allocator_arg, __a, std::move(__e)).swap(*this); } 1070 1071 // executor operations: 1072 1073 execution_context& 1074 context() const noexcept 1075 { 1076 __glibcxx_assert( _M_target ); 1077 return _M_target->context(); 1078 } 1079 1080 void 1081 on_work_started() const noexcept 1082 { 1083 __glibcxx_assert( _M_target ); 1084 return _M_target->on_work_started(); 1085 } 1086 1087 void 1088 on_work_finished() const noexcept 1089 { 1090 __glibcxx_assert( _M_target ); 1091 return _M_target->on_work_finished(); 1092 } 1093 1094 template
1095 void 1096 dispatch(_Func&& __f, const _Alloc& __a) const 1097 { 1098 if (!_M_target) 1099 __throw_bad_executor(); 1100 // _M_target->dispatch({allocator_arg, __a, std::forward<_Func>(__f)}); 1101 _M_target->dispatch(std::forward<_Func>(__f)); 1102 } 1103 1104 template
1105 void 1106 post(_Func&& __f, const _Alloc& __a) const 1107 { 1108 if (!_M_target) 1109 __throw_bad_executor(); 1110 // _M_target->post({allocator_arg, __a, std::forward<_Func>(__f)}); 1111 _M_target->post(std::forward<_Func>(__f)); 1112 } 1113 1114 template
1115 void 1116 defer(_Func&& __f, const _Alloc& __a) const 1117 { 1118 if (!_M_target) 1119 __throw_bad_executor(); 1120 // _M_target->defer({allocator_arg, __a, std::forward<_Func>(__f)}); 1121 _M_target->defer(std::forward<_Func>(__f)); 1122 } 1123 1124 // executor capacity: 1125 1126 explicit operator bool() const noexcept 1127 { return static_cast
(_M_target); } 1128 1129 // executor target access: 1130 1131 #if __cpp_rtti 1132 const type_info& 1133 target_type() const noexcept 1134 { 1135 if (_M_target) 1136 return *static_cast
(_M_target->target_type()); 1137 return typeid(void); 1138 } 1139 #endif 1140 1141 template
1142 _Executor* 1143 target() noexcept 1144 { 1145 void* __p = nullptr; 1146 if (_M_target) 1147 { 1148 if (_M_target->_M_func == &_Tgt1
>::_S_func) 1149 __p = _M_target->_M_func(_M_target.get(), nullptr); 1150 #if __cpp_rtti 1151 else 1152 __p = _M_target->target(&typeid(_Executor)); 1153 #endif 1154 } 1155 return static_cast<_Executor*>(__p); 1156 } 1157 1158 template
1159 const _Executor* 1160 target() const noexcept 1161 { 1162 const void* __p = nullptr; 1163 if (_M_target) 1164 { 1165 if (_M_target->_M_func == &_Tgt1
>::_S_func) 1166 return (_Executor*)_M_target->_M_func(_M_target.get(), nullptr); 1167 #if __cpp_rtti 1168 else 1169 __p = _M_target->target(&typeid(_Executor)); 1170 #endif 1171 } 1172 return static_cast
(__p); 1173 } 1174 1175 private: 1176 struct _Tgt 1177 { 1178 virtual void on_work_started() const noexcept = 0; 1179 virtual void on_work_finished() const noexcept = 0; 1180 virtual execution_context& context() const noexcept = 0; 1181 virtual void dispatch(std::function
) const = 0; 1182 virtual void post(std::function
) const = 0; 1183 virtual void defer(std::function
) const = 0; 1184 virtual const void* target_type() const noexcept = 0; 1185 virtual void* target(const void*) noexcept = 0; 1186 virtual bool _M_equals(_Tgt*) const noexcept = 0; 1187 1188 using _Func = void* (_Tgt*, const _Tgt*); 1189 _Func* _M_func; // Provides access to target without RTTI 1190 }; 1191 1192 template
1193 struct _Tgt1 : _Tgt 1194 { 1195 explicit 1196 _Tgt1(_Ex&& __ex) 1197 : _M_ex(std::move(__ex)) 1198 { this->_M_func = &_S_func; } 1199 1200 void 1201 on_work_started() const noexcept override 1202 { _M_ex.on_work_started(); } 1203 1204 void 1205 on_work_finished() const noexcept override 1206 { _M_ex.on_work_finished(); } 1207 1208 execution_context& 1209 context() const noexcept override 1210 { return _M_ex.context(); } 1211 1212 void 1213 dispatch(std::function
__f) const override 1214 { _M_ex.dispatch(std::move(__f), allocator
()); } 1215 1216 void 1217 post(std::function
__f) const override 1218 { _M_ex.post(std::move(__f), allocator
()); } 1219 1220 void 1221 defer(std::function
__f) const override 1222 { _M_ex.defer(std::move(__f), allocator
()); } 1223 1224 const void* 1225 target_type() const noexcept override 1226 { 1227 #if __cpp_rtti 1228 return &typeid(_Ex); 1229 #else 1230 return nullptr; 1231 #endif 1232 } 1233 1234 void* 1235 target(const void* __ti) noexcept override 1236 { 1237 #if __cpp_rtti 1238 if (*static_cast
(__ti) == typeid(_Ex)) 1239 return std::__addressof(_M_ex); 1240 #endif 1241 return nullptr; 1242 } 1243 1244 bool 1245 _M_equals(_Tgt* __tgt) const noexcept override 1246 { 1247 #if __cpp_rtti 1248 if (const void* __p = __tgt->target(&typeid(_Ex))) 1249 return *static_cast
(__p) == _M_ex; 1250 #endif 1251 return false; 1252 } 1253 1254 _Ex _M_ex [[__no_unique_address__]]; 1255 1256 static void* 1257 _S_func(_Tgt* __p, const _Tgt* __q) noexcept 1258 { 1259 auto& __ex = static_cast<_Tgt1*>(__p)->_M_ex; 1260 if (__q) 1261 { 1262 if (__ex == static_cast
(__q)->_M_ex) 1263 return __p; 1264 else 1265 return nullptr; 1266 } 1267 else 1268 return std::__addressof(__ex); 1269 } 1270 }; 1271 1272 template
1273 struct _Tgt2 : _Tgt1<_Ex> 1274 { 1275 explicit 1276 _Tgt2(_Ex&& __ex, const _Alloc& __a) 1277 : _Tgt1<_Ex>(std::move(__ex)), _M_alloc(__a) { } 1278 1279 void 1280 dispatch(std::function
__f) const override 1281 { this->_M_ex.dispatch(std::move(__f), _M_alloc); } 1282 1283 void 1284 post(std::function
__f) const override 1285 { this->_M_ex.post(std::move(__f), _M_alloc); } 1286 1287 void 1288 defer(std::function
__f) const override 1289 { this->_M_ex.defer(std::move(__f), _M_alloc); } 1290 1291 _Alloc _M_alloc [[__no_unique_address__]]; 1292 }; 1293 1294 // Partial specialization for std::allocator
. 1295 // Don't store the allocator. 1296 template
1297 struct _Tgt2<_Ex, std::allocator<_Tp>> : _Tgt1<_Ex> 1298 { }; 1299 1300 friend bool 1301 operator==(const executor& __a, const executor& __b) noexcept 1302 { 1303 _Tgt* __ta = __a._M_target.get(); 1304 _Tgt* __tb = __b._M_target.get(); 1305 if (__ta == __tb) 1306 return true; 1307 if (!__ta || !__tb) 1308 return false; 1309 if (__ta->_M_func == __tb->_M_func) 1310 return __ta->_M_func(__ta, __tb); 1311 return __ta->_M_equals(__tb); 1312 } 1313 1314 shared_ptr<_Tgt> _M_target; 1315 }; 1316 1317 template<> struct is_executor
: true_type { }; 1318 1319 /// executor comparisons 1320 inline bool 1321 operator==(const executor& __e, nullptr_t) noexcept 1322 { return !__e; } 1323 1324 inline bool 1325 operator==(nullptr_t, const executor& __e) noexcept 1326 { return !__e; } 1327 1328 inline bool 1329 operator!=(const executor& __a, const executor& __b) noexcept 1330 { return !(__a == __b); } 1331 1332 inline bool 1333 operator!=(const executor& __e, nullptr_t) noexcept 1334 { return (bool)__e; } 1335 1336 inline bool 1337 operator!=(nullptr_t, const executor& __e) noexcept 1338 { return (bool)__e; } 1339 1340 /// Swap two executor objects. 1341 inline void swap(executor& __a, executor& __b) noexcept { __a.swap(__b); } 1342 1343 1344 template
1345 struct __dispatcher 1346 { 1347 explicit 1348 __dispatcher(_CompletionHandler& __h) 1349 : _M_h(std::move(__h)), _M_w(net::make_work_guard(_M_h)) 1350 { } 1351 1352 void operator()() 1353 { 1354 auto __alloc = net::get_associated_allocator(_M_h); 1355 _M_w.get_executor().dispatch(std::move(_M_h), __alloc); 1356 _M_w.reset(); 1357 } 1358 1359 _CompletionHandler _M_h; 1360 decltype(net::make_work_guard(_M_h)) _M_w; 1361 }; 1362 1363 template
1364 inline __dispatcher<_CompletionHandler> 1365 __make_dispatcher(_CompletionHandler& __h) 1366 { return __dispatcher<_CompletionHandler>{__h}; } 1367 1368 1369 1370 // dispatch: 1371 1372 template
1373 inline __deduced_t<_CompletionToken, void()> 1374 dispatch(_CompletionToken&& __token) 1375 { 1376 async_completion<_CompletionToken, void()> __cmpl{__token}; 1377 auto __ex = net::get_associated_executor(__cmpl.completion_handler); 1378 auto __alloc = net::get_associated_allocator(__cmpl.completion_handler); 1379 __ex.dispatch(std::move(__cmpl.completion_handler), __alloc); 1380 return __cmpl.result.get(); 1381 } 1382 1383 template
1384 inline 1385 enable_if_t
::value, 1386 __deduced_t<_CompletionToken, void()>> 1387 dispatch(const _Executor& __ex, _CompletionToken&& __token) 1388 { 1389 async_completion<_CompletionToken, void()> __cmpl{__token}; 1390 auto __alloc = net::get_associated_allocator(__cmpl.completion_handler); 1391 __ex.dispatch(net::__make_dispatcher(__cmpl.completion_handler), 1392 __alloc); 1393 return __cmpl.result.get(); 1394 } 1395 1396 template
1397 inline 1398 enable_if_t<__is_exec_context<_ExecutionContext>::value, 1399 __deduced_t<_CompletionToken, void()>> 1400 dispatch(_ExecutionContext& __ctx, _CompletionToken&& __token) 1401 { 1402 return net::dispatch(__ctx.get_executor(), 1403 forward<_CompletionToken>(__token)); 1404 } 1405 1406 // post: 1407 1408 template
1409 inline __deduced_t<_CompletionToken, void()> 1410 post(_CompletionToken&& __token) 1411 { 1412 async_completion<_CompletionToken, void()> __cmpl{__token}; 1413 auto __ex = net::get_associated_executor(__cmpl.completion_handler); 1414 auto __alloc = net::get_associated_allocator(__cmpl.completion_handler); 1415 __ex.post(std::move(__cmpl.completion_handler), __alloc); 1416 return __cmpl.result.get(); 1417 } 1418 1419 template
1420 inline 1421 enable_if_t
::value, 1422 __deduced_t<_CompletionToken, void()>> 1423 post(const _Executor& __ex, _CompletionToken&& __token) 1424 { 1425 async_completion<_CompletionToken, void()> __cmpl{__token}; 1426 auto __alloc = net::get_associated_allocator(__cmpl.completion_handler); 1427 __ex.post(net::__make_dispatcher(__cmpl.completion_handler), __alloc); 1428 return __cmpl.result.get(); 1429 } 1430 1431 template
1432 inline 1433 enable_if_t<__is_exec_context<_ExecutionContext>::value, 1434 __deduced_t<_CompletionToken, void()>> 1435 post(_ExecutionContext& __ctx, _CompletionToken&& __token) 1436 { 1437 return net::post(__ctx.get_executor(), 1438 forward<_CompletionToken>(__token)); 1439 } 1440 1441 // defer: 1442 1443 template
1444 inline __deduced_t<_CompletionToken, void()> 1445 defer(_CompletionToken&& __token) 1446 { 1447 async_completion<_CompletionToken, void()> __cmpl{__token}; 1448 auto __ex = net::get_associated_executor(__cmpl.completion_handler); 1449 auto __alloc = net::get_associated_allocator(__cmpl.completion_handler); 1450 __ex.defer(std::move(__cmpl.completion_handler), __alloc); 1451 return __cmpl.result.get(); 1452 } 1453 1454 template
1455 inline 1456 enable_if_t
::value, 1457 __deduced_t<_CompletionToken, void()>> 1458 defer(const _Executor& __ex, _CompletionToken&& __token) 1459 { 1460 async_completion<_CompletionToken, void()> __cmpl{__token}; 1461 auto __alloc = net::get_associated_allocator(__cmpl.completion_handler); 1462 __ex.defer(net::__make_dispatcher(__cmpl.completion_handler), __alloc); 1463 return __cmpl.result.get(); 1464 } 1465 1466 template
1467 inline 1468 enable_if_t<__is_exec_context<_ExecutionContext>::value, 1469 __deduced_t<_CompletionToken, void()>> 1470 defer(_ExecutionContext& __ctx, _CompletionToken&& __token) 1471 { 1472 return net::defer(__ctx.get_executor(), 1473 forward<_CompletionToken>(__token)); 1474 } 1475 1476 1477 template
1478 class strand 1479 { 1480 public: 1481 // types: 1482 1483 using inner_executor_type = _Executor; 1484 1485 // construct / copy / destroy: 1486 1487 strand(); // TODO make state 1488 1489 explicit strand(_Executor __ex) : _M_inner_ex(__ex) { } // TODO make state 1490 1491 template
1492 strand(allocator_arg_t, const _Alloc& __a, _Executor __ex) 1493 : _M_inner_ex(__ex) { } // TODO make state 1494 1495 strand(const strand& __other) noexcept 1496 : _M_state(__other._M_state), _M_inner_ex(__other._M_inner_ex) { } 1497 1498 strand(strand&& __other) noexcept 1499 : _M_state(std::move(__other._M_state)), 1500 _M_inner_ex(std::move(__other._M_inner_ex)) { } 1501 1502 template
1503 strand(const strand<_OtherExecutor>& __other) noexcept 1504 : _M_state(__other._M_state), _M_inner_ex(__other._M_inner_ex) { } 1505 1506 template
1507 strand(strand<_OtherExecutor>&& __other) noexcept 1508 : _M_state(std::move(__other._M_state)), 1509 _M_inner_ex(std::move(__other._M_inner_ex)) { } 1510 1511 strand& 1512 operator=(const strand& __other) noexcept 1513 { 1514 static_assert(is_copy_assignable<_Executor>::value, 1515 "inner executor type must be CopyAssignable"); 1516 1517 // TODO lock __other 1518 // TODO copy state 1519 _M_inner_ex = __other._M_inner_ex; 1520 return *this; 1521 } 1522 1523 strand& 1524 operator=(strand&& __other) noexcept 1525 { 1526 static_assert(is_move_assignable<_Executor>::value, 1527 "inner executor type must be MoveAssignable"); 1528 1529 // TODO move state 1530 _M_inner_ex = std::move(__other._M_inner_ex); 1531 return *this; 1532 } 1533 1534 template
1535 strand& 1536 operator=(const strand<_OtherExecutor>& __other) noexcept 1537 { 1538 static_assert(is_convertible<_OtherExecutor, _Executor>::value, 1539 "inner executor type must be compatible"); 1540 1541 // TODO lock __other 1542 // TODO copy state 1543 _M_inner_ex = __other._M_inner_ex; 1544 return *this; 1545 } 1546 1547 template
1548 strand& 1549 operator=(strand<_OtherExecutor>&& __other) noexcept 1550 { 1551 static_assert(is_convertible<_OtherExecutor, _Executor>::value, 1552 "inner executor type must be compatible"); 1553 1554 // TODO move state 1555 _M_inner_ex = std::move(__other._M_inner_ex); 1556 return *this; 1557 } 1558 1559 ~strand() 1560 { 1561 // the task queue outlives this object if non-empty 1562 // TODO create circular ref in queue? 1563 } 1564 1565 // strand operations: 1566 1567 inner_executor_type 1568 get_inner_executor() const noexcept 1569 { return _M_inner_ex; } 1570 1571 bool 1572 running_in_this_thread() const noexcept 1573 { return _M_state->running_in_this_thread(); } 1574 1575 execution_context& 1576 context() const noexcept 1577 { return _M_inner_ex.context(); } 1578 1579 void on_work_started() const noexcept { _M_inner_ex.on_work_started(); } 1580 void on_work_finished() const noexcept { _M_inner_ex.on_work_finished(); } 1581 1582 template
1583 void 1584 dispatch(_Func&& __f, const _Alloc& __a) const 1585 { 1586 if (running_in_this_thread()) 1587 decay_t<_Func>{std::forward<_Func>(__f)}(); 1588 else 1589 post(std::forward<_Func>(__f), __a); 1590 } 1591 1592 template
1593 void 1594 post(_Func&& __f, const _Alloc& __a) const; // TODO 1595 1596 template
1597 void 1598 defer(_Func&& __f, const _Alloc& __a) const 1599 { post(std::forward<_Func>(__f), __a); } 1600 1601 private: 1602 friend bool 1603 operator==(const strand& __a, const strand& __b) 1604 { return __a._M_state == __b._M_state; } 1605 1606 // TODO add synchronised queue 1607 struct _State 1608 { 1609 #if defined(_GLIBCXX_HAS_GTHREADS) 1610 bool 1611 running_in_this_thread() const noexcept 1612 { return std::this_thread::get_id() == _M_running_on; } 1613 1614 std::thread::id _M_running_on; 1615 #else 1616 bool running_in_this_thread() const { return true; } 1617 #endif 1618 }; 1619 shared_ptr<_State> _M_state; 1620 _Executor _M_inner_ex; 1621 }; 1622 1623 #if defined(_GLIBCXX_HAS_GTHREADS) 1624 1625 // Completion token for asynchronous operations initiated with use_future. 1626 template
1627 struct __use_future_ct 1628 { 1629 std::tuple<_Func, _Alloc> _M_t; 1630 }; 1631 1632 template
1633 struct __use_future_ct<_Func, std::allocator<_Tp>> 1634 { 1635 _Func _M_f; 1636 }; 1637 1638 template
> 1639 class use_future_t 1640 { 1641 public: 1642 // use_future_t types: 1643 using allocator_type = _ProtoAllocator; 1644 1645 // use_future_t members: 1646 constexpr 1647 use_future_t() 1648 noexcept(is_nothrow_default_constructible<_ProtoAllocator>::value) 1649 : _M_alloc() { } 1650 1651 explicit 1652 use_future_t(const _ProtoAllocator& __a) noexcept : _M_alloc(__a) { } 1653 1654 template
1655 use_future_t<_OtherAllocator> 1656 rebind(const _OtherAllocator& __a) const noexcept 1657 { return use_future_t<_OtherAllocator>(__a); } 1658 1659 allocator_type get_allocator() const noexcept { return _M_alloc; } 1660 1661 template
1662 auto 1663 operator()(_Func&& __f) const 1664 { 1665 using _Token = __use_future_ct
, _ProtoAllocator>; 1666 return _Token{ {std::forward<_Func>(__f), _M_alloc} }; 1667 } 1668 1669 private: 1670 _ProtoAllocator _M_alloc; 1671 }; 1672 1673 template
1674 class use_future_t
> 1675 { 1676 public: 1677 // use_future_t types: 1678 using allocator_type = std::allocator<_Tp>; 1679 1680 // use_future_t members: 1681 constexpr use_future_t() noexcept = default; 1682 1683 explicit 1684 use_future_t(const allocator_type& __a) noexcept { } 1685 1686 template
1687 use_future_t
> 1688 rebind(const std::allocator<_Up>& __a) const noexcept 1689 { return use_future_t
>(__a); } 1690 1691 allocator_type get_allocator() const noexcept { return {}; } 1692 1693 template
1694 auto 1695 operator()(_Func&& __f) const 1696 { 1697 using _Token = __use_future_ct
, allocator_type>; 1698 return _Token{std::forward<_Func>(__f)}; 1699 } 1700 }; 1701 1702 constexpr use_future_t<> use_future = use_future_t<>(); 1703 1704 template
1705 class async_result<__use_future_ct<_Func, _Alloc>, _Res(_Args...)>; 1706 1707 template
1708 struct __use_future_ex; 1709 1710 // Completion handler for asynchronous operations initiated with use_future. 1711 template
1712 struct __use_future_ch 1713 { 1714 template
1715 explicit 1716 __use_future_ch(__use_future_ct<_Func, _Alloc>&& __token) 1717 : _M_f{ std::move(std::get<0>(__token._M_t)) }, 1718 _M_promise{ std::get<1>(__token._M_t) } 1719 { } 1720 1721 template
1722 explicit 1723 __use_future_ch(__use_future_ct<_Func, std::allocator<_Tp>>&& __token) 1724 : _M_f{ std::move(__token._M_f) } 1725 { } 1726 1727 void 1728 operator()(_Args&&... __args) 1729 { 1730 __try 1731 { 1732 _M_promise.set_value(_M_f(std::forward<_Args>(__args)...)); 1733 } 1734 __catch(__cxxabiv1::__forced_unwind&) 1735 { 1736 __throw_exception_again; 1737 } 1738 __catch(...) 1739 { 1740 _M_promise.set_exception(std::current_exception()); 1741 } 1742 } 1743 1744 using __result = result_of_t<_Func(decay_t<_Args>...)>; 1745 1746 future<__result> get_future() { return _M_promise.get_future(); } 1747 1748 private: 1749 template
1750 friend struct __use_future_ex; 1751 1752 _Func _M_f; 1753 mutable promise<__result> _M_promise; 1754 }; 1755 1756 // Specialization of async_result for operations initiated with use_future. 1757 template
1758 class async_result<__use_future_ct<_Func, _Alloc>, _Res(_Args...)> 1759 { 1760 public: 1761 using completion_handler_type = __use_future_ch<_Func, _Args...>; 1762 using return_type = future
; 1763 1764 explicit 1765 async_result(completion_handler_type& __h) 1766 : _M_future(__h.get_future()) 1767 { } 1768 1769 async_result(const async_result&) = delete; 1770 async_result& operator=(const async_result&) = delete; 1771 1772 return_type get() { return std::move(_M_future); } 1773 1774 private: 1775 return_type _M_future; 1776 }; 1777 1778 template
1779 struct __use_future_ex 1780 { 1781 template
1782 __use_future_ex(const _Handler& __h, _Executor __ex) 1783 : _M_t(__h._M_promise, __ex) 1784 { } 1785 1786 template
1787 void 1788 dispatch(_Fn&& __fn) 1789 { 1790 __try 1791 { 1792 std::get<1>(_M_t).dispatch(std::forward<_Fn>(__fn)); 1793 } 1794 __catch(__cxxabiv1::__forced_unwind&) 1795 { 1796 __throw_exception_again; 1797 } 1798 __catch(...) 1799 { 1800 std::get<0>(_M_t).set_exception(std::current_exception()); 1801 } 1802 } 1803 1804 template
1805 void 1806 post(_Fn&& __fn) 1807 { 1808 __try 1809 { 1810 std::get<1>(_M_t).post(std::forward<_Fn>(__fn)); 1811 } 1812 __catch(__cxxabiv1::__forced_unwind&) 1813 { 1814 __throw_exception_again; 1815 } 1816 __catch(...) 1817 { 1818 std::get<0>(_M_t).set_exception(std::current_exception()); 1819 } 1820 } 1821 1822 template
1823 void 1824 defer(_Fn&& __fn) 1825 { 1826 __try 1827 { 1828 std::get<1>(_M_t).defer(std::forward<_Fn>(__fn)); 1829 } 1830 __catch(__cxxabiv1::__forced_unwind&) 1831 { 1832 __throw_exception_again; 1833 } 1834 __catch(...) 1835 { 1836 std::get<0>(_M_t).set_exception(std::current_exception()); 1837 } 1838 } 1839 1840 private: 1841 tuple
&, _Executor> _M_t; 1842 }; 1843 1844 template
1845 struct associated_executor<__use_future_ch<_Func, _Args...>, _Executor> 1846 { 1847 private: 1848 using __handler = __use_future_ch<_Func, _Args...>; 1849 1850 using type = __use_future_ex
; 1851 1852 static type 1853 get(const __handler& __h, const _Executor& __ex) 1854 { return { __h, __ex }; } 1855 }; 1856 1857 #if 0 1858 1859 // [async.use.future.traits] 1860 template
1861 class handler_type
, _Ret(_Args...)> // TODO uglify name 1862 { 1863 template
1864 struct __is_error_result : false_type { }; 1865 1866 template
1867 struct __is_error_result
: true_type { }; 1868 1869 template
1870 struct __is_error_result
: true_type { }; 1871 1872 static exception_ptr 1873 _S_exptr(exception_ptr& __ex) 1874 { return std::move(__ex); } 1875 1876 static exception_ptr 1877 _S_exptr(const error_code& __ec) 1878 { return make_exception_ptr(system_error(__ec)); } 1879 1880 template
1881 struct _Type; 1882 1883 // N == 0 1884 template
1885 struct _Type<_IsError> 1886 { 1887 std::promise
_M_promise; 1888 1889 void 1890 operator()() 1891 { 1892 _M_promise.set_value(); 1893 } 1894 }; 1895 1896 // N == 1, U0 is error_code or exception_ptr 1897 template
1898 struct _Type
1899 { 1900 std::promise
_M_promise; 1901 1902 template
1903 void 1904 operator()(_Arg0&& __a0) 1905 { 1906 if (__a0) 1907 _M_promise.set_exception(_S_exptr(__a0)); 1908 else 1909 _M_promise.set_value(); 1910 } 1911 }; 1912 1913 // N == 1, U0 is not error_code or exception_ptr 1914 template
1915 struct _Type
1916 { 1917 std::promise<_UArg0> _M_promise; 1918 1919 template
1920 void 1921 operator()(_Arg0&& __a0) 1922 { 1923 _M_promise.set_value(std::forward<_Arg0>(__a0)); 1924 } 1925 }; 1926 1927 // N == 2, U0 is error_code or exception_ptr 1928 template
1929 struct _Type
1930 { 1931 std::promise<_UArg1> _M_promise; 1932 1933 template
1934 void 1935 operator()(_Arg0&& __a0, _Arg1&& __a1) 1936 { 1937 if (__a0) 1938 _M_promise.set_exception(_S_exptr(__a0)); 1939 else 1940 _M_promise.set_value(std::forward<_Arg1>(__a1)); 1941 } 1942 }; 1943 1944 // N >= 2, U0 is not error_code or exception_ptr 1945 template
1946 struct _Type
1947 { 1948 static_assert(sizeof...(_UArgs) > 1, "wrong partial specialization"); 1949 1950 std::promise
> _M_promise; 1951 1952 template
1953 void 1954 operator()(_Args&&... __args) 1955 { 1956 _M_promise.set_value( 1957 std::forward_as_tuple(std::forward<_Args>(__args)...)); 1958 } 1959 }; 1960 1961 // N > 2, U0 is error_code or exception_ptr 1962 template
1963 struct _Type
1964 { 1965 static_assert(sizeof...(_UArgs) > 1, "wrong partial specialization"); 1966 1967 std::promise
> _M_promise; 1968 1969 template
1970 void 1971 operator()(_Arg0&& __a0, _Args&&... __args) 1972 { 1973 if (__a0) 1974 _M_promise.set_exception(_S_exptr(__a0)); 1975 else 1976 _M_promise.set_value( 1977 std::forward_as_tuple(std::forward<_Args>(__args)...)); 1978 } 1979 }; 1980 1981 public: 1982 using type = 1983 _Type<__is_error_result<_Args...>::value, decay_t<_Args>...>; 1984 }; 1985 1986 1987 template
1988 struct async_result
, _Ret(_Args...)> 1989 { 1990 using completion_handler_type 1991 = typename handler_type
, _Ret(_Args...)>::type; 1992 1993 using return_type = void; // XXX TODO ???; 1994 1995 explicit 1996 async_result(completion_handler_type& __h) : _M_handler(__h) { } 1997 1998 auto get() { return _M_handler._M_provider.get_future(); } 1999 2000 async_result(const async_result&) = delete; 2001 async_result& operator=(const async_result&) = delete; 2002 2003 return_type get() { return _M_handler._M_promise.get_future(); } 2004 2005 private: 2006 completion_handler_type& _M_handler; 2007 }; 2008 2009 // TODO specialize associated_executor for 2010 // async_result
, Sig>::completion_handler_type 2011 // to use a __use_future_ex 2012 // (probably need to move _Type outside of handler_type so we don't have 2013 // a non-deduced context) 2014 2015 #endif 2016 2017 // [async.packaged.task.specializations] 2018 template
2019 class async_result
, _Signature> 2020 { 2021 public: 2022 using completion_handler_type = packaged_task<_Ret(_Args...)>; 2023 using return_type = future<_Ret>; 2024 2025 explicit 2026 async_result(completion_handler_type& __h) 2027 : _M_future(__h.get_future()) { } 2028 2029 async_result(const async_result&) = delete; 2030 async_result& operator=(const async_result&) = delete; 2031 2032 return_type get() { return std::move(_M_future); } 2033 2034 private: 2035 return_type _M_future; 2036 }; 2037 2038 #endif // _GLIBCXX_HAS_GTHREADS 2039 2040 /// @} 2041 2042 } // namespace v1 2043 } // namespace net 2044 } // namespace experimental 2045 2046 template
2047 struct uses_allocator
2048 : true_type {}; 2049 2050 _GLIBCXX_END_NAMESPACE_VERSION 2051 } // namespace std 2052 2053 #endif // C++14 2054 2055 #endif // _GLIBCXX_EXPERIMENTAL_EXECUTOR
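
The declarations above are easier to follow with a few usage sketches. The first one below is not part of the header; it is a minimal sketch assuming a hosted libstdc++ with thread support, compiled as g++ -std=c++14 -pthread. It contrasts the two basic submission functions: dispatch() may run a callable immediately on the calling thread, while post() never does, handing it instead to the singleton system_context worker thread.

#include <experimental/executor>
#include <cassert>
#include <future>
#include <iostream>
#include <thread>

namespace net = std::experimental::net;

int main()
{
  // dispatch(): the associated executor of a plain lambda is
  // system_executor, whose dispatch() invokes the callable right away
  // on the calling thread.
  std::thread::id ran_on;
  net::dispatch([&ran_on] { ran_on = std::this_thread::get_id(); });
  assert(ran_on == std::this_thread::get_id());

  // post(): never runs the callable inside the call; for system_executor
  // it is queued on the system_context worker thread, so a promise is
  // used here only to wait for it.
  std::promise<std::thread::id> p;
  std::future<std::thread::id> f = p.get_future();
  net::post([&p] { p.set_value(std::this_thread::get_id()); });
  std::cout << "posted task ran on another thread: " << std::boolalpha
            << (f.get() != std::this_thread::get_id()) << '\n';
}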
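
is_executor (via the __executor_reqs check above) is a purely syntactic test: any type providing context(), the on_work_started()/on_work_finished() hooks, dispatch()/post()/defer() taking a callable plus allocator, and equality operators qualifies. The sketch below uses a hypothetical inline_executor (not from the header) to illustrate those requirements, then type-erases it in the polymorphic net::executor wrapper.

#include <experimental/executor>
#include <cassert>
#include <memory>
#include <type_traits>
#include <utility>

namespace net = std::experimental::net;

// Hypothetical executor that runs everything immediately on the calling
// thread.  It stores a pointer to the context it reports from context().
class inline_executor
{
public:
  inline_executor() noexcept = default;

  explicit
  inline_executor(net::execution_context& __ctx) noexcept
  : _M_ctx(std::addressof(__ctx)) { }

  net::execution_context&
  context() const noexcept { return *_M_ctx; }

  // Work-tracking hooks; nothing to track for an inline executor.
  void on_work_started() const noexcept { }
  void on_work_finished() const noexcept { }

  // The three submission functions take a callable and an allocator;
  // here all of them just invoke the callable immediately.
  template<typename _Func, typename _Alloc>
    void
    dispatch(_Func&& __f, const _Alloc&) const
    { std::decay_t<_Func>(std::forward<_Func>(__f))(); }

  template<typename _Func, typename _Alloc>
    void
    post(_Func&& __f, const _Alloc& __a) const
    { dispatch(std::forward<_Func>(__f), __a); }

  template<typename _Func, typename _Alloc>
    void
    defer(_Func&& __f, const _Alloc& __a) const
    { dispatch(std::forward<_Func>(__f), __a); }

private:
  friend bool
  operator==(const inline_executor& __a, const inline_executor& __b) noexcept
  { return __a._M_ctx == __b._M_ctx; }

  friend bool
  operator!=(const inline_executor& __a, const inline_executor& __b) noexcept
  { return __a._M_ctx != __b._M_ctx; }

  net::execution_context* _M_ctx = nullptr;
};

static_assert(net::is_executor<inline_executor>::value,
              "inline_executor satisfies the Executor requirements");

int main()
{
  net::execution_context ctx;
  inline_executor ex(ctx);

  // Any Executor can be type-erased into the polymorphic net::executor.
  net::executor poly = ex;
  bool ran = false;
  poly.dispatch([&ran] { ran = true; }, std::allocator<void>());
  assert(ran && &poly.context() == &ctx);
}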
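
executor_work_guard is the scope guard declared earlier: construction calls on_work_started(), and reset() or destruction calls on_work_finished() exactly once, which is how pending handlers keep a context's event loop alive. A small sketch of that ownership protocol, using system_executor (whose hooks happen to be no-ops) purely to exercise the API:

#include <experimental/executor>
#include <cassert>

namespace net = std::experimental::net;

int main()
{
  net::system_executor ex;

  // make_work_guard() tells the executor that work is outstanding for as
  // long as the guard owns it.
  auto guard = net::make_work_guard(ex);
  assert(guard.owns_work());
  assert(guard.get_executor() == ex);

  // reset() gives the work back early; the destructor then has nothing
  // left to do.
  guard.reset();
  assert(!guard.owns_work());
}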
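
async_result and async_completion are the customization point that lets a single initiating function return different things for different completion tokens. The sketch below defines a hypothetical operation async_answer() (not part of the header) with completion signature void(int); with a plain callable the deduced return type is void and the callable is simply invoked later, here via post() on its associated system_executor.

#include <experimental/executor>
#include <future>
#include <iostream>
#include <utility>

namespace net = std::experimental::net;

// Hypothetical asynchronous operation.  async_completion picks the real
// handler type for the given token and async_result decides what the
// initiating function returns.
template<typename _CompletionToken>
  auto
  async_answer(_CompletionToken&& __token)
  {
    net::async_completion<_CompletionToken, void(int)> __init(__token);

    // Submit the work; the completion handler is invoked later with the
    // result.  A plain lambda's associated executor is system_executor.
    auto __h = std::move(__init.completion_handler);
    net::post([__h]() mutable { __h(42); });

    // For a plain callable this returns void; other tokens can change
    // that through their async_result specialization.
    return __init.result.get();
  }

int main()
{
  std::promise<int> p;
  std::future<int> f = p.get_future();

  async_answer([&p](int __answer) { p.set_value(__answer); });

  std::cout << "the answer is " << f.get() << '\n';
}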
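
execution_context is also a registry of per-context singleton services looked up by key_type, which is what use_service, make_service and has_service above operate on. The sketch below registers a hypothetical logging_service (not from the header) and shows that use_service creates it lazily and then always returns the same instance for that context.

#include <experimental/executor>
#include <cassert>
#include <iostream>

namespace net = std::experimental::net;

// Hypothetical service stored inside an execution_context.
class logging_service : public net::execution_context::service
{
public:
  // The lookup key used by use_service/make_service/has_service.
  using key_type = logging_service;

  explicit
  logging_service(net::execution_context& __ctx)
  : net::execution_context::service(__ctx) { }

  void
  log(const char* __msg)
  { std::cout << "[logging_service] " << __msg << '\n'; }

private:
  // Called by execution_context::shutdown() when the context is destroyed.
  void shutdown() noexcept override { }
};

int main()
{
  net::execution_context ctx;
  assert(!net::has_service<logging_service>(ctx));

  // use_service() creates the service on first use...
  logging_service& svc = net::use_service<logging_service>(ctx);
  svc.log("created lazily");
  assert(net::has_service<logging_service>(ctx));

  // ...and returns the same instance for the lifetime of the context.
  assert(&net::use_service<logging_service>(ctx) == &svc);
}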