/***
* ==++==
*
* Copyright (c) Microsoft Corporation. All rights reserved.
*
* ==--==
* =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
*
* concrt.h
*
* Main public header file for ConcRT. This is the only header file a C++ program must include to use the core concurrency runtime features.
*
* The Agents And Message Blocks Library and the Parallel Patterns Library (PPL) are defined in separate header files.
* =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
****/

#pragma once

#include <crtdefs.h>

#if !(defined (_M_X64) || defined (_M_IX86) || defined (_M_ARM))
#error ERROR: Concurrency Runtime is supported only on X64, X86 and ARM architectures.
#endif  /* !(defined (_M_X64) || defined (_M_IX86) || defined (_M_ARM)) */

#if defined (_M_CEE)
#error ERROR: Concurrency Runtime is not supported when compiling /clr.
#endif  /* defined (_M_CEE) */

#ifndef __cplusplus
#error ERROR: Concurrency Runtime is supported only for C++.
#endif  /* __cplusplus */

#define _CONCRT_H

#include <exception>
#include <sal.h>
#include <limits.h>
#include <crtdbg.h>
#include <guiddef.h>
#include <intrin.h>
#include <new>

#pragma pack(push,_CRT_PACKING)

#pragma push_macro("new")
#undef new

// Forward declare structs needed from Windows header files

struct _SECURITY_ATTRIBUTES;
typedef _SECURITY_ATTRIBUTES* LPSECURITY_ATTRIBUTES;

struct _GROUP_AFFINITY;
typedef _GROUP_AFFINITY* PGROUP_AFFINITY;

// Define essential types needed from Windows header files

typedef unsigned long DWORD;
#ifndef _HRESULT_DEFINED
#define _HRESULT_DEFINED
#ifdef __midl
typedef LONG HRESULT;
#else  /* __midl */
typedef __success(return >= 0) long HRESULT;
#endif  /* __midl */
#endif  /* _HRESULT_DEFINED */
typedef void * HANDLE;

// Undefine Yield that is possibly defined by windows.h, and _YieldProcessor
#undef Yield
#undef _YieldProcessor

#if (defined (_M_IX86) || defined (_M_X64))
#define _YieldProcessor _mm_pause
#else  /* (defined (_M_IX86) || defined (_M_X64)) */
inline void _YieldProcessor() {}
#endif  /* (defined (_M_IX86) || defined (_M_X64)) */

// Make sure the exchange pointer intrinsics work on x86 architecture
#if defined (_M_IX86) && !defined(FIXED_592562) // Leave enabled until conflict with inline function in 8.1 SDK winnt.h header is fixed
#undef _InterlockedExchangePointer
#undef _InterlockedCompareExchangePointer

#define _InterlockedExchangePointer(_Target, _Value) reinterpret_cast<void *>(static_cast<__w64 long>(_InterlockedExchange( \
    static_cast<long volatile *>(reinterpret_cast<__w64 long volatile *>(static_cast<void * volatile *>(_Target))), \
    static_cast<long>(reinterpret_cast<__w64 long>(static_cast<void *>(_Value))))))

#define _InterlockedCompareExchangePointer(_Target, _Exchange, _Comparand) reinterpret_cast<void *>(static_cast<__w64 long>(_InterlockedCompareExchange( \
    static_cast<long volatile *>(reinterpret_cast<__w64 long volatile *>(static_cast<void * volatile *>(_Target))), \
    static_cast<long>(reinterpret_cast<__w64 long>(static_cast<void *>(_Exchange))), \
    static_cast<long>(reinterpret_cast<__w64 long>(static_cast<void *>(_Comparand))))))

#endif  /* defined (_M_IX86) */

#if (defined (_M_IX86) || defined (_M_ARM))

#define _InterlockedIncrementSizeT(_Target) static_cast<size_t>(_InterlockedIncrement(reinterpret_cast<long volatile *>(_Target)))
#define _InterlockedDecrementSizeT(_Target) static_cast<size_t>(_InterlockedDecrement(reinterpret_cast<long volatile *>(_Target)))
#define _InterlockedCompareExchangeSizeT(_Target, _Exchange, _Comparand) static_cast<size_t>(_InterlockedCompareExchange( \
    reinterpret_cast<long volatile *>(_Target), \
    static_cast<long>(_Exchange), \
    static_cast<long>(_Comparand)))

typedef _W64 unsigned long DWORD_PTR, *PDWORD_PTR;

#else  /* (defined (_M_IX86) || defined (_M_ARM)) */

#define _InterlockedIncrementSizeT(_Target) static_cast<size_t>(_InterlockedIncrement64(reinterpret_cast<__int64 volatile *>(_Target)))
#define _InterlockedDecrementSizeT(_Target) static_cast<size_t>(_InterlockedDecrement64(reinterpret_cast<__int64 volatile *>(_Target)))
#define _InterlockedCompareExchangeSizeT(_Target, _Exchange, _Comparand) static_cast<size_t>(_InterlockedCompareExchange64( \
    reinterpret_cast<__int64 volatile *>(_Target), \
    static_cast<__int64>(_Exchange), \
    static_cast<__int64>(_Comparand)))

typedef unsigned __int64 DWORD_PTR, *PDWORD_PTR;

#endif  /* (defined (_M_IX86) || defined (_M_ARM)) */

#if defined (_DEBUG)
#if _MSC_VER
// Turn off compiler warnings that are exacerbated by constructs in this
// file's definitions:

// Warning C4127: conditional expression is constant. This is caused by
// the macros with "do { ... } while (false)" syntax. The syntax is
// a good way to ensure that a statement-like macro can be used in all
// contexts (specifically if statements), but the compiler warns about
// the "while (false)" part.

#define _CONCRT_ASSERT(x)   __pragma (warning (suppress: 4127)) do {_ASSERTE(x); __assume(x);} while(false)
#else
#define _CONCRT_ASSERT(x)   do {_ASSERTE(x); __assume(x);} while(false)
#endif
#else  /* defined (_DEBUG) */
#define _CONCRT_ASSERT(x)   __assume(x)
#endif  /* defined (_DEBUG) */

// Used internally to represent the smallest unit in which to allocate hidden types

typedef void * _CONCRT_BUFFER;
#define _LISTENTRY_SIZE ((2 * sizeof(void *) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER))
#define _SAFERWLIST_SIZE ((3 * sizeof(void *) + 2 * sizeof(long) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER))

/// The Concurrency namespace provides classes and functions that access the Concurrency Runtime,
/// a concurrent programming framework for C++. For more information, see .
/**/
namespace Concurrency
{
/// Pauses the current context for a specified amount of time.
///
/// The number of milliseconds the current context should be paused for. If the parameter is set to
/// the value 0, the current context should yield execution to other runnable contexts before continuing.
///
/// If this method is called on a Concurrency Runtime scheduler context, the scheduler will find a different context to run on the underlying
/// resource. Because the scheduler is cooperative in nature, this context cannot resume exactly after the number of milliseconds specified.
/// If the scheduler is busy executing other tasks that do not cooperatively yield to the scheduler, the wait period could be
/// indefinite.
/**/
_CRTIMP void __cdecl wait(unsigned int _Milliseconds);

/// Allocates a block of memory of the size specified from the Concurrency Runtime Caching Suballocator.
///
/// The number of bytes of memory to allocate.
///
/// A pointer to newly allocated memory.
///
/// For more information about which scenarios in your application could benefit from using the Caching Suballocator,
/// see .
/**/
_CRTIMP void * __cdecl Alloc(size_t _NumBytes);
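// Illustrative usage sketch (not part of the header): pairing Concurrency::Alloc
// with Concurrency::Free. The function name below is hypothetical.
//
//     #include <concrt.h>
//
//     void _Example_use_suballocator()
//     {
//         // Allocate a 64-byte block from the Caching Suballocator.
//         void * _Block = Concurrency::Alloc(64);
//
//         // ... use the memory ...
//
//         // Return it to the suballocator; Free ignores a NULL argument.
//         Concurrency::Free(_Block);
//     }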
/// Releases a block of memory previously allocated by the Alloc method to the Concurrency Runtime Caching Suballocator.
///
/// A pointer to memory previously allocated by the Alloc method which is to be freed. If the parameter
/// is set to the value NULL, this method will ignore it and return immediately.
///
/// For more information about which scenarios in your application could benefit from using the Caching Suballocator,
/// see .
/**/
_CRTIMP void __cdecl Free(_Pre_maybenull_ _Post_invalid_ void * _PAllocation);

/// Concurrency::details contains definitions of support routines in the public namespaces and one or more macros.
/// Users should not directly interact with this internal namespace.
/**/

#ifdef _CRT_USE_WINAPI_FAMILY_DESKTOP_APP
/// Restricts the execution resources used by the Concurrency Runtime internal worker threads to the affinity set specified.
/// It is valid to call this method only before the Resource Manager has been created, or between two Resource Manager lifetimes.
/// It can be invoked multiple times as long as the Resource Manager does not exist at the time of invocation. After an affinity limit
/// has been set, it remains in effect until the next valid call to the set_task_execution_resources method.
/// The affinity mask provided need not be a subset of the process affinity mask. The process affinity will be updated if necessary.
///
/// The affinity mask that the Concurrency Runtime worker threads are to be restricted to. Use this method on a system with greater than 64
/// hardware threads only if you want to limit the Concurrency Runtime to a subset of the current processor group. In general, you should
/// use the version of the method that accepts an array of group affinities as a parameter, to restrict affinity on machines with greater
/// than 64 hardware threads.
///
/// The method will throw an invalid_operation exception if a Resource Manager is present at
/// the time it is invoked, and an invalid_argument exception if the affinity specified results in an empty set of resources.
/// The version of the method that takes an array of group affinities as a parameter should only be used on operating systems with version
/// Windows 7 or higher. Otherwise, an invalid_operation exception is thrown.
/// Programmatically modifying the process affinity after this method has been invoked will not cause the Resource Manager to re-evaluate
/// the affinity it is restricted to. Therefore, all changes to process affinity should be made before calling this method.
/**/
_CRTIMP void __cdecl set_task_execution_resources(DWORD_PTR _ProcessAffinityMask);
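// Illustrative usage sketch (not part of the header): restricting the runtime's
// worker threads to the first four hardware threads. Per the remarks above, this
// must run before the Resource Manager is created (for example, at the top of main).
//
//     #include <concrt.h>
//
//     int main()
//     {
//         // Bits 0-3 set: allow processors 0 through 3 only.
//         Concurrency::set_task_execution_resources(0xF);
//
//         // ... create schedulers / run parallel work ...
//         return 0;
//     }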
/// Restricts the execution resources used by the Concurrency Runtime internal worker threads to the affinity set specified.
/// It is valid to call this method only before the Resource Manager has been created, or between two Resource Manager lifetimes.
/// It can be invoked multiple times as long as the Resource Manager does not exist at the time of invocation. After an affinity limit
/// has been set, it remains in effect until the next valid call to the set_task_execution_resources method.
/// The affinity mask provided need not be a subset of the process affinity mask. The process affinity will be updated if necessary.
///
/// The number of GROUP_AFFINITY entries in the array specified by the parameter .
///
/// An array of GROUP_AFFINITY entries.
///
/// The method will throw an invalid_operation exception if a Resource Manager is present at
/// the time it is invoked, and an invalid_argument exception if the affinity specified results in an empty set of resources.
/// The version of the method that takes an array of group affinities as a parameter should only be used on operating systems with version
/// Windows 7 or higher. Otherwise, an invalid_operation exception is thrown.
/// Programmatically modifying the process affinity after this method has been invoked will not cause the Resource Manager to re-evaluate
/// the affinity it is restricted to. Therefore, all changes to process affinity should be made before calling this method.
/**/
_CRTIMP void __cdecl set_task_execution_resources(unsigned short _Count, PGROUP_AFFINITY _PGroupAffinity);
#endif  /* _CRT_USE_WINAPI_FAMILY_DESKTOP_APP */

/// An elementary abstraction for a task, defined as void (__cdecl * TaskProc)(void *). A TaskProc is called to
/// invoke the body of a task.
/**/
typedef void (__cdecl * TaskProc)(void *);

//
// Forward declarations:
//
class Scheduler;
class ScheduleGroup;
class Context;

namespace details
{
    //
    // Forward declarations:
    //
    class ContextBase;
    class _TaskCollectionBase;

    //
    // A utility to hide operator delete from certain objects while still allowing the runtime to delete them internally.
    //
    template <class _T>
    void _InternalDeleteHelper(_T * _PObject)
    {
        delete _PObject;
    }

    // The purpose of the class is solely to direct allocations of ConcRT classes
    // through a single point, using an internal allocator.
    struct _AllocBase
    {
        // Standard operator new
        void * operator new(size_t _Size)
        {
            return Concurrency::Alloc(_Size);
        }

        // Standard operator delete
        void operator delete(void * _Ptr) throw()
        {
            Concurrency::Free(_Ptr);
        }

        // Standard operator new, no-throw version
        void * operator new(size_t _Size, const std::nothrow_t&) throw()
        {
            void * _Ptr;

            try
            {
                _Ptr = Concurrency::Alloc(_Size);
            }
            catch(...)
            {
                _Ptr = NULL;
            }

            return (_Ptr);
        }

        // Standard operator delete, no-throw version
        void operator delete(void * _Ptr, const std::nothrow_t&) throw()
        {
            operator delete(_Ptr);
        }

        // Standard operator new array
        void * operator new[](size_t _Size)
        {
            return operator new(_Size);
        }

        // Standard operator delete array
        void operator delete[](void * _Ptr) throw()
        {
            operator delete(_Ptr);
        }

        // Standard operator new array, no-throw version
        void * operator new[](size_t _Size, const std::nothrow_t& _No_throw) throw ()
        {
            return operator new(_Size, _No_throw);
        }

        // Standard operator delete array, no-throw version
        void operator delete[](void * _Ptr, const std::nothrow_t& _No_throw) throw()
        {
            operator delete(_Ptr, _No_throw);
        }

        // Standard operator new with void* placement
        void * operator new(size_t, void * _Location) throw()
        {
            return _Location;
        }

        // Standard operator delete with void* placement
        void operator delete(void *, void *) throw()
        {
        }

        // Standard operator new array with void* placement
        void * __cdecl operator new[](size_t, void * _Location) throw()
        {
            return _Location;
        }

        // Standard operator delete array with void* placement
        void __cdecl operator delete[](void *, void *) throw()
        {
        }
    };
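    // Illustrative sketch (not part of the header): deriving from _AllocBase routes
    // every form of operator new/delete for the derived type through the Caching
    // Suballocator. The type _MyNode and function below are hypothetical.
    //
    //     struct _MyNode : Concurrency::details::_AllocBase
    //     {
    //         int _M_value;
    //     };
    //
    //     void _Example_alloc_base()
    //     {
    //         _MyNode * _PNode = new _MyNode();   // memory comes from Concurrency::Alloc
    //         delete _PNode;                      // memory returns via Concurrency::Free
    //     }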
    // Stubs to allow the header files to access runtime functionality for WINAPI_PARTITION apps.
    class _Context
    {
    public:
        _CRTIMP _Context(::Concurrency::Context * _PContext = NULL) : _M_pContext(_PContext) {}
        _CRTIMP static _Context __cdecl _CurrentContext();
        _CRTIMP static void __cdecl _Yield();
        _CRTIMP static void __cdecl _Oversubscribe(bool _BeginOversubscription);
        _CRTIMP bool _IsSynchronouslyBlocked() const;
    private:
        ::Concurrency::Context * _M_pContext;
    };

    class _Scheduler
    {
    public:
        _CRTIMP _Scheduler(::Concurrency::Scheduler * _PScheduler = NULL) : _M_pScheduler(_PScheduler) {}
        _CRTIMP unsigned int _Reference();
        _CRTIMP unsigned int _Release();
        _CRTIMP Concurrency::Scheduler * _GetScheduler() { return _M_pScheduler; }
    private:
        ::Concurrency::Scheduler * _M_pScheduler;
    };

    class _CurrentScheduler
    {
    public:
        _CRTIMP static void __cdecl _ScheduleTask(TaskProc _Proc, void * _Data);
        _CRTIMP static unsigned int __cdecl _Id();
        _CRTIMP static unsigned int __cdecl _GetNumberOfVirtualProcessors();
        _CRTIMP static _Scheduler __cdecl _Get();
    };

    //
    // Wrappers for atomic access
    //
    template <size_t _Size>
    struct _Subatomic_impl { };

    template<>
    struct _Subatomic_impl<4>
    {
        template <typename _Ty>
        static void _StoreWithRelease(volatile _Ty& _Location, _Ty _Rhs)
        {
            // For the compiler, a volatile write has release semantics. In addition, on ARM,
            // the volatile write will emit a data memory barrier before the write.
            _Location = _Rhs;
        }

        template <typename _Ty>
        static _Ty _LoadWithAquire(volatile _Ty& _Location)
        {
            // For the compiler, a volatile read has acquire semantics. In addition, on ARM,
            // the volatile read will emit a data memory barrier after the read.
            return _Location;
        }

        template <typename _Ty>
        static _Ty _CompareAndSwap(volatile _Ty& _Location, _Ty _NewValue, _Ty _Comperand)
        {
            return (_Ty)_InterlockedCompareExchange((volatile long*)&_Location, (long)_NewValue, (long)_Comperand);
        }

        template <typename _Ty>
        static _Ty _FetchAndAdd(volatile _Ty& _Location, _Ty _Addend)
        {
            return (_Ty)_InterlockedExchangeAdd((volatile long*)&_Location, (long)_Addend);
        }

        template <typename _Ty>
        static _Ty _Increment(volatile _Ty& _Location)
        {
            return (_Ty)_InterlockedIncrement((volatile long*)&_Location);
        }

        template <typename _Ty>
        static _Ty _Decrement(volatile _Ty& _Location)
        {
            return (_Ty)_InterlockedDecrement((volatile long*)&_Location);
        }
    };

#if defined (_M_X64)
    template<>
    struct _Subatomic_impl<8>
    {
        template <typename _Ty>
        static void _StoreWithRelease(volatile _Ty& _Location, _Ty _Rhs)
        {
            // For the compiler, a volatile write has release semantics.
            _Location = _Rhs;
        }

        template <typename _Ty>
        static _Ty _LoadWithAquire(volatile _Ty& _Location)
        {
            // For the compiler, a volatile read has acquire semantics.
            return _Location;
        }

        template <typename _Ty>
        static _Ty _CompareAndSwap(volatile _Ty& _Location, _Ty _NewValue, _Ty _Comperand)
        {
            return (_Ty)_InterlockedCompareExchange64((volatile __int64*)&_Location, (__int64)_NewValue, (__int64)_Comperand);
        }

        template <typename _Ty>
        static _Ty _FetchAndAdd(volatile _Ty& _Location, _Ty _Addend)
        {
            return (_Ty)_InterlockedExchangeAdd64((volatile __int64*)&_Location, (__int64)_Addend);
        }

        template <typename _Ty>
        static _Ty _Increment(volatile _Ty& _Location)
        {
            return (_Ty)_InterlockedIncrement64((volatile __int64*)&_Location);
        }

        template <typename _Ty>
        static _Ty _Decrement(volatile _Ty& _Location)
        {
            return (_Ty)_InterlockedDecrement64((volatile __int64*)&_Location);
        }
    };
#endif  /* defined (_M_X64) */
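    // Illustrative sketch (not part of the header): _Subatomic_impl dispatches on
    // operand size, so _Subatomic_impl<4> maps onto the 32-bit interlocked
    // intrinsics. A hypothetical direct use:
    //
    //     volatile long _Counter = 0;
    //
    //     void _Example_subatomic_impl()
    //     {
    //         // Equivalent to _InterlockedIncrement(&_Counter).
    //         Concurrency::details::_Subatomic_impl<4>::_Increment(_Counter);
    //     }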
    //
    // Wrapper for atomic access. Only works for 4-byte or 8-byte types (for example, int, long, long long, size_t, pointer).
    // Anything else might fail to compile.
    //
    template <typename _Ty>
    class _Subatomic
    {
    private:
        volatile _Ty _M_value;

    public:
        operator _Ty() const volatile
        {
            return _Subatomic_impl<sizeof(_Ty)>::_LoadWithAquire(_M_value);
        }

        _Ty operator=(_Ty _Rhs)
        {
            _Subatomic_impl<sizeof(_Ty)>::_StoreWithRelease(_M_value, _Rhs);
            return _Rhs;
        }

        _Ty _CompareAndSwap(_Ty _NewValue, _Ty _Comperand)
        {
            return _Subatomic_impl<sizeof(_Ty)>::_CompareAndSwap(_M_value, _NewValue, _Comperand);
        }

        _Ty _FetchAndAdd(_Ty _Addend)
        {
            return _Subatomic_impl<sizeof(_Ty)>::_FetchAndAdd(_M_value, _Addend);
        }

        _Ty operator++()
        {
            return _Subatomic_impl<sizeof(_Ty)>::_Increment(_M_value);
        }

        _Ty operator++(int)
        {
            return _Subatomic_impl<sizeof(_Ty)>::_Increment(_M_value) - 1;
        }

        _Ty operator--()
        {
            return _Subatomic_impl<sizeof(_Ty)>::_Decrement(_M_value);
        }

        _Ty operator--(int)
        {
            return _Subatomic_impl<sizeof(_Ty)>::_Decrement(_M_value) + 1;
        }

        _Ty operator+=(_Ty _Addend)
        {
            return _FetchAndAdd(_Addend) + _Addend;
        }
    };

    //
    // An internal exception that is used for cancellation. Users do not "see" this exception except through the
    // resulting stack unwind. This exception should never be intercepted by user code. It is intended
    // for use by the runtime only.
    //
    class _Interruption_exception : public std::exception
    {
    public:
        explicit _CRTIMP _Interruption_exception(const char * _Message) throw();
        _CRTIMP _Interruption_exception() throw();
    };

    //
    // An RAII class that spin-waits on a "rented" flag.
    //
    class _SpinLock
    {
    private:
        volatile long& _M_flag;

    public:
        _CRTIMP _SpinLock(volatile long& _Flag);
        _CRTIMP ~_SpinLock();

    private:
        _SpinLock(const _SpinLock&);
        void operator=(const _SpinLock&);
    };

    //
    // A class that holds the count used for spinning and is dependent
    // on the number of hardware threads
    //
    struct _SpinCount
    {
        // Initializes the spinCount to either 0 or SPIN_COUNT, depending on
        // the number of hardware threads.
        static void __cdecl _Initialize();

        // Returns the current value of s_spinCount
        _CRTIMP static unsigned int __cdecl _Value();

        // The number of iterations used for spinning
        static unsigned int _S_spinCount;
    };

    /// Default method for yielding during a spin wait
    /**/
    void _CRTIMP __cdecl _UnderlyingYield();

    /// Returns the hardware concurrency available to the Concurrency Runtime, taking into account process affinity, or any restrictions
    /// in place because of the set_task_execution_resources method.
    /**/
    unsigned int _CRTIMP __cdecl _GetConcurrency();
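    // Illustrative sketch (not part of the header): _Subatomic wraps a 4- or 8-byte
    // value with acquire loads, release stores, and interlocked read-modify-write
    // operations. A hypothetical reference counter:
    //
    //     Concurrency::details::_Subatomic<long> _RefCount;
    //
    //     void _Example_subatomic()
    //     {
    //         _RefCount = 1;              // store with release semantics
    //         ++_RefCount;                // interlocked increment under the covers
    //         if (--_RefCount == 0)       // interlocked decrement under the covers
    //         {
    //             // last reference released
    //         }
    //         long _Snapshot = _RefCount; // load with acquire semantics
    //         (void)_Snapshot;
    //     }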
    /// Implements busy wait with no backoff
    /**/
    template <unsigned int _YieldCount = 1>
    class _CRTIMP _SpinWait
    {
    public:
        typedef void (__cdecl *_YieldFunction)();

        /// Construct a spin wait object
        /**/
        _SpinWait(_YieldFunction _YieldMethod = _UnderlyingYield)
            : _M_yieldFunction(_YieldMethod), _M_state(_StateInitial)
        {
            // Defer initialization of other fields to _SpinOnce().
        }

        /// Set a dynamic spin count.
        /**/
        void _SetSpinCount(unsigned int _Count)
        {
            _CONCRT_ASSERT(_M_state == _StateInitial);
            if (_Count == 0)
            {
                // Specify a count of 0 if we are on a single proc.
                _M_state = _StateSingle;
            }
            else
            {
                _M_currentSpin = _Count;
                _M_currentYield = _YieldCount;
                _M_state = _StateSpin;
            }
        }

        /// Spins for one time quantum, until a maximum spin is reached.
        ///
        /// false if spin count has reached steady state, true otherwise.
        ///
        /// If the spin count is not changing do not spin again
        /// because there is either only one processor, or the maximum spin has been reached and blocking is
        /// probably a better solution. However, if called again, SpinOnce will spin for a maximum spin count.
        /**/
        bool _SpinOnce()
        {
            switch (_M_state)
            {
            case _StateSpin:
            {
                unsigned long _Count = _NumberOfSpins();

                for (unsigned long _I = 0; _I < _Count; _I++)
                {
                    _YieldProcessor();
                }

                if (!_ShouldSpinAgain())
                {
                    _M_state = (_M_currentYield == 0) ? _StateBlock : _StateYield;
                }

                return true;
            }

            case _StateYield:
                _CONCRT_ASSERT(_M_currentYield > 0);
                if (--_M_currentYield == 0)
                {
                    _M_state = _StateBlock;
                }

                // Execute the yield
                _DoYield();
                return true;

            case _StateBlock:
                // Reset to defaults if client does not block
                _Reset();
                return false;

            case _StateSingle:
                // No need to spin on a single processor: just execute the yield
                _DoYield();
                return false;

            case _StateInitial:
                // Reset counters to their default value and Spin once.
                _Reset();
                return _SpinOnce();

            default:
                // Unreached
                return false;
            };
        }

    protected:
        /// State of the spin wait class.
        /**/
        enum _SpinState
        {
            _StateInitial,
            _StateSpin,
            _StateYield,
            _StateBlock,
            _StateSingle
        };

        /// Yields its time slice using the specified yield function
        /**/
        void _DoYield()
        {
            bool _ShouldYield = (_YieldCount != 0);
            if (_ShouldYield)
            {
                _CONCRT_ASSERT(_M_yieldFunction != NULL);
                _M_yieldFunction();
            }
            else
            {
                _YieldProcessor();
            }
        }

        /// Resets the counts and state to the default.
        /**/
        void _Reset()
        {
            _M_state = _StateInitial;

            // Reset to the default spin value. The value specified
            // by the client is ignored on a reset.
            _SetSpinCount(_SpinCount::_Value());

            _CONCRT_ASSERT(_M_state != _StateInitial);
        }

        /// Determines the current spin count
        ///
        /// The number of spins to execute for this iteration
        /**/
        unsigned long _NumberOfSpins()
        {
            return 1;
        }

        /// Determines whether maximum spin has been reached
        ///
        /// false if spin count has reached steady state, true otherwise.
        /**/
        bool _ShouldSpinAgain()
        {
            return (--_M_currentSpin > 0);
        }

        unsigned long  _M_currentSpin;
        unsigned long  _M_currentYield;
        _SpinState     _M_state;
        _YieldFunction _M_yieldFunction;
    };

    typedef _SpinWait<>  _SpinWaitBackoffNone;
    typedef _SpinWait<0> _SpinWaitNoYield;
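    // Illustrative sketch (not part of the header): a typical _SpinWaitBackoffNone
    // loop spins while a condition is not yet true and falls back to blocking once
    // _SpinOnce returns false. The function and flag below are hypothetical.
    //
    //     void _Example_wait_until_set(volatile long& _Flag)
    //     {
    //         Concurrency::details::_SpinWaitBackoffNone _Sw;
    //         while (_Flag == 0)
    //         {
    //             if (!_Sw._SpinOnce())
    //             {
    //                 // Steady state reached; a real caller would block on an
    //                 // event here instead of continuing to burn cycles.
    //             }
    //         }
    //     }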
    //
    // This reentrant lock uses CRITICAL_SECTION and is intended for use when kernel blocking
    // is desirable and where it is either known that the lock will be taken recursively in
    // the same thread, or not known that a non-reentrant lock can be used safely.
    //
    class _ReentrantBlockingLock
    {
    public:
        // Constructor for _ReentrantBlockingLock
        _CRTIMP _ReentrantBlockingLock();

        // Destructor for _ReentrantBlockingLock
        _CRTIMP ~_ReentrantBlockingLock();

        // Acquire the lock, spin if necessary
        _CRTIMP void _Acquire();

        // Tries to acquire the lock, does not spin.
        // Returns true if the acquisition worked, false otherwise
        _CRTIMP bool _TryAcquire();

        // Releases the lock
        _CRTIMP void _Release();

        // An exception safe RAII wrapper.
        class _Scoped_lock
        {
        public:
            // Constructs a holder and acquires the specified lock
            explicit _Scoped_lock(_ReentrantBlockingLock& _Lock) : _M_lock(_Lock)
            {
                _M_lock._Acquire();
            }

            // Destroys the holder and releases the lock
            ~_Scoped_lock()
            {
                _M_lock._Release();
            }

        private:
            _ReentrantBlockingLock& _M_lock;

            _Scoped_lock(const _Scoped_lock&);                    // no copy constructor
            _Scoped_lock const & operator=(const _Scoped_lock&);  // no assignment operator
        };

    private:
        // Critical section requires windows.h. Hide the implementation so that
        // user code need not include windows.h.
        _CONCRT_BUFFER _M_criticalSection[(4 * sizeof(void *) + 2 * sizeof(long) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER)];
    };

    //
    // This reentrant lock is a pure spin lock and is intended for use when kernel blocking
    // is not desirable and where it is either known that the lock will be taken recursively in
    // the same thread, or not known that a non-reentrant lock can be used safely.
    //
    class _ReentrantLock
    {
    public:
        // Constructor for _ReentrantLock
        _CRTIMP _ReentrantLock();

        // Acquire the lock, spin if necessary
        _CRTIMP void _Acquire();

        // Tries to acquire the lock, does not spin
        // Returns true if the acquisition worked, false otherwise
        _CRTIMP bool _TryAcquire();

        // Releases the lock
        _CRTIMP void _Release();

        // An exception safe RAII wrapper.
        class _Scoped_lock
        {
        public:
            // Constructs a holder and acquires the specified lock
            explicit _Scoped_lock(_ReentrantLock& _Lock) : _M_lock(_Lock)
            {
                _M_lock._Acquire();
            }

            // Destroys the holder and releases the lock
            ~_Scoped_lock()
            {
                _M_lock._Release();
            }

        private:
            _ReentrantLock& _M_lock;

            _Scoped_lock(const _Scoped_lock&);                    // no copy constructor
            _Scoped_lock const & operator=(const _Scoped_lock&);  // no assignment operator
        };

    private:
        long _M_recursionCount;
        volatile long _M_owner;
    };

    //
    // This non-reentrant lock uses CRITICAL_SECTION and is intended for use in situations
    // where it is known that the lock will not be taken recursively, and can be more
    // efficiently implemented.
    //
    class _NonReentrantBlockingLock
    {
    public:
        // Constructor for _NonReentrantBlockingLock
        //
        // The constructor is exported because _NonReentrantLock is
        // included in DevUnitTests.
        _CRTIMP _NonReentrantBlockingLock();

        // Destructor for _NonReentrantBlockingLock
        _CRTIMP ~_NonReentrantBlockingLock();

        // Acquire the lock, spin if necessary
        _CRTIMP void _Acquire();

        // Tries to acquire the lock, does not spin
        // Returns true if the lock is taken, false otherwise
        _CRTIMP bool _TryAcquire();

        // Releases the lock
        _CRTIMP void _Release();

        // An exception safe RAII wrapper.
        class _Scoped_lock
        {
        public:
            // Constructs a holder and acquires the specified lock
            explicit _Scoped_lock(_NonReentrantBlockingLock& _Lock) : _M_lock(_Lock)
            {
                _M_lock._Acquire();
            }

            // Destroys the holder and releases the lock
            ~_Scoped_lock()
            {
                _M_lock._Release();
            }

        private:
            _NonReentrantBlockingLock& _M_lock;

            _Scoped_lock(const _Scoped_lock&);                    // no copy constructor
            _Scoped_lock const & operator=(const _Scoped_lock&);  // no assignment operator
        };

    private:
        // Critical section requires windows.h. Hide the implementation so that
        // user code need not include windows.h
        _CONCRT_BUFFER _M_criticalSection[(4 * sizeof(void *) + 2 * sizeof(long) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER)];
    };
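    // Illustrative sketch (not part of the header): the _Scoped_lock wrappers make
    // lock acquisition exception safe. The function and counter are hypothetical.
    //
    //     void _Example_locked_update(int& _Counter)
    //     {
    //         static Concurrency::details::_NonReentrantBlockingLock _Lock;
    //
    //         // Acquired here; released by the destructor even if an exception is thrown.
    //         Concurrency::details::_NonReentrantBlockingLock::_Scoped_lock _Guard(_Lock);
    //         ++_Counter;
    //     }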
    //
    // A Reader-Writer Lock is intended for use in situations with many readers and rare
    // writers.
    //
    // A writer request immediately blocks future readers and then waits until all current
    // readers drain. A reader request does not block future writers and must wait until
    // all writers are done, even those that cut in front of it. In any race between a
    // reader and a writer, the writer always wins.
    //
    class _ReaderWriterLock
    {
    public:
        // Constructor for _ReaderWriterLock
        //
        // The constructor and destructor are exported because _ReaderWriterLock is
        // included in DevUnitTests.
        _CRTIMP _ReaderWriterLock();

        // Acquire lock for reading. Spins until all writers finish, new writers
        // can cut in front of a waiting reader.
        _CRTIMP void _AcquireRead();

        // Release lock for reading. The last reader changes _M_state to _Free.
        _CRTIMP void _ReleaseRead();

        // Acquire lock for writing. Spin until no readers exist, then acquire lock
        // and prevent new readers.
        _CRTIMP void _AcquireWrite();

        // Release lock for writing.
        _CRTIMP void _ReleaseWrite();

        // Try to acquire the write lock, do not spin if unable to acquire.
        // Returns true if the acquisition worked, false otherwise
        _CRTIMP bool _TryAcquireWrite();

        // Returns true if it is in write state, false otherwise
        bool _HasWriteLock() const
        {
            return (_M_state == _Write);
        }

        // Guarantees that all writers are out of the lock. This does nothing if there are no pending writers.
        void _FlushWriteOwners();

        // An exception safe RAII wrapper.
        class _Scoped_lock
        {
        public:
            // Constructs a holder and acquires the writer lock
            explicit _Scoped_lock(_ReaderWriterLock& _Lock) : _M_lock(_Lock)
            {
                _M_lock._AcquireWrite();
            }

            // Destroys the holder and releases the writer lock
            ~_Scoped_lock()
            {
                _M_lock._ReleaseWrite();
            }

        private:
            _ReaderWriterLock& _M_lock;

            _Scoped_lock(const _Scoped_lock&);                    // no copy constructor
            _Scoped_lock const & operator=(const _Scoped_lock&);  // no assignment operator
        };

        // An exception safe RAII wrapper for reads.
        class _Scoped_lock_read
        {
        public:
            // Constructs a holder and acquires the reader lock
            explicit _Scoped_lock_read(_ReaderWriterLock& _Lock) : _M_lock(_Lock)
            {
                _M_lock._AcquireRead();
            }

            // Destroys the holder and releases the reader lock
            ~_Scoped_lock_read()
            {
                _M_lock._ReleaseRead();
            }

        private:
            _ReaderWriterLock& _M_lock;

            _Scoped_lock_read(const _Scoped_lock_read&);                    // no copy constructor
            _Scoped_lock_read const & operator=(const _Scoped_lock_read&);  // no assignment operator
        };

    private:
        // State enum where:
        // -1    --> write mode
        //  0    --> free
        // n > 0 --> n readers have locked in read mode.
        enum _State
        {
            _Write = -1,
            _Free  = 0,
            _Read  = 1
        };

        // The current state of the lock, mapping to the State enum. This is also
        // an indicator of the number of readers holding the lock, for any number > 0.
        volatile long _M_state;

        // A writer increments this as soon as it wants to lock and decrements this
        // after releasing the lock. To prevent writers from starving, a reader will
        // wait until this counter is zero, and only then will try to obtain the lock.
        volatile long _M_numberOfWriters;

        // Spin-Wait-Until variant
        static void __cdecl _WaitEquals(volatile const long& _Location, long _Value, long _Mask = 0xFFFFFFFF);
    };
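    // Illustrative sketch (not part of the header): readers use _Scoped_lock_read
    // and the rare writer uses _Scoped_lock. The table and functions are hypothetical.
    //
    //     static Concurrency::details::_ReaderWriterLock _TableLock;
    //     static int _Table[16];
    //
    //     int _Example_read(size_t _Index)
    //     {
    //         Concurrency::details::_ReaderWriterLock::_Scoped_lock_read _Guard(_TableLock);
    //         return _Table[_Index];
    //     }
    //
    //     void _Example_write(size_t _Index, int _Value)
    //     {
    //         Concurrency::details::_ReaderWriterLock::_Scoped_lock _Guard(_TableLock);
    //         _Table[_Index] = _Value;
    //     }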
    //
    // Exception safe RAII wrappers for _malloca()
    //

    //
    // _MallocaArrayHolder is used when the allocation size is known up front, and the memory must be allocated in a contiguous space
    //
    template <class _ElemType>
    class _MallocaArrayHolder
    {
    public:
        _MallocaArrayHolder() : _M_ElemArray(NULL), _M_ElemsConstructed(0) {}

        // _Initialize takes the pointer to the memory allocated by the user using _malloca
        void _Initialize(_ElemType * _Elem)
        {
            // The object must be initialized exactly once
            _CONCRT_ASSERT(_M_ElemArray == NULL && _M_ElemsConstructed == 0);
            _M_ElemArray = _Elem;
            _M_ElemsConstructed = 0;
        }

        // _InitOnRawMalloca takes the raw pointer returned by _malloca directly.
        // It will initialize itself with that pointer and return a strongly typed pointer.
        // Note that the constructor will NOT be called.
        _ElemType * _InitOnRawMalloca(void * _MallocaRet)
        {
            if (_MallocaRet == nullptr)
                throw std::bad_alloc();
            _Initialize(static_cast<_ElemType *>(_MallocaRet));
            return static_cast<_ElemType *>(_MallocaRet);
        }

        // Register the next slot for destruction. Because we only keep the index of the last slot to be destructed,
        // this method must be called sequentially from 0 to N where N < _ElemCount.
        void _IncrementConstructedElemsCount()
        {
            _CONCRT_ASSERT(_M_ElemArray != NULL); // must already be initialized
            _M_ElemsConstructed++;
        }

        virtual ~_MallocaArrayHolder()
        {
            for( size_t _I=0; _I < _M_ElemsConstructed; ++_I )
            {
                _M_ElemArray[_I]._ElemType::~_ElemType();
            }
            // Works even when object was not initialized, that is, _M_ElemArray == NULL
            _freea(_M_ElemArray);
        }

    private:
        _ElemType * _M_ElemArray;
        size_t      _M_ElemsConstructed;

        // Copy construction and assignment are not supported.
        _MallocaArrayHolder(const _MallocaArrayHolder & );
        _MallocaArrayHolder& operator = (const _MallocaArrayHolder & );
    };

    //
    // _MallocaListHolder is used when the allocation size is not known up front, and the elements are added to the list dynamically
    //
    template <class _ElemType>
    class _MallocaListHolder
    {
    public:
        // Returns the size required to allocate the payload itself and the pointer to the next element
        size_t _GetAllocationSize() const
        {
            return sizeof(_ElemNodeType);
        }

        _MallocaListHolder() : _M_FirstNode(NULL)
        {
        }

        // Add the next element to the list. The memory is allocated in the caller's frame by _malloca
        void _AddNode(_ElemType * _Elem)
        {
            _ElemNodeType * _Node = reinterpret_cast<_ElemNodeType *>(_Elem);
            _Node->_M_Next = _M_FirstNode;
            _M_FirstNode = reinterpret_cast<_ElemNodeType *>(_Elem);
        }

        // _AddRawMallocaNode takes the raw pointer returned by _malloca directly.
        // It will add that bucket of memory to the list and return a strongly typed pointer.
        // Note that the constructor will NOT be called.
        _ElemType * _AddRawMallocaNode(void * _MallocaRet)
        {
            if (_MallocaRet == nullptr)
                throw std::bad_alloc();
            _AddNode(static_cast<_ElemType *>(_MallocaRet));
            return static_cast<_ElemType *>(_MallocaRet);
        }

        // Walk the list and destruct, then free each element
        _At_(this->_M_FirstNode, _Pre_valid_)
        virtual ~_MallocaListHolder()
        {
            for( _ElemNodeType * _Node = _M_FirstNode; _Node != NULL; )
            {
                auto _M_Next = _Node->_M_Next;
                _Node->_M_Elem._ElemType::~_ElemType();
                _freea(_Node);
                _Node = _M_Next;
            }
        }

    private:
        class _ElemNodeType
        {
            friend class _MallocaListHolder;
            _ElemType       _M_Elem;
            _ElemNodeType * _M_Next;

            // Always instantiated using malloc, so default constructor and destructor are not needed.
            _ElemNodeType();
            ~_ElemNodeType();

            // Copy construction and assignment are not supported.
            _ElemNodeType(const _ElemNodeType & );
            _ElemNodeType & operator = (const _ElemNodeType & );
        };

        _ElemNodeType* _M_FirstNode;

        // Copy construction and assignment are not supported.
        _MallocaListHolder(const _MallocaListHolder & );
        _MallocaListHolder & operator = (const _MallocaListHolder & );
    };

    // Forward declarations
    class _StructuredTaskCollection;
    class _TaskCollection;
    class _UnrealizedChore;

} // namespace details

//**************************************************************************
// Public Namespace:
//
// Anything in the Concurrency namespace is intended for direct client consumption.
//
//**************************************************************************

/// This class describes an exception thrown because of a failure to acquire a critical resource in the Concurrency Runtime.
///
/// This exception is typically thrown when a call to the operating system from within the Concurrency Runtime
/// fails. The error code which would normally be returned from a call to the Win32 method GetLastError is
/// converted to a value of type HRESULT and can be retrieved using the get_error_code method.
/// /**/ class scheduler_resource_allocation_error : public std::exception { public: /// /// Constructs a scheduler_resource_allocation_error object. /// /// /// A descriptive message of the error. /// /// /// The HRESULT value of the error that caused the exception. /// /**/ _CRTIMP scheduler_resource_allocation_error(_In_z_ const char * _Message, HRESULT _Hresult) throw(); /// /// Constructs a scheduler_resource_allocation_error object. /// /// /// The HRESULT value of the error that caused the exception. /// /**/ explicit _CRTIMP scheduler_resource_allocation_error(HRESULT _Hresult) throw(); /// /// Returns the error code that caused the exception. /// /// /// The HRESULT value of the error that caused the exception. /// /**/ _CRTIMP HRESULT get_error_code() const throw(); private: HRESULT _Hresult; }; /// /// This class describes an exception thrown because of a failure to create a worker execution context in the Concurrency Runtime. /// /// /// This exception is typically thrown when a call to the operating system to create execution contexts from within the Concurrency Runtime /// fails. Execution contexts are threads that execute tasks in the Concurrency Runtime. The error code which would normally be returned /// from a call to the Win32 method GetLastError is converted to a value of type HRESULT and can be retrieved using the base /// class method get_error_code. /// /**/ class scheduler_worker_creation_error : public scheduler_resource_allocation_error { public: /// /// Constructs a scheduler_worker_creation_error object. /// /// /// A descriptive message of the error. /// /// /// The HRESULT value of the error that caused the exception. /// /**/ _CRTIMP scheduler_worker_creation_error(_In_z_ const char * _Message, HRESULT _Hresult) throw(); /// /// Constructs a scheduler_worker_creation_error object. /// /// /// The HRESULT value of the error that caused the exception. /// /**/ explicit _CRTIMP scheduler_worker_creation_error(HRESULT _Hresult) throw(); }; /// /// This class describes an exception thrown when an unsupported operating system is used. /// /**/ class unsupported_os : public std::exception { public: /// /// Constructs an unsupported_os object. /// /// /// A descriptive message of the error. /// /**/ explicit _CRTIMP unsupported_os(_In_z_ const char * _Message) throw(); /// /// Constructs an unsupported_os object. /// /**/ _CRTIMP unsupported_os() throw(); }; /// /// This class describes an exception thrown when an operation is performed which requires a scheduler /// to be attached to the current context and one is not. /// /// /// /**/ class scheduler_not_attached : public std::exception { public: /// /// Constructs a scheduler_not_attached object. /// /// /// A descriptive message of the error. /// /**/ explicit _CRTIMP scheduler_not_attached(_In_z_ const char * _Message) throw(); /// /// Constructs a scheduler_not_attached object. /// /**/ _CRTIMP scheduler_not_attached() throw(); }; /// /// This class describes an exception thrown when the Attach method is called on a Scheduler /// object which is already attached to the current context. /// /// /// /**/ class improper_scheduler_attach : public std::exception { public: /// /// Constructs an improper_scheduler_attach object. /// /// /// A descriptive message of the error. /// /**/ explicit _CRTIMP improper_scheduler_attach(_In_z_ const char * _Message) throw(); /// /// Constructs an improper_scheduler_attach object. 
/// /**/ _CRTIMP improper_scheduler_attach() throw(); }; /// /// This class describes an exception thrown when the CurrentScheduler::Detach method is called on /// a context which has not been attached to any scheduler using the Attach method of a Scheduler object. /// /// /// /// /**/ class improper_scheduler_detach : public std::exception { public: /// /// Constructs an improper_scheduler_detach object. /// /// /// A descriptive message of the error. /// /**/ explicit _CRTIMP improper_scheduler_detach(_In_z_ const char * _Message) throw(); /// /// Constructs an improper_scheduler_detach object. /// /**/ _CRTIMP improper_scheduler_detach() throw(); }; /// /// This class describes an exception thrown when the Reference method is called on a Scheduler /// object that is shutting down, from a context that is not part of that scheduler. /// /// /// /**/ class improper_scheduler_reference : public std::exception { public: /// /// Constructs an improper_scheduler_reference object. /// /// /// A descriptive message of the error. /// /**/ explicit _CRTIMP improper_scheduler_reference(_In_z_ const char* _Message) throw(); /// /// Constructs an improper_scheduler_reference object. /// /**/ _CRTIMP improper_scheduler_reference() throw(); }; /// /// This class describes an exception thrown when the Scheduler::SetDefaultSchedulerPolicy method is /// called when a default scheduler already exists within the process. /// /// /**/ class default_scheduler_exists : public std::exception { public: /// /// Constructs a default_scheduler_exists object. /// /// /// A descriptive message of the error. /// /**/ explicit _CRTIMP default_scheduler_exists(_In_z_ const char * _Message) throw(); /// /// Constructs a default_scheduler_exists object. /// /**/ _CRTIMP default_scheduler_exists() throw(); }; /// /// This class describes an exception thrown when calls to the Block and Unblock methods of a /// Context object are not properly paired. /// /// /// Calls to the Block and Unblock methods of a Context object must always be properly paired. /// The Concurrency Runtime allows the operations to happen in either order. For example, a call to Block /// can be followed by a call to Unblock, or vice-versa. This exception would be thrown if, for instance, two calls to the /// Unblock method were made in a row, on a Context object which was not blocked. /// /// /// /// /**/ class context_unblock_unbalanced : public std::exception { public: /// /// Constructs a context_unblock_unbalanced object. /// /// /// A descriptive message of the error. /// /**/ explicit _CRTIMP context_unblock_unbalanced(_In_z_ const char * _Message) throw(); /// /// Constructs a context_unblock_unbalanced object. /// /**/ _CRTIMP context_unblock_unbalanced() throw(); }; /// /// This class describes an exception thrown when the Unblock method of a Context object is called /// from the same context. This would indicate an attempt by a given context to unblock itself. /// /// /// /**/ class context_self_unblock : public std::exception { public: /// /// Constructs a context_self_unblock object. /// /// /// A descriptive message of the error. /// /**/ explicit _CRTIMP context_self_unblock(_In_z_ const char * _Message) throw(); /// /// Constructs a context_self_unblock object. /// /**/ _CRTIMP context_self_unblock() throw(); }; /// /// This class describes an exception thrown when there are tasks still scheduled to a task_group or /// structured_task_group object at the time that object's destructor executes. 
This exception will never be thrown /// if the destructor is reached because of a stack unwinding as the result of an exception. /// /// /// Absent exception flow, you are responsible for calling either the wait or run_and_wait method of a task_group or /// structured_task_group object before allowing that object to destruct. The runtime throws this exception as an /// indication that you forgot to call the wait or run_and_wait method. /// /// /// /// /// /// /// /**/ class missing_wait : public std::exception { public: /// /// Constructs a missing_wait object. /// /// /// A descriptive message of the error. /// /**/ explicit _CRTIMP missing_wait(_In_z_ const char * _Message) throw(); /// /// Constructs a missing_wait object. /// /**/ _CRTIMP missing_wait() throw(); }; /// /// This class describes an exception thrown when a messaging block is given a pointer to a target which is /// invalid for the operation being performed. /// /// /// This exception is typically thrown for reasons such as a target attempting to consume a message which is reserved /// for a different target or releasing a reservation that it does not hold. /// /// /**/ class bad_target : public std::exception { public: /// /// Constructs a bad_target object. /// /// /// A descriptive message of the error. /// /**/ explicit _CRTIMP bad_target(_In_z_ const char * _Message) throw(); /// /// Constructs a bad_target object. /// /**/ _CRTIMP bad_target() throw(); }; /// /// This class describes an exception thrown when a messaging block is unable to find a requested message. /// /// /**/ class message_not_found : public std::exception { public: /// /// Constructs a message_not_found object. /// /// /// A descriptive message of the error. /// /**/ explicit _CRTIMP message_not_found(_In_z_ const char * _Message) throw(); /// /// Constructs a message_not_found object. /// /**/ _CRTIMP message_not_found() throw(); }; /// /// This class describes an exception thrown when the link_target method of a messaging block is /// called and the messaging block is unable to link to the target. This can be the result of exceeding the number of /// links the messaging block is allowed or attempting to link a specific target twice to the same source. /// /// /**/ class invalid_link_target : public std::exception { public: /// /// Constructs an invalid_link_target object. /// /// /// A descriptive message of the error. /// /**/ explicit _CRTIMP invalid_link_target(_In_z_ const char * _Message) throw(); /// /// Constructs an invalid_link_target object. /// /**/ _CRTIMP invalid_link_target() throw(); }; /// /// This class describes an exception thrown when an invalid or unknown key is passed to a SchedulerPolicy /// object constructor, or the SetPolicyValue method of a SchedulerPolicy object is passed a key that must /// be changed using other means such as the SetConcurrencyLimits method. /// /// /// /// /// /**/ class invalid_scheduler_policy_key : public std::exception { public: /// /// Constructs an invalid_scheduler_policy_key object. /// /// /// A descriptive message of the error. /// /**/ explicit _CRTIMP invalid_scheduler_policy_key(_In_z_ const char * _Message) throw(); /// /// Constructs an invalid_scheduler_policy_key object. /// /**/ _CRTIMP invalid_scheduler_policy_key() throw(); }; /// /// This class describes an exception thrown when a policy key of a SchedulerPolicy object is /// set to an invalid value for that key. 
///
/**/
class invalid_scheduler_policy_value : public std::exception
{
public:
    /// Constructs an invalid_scheduler_policy_value object.
    ///
    /// A descriptive message of the error.
    /**/
    explicit _CRTIMP invalid_scheduler_policy_value(_In_z_ const char * _Message) throw();

    /// Constructs an invalid_scheduler_policy_value object.
    /**/
    _CRTIMP invalid_scheduler_policy_value() throw();
};

/// This class describes an exception thrown when an attempt is made to set the concurrency limits of a
/// SchedulerPolicy object such that the value of the MinConcurrency key is greater than the value of the
/// MaxConcurrency key.
///
/**/
class invalid_scheduler_policy_thread_specification : public std::exception
{
public:
    /// Constructs an invalid_scheduler_policy_thread_specification object.
    ///
    /// A descriptive message of the error.
    /**/
    explicit _CRTIMP invalid_scheduler_policy_thread_specification(_In_z_ const char * _Message) throw();

    /// Constructs an invalid_scheduler_policy_thread_specification object.
    /**/
    _CRTIMP invalid_scheduler_policy_thread_specification() throw();
};

/// This class describes an exception thrown when an invalid operation is performed that is not more accurately
/// described by another exception type thrown by the Concurrency Runtime.
///
/// The various methods which throw this exception will generally document under what circumstances they will throw it.
/**/
class invalid_operation : public std::exception
{
public:
    /// Constructs an invalid_operation object.
    ///
    /// A descriptive message of the error.
    /**/
    explicit _CRTIMP invalid_operation(_In_z_ const char * _Message) throw();

    /// Constructs an invalid_operation object.
    /**/
    _CRTIMP invalid_operation() throw();
};

/// This class describes an exception thrown when the Concurrency Runtime detects that you neglected to call the
/// CurrentScheduler::Detach method on a context that attached to a second scheduler using the Attach method
/// of the Scheduler object.
///
/// This exception is thrown only when you nest one scheduler inside another by calling the Attach method of a
/// Scheduler object on a context that is already owned by or attached to another scheduler. The Concurrency Runtime
/// throws this exception opportunistically when it can detect the scenario as an aid to locating the problem. Not every
/// instance of neglecting to call the CurrentScheduler::Detach method is guaranteed to throw this exception.
///
/**/
class nested_scheduler_missing_detach : public std::exception
{
public:
    /// Constructs a nested_scheduler_missing_detach object.
    ///
    /// A descriptive message of the error.
    /**/
    explicit _CRTIMP nested_scheduler_missing_detach(_In_z_ const char * _Message) throw();

    /// Constructs a nested_scheduler_missing_detach object.
    /**/
    _CRTIMP nested_scheduler_missing_detach() throw();
};

/// This class describes an exception thrown when an operation has timed out.
/**/
class operation_timed_out : public std::exception
{
public:
    /// Constructs an operation_timed_out object.
    ///
    /// A descriptive message of the error.
    /**/
    explicit _CRTIMP operation_timed_out(_In_z_ const char * _Message) throw();

    /// Constructs an operation_timed_out object.
/// /**/ _CRTIMP operation_timed_out() throw(); }; /// /// This class describes an exception thrown when a task_handle object is scheduled multiple times /// using the run method of a task_group or structured_task_group object without an intervening /// call to either the wait or run_and_wait methods. /// /// /// /// /// /// /// /// /// /// /**/ class invalid_multiple_scheduling : public std::exception { public: /// /// Constructs an invalid_multiple_scheduling object. /// /// /// A descriptive message of the error. /// /**/ explicit _CRTIMP invalid_multiple_scheduling(_In_z_ const char * _Message) throw(); /// /// Constructs an invalid_multiple_scheduling object. /// /**/ _CRTIMP invalid_multiple_scheduling() throw(); }; /// /// This class describes an exception thrown when the Context::Oversubscribe method is called with /// the parameter set to false without a prior call to the /// Context::Oversubscribe method with the parameter set to true. /// /// /**/ class invalid_oversubscribe_operation : public std::exception { public: /// /// Constructs an invalid_oversubscribe_operation object. /// /// /// A descriptive message of the error. /// /**/ explicit _CRTIMP invalid_oversubscribe_operation(_In_z_ const char * _Message) throw(); /// /// Constructs an invalid_oversubscribe_operation object. /// /**/ _CRTIMP invalid_oversubscribe_operation() throw(); }; /// /// This class describes an exception thrown when a lock is acquired improperly. /// /// /// Typically, this exception is thrown when an attempt is made to acquire a non-reentrant lock /// recursively on the same context. /// /// /// /**/ class improper_lock : public std::exception { public: /// /// Constructs an improper_lock exception. /// /// /// A descriptive message of the error. /// /**/ explicit _CRTIMP improper_lock(_In_z_ const char * _Message) throw(); /// /// Constructs an improper_lock exception. /// /**/ _CRTIMP improper_lock() throw(); }; /// /// This class describes an exception thrown by the PPL tasks layer in order to force the current task /// to cancel. It is also thrown by the get() method on task, for a /// canceled task. /// /// /// /**/ class task_canceled : public std::exception { public: /// /// Constructs a task_canceled object. /// /// /// A descriptive message of the error. /// /**/ explicit _CRTIMP task_canceled(_In_z_ const char * _Message) throw(); /// /// Constructs a task_canceled object. /// /**/ _CRTIMP task_canceled() throw(); }; /// /// An abstraction of a physical location on hardware. /// /**/ class location { public: /// /// Constructs a location object. /// /// /// A default constructed location represents the system as a whole. /// /**/ location() : _M_type(_System), _M_reserved(0), _M_pBinding(NULL), _M_ptr(NULL) { } /// /// Constructs a location object. /// /**/ location(const location& _Src) { _Assign(_Src); } #ifdef _CRT_USE_WINAPI_FAMILY_DESKTOP_APP /// /// Returns a location object which represents a given NUMA node. /// /// /// The NUMA node number to construct a location for. /// /// /// A location representing the NUMA node specified by the parameter. /// /**/ _CRTIMP static location __cdecl from_numa_node(unsigned short _NumaNodeNumber); #endif /* _CRT_USE_WINAPI_FAMILY_DESKTOP_APP */ /// /// Returns a location object representing the most specific place the calling thread is executing. /// /// /// A location representing the most specific place the calling thread is executing. 
    ///
    /**/
    _CRTIMP static location __cdecl current();

    /// Assigns the contents of a different location object to this one.
    ///
    /// The source location object.
    /**/
    location& operator=(const location& _Rhs)
    {
        _Assign(_Rhs);
        return *this;
    }

    /// Destroys a location object.
    /**/
    ~location()
    {
    }

    /// Determines whether two location objects represent the same location.
    ///
    /// true if the two locations are identical, and false otherwise.
    /**/
    bool operator==(const location& _Rhs) const
    {
        return (_M_type == _Rhs._M_type && _M_ptr == _Rhs._M_ptr);
    }

    /// Determines whether two location objects represent different locations.
    ///
    /// true if the two locations are different, false otherwise.
    /**/
    bool operator!=(const location& _Rhs) const
    {
        return !operator==(_Rhs);
    }

    //**************************************************
    //
    // Runtime internal public pieces of location. No code outside the core of ConcRT can depend on anything
    // below. It is internal implementation detail:
    //

    /// Returns a location representing the scheduling node that the calling thread is executing.
    /**/
    _CRTIMP static location __cdecl _Current_node();

    /// Describes the type of the given location.
    /**/
    enum _Type
    {
        /// Indicates that the location represents the "system location". This has no specific affinity.
        _System,            // _M_id is meaningless

        /// Indicates that the location represents a particular NUMA node.
        _NumaNode,          // _M_id is the Windows NUMA node number

        /// Indicates that the location represents a particular scheduling node.
        _SchedulingNode,    // _M_id is the unique identifier for the scheduling node

        /// Indicates that the location represents a particular execution resource.
        _ExecutionResource, // _M_id is the unique identifier for the execution resource
    };

    /// Constructs a specific location.
    /**/
    location(_Type _LocationType, unsigned int _Id, unsigned int _BindingId = 0, _Inout_opt_ void *_PBinding = NULL);

    /// Determines whether two locations have an intersection. This is a fast intersection which avoids certain checks by knowing that
    /// the *this* pointer is a virtual processor location for a validly bound virtual processor.
    ///
    /// The location to intersect with this.
    ///
    /// An indication as to whether the two locations intersect.
    /**/
    bool _FastVPIntersects(const location& _Rhs) const;

    /// Determines whether two locations have an intersection. This is a fast intersection which avoids certain checks by knowing that
    /// the *this* pointer is a node for a validly bound node.
    ///
    /// The location to intersect with this.
    ///
    /// An indication as to whether the two locations intersect.
    /**/
    bool _FastNodeIntersects(const location& _Rhs) const;

    /// Assigns _Rhs to this location.
    /**/
    void _Assign(const location& _Rhs)
    {
        _M_type = _Rhs._M_type;
        _M_reserved = _Rhs._M_reserved;

        _M_ptr = _Rhs._M_ptr;

        _M_bindingId = _Rhs._M_bindingId;
        _M_pBinding = _Rhs._M_pBinding;
    }

    /// Internal routine that tells whether a location represents the "system location". This indicates no specific placement.
    /**/
    bool _Is_system() const
    {
        return (_Type)_M_type == _System;
    }

    /// Returns the internal binding as a specified object.
    /**/
    template <typename T>
    T* _As() const
    {
        return reinterpret_cast<T*>(_M_pBinding);
    }

    /// Returns the ID which this location object represents.
    /**/
    unsigned int _GetId() const
    {
        return _M_id;
    }

    /// Returns the type which this location object represents.
/// /**/ _Type _GetType() const { return (_Type)_M_type; } /// /// Gets the binding ID for this location. /// /**/ unsigned int _GetBindingId() const { return _M_bindingId; } private: // Indicates the type of location (as _Type) unsigned int _M_type : 28; // Flags on the location. Reserved for future use. unsigned int _M_reserved : 4; // If the location has a tight binding, this is the unique identifier of the scheduler to which the binding has specific meaning. unsigned int _M_bindingId; // Defines the agnostic (abstract hardware) binding of the location. union { // The identifier for the binding (NUMA node number, scheduler node ID, execution resource ID) unsigned int _M_id; // Pointer binding. void *_M_ptr; }; // The specific binding to a scheduler. (For example, a specific virtual processor for something like location::current() ) // This will be NULL if there is no tight binding. void *_M_pBinding; }; #ifdef _CRT_USE_WINAPI_FAMILY_DESKTOP_APP /// /// Represents an abstraction for a schedule group. Schedule groups organize a set of related work that benefits from being /// scheduled close together either temporally, by executing another task in the same group before moving to another group, or /// spatially, by executing multiple items within the same group on the same NUMA node or physical socket. /// /// /// /// /**/ class ScheduleGroup { public: /// /// Schedules a light-weight task within the schedule group. /// /// /// A pointer to the function to execute to perform the body of the light-weight task. /// /// /// A void pointer to the data that will be passed as a parameter to the body of the task. /// /// /// Calling the ScheduleTask method implicitly places a reference count on the schedule group which is removed by the runtime /// at an appropriate time after the task executes. /// /// /**/ virtual void ScheduleTask(TaskProc _Proc, _Inout_opt_ void * _Data) =0; /// /// Returns an identifier for the schedule group that is unique within the scheduler to which the group belongs. /// /// /// An identifier for the schedule group that is unique within the scheduler to which the group belongs. /// /**/ virtual unsigned int Id() const =0; /// /// Increments the schedule group reference count. /// /// /// The newly incremented reference count. /// /// /// This is typically used to manage the lifetime of the schedule group for composition. When the reference count of a schedule /// group falls to zero, the schedule group is deleted by the runtime. A schedule group created using either the /// CurrentScheduler::CreateScheduleGroup method, or the /// Scheduler::CreateScheduleGroup method starts out with a reference /// count of one. /// /// /// /// /**/ virtual unsigned int Reference() =0; /// /// Decrements the scheduler group reference count. /// /// /// The newly decremented reference count. /// /// /// This is typically used to manage the lifetime of the schedule group for composition. When the reference count of a schedule /// group falls to zero, the schedule group is deleted by the runtime. After you have called the Release method the specific number /// of times to remove the creation reference count and any additional references placed using the Reference method, you cannot /// utilize the schedule group further. Doing so will result in undefined behavior. /// A schedule group is associated with a particular scheduler instance. 
You must ensure that all references to the /// schedule group are released before all references to the scheduler are released, because the latter could result in the scheduler /// being destroyed. Doing otherwise results in undefined behavior. /// /// /// /// /**/ virtual unsigned int Release() =0; protected: // // Privatize operator delete. Clients should utilize Release to relinquish a schedule group. // template <class _T> friend void Concurrency::details::_InternalDeleteHelper(_T * _PObject); virtual ~ScheduleGroup() {}; }; /// /// Special value for the policy keys MinConcurrency and MaxConcurrency. Defaults to the number of hardware /// threads on the machine in the absence of other constraints. /// /// /**/ const unsigned int MaxExecutionResources = 0xFFFFFFFF; /// /// Special value for the policy key ContextPriority indicating that the thread priority of all contexts in the scheduler /// should be the same as that of the thread which created the scheduler. /// /// /**/ const unsigned int INHERIT_THREAD_PRIORITY = 0x0000F000; /// /// Policy keys describing aspects of scheduler behavior. Each policy element is described by a key-value pair. For more information /// about scheduler policies and their impact on schedulers, see . /// /// /// /// /// /**/ enum PolicyElementKey { /// /// The type of threads that the scheduler will utilize for underlying execution contexts. For more information, see /// . /// Valid values : A member of the SchedulerType enumeration, for example, ThreadScheduler /// Default value : ThreadScheduler. This translates to Win32 threads on all operating systems. /// /**/ SchedulerKind, /// /// The maximum concurrency level desired by the scheduler. The resource manager will try to initially allocate this many virtual processors. /// The special value MaxExecutionResources indicates that the desired concurrency level /// is the same as the number of hardware threads on the machine. If the value specified for MinConcurrency is greater than the number /// of hardware threads on the machine and MaxConcurrency is specified as MaxExecutionResources, the value for MaxConcurrency /// is raised to match what is set for MinConcurrency. /// Valid values : Positive integers and the special value MaxExecutionResources /// Default value : MaxExecutionResources /// /**/ MaxConcurrency, /// /// The minimum concurrency level that must be provided to the scheduler by the resource manager. The number of virtual processors assigned /// to a scheduler will never go below the minimum. The special value MaxExecutionResources /// indicates that the minimum concurrency level is the same as the number of hardware threads on the machine. If the value specified for /// MaxConcurrency is less than the number of hardware threads on the machine and MinConcurrency is specified as /// MaxExecutionResources, the value for MinConcurrency is lowered to match what is set for MaxConcurrency. /// Valid values : Non-negative integers and the special value MaxExecutionResources. Note that for scheduler policies /// used for the construction of Concurrency Runtime schedulers, the value 0 is invalid. /// Default value : 1 /// /**/ MinConcurrency, /// /// Tentative number of virtual processors per hardware thread. The target oversubscription factor can be increased by the Resource Manager, /// if necessary, to satisfy MaxConcurrency with the hardware threads on the machine.
/// Valid values : Positive integers /// Default value : 1 /// /**/ TargetOversubscriptionFactor, /// /// When the SchedulingProtocol policy key is set to the value EnhanceScheduleGroupLocality, this specifies the maximum number /// of runnable contexts allowed to be cached in per virtual processor local queues. Such contexts will typically run in last-in-first-out /// (LIFO) order on the virtual processor that caused them to become runnable. Note that this policy key has no meaning when the /// SchedulingProtocol key is set to the value EnhanceForwardProgress. /// Valid values : Non-negative integers /// Default value : 8 /// /**/ LocalContextCacheSize, /// /// The reserved stack size of each context in the scheduler in kilobytes. /// Valid values : Positive integers /// Default value : 0, indicating that the process' default value for stack size be used. /// /**/ ContextStackSize, /// /// The operating system thread priority of each context in the scheduler. If this key is set to the value /// INHERIT_THREAD_PRIORITY the contexts in the scheduler will inherit the priority of the thread that created the scheduler. /// Valid values : Any of the valid values for the Windows SetThreadPriority function and the special value /// INHERIT_THREAD_PRIORITY /// Default value : THREAD_PRIORITY_NORMAL /// /**/ ContextPriority, /// /// Describes which scheduling algorithm will be used by the scheduler. For more information, see . /// Valid values : A member of the SchedulingProtocolType enumeration, either EnhanceScheduleGroupLocality /// or EnhanceForwardProgress /// Default value : EnhanceScheduleGroupLocality /// /**/ SchedulingProtocol, /// /// Determines whether the resources for the scheduler will be rebalanced according to statistical information gathered from the /// scheduler or only based on the subscription level of underlying hardware threads. For more information, see /// . /// Valid values : A member of the DynamicProgressFeedbackType enumeration, either ProgressFeedbackEnabled or /// ProgressFeedbackDisabled /// Default value : ProgressFeedbackEnabled /// /**/ DynamicProgressFeedback, /// /// Determines whether and how scheduler threads will initialize the Windows Runtime. This policy key only carries meaning for applications /// executing on operating systems with version Windows 8 or higher. For more information, see . /// Valid values : A member of the WinRTInitializationType enumeration, either InitializeWinRTAsMTA or /// DoNotInitializeWinRT /// Default value : InitializeWinRTAsMTA /// /**/ WinRTInitialization, /// /// The maximum policy element key. Not a valid element key. /// /**/ MaxPolicyElementKey }; /// /// Used by the SchedulerKind policy to describe the type of threads that the scheduler should utilize for underlying execution contexts. /// For more information on available scheduler policies, see . /// /// /// /**/ enum SchedulerType { /// /// Indicates an explicit request of regular Win32 threads. /// /**/ ThreadScheduler, /// /// User-mode schedulable (UMS) threads are not supported in the Concurrency Runtime in Visual Studio 2012. Using UmsThreadDefault /// as a value for the SchedulerType policy will not result in an error. However, a scheduler created with that policy will /// default to using Win32 threads. /// /**/ UmsThreadDefault = ThreadScheduler }; #pragma deprecated(UmsThreadDefault) /// /// Used by the SchedulingProtocol policy to describe which scheduling algorithm will be utilized for the scheduler. 
For more /// information on available scheduler policies, see . /// /// /// /**/ enum SchedulingProtocolType { /// /// The scheduler prefers to continue to work on tasks within the current schedule group before moving to another schedule group. /// Unblocked contexts are cached per virtual-processor and are typically scheduled in a last-in-first-out (LIFO) fashion by the /// virtual processor which unblocked them. /// /**/ EnhanceScheduleGroupLocality, /// /// The scheduler prefers to round-robin through schedule groups after executing each task. Unblocked contexts are typically /// scheduled in a first-in-first-out (FIFO) fashion. Virtual processors do not cache unblocked contexts. /// /**/ EnhanceForwardProgress }; /// /// Used by the DynamicProgressFeedback policy to describe whether resources for the scheduler will be rebalanced according to /// statistical information gathered from the scheduler or only based on virtual processors going in and out of the idle state through /// calls to the Activate and Deactivate methods on the IVirtualProcessorRoot interface. For more information /// on available scheduler policies, see . /// /// /**/ enum DynamicProgressFeedbackType { /// /// The scheduler does not gather progress information. Rebalancing is done based solely on the subscription level of the underlying /// hardware thread. For more information on subscription levels, see /// IExecutionResource::CurrentSubscriptionLevel. /// This value is reserved for use by the runtime. /// /**/ ProgressFeedbackDisabled, /// /// The scheduler gathers progress information and passes it to the resource manager. The resource manager will utilize this statistical /// information to rebalance resources on behalf of the scheduler in addition to the subscription level of the underlying /// hardware thread. For more information on subscription levels, see /// IExecutionResource::CurrentSubscriptionLevel. /// /**/ ProgressFeedbackEnabled }; /// /// Used by the WinRTInitialization policy to describe whether and how the Windows Runtime will be initialized on scheduler threads /// for an application which runs on operating systems with version Windows 8 or higher. For more information on available scheduler policies, /// see . /// /// /**/ enum WinRTInitializationType { /// /// When the application is run on operating systems with version Windows 8 or higher, each thread within the scheduler will initialize the /// Windows Runtime and declare that it is part of the multithreaded apartment. /// /**/ InitializeWinRTAsMTA, /// /// When the application is run on operating systems with version Windows 8 or higher, threads within the scheduler will not initialize the /// Windows Runtime. /// /**/ DoNotInitializeWinRT }; /// /// The SchedulerPolicy class contains a set of key/value pairs, one for each policy element, that control the behavior of a /// scheduler instance. /// /// /// For more information about the policies which can be controlled using the SchedulerPolicy class, see /// . /// /// /// /// /// /**/ class SchedulerPolicy { public: /// /// Constructs a new scheduler policy and populates it with values for policy keys /// supported by Concurrency Runtime schedulers and the Resource Manager. /// /// /// The first constructor creates a new scheduler policy where all policies will be initialized to their default values. /// The second constructor creates a new scheduler policy that uses a named-parameter style of initialization. Values after /// the parameter are supplied as key/value pairs.
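/// For example, a sketch of the named-parameter style (the particular keys and
/// values chosen here are illustrative only):
///
///     Concurrency::SchedulerPolicy _Policy(3,    // three key/value pairs follow
///         Concurrency::MinConcurrency, 2,
///         Concurrency::MaxConcurrency, 4,
///         Concurrency::ContextPriority, Concurrency::INHERIT_THREAD_PRIORITY);
///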
Any policy key which is not specified in this /// constructor will have its default value. This constructor could throw the exceptions /// invalid_scheduler_policy_key, invalid_scheduler_policy_value or /// invalid_scheduler_policy_thread_specification. /// The third constructor is a copy constructor. Often, the most convenient way to define a new scheduler policy is to copy an /// existing policy and modify it using the SetPolicyValue or SetConcurrencyLimits methods. /// /// /// /// /// /**/ _CRTIMP SchedulerPolicy(); /// /// Constructs a new scheduler policy and populates it with values for policy keys /// supported by Concurrency Runtime schedulers and the Resource Manager. /// /// /// The number of key/value pairs that follow the parameter. /// /// /// The first constructor creates a new scheduler policy where all policies will be initialized to their default values. /// The second constructor creates a new scheduler policy that uses a named-parameter style of initialization. Values after /// the parameter are supplied as key/value pairs. Any policy key which is not specified in this /// constructor will have its default value. This constructor could throw the exceptions /// invalid_scheduler_policy_key, invalid_scheduler_policy_value or /// invalid_scheduler_policy_thread_specification. /// The third constructor is a copy constructor. Often, the most convenient way to define a new scheduler policy is to copy an /// existing policy and modify it using the SetPolicyValue or SetConcurrencyLimits methods. /// /// /// /// /// /**/ _CRTIMP SchedulerPolicy(size_t _PolicyKeyCount, ...); /// /// Constructs a new scheduler policy and populates it with values for policy keys /// supported by Concurrency Runtime schedulers and the Resource Manager. /// /// /// The source policy to copy. /// /// /// The first constructor creates a new scheduler policy where all policies will be initialized to their default values. /// The second constructor creates a new scheduler policy that uses a named-parameter style of initialization. Values after /// the parameter are supplied as key/value pairs. Any policy key which is not specified in this /// constructor will have its default value. This constructor could throw the exceptions /// invalid_scheduler_policy_key, invalid_scheduler_policy_value or /// invalid_scheduler_policy_thread_specification. /// The third constructor is a copy constructor. Often, the most convenient way to define a new scheduler policy is to copy an /// existing policy and modify it using the SetPolicyValue or SetConcurrencyLimits methods. /// /// /// /// /// /**/ _CRTIMP SchedulerPolicy(const SchedulerPolicy& _SrcPolicy); /// /// Assigns the scheduler policy from another scheduler policy. /// /// /// The policy to assign to this policy. /// /// /// A reference to the scheduler policy. /// /// /// Often, the most convenient way to define a new scheduler policy is to copy an existing policy and modify it using the /// SetPolicyValue or SetConcurrencyLimits methods. /// /// /// /// /**/ _CRTIMP SchedulerPolicy& operator=(const SchedulerPolicy& _RhsPolicy); /// /// Destroys a scheduler policy. /// /**/ _CRTIMP ~SchedulerPolicy(); /// /// Retrieves the value of the policy key supplied as the parameter. /// /// /// The policy key to retrieve a value for. /// /// /// If the key specified by the parameter is supported, the policy value for the key cast to an unsigned int. /// /// /// The method will throw invalid_scheduler_policy_key for an invalid policy key. 
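/// As an illustrative sketch (not part of the original header), querying and
/// adjusting an existing policy might look like the following:
///
///     Concurrency::SchedulerPolicy _Policy;
///     unsigned int _Factor =
///         _Policy.GetPolicyValue(Concurrency::TargetOversubscriptionFactor);
///     _Policy.SetPolicyValue(Concurrency::TargetOversubscriptionFactor, _Factor * 2);
///     _Policy.SetConcurrencyLimits(1, 4);   // MinConcurrency/MaxConcurrency pair
///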
/// /// /// /// /**/ _CRTIMP unsigned int GetPolicyValue(PolicyElementKey _Key) const; /// /// Sets the value of the policy key supplied as the parameter and returns the old value. /// /// /// The policy key to set a value for. /// /// /// The value to set the policy key to. /// /// /// If the key specified by the parameter is supported, the old policy value for the key cast to an unsigned int. /// /// /// The method will throw invalid_scheduler_policy_key for an invalid policy key /// or any policy key whose value cannot be set by the SetPolicyValue method. /// The method will throw invalid_scheduler_policy_value for a value that /// is not supported for the key specified by the parameter. /// Note that this method is not allowed to set the MinConcurrency or MaxConcurrency policies. To set these values, use /// the SetConcurrencyLimits method. /// /// /// /// /**/ _CRTIMP unsigned int SetPolicyValue(PolicyElementKey _Key, unsigned int _Value); /// /// Simultaneously sets the MinConcurrency and MaxConcurrency policies on the SchedulerPolicy object. /// /// /// The value for the MinConcurrency policy key. /// /// /// The value for the MaxConcurrency policy key. /// /// /// The method will throw invalid_scheduler_policy_thread_specification /// if the value specified for the MinConcurrency policy is greater than that specified for the MaxConcurrency policy. /// The method can also throw invalid_scheduler_policy_value for other /// invalid values. /// /// /// /// /**/ _CRTIMP void SetConcurrencyLimits(unsigned int _MinConcurrency, unsigned int _MaxConcurrency = MaxExecutionResources); /// /// Checks if this policy is a valid policy for a Concurrency Runtime scheduler. If it is not, an appropriate exception will be thrown. /// /// /// The method will throw invalid_scheduler_policy_value if a policy value supplied /// in the SchedulerPolicy object cannot be used to create a Concurrency Runtime scheduler. Note that such a policy is not necessarily /// invalid. The Concurrency Runtime Resource Manager also utilizes the SchedulerPolicy class to describe requirements. /// /**/ void _ValidateConcRTPolicy() const; private: struct _PolicyBag { union { unsigned int _M_pPolicyBag[MaxPolicyElementKey]; struct { SchedulerType _M_schedulerKind; unsigned int _M_maxConcurrency; unsigned int _M_minConcurrency; unsigned int _M_targetOversubscriptionFactor; unsigned int _M_localContextCacheSize; unsigned int _M_contextStackSize; unsigned int _M_contextPriority; SchedulingProtocolType _M_schedulingProtocol; DynamicProgressFeedbackType _M_dynamicProgressFeedback; WinRTInitializationType _M_WinRTInitialization; } _M_specificValues; } _M_values; } *_M_pPolicyBag; /// /// Initializes the scheduler policy. /// /**/ void _Initialize(size_t _PolicyKeyCount, va_list * _PArgs); /// /// Make this policy a copy of the source policy. /// /**/ void _Assign(const SchedulerPolicy& _SrcPolicy); /// /// Returns true if the key supplied is a supported key. /// /**/ static bool __cdecl _ValidPolicyKey(PolicyElementKey _Key); /// /// Returns true if a policy value is in a valid range. /// /**/ static bool __cdecl _ValidPolicyValue(PolicyElementKey _Key, unsigned int _Value); /// /// Returns true if concurrency limit combinations are valid. /// /**/ static bool __cdecl _AreConcurrencyLimitsValid(unsigned int _MinConcurrency, unsigned int _MaxConcurrency); bool _AreConcurrencyLimitsValid() const; /// /// Test the concurrency combinations of a policy. 
/// /**/ bool _ArePolicyCombinationsValid() const; /// /// Resolves one or more of the policy keys that are set to defaults, based on the characteristics of the underlying system. /// /**/ void _ResolvePolicyValues(); /// /// Stringify policy keys. /// /**/ static char * __cdecl _StringFromPolicyKey(unsigned int _Index); }; /// /// Represents an abstraction for the current scheduler associated with the calling context. /// /// /// If there is no scheduler (see Scheduler) associated with the calling context, many /// methods within the CurrentScheduler class will result in attachment of the process' default scheduler. This may /// also imply that the process' default scheduler is created during such a call. /// /// /// /// /**/ class CurrentScheduler { private: CurrentScheduler() {} public: /// /// Returns a unique identifier for the current scheduler. /// /// /// If a scheduler is associated with the calling context, a unique identifier for that scheduler; otherwise, the value -1. /// /// /// This method will not result in scheduler attachment if the calling context is not already associated with a scheduler. /// /**/ _CRTIMP static unsigned int __cdecl Id(); /// /// Returns a copy of the policy that the current scheduler was created with. /// /// /// A copy of the policy that the current scheduler was created with. /// /// /// This method will result in the process' default scheduler being created and/or attached to the calling context if there is no /// scheduler currently associated with the calling context. /// /// /**/ _CRTIMP static SchedulerPolicy __cdecl GetPolicy(); /// /// Returns a pointer to the scheduler associated with the calling context, also referred to as the current scheduler. /// /// /// A pointer to the scheduler associated with the calling context (the current scheduler). /// /// /// This method will result in the process' default scheduler being created and/or attached to the calling context if there is no /// scheduler currently associated with the calling context. No additional reference is placed on the Scheduler object /// returned by this method. /// /**/ _CRTIMP static Scheduler * __cdecl Get(); /// /// Returns the current number of virtual processors for the scheduler associated with the calling context. /// /// /// If a scheduler is associated with the calling context, the current number of virtual processors for that scheduler; otherwise, /// the value -1. /// /// /// This method will not result in scheduler attachment if the calling context is not already associated with a scheduler. /// The return value from this method is an instantaneous sampling of the number of virtual processors for the scheduler associated /// with the calling context. This value can be stale the moment it is returned. /// /**/ _CRTIMP static unsigned int __cdecl GetNumberOfVirtualProcessors(); /// /// Creates a new scheduler whose behavior is described by the parameter and attaches it to the calling context. /// The newly created scheduler will become the current scheduler for the calling context. /// /// /// The scheduler policy that describes the behavior of the newly created scheduler. /// /// /// The attachment of the scheduler to the calling context implicitly places a reference count on the scheduler. /// After a scheduler is created with the Create method, you must call the /// CurrentScheduler::Detach method at some point in the future in order to allow the scheduler to shut down.
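/// A minimal sketch of the Create/Detach pairing (illustrative only; _MyTask is
/// a hypothetical TaskProc-compatible function):
///
///     Concurrency::SchedulerPolicy _Policy(1, Concurrency::MaxConcurrency, 4);
///     Concurrency::CurrentScheduler::Create(_Policy);
///     Concurrency::CurrentScheduler::ScheduleTask(&_MyTask, NULL);
///     // ... later, when no more work will be scheduled here ...
///     Concurrency::CurrentScheduler::Detach();
///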
/// If this method is called from a context that is already attached to a different scheduler, the existing scheduler is remembered /// as the previous scheduler, and the newly created scheduler becomes the current scheduler. When you call the CurrentScheduler::Detach /// method at a later point, the previous scheduler is restored as the current scheduler. /// This method can throw a variety of exceptions, including /// scheduler_resource_allocation_error and invalid_scheduler_policy_value. /// /// /// /// /// /// /**/ _CRTIMP static void __cdecl Create(const SchedulerPolicy& _Policy); /// /// Detaches the current scheduler from the calling context and restores the previously attached scheduler as the current /// scheduler, if one exists. After this method returns, the calling context is then managed by the scheduler that was previously /// attached to the context using either the CurrentScheduler::Create or Scheduler::Attach method. /// /// /// The Detach method implicitly removes a reference count from the scheduler. /// If there is no scheduler attached to the calling context, calling this method will result in a /// scheduler_not_attached exception being thrown. /// Calling this method from a context that is internal to and managed by a scheduler, or a context that was attached using /// a method other than the Scheduler::Attach or /// CurrentScheduler::Create methods, will result in an improper_scheduler_detach /// exception being thrown. /// /// /// /**/ _CRTIMP static void __cdecl Detach(); /// /// Causes the Windows event handle passed in the parameter to be signaled when the scheduler associated with /// the current context shuts down and destroys itself. At the time the event is signaled, all work that had been scheduled to the /// scheduler is complete. Multiple shutdown events can be registered through this method. /// /// /// A handle to a Windows event object which will be signaled by the runtime when the scheduler associated with the current context /// shuts down and destroys itself. /// /// /// If there is no scheduler attached to the calling context, calling this method will result in a /// scheduler_not_attached exception being thrown. /// /**/ _CRTIMP static void __cdecl RegisterShutdownEvent(HANDLE _ShutdownEvent); /// /// Creates a new schedule group within the scheduler associated with the calling context. The version that takes the parameter /// causes tasks within the newly created schedule group to be biased towards executing at the location /// specified by that parameter. /// /// /// A pointer to the newly created schedule group. This ScheduleGroup object has an initial reference count placed on it. /// /// /// This method will result in the process' default scheduler being created and/or attached to the calling context if there is no /// scheduler currently associated with the calling context. /// You must invoke the Release method on a schedule group when you are /// done scheduling work to it. The scheduler will destroy the schedule group when all work queued to it has completed. /// Note that if you explicitly created this scheduler, you must release all references to schedule groups within it, before /// you release your reference on the scheduler, by detaching the current context from it. /// /// /// /// /// /**/ _CRTIMP static ScheduleGroup * __cdecl CreateScheduleGroup(); /// /// Creates a new schedule group within the scheduler associated with the calling context. 
The version that takes the parameter /// causes tasks within the newly created schedule group to be biased towards executing at the location /// specified by that parameter. /// /// /// A reference to a location where the tasks within the schedule group will be biased towards executing at. /// /// /// A pointer to the newly created schedule group. This ScheduleGroup object has an initial reference count placed on it. /// /// /// This method will result in the process' default scheduler being created and/or attached to the calling context if there is no /// scheduler currently associated with the calling context. /// You must invoke the Release method on a schedule group when you are /// done scheduling work to it. The scheduler will destroy the schedule group when all work queued to it has completed. /// Note that if you explicitly created this scheduler, you must release all references to schedule groups within it, before /// you release your reference on the scheduler, by detaching the current context from it. /// /// /// /// /// /**/ _CRTIMP static ScheduleGroup * __cdecl CreateScheduleGroup(location& _Placement); /// /// Schedules a light-weight task within the scheduler associated with the calling context. The light-weight task will be placed /// in a schedule group determined by the runtime. The version that takes the parameter causes the task /// to be biased towards executing at the specified location. /// /// /// A pointer to the function to execute to perform the body of the light-weight task. /// /// /// A void pointer to the data that will be passed as a parameter to the body of the task. /// /// /// This method will result in the process' default scheduler being created and/or attached to the calling context if there is no /// scheduler currently associated with the calling context. /// /// /// /// /**/ _CRTIMP static void __cdecl ScheduleTask(TaskProc _Proc, _Inout_opt_ void * _Data); /// /// Schedules a light-weight task within the scheduler associated with the calling context. The light-weight task will be placed /// in a schedule group determined by the runtime. The version that takes the parameter causes the task /// to be biased towards executing at the specified location. /// /// /// A pointer to the function to execute to perform the body of the light-weight task. /// /// /// A void pointer to the data that will be passed as a parameter to the body of the task. /// /// /// A reference to a location where the light-weight task will be biased towards executing at. /// /// /// This method will result in the process' default scheduler being created and/or attached to the calling context if there is no /// scheduler currently associated with the calling context. /// /// /// /// /**/ _CRTIMP static void __cdecl ScheduleTask(TaskProc _Proc, _Inout_opt_ void * _Data, location& _Placement); /// /// Determines whether a given location is available on the current scheduler. /// /// /// A reference to the location to query the current scheduler about. /// /// /// An indication of whether or not the location specified by the argument is available on the current /// scheduler. /// /// /// This method will not result in scheduler attachment if the calling context is not already associated with a scheduler. /// Note that the return value is an instantaneous sampling of whether the given location is available. In the presence of multiple /// schedulers, dynamic resource management can add or take away resources from schedulers at any point. 
Should this happen, the given /// location can change availability. /// /**/ _CRTIMP static bool __cdecl IsAvailableLocation(const location& _Placement); }; /// /// Represents an abstraction for a Concurrency Runtime scheduler. /// /// /// The Concurrency Runtime scheduler uses execution contexts, which map to the operating system execution contexts, such as a thread, /// to execute the work queued to it by your application. At any time, the concurrency level of a scheduler is equal to the number of virtual processors /// granted to it by the Resource Manager. A virtual processor is an abstraction for a processing resource and maps to a hardware thread on the /// underlying system. Only a single scheduler context can execute on a virtual processor at a given time. /// The Concurrency Runtime will create a default scheduler per process to execute parallel work. In addition, you can create your own scheduler /// instances and manipulate them using this class. /// /// /// /// /**/ class Scheduler { protected: /// /// An object of the Scheduler class can only be created using factory methods, or implicitly. /// /// /// The process' default scheduler is created implicitly when you utilize many of the runtime functions which require a scheduler /// to be attached to the calling context. Methods within the CurrentScheduler class and features of the PPL and agents layers /// typically perform implicit attachment. /// You can also create a scheduler explicitly through either the CurrentScheduler::Create method or the Scheduler::Create /// method. /// /// /// /// /**/ Scheduler() {} /// /// An object of the Scheduler class is implicitly destroyed when all external references to it cease to exist. /// /**/ virtual ~Scheduler() {} public: /// /// Creates a new scheduler whose behavior is described by the parameter, places an initial reference on /// the scheduler, and returns a pointer to it. /// /// /// The scheduler policy that describes behavior of the newly created scheduler. /// /// /// A pointer to a newly created scheduler. This Scheduler object has an initial reference count placed on it. /// /// /// After a scheduler is created with the Create method, you must call the Release method at some point /// in the future in order to remove the initial reference count and allow the scheduler to shut down. /// A scheduler created with this method is not attached to the calling context. It can be attached to a context using the /// Attach method. /// This method can throw a variety of exceptions, including /// scheduler_resource_allocation_error and invalid_scheduler_policy_value. /// /// /// /// /// /// /**/ _CRTIMP static Scheduler * __cdecl Create(const SchedulerPolicy& _Policy); /// /// Returns a unique identifier for the scheduler. /// /// /// A unique identifier for the scheduler. /// /**/ virtual unsigned int Id() const =0; /// /// Returns the current number of virtual processors for the scheduler. /// /// /// The current number of virtual processors for the scheduler. /// The return value from this method is an instantaneous sampling of the number of virtual processors for the scheduler. /// This value can be stale the moment it is returned. /// /**/ virtual unsigned int GetNumberOfVirtualProcessors() const =0; /// /// Returns a copy of the policy that the scheduler was created with. /// /// /// A copy of the policy that the scheduler was created with. /// /// /// /// /**/ virtual SchedulerPolicy GetPolicy() const =0; /// /// Increments the scheduler reference count.
/// /// /// The newly incremented reference count. /// /// /// This is typically used to manage the lifetime of the scheduler for composition. When the reference count of a scheduler /// falls to zero, the scheduler will shut down and destruct itself after all work on the scheduler has completed. /// The method will throw an improper_scheduler_reference exception if the reference /// count prior to calling the Reference method was zero and the call is made from a context that is not owned by the scheduler. /// /// /// /**/ virtual unsigned int Reference() =0 ; /// /// Decrements the scheduler reference count. /// /// /// The newly decremented reference count. /// /// /// This is typically used to manage the lifetime of the scheduler for composition. When the reference count of a scheduler /// falls to zero, the scheduler will shut down and destruct itself after all work on the scheduler has completed. /// /// /// /**/ virtual unsigned int Release() =0; /// /// Causes the Windows event handle passed in the parameter to be signaled when the scheduler /// shuts down and destroys itself. At the time the event is signaled, all work that had been scheduled to the /// scheduler is complete. Multiple shutdown events can be registered through this method. /// /// /// A handle to a Windows event object which will be signaled by the runtime when the scheduler shuts down and destroys itself. /// /**/ virtual void RegisterShutdownEvent(HANDLE _Event) =0; /// /// Attaches the scheduler to the calling context. After this method returns, the calling context is managed by the scheduler and /// the scheduler becomes the current scheduler. /// /// /// Attaching a scheduler implicitly places a reference on the scheduler. /// At some point in the future, you must call the CurrentScheduler::Detach /// method in order to allow the scheduler to shut down. /// If this method is called from a context that is already attached to a different scheduler, the existing scheduler is remembered /// as the previous scheduler, and the newly created scheduler becomes the current scheduler. When you call the CurrentScheduler::Detach /// method at a later point, the previous scheduler is restored as the current scheduler. /// This method will throw an improper_scheduler_attach exception if this scheduler /// is the current scheduler of the calling context. /// /// /**/ virtual void Attach() =0; /// /// Allows a user defined policy to be used to create the default scheduler. This method can be called only when no default /// scheduler exists within the process. After a default policy has been set, it remains in effect until the next valid call /// to either the SetDefaultSchedulerPolicy or the ResetDefaultSchedulerPolicy /// method. /// /// /// The policy to be set as the default scheduler policy. /// /// /// If the SetDefaultSchedulerPolicy method is called when a default scheduler already exists within the process, the runtime /// will throw a default_scheduler_exists exception. /// /// /// /// /// /**/ _CRTIMP static void __cdecl SetDefaultSchedulerPolicy(const SchedulerPolicy& _Policy); /// /// Resets the default scheduler policy to the runtime default. The next time a default scheduler is created, it will use the /// runtime default policy settings. /// /// /// This method can be called while a default scheduler exists within the process. It will not affect the policy of the existing /// default scheduler. 
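/// For illustration (a sketch, not part of the original header), setting and
/// resetting the default policy might look like the following, provided no
/// default scheduler exists yet within the process:
///
///     Concurrency::SchedulerPolicy _Policy;
///     _Policy.SetConcurrencyLimits(2, 8);
///     Concurrency::Scheduler::SetDefaultSchedulerPolicy(_Policy);
///     // ... a default scheduler created after this point uses _Policy ...
///     Concurrency::Scheduler::ResetDefaultSchedulerPolicy();
///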
However, if the default scheduler were to shut down, and a new default were to be created at a later /// point, the new scheduler would use the runtime default policy settings. /// /// /// /**/ _CRTIMP static void __cdecl ResetDefaultSchedulerPolicy(); /// /// Creates a new schedule group within the scheduler. The version that takes the parameter causes tasks /// within the newly created schedule group to be biased towards executing at the location specified by that parameter. /// /// /// A pointer to the newly created schedule group. This ScheduleGroup object has an initial reference count placed on it. /// /// /// You must invoke the Release method on a schedule group when you are /// done scheduling work to it. The scheduler will destroy the schedule group when all work queued to it has completed. /// Note that if you explicitly created this scheduler, you must release all references to schedule groups within it, before /// you release your references on the scheduler. /// /// /// /// /// /**/ virtual ScheduleGroup * CreateScheduleGroup() =0; /// /// Creates a new schedule group within the scheduler. The version that takes the parameter causes tasks /// within the newly created schedule group to be biased towards executing at the location specified by that parameter. /// /// /// A reference to a location where the tasks within the schedule group will be biased towards executing at. /// /// /// A pointer to the newly created schedule group. This ScheduleGroup object has an initial reference count placed on it. /// /// /// You must invoke the Release method on a schedule group when you are /// done scheduling work to it. The scheduler will destroy the schedule group when all work queued to it has completed. /// Note that if you explicitly created this scheduler, you must release all references to schedule groups within it, before /// you release your references on the scheduler. /// /// /// /// /// /**/ virtual ScheduleGroup * CreateScheduleGroup(location& _Placement) =0; /// /// Schedules a light-weight task within the scheduler. The light-weight task will be placed in a schedule group determined by the runtime. /// The version that takes the parameter causes the task to be biased towards executing at the specified location. /// /// /// A pointer to the function to execute to perform the body of the light-weight task. /// /// /// A void pointer to the data that will be passed as a parameter to the body of the task. /// /// /// /// /**/ virtual void ScheduleTask(TaskProc _Proc, _Inout_opt_ void * _Data) =0; /// /// Schedules a light-weight task within the scheduler. The light-weight task will be placed in a schedule group determined by the runtime. /// The version that takes the parameter causes the task to be biased towards executing at the specified location. /// /// /// A pointer to the function to execute to perform the body of the light-weight task. /// /// /// A void pointer to the data that will be passed as a parameter to the body of the task. /// /// /// A reference to a location where the light-weight task will be biased towards executing at. /// /// /// /// /**/ virtual void ScheduleTask(TaskProc _Proc, _Inout_opt_ void * _Data, location& _Placement) =0; /// /// Determines whether a given location is available on the scheduler. /// /// /// A reference to the location to query the scheduler about. /// /// /// An indication of whether or not the location specified by the argument is available on the scheduler.
/// /// /// Note that the return value is an instantaneous sampling of whether the given location is available. In the presence of multiple /// schedulers, dynamic resource management can add or take away resources from schedulers at any point. Should this happen, the given /// location can change availability. /// /**/ virtual bool IsAvailableLocation(const location& _Placement) const =0; }; /// /// Represents an abstraction for an execution context. /// /// /// The Concurrency Runtime scheduler (see Scheduler) uses execution contexts to execute the work queued /// to it by your application. A Win32 thread is an example of an execution context on a Windows /// operating system. /// At any time, the concurrency level of a scheduler is equal to the number of virtual processors granted to it by the Resource Manager. /// A virtual processor is an abstraction for a processing resource and maps to a hardware thread on the underlying system. Only a single scheduler /// context can execute on a virtual processor at a given time. /// The scheduler is cooperative in nature and an executing context can yield its virtual processor to a different context at any time if /// it wishes to enter a wait state. When its wait is satisfied, it cannot resume until an available virtual processor from the scheduler begins /// executing it. /// /// /// /**/ class Context { public: /// /// Returns an identifier for the context that is unique within the scheduler to which the context belongs. /// /// /// An identifier for the context that is unique within the scheduler to which the context belongs. /// /**/ virtual unsigned int GetId() const =0; /// /// Returns an identifier for the virtual processor that the context is currently executing on. /// /// /// If the context is currently executing on a virtual processor, an identifier for the virtual processor that the context /// is currently executing on; otherwise, the value -1. /// /// /// The return value from this method is an instantaneous sampling of the virtual processor that the context is executing /// on. This value can be stale the moment it is returned and cannot be relied upon. Typically, this method is used /// for debugging or tracing purposes only. /// /**/ virtual unsigned int GetVirtualProcessorId() const =0; /// /// Returns an identifier for the schedule group that the context is currently working on. /// /// /// An identifier for the schedule group the context is currently working on. /// /// /// The return value from this method is an instantaneous sampling of the schedule group that the context is executing /// on. If this method is called on a context other than the current context, the value can be stale the moment it is /// returned and cannot be relied upon. Typically, this method is used for debugging or tracing purposes only. /// /// /**/ virtual unsigned int GetScheduleGroupId() const =0; /// /// Returns an identifier for the current context that is unique within the scheduler to which the current context belongs. /// /// /// If the current context is attached to a scheduler, an identifier for the current context that is unique within the scheduler /// to which the current context belongs; otherwise, the value -1. /// /**/ _CRTIMP static unsigned int __cdecl Id(); /// /// Returns an identifier for the virtual processor that the current context is executing on. /// /// /// If the current context is attached to a scheduler, an identifier for the virtual processor that the current context is /// executing on; otherwise, the value -1.
/// /// /// The return value from this method is an instantaneous sampling of the virtual processor that the current context is executing /// on. This value can be stale the moment it is returned and cannot be relied upon. Typically, this method is used /// for debugging or tracing purposes only. /// /**/ _CRTIMP static unsigned int __cdecl VirtualProcessorId(); /// /// Returns an identifier for the schedule group that the current context is working on. /// /// /// If the current context is attached to a scheduler and working on a schedule group, an identifier for the schedule group that the /// current context is working on; otherwise, the value -1. /// /// /**/ _CRTIMP static unsigned int __cdecl ScheduleGroupId(); /// /// Blocks the current context. /// /// /// This method will result in the process' default scheduler being created and/or attached to the calling context if there is no /// scheduler currently associated with the calling context. /// If the calling context is running on a virtual processor, the virtual processor will find another runnable context to /// execute or can potentially create a new one. /// Whether the Block method has already been called or is about to be called, you must pair it with a call to the /// Unblock method from another execution context in order for the blocked context to run again. Be aware that there is a critical period between /// the point where your code publishes its context for another thread to be able to call the Unblock method and the point /// where the actual method call to Block is made. During this period, you must not call any method which /// can in turn block and unblock for its own reasons (for example, acquiring a lock). Calls to the Block and Unblock method /// do not track the reason for the blocking and unblocking. Only one object should have ownership of a Block-Unblock /// pair. /// This method can throw a variety of exceptions, including /// scheduler_resource_allocation_error. /// /// /// /**/ _CRTIMP static void __cdecl Block(); /// /// Unblocks the context and causes it to become runnable. /// /// /// It is perfectly legal for a call to the Unblock method to come before a corresponding call to the /// Block method. As long as calls to the Block and Unblock methods are properly paired, the runtime properly handles the natural race of /// either ordering. An Unblock call coming before a Block call simply negates the effect of the Block call. /// There are several exceptions which can be thrown from this method. If a context attempts to call the Unblock method on /// itself, a context_self_unblock exception will be thrown. If calls to Block and /// Unblock are not properly paired (for example, two calls to Unblock are made for a context which is currently running), a /// context_unblock_unbalanced exception will be thrown. /// /// Be aware that there is a critical period between the point where your code publishes its context for another thread to /// be able to call the Unblock method and the point where the actual method call to Block is made. During this period, /// you must not call any method which can in turn block and unblock for its own reasons (for example, acquiring a lock). /// Calls to the Block and Unblock method do not track the reason for the blocking and unblocking. Only one object should have /// ownership of a Block and Unblock pair. /// /// /// /**/ virtual void Unblock() =0; /// /// Determines whether or not the context is synchronously blocked.
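/// A minimal sketch of the Block/Unblock pairing described above (illustrative
/// only; _PublishForUnblock is a hypothetical hand-off of the context pointer,
/// and _PBlocked is the pointer as received on the unblocking side):
///
///     // On the context that is about to block:
///     Concurrency::Context * _PSelf = Concurrency::Context::CurrentContext();
///     _PublishForUnblock(_PSelf);
///     Concurrency::Context::Block();      // paired with the Unblock below
///
///     // On another context, using the pointer received from the hand-off:
///     _PBlocked->Unblock();               // legal even if it arrives before Block
///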
A context is considered to be synchronously /// blocked if it explicitly performed an action which led to blocking. /// /// /// Whether the context is synchronously blocked. /// /// /// A context is considered to be synchronously blocked if it explicitly performed an action which led to blocking. On the thread scheduler, /// this would indicate a direct call to the Context::Block method or a synchronization object which was built using the /// Context::Block method. /// The return value from this method is an instantaneous sample of whether the context is synchronously blocked. This value may /// be stale the moment it is returned and can only be used under very specific circumstances. /// /// /**/ virtual bool IsSynchronouslyBlocked() const =0; /// /// Yields execution so that another context can execute. If no other context is available to yield to, /// the method simply returns. /// /// /// This yield variant is intended for use within spin loops. /// This method will result in the process' default scheduler being created and/or attached to the calling context if there is no /// scheduler currently associated with the calling context. /// /**/ _CRTIMP static void __cdecl _SpinYield(); /// /// Yields execution so that another context can execute. If no other context is available to yield to, the scheduler /// can yield to another operating system thread. /// /// /// This method will result in the process' default scheduler being created and/or attached to the calling context if there is no /// scheduler currently associated with the calling context. /// /// /// /**/ _CRTIMP static void __cdecl Yield(); /// /// Returns an indication of whether the task collection which is currently executing inline on the current context /// is in the midst of an active cancellation (or will be shortly). /// /// /// If a scheduler is attached to the calling context and a task group is executing a task inline on that context, /// an indication of whether that task group is in the midst of an active cancellation (or will be shortly); otherwise, /// the value false. /// This method will not result in scheduler attachment if the calling context is not already associated with a scheduler. /// /**/ _CRTIMP static bool __cdecl IsCurrentTaskCollectionCanceling(); /// /// Returns a pointer to the current context. /// /// /// A pointer to the current context. /// /// /// This method will result in the process' default scheduler being created and/or attached to the calling context if there is no /// scheduler currently associated with the calling context. /// /**/ _CRTIMP static Context * __cdecl CurrentContext(); /// /// Injects an additional virtual processor into a scheduler for the duration of a block of code when invoked on a context executing /// on one of the virtual processors in that scheduler. /// /// /// If true, an indication that an extra virtual processor should be added for the duration of the oversubscription. /// If false, an indication that the oversubscription should end and the previously added virtual processor should be removed. /// /// /**/ _CRTIMP static void __cdecl Oversubscribe(bool _BeginOversubscription); protected: // // Privatize operator delete. The scheduler internally manages contexts. // template friend void Concurrency::details::_InternalDeleteHelper(_T * _PObject); virtual ~Context() {}; }; #endif /* _CRT_USE_WINAPI_FAMILY_DESKTOP_APP */ /// /// Value indicating that a wait timed out. 
/// /// /// /// /**/ const size_t COOPERATIVE_WAIT_TIMEOUT = SIZE_MAX; /// /// Value indicating that a wait should never time out. /// /// /// /// /**/ const unsigned int COOPERATIVE_TIMEOUT_INFINITE = (unsigned int)-1; /// /// A non-reentrant mutex which is explicitly aware of the Concurrency Runtime. /// /// /// For more information, see . /// /// /**/ class critical_section { public: /// /// Constructs a new critical section. /// /**/ _CRTIMP critical_section(); /// /// Destroys a critical section. /// /// /// It is expected that the lock is no longer held when the destructor runs. Allowing the critical section to destruct with the lock /// still held results in undefined behavior. /// /**/ _CRTIMP ~critical_section(); /// /// Acquires this critical section. /// /// /// It is often safer to utilize the scoped_lock construct to acquire and release /// a critical_section object in an exception safe way. /// If the lock is already held by the calling context, an improper_lock exception will be /// thrown. /// /// /// /**/ _CRTIMP void lock(); /// /// Tries to acquire the lock without blocking. /// /// /// If the lock was acquired, the value true; otherwise, the value false. /// /// /**/ _CRTIMP bool try_lock(); /// /// Tries to acquire the lock without blocking for a specific number of milliseconds. /// /// /// The number of milliseconds to wait before timing out. /// /// /// If the lock was acquired, the value true; otherwise, the value false. /// /// /**/ _CRTIMP bool try_lock_for(unsigned int _Timeout); /// /// Unlocks the critical section. /// /// /// /**/ _CRTIMP void unlock(); /// /// A reference to a critical_section object. /// /**/ typedef critical_section& native_handle_type; /// /// Returns a platform specific native handle, if one exists. /// /// /// A reference to the critical section. /// /// /// A critical_section object is not associated with a platform specific native handle for the Windows operating system. /// The method simply returns a reference to the object itself. /// /**/ _CRTIMP native_handle_type native_handle(); /// /// Guarantees that if any context holds the lock at the time the method is called, that context has released /// the lock before this method returns. /// /// /// If no context holds the lock at the instant this method is called, it returns instantly. /// /**/ void _Flush_current_owner(); /// /// Acquires this critical section given a specific node to lock. /// /// /// The node that needs to own the lock. /// /// /// An indication if the node being locked is external to the critical_section. /// /// /// If the lock is already held by the calling context, an improper_lock exception will be thrown. /// /**/ bool _Acquire_lock(void * _PLockingNode, bool _FHasExternalNode); /// /// An exception safe RAII wrapper for a critical_section object. /// /**/ class scoped_lock { public: /// /// Constructs a scoped_lock object and acquires the critical_section object passed in the /// parameter. If the critical section is held by another thread, this call will block. /// /// /// The critical section to lock. /// /// /**/ explicit _CRTIMP scoped_lock(critical_section& _Critical_section); /// /// Destroys a scoped_lock object and releases the critical section supplied in its constructor.
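/// An exception-safe usage sketch (illustrative only; the function and the
/// state it protects are hypothetical):
///
///     Concurrency::critical_section _Cs;
///
///     void _UpdateSharedState()
///     {
///         Concurrency::critical_section::scoped_lock _Guard(_Cs);
///         // ... mutate the state protected by _Cs ...
///     }   // _Cs is released here, even if an exception was thrown
///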
/// /// /**/ _CRTIMP ~scoped_lock(); private: critical_section& _M_critical_section; _CONCRT_BUFFER _M_node[(4 * sizeof(void *) + 2 * sizeof(unsigned int) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER)]; scoped_lock(const scoped_lock&); // no copy constructor scoped_lock const & operator=(const scoped_lock&); // no assignment operator }; private: /// /// The node allocated on the stack never really owns the lock because it would go out of scope and the insides would not be visible /// in unlock() where it could potentially need to unblock the next in the queue. Instead, its state is transferred to the internal /// node which is used as a scratch node. /// /// /// The node that needs to own the lock. /// /**/ void _Switch_to_active(void * _PLockingNode); _CONCRT_BUFFER _M_activeNode[(4 * sizeof(void *) + 2 * sizeof(unsigned int) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER)]; void * volatile _M_pHead; void * volatile _M_pTail; /// /// Hide copy constructor for a critical section /// /**/ critical_section(const critical_section&); /// /// Hide assignment operator for a critical section /// /**/ critical_section& operator=(const critical_section&); }; /// /// A writer-preference queue-based reader-writer lock with local only spinning. The lock grants first in - first out (FIFO) access to writers /// and starves readers under a continuous load of writers. /// /// /// For more information, see . /// /// /**/ class reader_writer_lock { public: /// /// Constructs a new reader_writer_lock object. /// /**/ _CRTIMP reader_writer_lock(); /// /// Destroys the reader_writer_lock object. /// /// /// It is expected that the lock is no longer held when the destructor runs. Allowing the reader writer lock to destruct with the lock /// still held results in undefined behavior. /// /**/ _CRTIMP ~reader_writer_lock(); /// /// Acquires the reader-writer lock as a writer. /// /// /// It is often safer to utilize the scoped_lock construct to acquire and release /// a reader_writer_lock object as a writer in an exception safe way. /// After a writer attempts to acquire the lock, any future readers will block until the writers have successfully acquired /// and released the lock. This lock is biased towards writers and can starve readers under a continuous load of writers. /// Writers are chained so that a writer exiting the lock releases the next writer in line. /// If the lock is already held by the calling context, an improper_lock exception will be /// thrown. /// /// /**/ _CRTIMP void lock(); /// /// Attempts to acquire the reader-writer lock as a writer without blocking. /// /// /// If the lock was acquired, the value true; otherwise, the value false. /// /// /**/ _CRTIMP bool try_lock(); /// /// Acquires the reader-writer lock as a reader. If there are writers, active readers have to wait until they are done. /// The reader simply registers an interest in the lock and waits for writers to release it. /// /// /// It is often safer to utilize the scoped_lock_read construct to acquire /// and release a reader_writer_lock object as a reader in an exception safe way. /// If there are writers waiting on the lock, the reader will wait until all writers in line have acquired /// and released the lock. This lock is biased towards writers and can starve readers under a continuous load of writers. /// /// /**/ _CRTIMP void lock_read(); /// /// Attempts to acquire the reader-writer lock as a reader without blocking. /// /// /// If the lock was acquired, the value true; otherwise, the value false. 
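/// For illustration (a sketch, not part of the original header), the scoped
/// wrappers pair reader and writer access; the functions and the protected
/// state are hypothetical:
///
///     Concurrency::reader_writer_lock _RwLock;
///
///     void _ReadState()
///     {
///         Concurrency::reader_writer_lock::scoped_lock_read _Guard(_RwLock);
///         // ... read the protected state ...
///     }
///
///     void _WriteState()
///     {
///         Concurrency::reader_writer_lock::scoped_lock _Guard(_RwLock);
///         // ... modify the protected state ...
///     }
///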
/// /// /**/ _CRTIMP bool try_lock_read(); /// /// Unlocks the reader-writer lock based on who locked it, reader or writer. /// /// /// If there are writers waiting on the lock, the release of the lock will always go to the next writer in FIFO /// order. This lock is biased towards writers and can starve readers under a continuous load of writers. /// /// /// /// /// /**/ _CRTIMP void unlock(); /// /// Acquires a write lock given a specific write node to lock. /// /// /// The node that needs to own the lock. /// /// /// An indication if the node being locked is external to the reader_writer_lock object. /// /// /// If the lock is already held by the calling context, an improper_lock exception will be /// thrown. /// /**/ void _Acquire_lock(void * _PLockingNode, bool _FHasExternalNode); /// /// An exception safe RAII wrapper that can be used to acquire reader_writer_lock lock objects as a writer. /// /**/ class scoped_lock { public: /// /// Constructs a scoped_lock object and acquires the reader_writer_lock object passed in the /// parameter as a writer. If the lock is held by another thread, this call will block. /// /// /// The reader_writer_lock object to acquire as a writer. /// /**/ explicit _CRTIMP scoped_lock(reader_writer_lock& _Reader_writer_lock); /// /// Destroys a scoped_lock object and releases the lock supplied in its constructor. /// /**/ _CRTIMP ~scoped_lock(); private: reader_writer_lock& _M_reader_writer_lock; _CONCRT_BUFFER _M_writerNode[(4 * sizeof(void *) + 2 * sizeof(unsigned int) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER)]; scoped_lock(const scoped_lock&); // no copy constructor scoped_lock const & operator=(const scoped_lock&); // no assignment operator }; /// /// An exception safe RAII wrapper that can be used to acquire reader_writer_lock lock objects as a reader. /// /**/ class scoped_lock_read { public: /// /// Constructs a scoped_lock_read object and acquires the reader_writer_lock object passed in the /// parameter as a reader. If the lock is held by another thread as a writer or there /// are pending writers, this call will block. /// /// /// The reader_writer_lock object to acquire as a reader. /// /**/ explicit _CRTIMP scoped_lock_read(reader_writer_lock& _Reader_writer_lock); /// /// Destroys a scoped_lock_read object and releases the lock supplied in its constructor. /// /**/ _CRTIMP ~scoped_lock_read(); private: reader_writer_lock& _M_reader_writer_lock; scoped_lock_read(const scoped_lock_read&); // no copy constructor scoped_lock_read const & operator=(const scoped_lock_read&); // no assignment operator }; private: /// /// Called for the first context in the writer queue. It sets the queue head and it tries to /// claim the lock if readers are not active. /// /// /// The first writer in the queue. /// /**/ bool _Set_next_writer(void * _PWriter); /// /// Called when writers are done with the lock, or when lock was free for claiming by /// the first reader coming in. If in the meantime there are more writers interested /// the list of readers is finalized and they are convoyed, while head of the list /// is reset to NULL. /// /// /// Pointer to the head of the reader list. /// /**/ void * _Get_reader_convoy(); /// /// Called from unlock() when a writer is holding the lock. Writer unblocks the next writer in the list /// and is being retired. If there are no more writers, but there are readers interested, then readers /// are unblocked. /// /**/ void _Unlock_writer(); /// /// Called from unlock() when a reader is holding the lock.
The reader count is decremented, and if this /// is the last reader, it checks whether there are interested writers that need to be unblocked. /// /**/ void _Unlock_reader(); /// /// When the last writer leaves the lock, it needs to reset the tail to NULL so that the next incoming /// writer knows to try to grab the lock. If the CAS to NULL fails, then some other writer /// managed to grab the tail before the reset, so this writer needs to wait until the link to /// the next writer is complete before trying to release the next writer. /// /// /// Last writer in the queue. /// /**/ void _Remove_last_writer(void * _PWriter); /// /// The writer node allocated on the stack never really owns the lock because it would go out of scope and the insides would not be /// visible in unlock() where it could potentially need to unblock the next writer in the queue. Instead, its state is transferred to the internal /// writer node which is used as a scratch node. /// /// /// The writer that needs to own the lock. /// /**/ void _Switch_to_active(void * _PWriter); _CONCRT_BUFFER _M_activeWriter[(4 * sizeof(void *) + 2 * sizeof(unsigned int) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER)]; void * _M_pReaderHead; void * _M_pWriterHead; void * _M_pWriterTail; volatile long _M_lockState; /// /// Hide copy constructor for a reader_writer_lock /// /**/ reader_writer_lock (const reader_writer_lock& _Lock); /// /// Hide assignment operator for a reader_writer_lock /// /**/ reader_writer_lock& operator=(const reader_writer_lock& _Lock); }; /// /// A manual-reset event which is explicitly aware of the Concurrency Runtime. /// /// /// For more information, see . /// /**/ class event { public: /// /// Constructs a new event. /// /**/ _CRTIMP event(); /// /// Destroys an event. /// /// /// It is expected that there are no threads waiting on the event when the destructor runs. Allowing the event to destruct with threads /// still waiting on it results in undefined behavior. /// /**/ _CRTIMP ~event(); /// /// Waits for the event to become signaled. /// /// /// Indicates the number of milliseconds before the wait times out. The value COOPERATIVE_TIMEOUT_INFINITE signifies that /// there is no timeout. /// /// /// If the wait was satisfied, the value 0 is returned; otherwise, the value COOPERATIVE_WAIT_TIMEOUT to indicate that /// the wait timed out without the event becoming signaled. /// /// /// COOPERATIVE_TIMEOUT_INFINITE /// COOPERATIVE_WAIT_TIMEOUT /**/ _CRTIMP size_t wait(unsigned int _Timeout = COOPERATIVE_TIMEOUT_INFINITE); /// /// Signals the event. /// /// /// Signaling the event can cause an arbitrary number of contexts waiting on the event to become runnable. /// /// /// /**/ _CRTIMP void set(); /// /// Resets the event to a non-signaled state. /// /// /// /**/ _CRTIMP void reset(); /// /// Waits for multiple events to become signaled. /// /// /// An array of events to wait on. The number of events within the array is indicated by the parameter. /// /// /// The count of events within the array supplied in the parameter. /// /// /// If set to the value true, the parameter specifies that all events within the array supplied in the /// parameter must become signaled in order to satisfy the wait. If set to the value false, it specifies that any event within the /// array supplied in the parameter becoming signaled will satisfy the wait. /// /// /// Indicates the number of milliseconds before the wait times out. The value COOPERATIVE_TIMEOUT_INFINITE signifies that /// there is no timeout.
/// /// /// If the wait was satisfied, the index within the array supplied in the parameter which satisfied /// the wait condition; otherwise, the value COOPERATIVE_WAIT_TIMEOUT to indicate that the wait timed out without the condition /// being satisfied. /// /// /// If the parameter is set to the value true to indicate that all events must become signaled to satisfy /// the wait, the index returned by the function carries no special significance other than the fact that it is not the value /// COOPERATIVE_WAIT_TIMEOUT. /// /// /// COOPERATIVE_TIMEOUT_INFINITE /// COOPERATIVE_WAIT_TIMEOUT /**/ _CRTIMP static size_t __cdecl wait_for_multiple(_In_reads_(_Count) event ** _PPEvents, size_t _Count, bool _FWaitAll, unsigned int _Timeout = COOPERATIVE_TIMEOUT_INFINITE); /// /// Value indicating that a wait should never time out. /// static const unsigned int timeout_infinite = COOPERATIVE_TIMEOUT_INFINITE; private: // Prevent bad usage of copy-constructor and copy-assignment event(const event& _Event); event& operator=(const event& _Event); void * volatile _M_pWaitChain; void * _M_pResetChain; Concurrency::critical_section _M_lock; }; namespace details { /// /// A _Condition_variable which is explicitly aware of the Concurrency Runtime. /// /**/ class _Condition_variable { public: /// /// Constructs a new _Condition_variable. /// /**/ _CRTIMP _Condition_variable(); /// /// Destroys a _Condition_variable. /// /**/ _CRTIMP ~_Condition_variable(); /// /// Waits for the _Condition_variable to become signaled. The lock argument passed in is unlocked by the _Condition_variable /// and relocked before the wait returns. /// /// /// The critical_section to unlock before waiting and relock before the wait returns. /// /// /**/ _CRTIMP void wait(Concurrency::critical_section& _Lck); /// /// Waits for the _Condition_variable to become signaled. The lock argument passed in is unlocked by the _Condition_variable /// and relocked before the wait returns. /// /// /// The critical_section to unlock before waiting and relock before the wait returns. /// /// /// A timeout, in milliseconds, specifying how long to wait. /// /// /**/ _CRTIMP bool wait_for(Concurrency::critical_section& _Lck, unsigned int _Timeout = COOPERATIVE_TIMEOUT_INFINITE); /// /// Notifies a single waiter of the _Condition_variable. /// /**/ _CRTIMP void notify_one(); /// /// Notifies all waiters of the _Condition_variable. /// /**/ _CRTIMP void notify_all(); private: // Prevent bad usage of copy-constructor and copy-assignment _Condition_variable(const _Condition_variable& _Event); _Condition_variable& operator=(const _Condition_variable& _Event); void * volatile _M_pWaitChain; Concurrency::critical_section _M_lock; }; // Base class for all reference counted objects class _RefCounterBase { public: virtual ~_RefCounterBase() { _CONCRT_ASSERT(_M_refCount == 0); }
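// Illustrative sketch (editorial example, not part of the original header): a minimal derived
// reference-counted type. _MyCounted is hypothetical; an object starts with one reference and
// _Release() invokes _Destroy() (which defaults to 'delete this') when the count reaches zero.
//
//     class _MyCounted : public Concurrency::details::_RefCounterBase
//     {
//     };
//
//     _MyCounted * _PObj = new _MyCounted(); // reference count == 1
//     _PObj->_Reference();                   // reference count == 2
//     _PObj->_Release();                     // reference count == 1
//     _PObj->_Release();                     // reference count == 0 -> destroyed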
// Acquires a reference. // Returns the new reference count. long _Reference() { long _Refcount = _InterlockedIncrement(&_M_refCount); // A 0 to 1 transition is illegal _CONCRT_ASSERT(_Refcount > 1); return _Refcount; } // Releases the reference // Returns the new reference count long _Release() { long _Refcount = _InterlockedDecrement(&_M_refCount); _CONCRT_ASSERT(_Refcount >= 0); if (_Refcount == 0) { _Destroy(); } return _Refcount; } protected: // Allow derived classes to provide their own deleter virtual void _Destroy() { delete this; } // Only allow instantiation through derived class _RefCounterBase(long _InitialCount = 1) : _M_refCount(_InitialCount) { _CONCRT_ASSERT(_M_refCount > 0); } // Reference count volatile long _M_refCount; }; class _CancellationTokenState; class _CancellationTokenRegistration; // This is a non-reentrant lock wrapper around the ConcRT critical-section // and is used by agents/messaging class _NonReentrantPPLLock { public: // Constructor for _NonReentrantPPLLock _CRTIMP _NonReentrantPPLLock(); // Acquire the lock, spin if necessary _CRTIMP void _Acquire(void * _Lock_node); // Releases the lock _CRTIMP void _Release(); // An exception-safe RAII wrapper. class _Scoped_lock { public: // Constructs a holder and acquires the specified lock _CRTIMP explicit _Scoped_lock(_NonReentrantPPLLock& _Lock); // Destroys the holder and releases the lock _CRTIMP ~_Scoped_lock(); private: _NonReentrantPPLLock& _M_lock; _CONCRT_BUFFER _M_lockNode[(4 * sizeof(void *) + 2 * sizeof(unsigned int) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER)]; _Scoped_lock(const _Scoped_lock&); // no copy constructor _Scoped_lock const & operator=(const _Scoped_lock&); // no assignment operator }; private: // critical_section Concurrency::critical_section _M_criticalSection; }; // This is a reentrant lock implemented using the ConcRT critical section class _ReentrantPPLLock { public: // Constructor for _ReentrantPPLLock _CRTIMP _ReentrantPPLLock(); // Acquire the lock, spin if necessary _CRTIMP void _Acquire(void * _Lock_node); // Releases the lock _CRTIMP void _Release(); // An exception-safe RAII wrapper. class _Scoped_lock { public: // Constructs a holder and acquires the specified lock _CRTIMP explicit _Scoped_lock(_ReentrantPPLLock& _Lock); // Destroys the holder and releases the lock _CRTIMP ~_Scoped_lock(); private: _ReentrantPPLLock& _M_lock; _CONCRT_BUFFER _M_lockNode[(4 * sizeof(void *) + 2 * sizeof(unsigned int) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER)]; _Scoped_lock(const _Scoped_lock&); // no copy constructor _Scoped_lock const & operator=(const _Scoped_lock&); // no assignment operator }; private: // critical_section Concurrency::critical_section _M_criticalSection; // The number of times this lock has been taken recursively long _M_recursionCount; // The current owner of the lock volatile long _M_owner; }; struct _Chore { protected: // Constructors. explicit _Chore(TaskProc _PFunction) : m_pFunction(_PFunction) { } _Chore() { } virtual ~_Chore() { } public: // The function which invokes the work of the chore. TaskProc m_pFunction; }; // _UnrealizedChore represents an unrealized chore -- a unit of work that is scheduled in a work // stealing capacity. Some higher level construct (language or library) will map atop this to provide // a usable abstraction to clients. class _UnrealizedChore : public _Chore, public _AllocBase { public: // Constructor for an unrealized chore. _UnrealizedChore() : _M_pTaskCollection(NULL) { } virtual ~_UnrealizedChore() {} // Method that executes the unrealized chore.
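// (The call is dispatched through _M_pChoreFunction; see the chore wrappers and steal-preparation helpers declared below.)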
void _Invoke() { _M_pChoreFunction(this); } // Sets the attachment state of the chore at the time of stealing. void _SetDetached(bool _FDetached); // Returns the owning collection of the chore. Concurrency::details::_TaskCollectionBase* _OwningCollection() const { return _M_pTaskCollection; } // Set flag that indicates whether the scheduler owns the lifetime of the object and is responsible for freeing it. // The flag is ignored by _StructuredTaskCollection void _SetRuntimeOwnsLifetime(bool fValue) { _M_fRuntimeOwnsLifetime = fValue; } // Returns the flag that indicates whether the scheduler owns the lifetime of the object and is responsible for freeing it. // The flag is ignored by _StructuredTaskCollection bool _GetRuntimeOwnsLifetime() const { return _M_fRuntimeOwnsLifetime; } // Allocator to be used when runtime owns lifetime. template <typename _ChoreType, typename _Function> static _ChoreType * _InternalAlloc(const _Function& _Func) { // This is always invoked from the PPL layer by the user and can never be attached to the default scheduler. Therefore '_concrt_new' is not required here. _ChoreType * _Chore = new _ChoreType(_Func); _Chore->_M_fRuntimeOwnsLifetime = true; return _Chore; } // Internal helper routine to prepare for execution as a stolen chore. void _PrepareSteal(ContextBase *_PContext); protected: // Invocation bridge between the _UnrealizedChore and PPL. template <typename _ChoreType> static void __cdecl _InvokeBridge(void * _PContext) { auto _PChore = static_cast<_ChoreType *>(_PContext); (*_PChore)(); } // Place associated task collection in a safe state. _CRTIMP void _CheckTaskCollection(); private: friend class _StructuredTaskCollection; friend class _TaskCollection; typedef void (__cdecl * CHOREFUNC)(_UnrealizedChore * _PChore); // The collection of work to which this particular chore belongs. Concurrency::details::_TaskCollectionBase * _M_pTaskCollection; // Internal invocation inside the scheduler. CHOREFUNC _M_pChoreFunction; // Indicates whether the scheduler owns the lifetime of the object and is responsible for freeing it. // This flag is ignored by _StructuredTaskCollection bool _M_fRuntimeOwnsLifetime; // An indication of whether the chore (if stolen) was detached. bool _M_fDetached; // Helper routines void _PrepareStealStructured(ContextBase *_PContext); void _PrepareStealUnstructured(ContextBase *_PContext); // The internal wrapper around invocation of stolen structured chores. __declspec(noinline) static void __cdecl _StructuredChoreWrapper(_UnrealizedChore * _PChore); // The internal wrapper around invocation of stolen unstructured chores. __declspec(noinline) static void __cdecl _UnstructuredChoreWrapper(_UnrealizedChore * _PChore); // To free memory allocated with _InternalAlloc. static void _InternalFree(_UnrealizedChore * _PChore); // Cancellation via token to a stolen chore static void __cdecl _CancelViaToken(::Concurrency::details::ContextBase *pContext); }; // Represents possible results of waiting on a task collection. enum _TaskCollectionStatus { _NotComplete, _Completed, _Canceled }; // _TaskCollectionBase represents an abstract set of work and provides shared waiting semantics for stolen work. class _TaskCollectionBase { public: // Constructs a new task collection. _TaskCollectionBase() : _M_pTokenState(NULL), _M_completedStolenChores(_CollectionNotInitialized), _M_unpoppedChores(0), _M_pException(NULL), _M_inliningDepth(_S_notInlined) { } // Constructs a new task collection based on a given cancellation token.
_TaskCollectionBase(_CancellationTokenState *_PTokenState) : _M_pTokenState(_PTokenState), _M_completedStolenChores(_CollectionNotInitialized), _M_unpoppedChores(0), _M_pException(NULL), _M_inliningDepth(_S_notInlined) { } // Returns the owning context of the task collection. void * _OwningContext() const { return _M_pOwningContext; } // Returns the inlining depth. int _InliningDepth() const { return _M_inliningDepth; } // Tells whether the task collection is inlined -- some thread somewhere is currently invoking wait on it. bool _IsCurrentlyInlined() const { return (_M_inliningDepth != _S_notInlined); } // Returns whether this is a structured collection or not. bool _IsStructured() { return (_M_inlineFlags & _S_structured) != 0; } // Returns the token state associated with this task collection _CancellationTokenState *_GetTokenState(_CancellationTokenRegistration **_PRegistration = NULL); protected: friend class Concurrency::details::_UnrealizedChore; friend class Concurrency::details::ContextBase; enum _TaskCollectionBaseState { _CollectionNotInitialized = LONG_MIN, _CollectionInitializationInProgress = LONG_MIN+1, _CollectionInitialized = 0 }; // Returns the exception portion of _M_pException. std::exception_ptr * _Exception() const { return (std::exception_ptr *) ((size_t)_M_pException & ~_S_cancelBitsMask); } // Indicates whether or not this task collection has an abnormal exit. bool _IsAbnormalExit() const { return _M_pException != NULL; } // Returns the cancel flags. size_t _CancelState() const { return (size_t) _M_pException & _S_cancelBitsMask; } // Returns whether or not the collection is marked for cancellation. bool _IsMarkedForCancellation() const { return (_CancelState() & _S_cancelBitsMask) != 0; } // Returns whether an inline cancellation was performed. bool _PerformedInlineCancel() const { _CONCRT_ASSERT(_CancelState() != _S_cancelStarted); return _CancelState() == _S_cancelShotdownOwner; } // Returns whether a pending (deferred) cancellation was performed. bool _PerformedPendingCancel() const { _CONCRT_ASSERT(_CancelState() != _S_cancelStarted); return _CancelState() == _S_cancelDeferredShootdownOwner; } // Returns the parent collection safely. _TaskCollectionBase *_SafeGetParent() { return ((_M_inliningDepth != _S_notInlined) ? _M_pParent : NULL); } // Called in order to determine whether this task collection will interrupt for a pending cancellation at or above it. bool _WillInterruptForPendingCancel(); // Called when an exception is raised on a chore on a given task collection; this makes a determination of what to do with the exception // and saves it for potential transport back to the thread performing a join on a chore collection. void _RaisedException(); // Potentially rethrows the exception which was set with _RaisedException. The caller is responsible for ensuring that _RaisedException // was called prior to calling this and that _M_pException has progressed beyond the _S_nonNull state. void _RethrowException(); // Marks the collection for cancellation and returns whether the collection was marked. bool _MarkCancellation(); // Finishes the cancellation state (changing from _S_cancelStarted to one of the other states). Note that only the // thread which successfully marked cancellation can call this. void _FinishCancelState(size_t _NewCancelState); // Called when a cancellation is raised on a chore on a given task collection. This makes a determination of what to do with the exception // and saves it for potential transport back to the thread performing a join on a chore collection.
Note that every other exception // has precedence over a cancellation. void _RaisedCancel(); // Tracks the parent collection. (For example, a task collection B created during execution of a chore C on task collection A is // considered a child of A). _TaskCollectionBase * _M_pParent; // Tracks the inlining depth of this collection for cancellation purposes and packs a series of definition bits. int _M_inliningDepth : 28; int _M_inlineFlags : 4; // The cancellation token for the task collection. _CancellationTokenState *_M_pTokenState; // The context which owns the task collection. This is the context where the collection is created. void * _M_pOwningContext; // The number of unpopped chores associated with the task collection (set by the derived // class during chore association). long _M_unpoppedChores; // The number of stolen chores executed so far. volatile long _M_completedStolenChores; // The stored exception which has been marshaled from the thread a stolen chore ran upon to the thread that is waiting on the // task collection. // // The lower two bits of _M_pException are utilized for the cancellation state machine. The upper 30 are the exception pointer. This implies // that the exception pointer must be 4-byte aligned. Because of intermediate states, the exception pointer cannot be between 0x8 and 0xF. The heap should // not be allocating such... // std::exception_ptr * _M_pException; // Cancellation states static const size_t _S_cancelBitsMask = 0x3; static const size_t _S_cancelNone = 0x0; static const size_t _S_cancelStarted = 0x1; static const size_t _S_cancelDeferredShootdownOwner = 0x2; static const size_t _S_cancelShotdownOwner = 0x3; // Intermediate exceptions. static const size_t _S_nonNull = 0x8; static const size_t _S_cancelException = 0xC; // initialization state for inlining depth. static const int _S_notInlined = -1; // Inline flags. static const int _S_structured = 0x00000001; static const int _S_localCancel = 0x00000002; static const int _S_reserved = 0x0000000C; private: // Prevent bad usage of copy-constructor and copy-assignment _TaskCollectionBase(const _TaskCollectionBase& _Collection); _TaskCollectionBase& operator=(const _TaskCollectionBase& _Collection); }; /// /// Structured task collections represent groups of work which follow a strictly LIFO-ordered paradigm for /// queueing and waiting, respectively. They can only be waited on once and can only be used from a single thread of execution. /// /**/ class _StructuredTaskCollection : public _TaskCollectionBase { public: /// /// Constructs a new structured task collection. /// /**/ _StructuredTaskCollection() { _Construct(); _M_pTokenState = NULL; } /// /// Constructs a new structured task collection whose cancellation is governed by the supplied cancellation token. /// /// /// When this cancellation token is canceled, the structured task group will be canceled. /// /**/ _CRTIMP _StructuredTaskCollection(_CancellationTokenState *_PTokenState);
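// Illustrative usage sketch (editorial example, not part of the original header): _MyChore is a
// hypothetical _UnrealizedChore-derived type. Every scheduled chore must be balanced by a wait
// before the collection destructs; destructing with unpopped chores throws missing_wait.
//
//     {
//         Concurrency::details::_StructuredTaskCollection _Tasks;
//         _MyChore _Chore;
//         _Tasks._Schedule(&_Chore);
//         _Tasks._Wait(); // mandatory before _Tasks goes out of scope
//     }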
/// /// Destructs a task collection and waits on all associated work to finish. Clients must call '_StructuredTaskCollection::_Wait' /// or '_StructuredTaskCollection::_RunAndWait' prior to destructing the object. If there are chores remaining in the queues, an /// exception (missing_wait) is thrown. If the destructor is running because of exception unwinding, it will abort any scheduled work. /// If another exception occurs because work is aborted, the process will terminate (C++ semantics). /// /**/ _CRTIMP ~_StructuredTaskCollection(); /// /// Schedules a chore that can potentially run in parallel. The chore is pushed onto the associated work-stealing queue, and /// will be executed in a LIFO order. Note that the specified chore can be scheduled only on a single task collection at a given time. /// Any attempt to schedule the same chore multiple times on one or more task collections will result in an invalid_multiple_scheduling /// exception. After the chore is guaranteed to have been executed (by calling the _Wait method), it can be rescheduled to an /// arbitrary task collection. /// /// /// The new unrealized chore to schedule /// /// /// The location where the unrealized chore should execute. Specifying the value NULL here indicates that the unrealized chore does not /// have specific placement. /// /**/ _CRTIMP void _Schedule(_UnrealizedChore * _PChore, location * _PLocation); /// /// Schedules a chore that can potentially run in parallel. The chore is pushed onto the associated work-stealing queue, and /// will be executed in a LIFO order. Note that the specified chore can be scheduled only on a single task collection at a given time. /// Any attempt to schedule the same chore multiple times on one or more task collections will result in an invalid_multiple_scheduling /// exception. After the chore is guaranteed to have been executed (by calling the _Wait method), it can be rescheduled to an /// arbitrary task collection. /// /// /// The new unrealized chore to schedule /// /**/ _CRTIMP void _Schedule(_UnrealizedChore * _PChore); /// /// Cancels work on the task collection. /// /**/ _CRTIMP void _Cancel(); /// /// Informs the caller whether or not the task collection is currently in the midst of cancellation. Note that this /// does not necessarily indicate that Cancel was called on the collection (although such certainly qualifies this function /// to return true). It can be the case that the task collection is executing inline and a task collection further up in the work /// tree was canceled. In cases such as these where we can determine ahead of time that cancellation will flow through /// this collection, true will be returned as well. /// /// /// An indication of whether the task collection is in the midst of a cancellation (or is guaranteed to be shortly). /// /**/ _CRTIMP bool _IsCanceling(); /// /// A cancellation-friendly wrapper with which to execute _PChore and then /// wait for all chores running in the _StructuredTaskCollection to finish (normally or abnormally). This method encapsulates /// all the running tasks in an exception handling block, and will re-throw any exceptions that occur in any of its tasks /// (if those exceptions occur on another thread, they are marshaled from that thread to the thread where the _StructuredTaskCollection /// was created, and re-thrown). After this function returns, the _StructuredTaskCollection cannot be used for scheduling further work. /// /// /// An _UnrealizedChore which when non-null will be called to invoke the chore in a cancellation-friendly manner. /// /// /// An indication of the status of the wait. /// /**/ _CRTIMP _TaskCollectionStatus __stdcall _RunAndWait(_UnrealizedChore * _PChore = NULL); /// /// Waits for all chores running in the _StructuredTaskCollection to finish (normally or abnormally).
This method encapsulates /// all the running tasks in an exception handling block, and will re-throw any exceptions that occur in any of its tasks /// (if those exceptions occur on another thread, they are marshaled from that thread to the thread where the _StructuredTaskCollection /// was created, and re-thrown). After this function returns, the _StructuredTaskCollection cannot be used for scheduling further work. /// /// /// An indication of the status of the wait. /// /**/ _TaskCollectionStatus _Wait() { return _RunAndWait(); } /// /// Called to cancel any contexts which stole chores from the given collection. /// /**/ void _CancelStolenContexts(); private: friend class _UnrealizedChore; void _Construct() { _M_pOwningContext = NULL; _M_inlineFlags = _S_structured; } /// /// Internal routine to abort work on the task collection. /// /**/ _CRTIMP void _Abort(); /// /// Internal routine to clean up after a cancellation token. /// _CRTIMP void _CleanupToken(); /// /// Performs task cleanup normally done at destruction time. /// /**/ bool _TaskCleanup() { // // Users are required to call Wait() before letting the destructor run. Otherwise, throw. Note that before throwing, // we must actually wait on the tasks because they contain pointers into stack frames and unwinding without the wait is // instant stack corruption. // if (_M_unpoppedChores > 0) { _Abort(); if (!__uncaught_exception()) { return false; } } return true; } /// /// Internal initialization of the structured task collection /// /**/ void _Initialize(); /// /// Waits on a specified number of stolen chores. /// /// /// The number of stolen chores to wait on. /// /**/ void _WaitOnStolenChores(long _StolenChoreCount); /// /// Indicates that a stolen chore has completed. /// /**/ void _CountUp(); /// /// The callback which is made when a cancellation occurs via a token associated with a structured_task_group on the boundary /// of two cancellation tokens. /// /**/ static void __cdecl _CancelViaToken(_StructuredTaskCollection *pCollection); // // _StructuredTaskCollection::_M_event is used to construct a structured event object only when it is needed to block. The structured event object // has no state to clean up, so no destructor code is required. // _CONCRT_BUFFER _M_event[(sizeof(void*) + sizeof(_CONCRT_BUFFER) - 1) / sizeof(_CONCRT_BUFFER)]; }; /// /// Task collections represent groups of work which step outside the strict structuring of the /// _StructuredTaskCollection definition. Any groups of work which do not follow LIFO ordering, are waited /// on multiple times, or are passed between arbitrary threads require utilization of this definition /// of a task collection. It has additional overhead over the _StructuredTaskCollection. /// /**/ class _TaskCollection : public _TaskCollectionBase { public: /// /// Constructs a new task collection. /// /**/ _CRTIMP _TaskCollection(); /// /// Constructs a new task collection whose cancellation is governed by the specified cancellation token state. /// /// /// When this cancellation token is canceled, the task collection is canceled. /// /**/ _CRTIMP _TaskCollection(_CancellationTokenState *_PTokenState); /// /// Destroys a task collection. Clients must call '_TaskCollection::_Wait' or '_TaskCollection::_RunAndWait' prior to destructing /// the object. If there are chores remaining in the queues, an exception (missing_wait) is thrown. If the destructor /// is running because of exception unwinding, it will abort any scheduled work.
If another exception occurs because work /// is aborted, the process will terminate (C++ semantics). /// /**/ _CRTIMP ~_TaskCollection(); /// /// Schedules a chore that can potentially run in parallel. The chore is pushed onto the associated work-stealing queue, and /// will be executed in a LIFO order. The tasks scheduled into a _TaskCollection are scheduled into the current scheduler. /// Note that the specified chore can be scheduled only on a single task collection at a given time. Any attempt to schedule the same /// chore multiple times on one or more task collections will result in an invalid_multiple_scheduling exception. After the chore is /// guaranteed to have been executed (by calling the Wait method), it can be rescheduled to an arbitrary task collection. /// /// /// The new unrealized chore to schedule /// /// /// The location where the unrealized chore should execute. Specifying the value NULL here indicates that the unrealized chore does not /// have specific placement. /// /**/ _CRTIMP void _Schedule(_UnrealizedChore * _PChore, location * _PLocation); /// /// Schedules a chore that can potentially run in parallel. The chore is pushed onto the associated work-stealing queue, and /// will be executed in a LIFO order. The tasks scheduled into a _TaskCollection are scheduled into the current scheduler. /// Note that the specified chore can be scheduled only on a single task collection at a given time. Any attempt to schedule the same /// chore multiple times on one or more task collections will result in an invalid_multiple_scheduling exception. After the chore is /// guaranteed to have been executed (by calling the Wait method), it can be rescheduled to an arbitrary task collection. /// /// /// The new unrealized chore to schedule /// /**/ _CRTIMP void _Schedule(_UnrealizedChore * _PChore); /// /// Cancels work on the task collection. /// /**/ _CRTIMP void _Cancel(); /// /// Informs the caller whether or not the task collection is currently in the midst of a cancellation. Note that this /// does not necessarily indicate that Cancel was called on the collection (although such certainly qualifies this function /// to return true). It can be the case that the task collection is executing inline and a task collection further up in the work /// tree was canceled. In cases such as these where we can determine ahead of time that cancellation will flow through /// this collection, true will be returned as well. /// /// /// An indication of whether the task collection is in the midst of a cancellation (or is guaranteed to be shortly). /// /**/ _CRTIMP bool _IsCanceling();
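// Illustrative sketch (editorial example, not part of the original header): driving a chore
// through _RunAndWait below and inspecting the resulting status. _MyChore is a hypothetical
// _UnrealizedChore-derived type.
//
//     Concurrency::details::_TaskCollection _Tasks;
//     _MyChore _Chore;
//     if (_Tasks._RunAndWait(&_Chore) == Concurrency::details::_Canceled)
//     {
//         // The collection was canceled while the chore ran.
//     }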
/// /// A cancellation-friendly wrapper with which to execute _PChore and then /// wait for all chores running in the _TaskCollection to finish (normally or abnormally). This method encapsulates /// all the running tasks in an exception handling block, and will re-throw any exceptions that occur in any of its tasks /// (if those exceptions occur on another thread, they are marshaled from that thread to the thread where the _TaskCollection /// was created, and re-thrown). After this function returns, the _TaskCollection cannot be used for scheduling further work. /// /// /// An _UnrealizedChore which when non-null will be called to invoke the chore in a cancellation-friendly manner. /// /// /// An indication of the status of the wait. /// /// /**/ _CRTIMP _TaskCollectionStatus __stdcall _RunAndWait(_UnrealizedChore * _PChore = NULL); /// /// Waits for all chores running in the _TaskCollection to finish (normally or abnormally). This method encapsulates /// all the running tasks in an exception handling block, and will re-throw any exceptions that occur in any of its tasks /// (if those exceptions occur on another thread, they are marshaled from that thread to the thread where the _TaskCollection /// was created, and re-thrown). After this function returns, the _TaskCollection cannot be used for scheduling further work. /// /// /// An indication of the status of the wait. /// /// /**/ _TaskCollectionStatus _Wait() { return _RunAndWait(); } /// /// Returns whether this task collection is marked for abnormal exit. /// /**/ bool _IsMarkedForAbnormalExit() const; /// /// Returns the object which this is an alias for. /// /**/ _TaskCollection * _OriginalCollection() const; /// /// Returns whether the task collection is an alias. /// /**/ bool _IsAlias() const; /// /// Registers a notification handler for completion of chores /// /// /// The callback function /// /// /// The completion context for the callback function /// /**/ void _RegisterCompletionHandler(TaskProc _Func, void * _PCompletionContext); private: friend class _UnrealizedChore; friend class Concurrency::details::ContextBase; /// /// Determines if the task collection is a stale alias (an object which was left over from a deferred delete /// of a direct alias but which happens to match the hash key for a newly allocated task collection) /// /**/ bool _IsStaleAlias() const; /// /// Releases an alias -- this will free it if the release is the last man out. /// /**/ void _ReleaseAlias(); /// /// Constructs an alias collection based on a specified origin collection /// /// /// Specifies which collection the newly constructed one will alias /// /// /// Specifies whether the newly constructed collection is a direct alias /// /**/ _TaskCollection(_TaskCollection * _POriginCollection, bool _FDirectAlias); /// /// Returns the local alias of a task collection on the current context. /// /**/ _TaskCollection * _Alias(); /// /// Internal routine to abort work on the task collection. /// /// /// An indication as to whether or not to leave the task collection canceled after the abort. /// /**/ void _Abort(bool fLeaveCanceled = false); /// /// Returns whether the task collection is an indirect alias. /// /**/ bool _IsIndirectAlias() const; /// /// Returns whether the task collection is a direct alias. /// /**/ bool _IsDirectAlias() const; /// /// Returns whether this task collection has a direct alias. /// /**/ bool _HasDirectAlias() const; /// /// Cancels work on the task collection. This is an internal version. /// /// /// Indicates whether the cancellation is taking place because of /// exception unwinding within the runtime /// /// /// A snapshot of the direct alias list which is what the call will affect /// /**/ void _Cancel(bool _InsideException, _TaskCollection * _PSnapPoint); /// /// Called for every new chore put into the task collection. Ensures appropriate synchronization with waiters. /// /**/ void _NotifyNewChore(); /// /// Called for every completed chore from the task collection. Ensures appropriate synchronization with waiters. /// /// /// An _UnrealizedChore which will be freed if its lifetime is owned by the Runtime. /// /**/ void _NotifyCompletedChoreAndFree(_UnrealizedChore * _PChore = NULL); /// /// Waits on the given task collection and every alias.
/// /// /// A snapshot of the direct alias list which is what the call will affect /// /**/ void _FullAliasWait(_TaskCollection * _PSnapPoint); /// /// Resets the task collection for future usage. /// /// /// A snapshot of the direct alias list which is what the call will affect /// /**/ void _Reset(_TaskCollection * _PSnapPoint); /// /// Called when an exception is raised on a chore on an unstructured task collection; this makes a determination of what to do with the exception /// and saves it for potential transport back to the thread performing a join on a task collection. This specifically handles situations /// for unstructured task collections before calling _TaskCollectionBase::_RaisedException. /// /**/ void _RaisedException(); /// /// Called when a cancellation is raised on a chore on a given task collection. This makes a determination of what to do with the exception /// and saves it for potential transport back to the thread performing a join on a chore collection. Note that every other exception /// has precedence over a cancellation. /// /**/ void _RaisedCancel(); /// /// Called in order to set the cancellation status of the collection. /// /// /// The cancellation status to set /// /// /// An indication of whether the set succeeded. The set will fail if the task collection already has a cancellation status. /// /**/ bool _SetCancelState(long _Status); /// /// Called to cancel a single alias of a task collection from an arbitrary thread. /// /// /// Indicates whether the cancellation is taking place because of /// exception unwinding within the runtime /// /**/ void _CancelFromArbitraryThread(bool _InsideException); /// /// Cancels all direct aliases of the task collection. /// /// /// Indicates whether the cancellation is taking place because of /// exception unwinding within the runtime /// /// /// A snapshot of the direct alias list which is what the call will affect /// /**/ void _CancelDirectAliases(bool _InsideException, _TaskCollection * _PSnapPoint); /// /// Called to cancel any contexts which stole chores from the given collection. This is *PART* of a cancellation /// scheme. The remainder must be handled by the derived class in particular. This should be called last. /// /// /// Indicates whether the cancellation is taking place because of /// exception unwinding within the runtime /// /// /// Indicates whether the inline context is safe and blocked from becoming inaccessible during /// the duration of the call /// /**/ void _CancelStolenContexts(bool _InsideException, bool _FInlineGated); /// /// Returns the steal tracking list. /// /**/ void *_GetStealTrackingList() const; /// /// Internal initialization of the task collection /// /**/ void _Initialize(); /// /// Performs an abortive sweep of the WSQ for inline stack overflow. /// /// /// The context to sweep /// /**/ void _AbortiveSweep(void *_PCtx); /// /// A predicate function checking whether a given chore belongs to a given collection. /// /// /// The chore to check /// /// /// The data to check against /// /// /// Whether or not the chore belongs to the collection /// /**/ static bool __cdecl _CollectionMatchPredicate(_UnrealizedChore *_PChore, void *_PData); /// /// Called to sweep an aborted chore in the case of inline stack overflow.
/// /// /// The chore to sweep /// /// /// The data that was passed to the sweep predicate /// /// /// An indication of whether the chore is now gone /// /**/ static bool __cdecl _SweepAbortedChore(_UnrealizedChore *_PChore, void *_PData); /// /// Performs task cleanup normally done at destruction time. /// /// /// An indication of whether the cleanup is exceptional and the collection should be left in a canceled state. /// /**/ bool _TaskCleanup(bool fExceptional); /// /// Called when the task collection is canceled via a cancellation token. /// /**/ static void __cdecl _CancelViaToken(_TaskCollection *pCollection); /// /// Tracks contexts that have stolen chores from this collection. This is storage for an internal list and lock. Note that this list is only /// used for detached schedule groups. /// /**/ _CONCRT_BUFFER _M_stealTracker[_SAFERWLIST_SIZE]; /// /// A count of active stealers for *CANCELLATION PURPOSES ONLY*. This is non-interlocked and guarded by the same lock as the /// stealers list on this task collection. /// /**/ long _M_activeStealersForCancellation; /// /// An indication of the exit code of the chore. Anything non-zero here indicates cancellation of one /// form or another. /// /**/ volatile long _M_exitCode; /// /// The status of the task collection. /// /**/ volatile long _M_executionStatus; /// /// An event on which to wait for stolen chores to complete. /// /**/ event _M_event; _TaskCollection * _M_pOriginalCollection; _TaskCollection * _M_pNextAlias; void * _M_pTaskExtension; int _M_taskCookies[2]; volatile long _M_flags; volatile long _M_chaining; DWORD _M_boundQueueId; int _M_stackPos; TaskProc _M_completionHandler; void * _M_pCompletionContext; }; /// /// The enum defines the inlining scheduling policy for ppltasks. /// Scheduling a chore or a functor with _TaskInliningMode will give the /// scheduler a hint on whether to apply inline execution or not. /// /// /// As an optimization, we assign an integer to each option in the enum, /// which effectively stands for the maximal inlining depth (threshold) for the current chore, /// and the scheduler will compare this threshold with the current context's inlining depth to /// make the inline decision. /// If the current context's inlining depth is greater than this threshold, /// the chore will be scheduled on a new context; otherwise, the chore will be scheduled inline. /// The minimal threshold 0 means do not inline; the maximal threshold -1 (0xFFFFFFFF....) means always inline. /// 16 is a good default inlining threshold determined by experiment. /// enum _TaskInliningMode { // Disable inline scheduling _NoInline = 0, // Let runtime decide whether to do inline scheduling or not _DefaultAutoInline = 16, // Always do inline scheduling _ForceInline = -1, }; /// /// RAII wrapper used to maintain and limit the ppltask maximum inline schedule depth. /// This class will keep a reference to the depth slot on the current context. /// class _StackGuard { public: _StackGuard() : _Depth(_GetCurrentInlineDepth()) { // _Depth is a reference to the depth slot on the context. ++_Depth; } ~_StackGuard() { // _Depth is a reference to the depth slot on the context. --_Depth; } bool _ShouldInline(_TaskInliningMode _InliningMode) const { // As _TaskInliningMode is defined as an inlining threshold, we can directly convert // it into size_t and compare it with the current context's inlining depth.
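// For example (editorial note): with _DefaultAutoInline (16), the chore inlines only while the
// current depth is at most 16; _ForceInline (-1) converts to the maximal size_t value, so the
// test always passes; _NoInline (0) never passes once the guard has incremented the depth.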
return _Depth <= static_cast<size_t>(_InliningMode); } private: size_t & _Depth; _StackGuard & operator =(const _StackGuard &); /// /// Returns a reference to the ppltask inline schedule depth slot on the current context. /// The inline depth will be set to 0 when the context is first initialized, /// and the caller is responsible for maintaining that depth. /// _CRTIMP static size_t & __cdecl _GetCurrentInlineDepth(); }; /// /// Async task collections are a thin wrapper over task collections to cater to the execution of asynchronous /// chores (or tasks defined in ppltasks.h). Specifically, they manage their own lifetime by using reference /// counts. Scheduling a chore acquires a reference and on completion of its execution the reference is released. /// class _AsyncTaskCollection : public _RefCounterBase { public: /// /// Constructs a new task collection whose cancellation is governed by the specified cancellation token state. /// /// /// When this cancellation token is canceled, the task collection is canceled. /// /// /// Pointer to a new instance of _AsyncTaskCollection. /// _CRTIMP static _AsyncTaskCollection * __cdecl _NewCollection(_CancellationTokenState *_PTokenState); /// /// Schedules a chore with automatic inlining. The chore is pushed onto the associated work-stealing queue, and /// will be executed in a LIFO order. The tasks scheduled into a _TaskCollection are scheduled into the current scheduler. /// Note that the specified chore can be scheduled only on a single task collection at a given time. Any attempt to schedule the same /// chore multiple times on one or more task collections will result in an invalid_multiple_scheduling exception. After the chore is /// guaranteed to have been executed (by calling the Wait method), it can be rescheduled to an arbitrary task collection. /// This schedule method will perform automatic inlining based on the specified _TaskInliningMode. /// /// /// The new unrealized chore to be scheduled. The chore will be deleted after scheduling. /// /// /// The inlining scheduling policy for the current chore. /// /// /// An indication of the chore's status after scheduling. /// _TaskCollectionStatus _ScheduleWithAutoInline(_UnrealizedChore * _PChore, _TaskInliningMode _InliningMode) { _CONCRT_ASSERT(_PChore); _Reference(); if (_InliningMode == _NoInline) { _M_taskCollection._Schedule(_PChore); return _NotComplete; } else { _StackGuard _Guard; if (_Guard._ShouldInline(_InliningMode)) { return _M_taskCollection._RunAndWait(_PChore); } else { _M_taskCollection._Schedule(_PChore); return _NotComplete; } } } /// /// Cancels work on the task collection. /// void _Cancel() { _M_taskCollection._Cancel(); } /// /// A cancellation-friendly wrapper which waits for all chores running in the underlying _TaskCollection to finish (normally or abnormally). This method encapsulates /// all the running tasks in an exception handling block, and will re-throw any exceptions that occur in any of its tasks /// (if those exceptions occur on another thread, they are marshaled from that thread to the thread where the _TaskCollection /// was created, and re-thrown). After this function returns, the _TaskCollection cannot be used for scheduling further work. /// /// /// An indication of the status of the wait. /// _TaskCollectionStatus _RunAndWait() { // Note that _Guard is NOT an unused variable; the constructor and destructor are called to maintain the inline depth.
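// (Editorial note: constructing _Guard increments the per-context inline depth, and its destructor decrements it when _RunAndWait returns.)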
_StackGuard _Guard; return _M_taskCollection._RunAndWait(); } private: void _NotificationHandler(); _CRTIMP virtual void _Destroy(); // Private constructor _AsyncTaskCollection(_CancellationTokenState *_PTokenState); __declspec(noinline) static void __cdecl _CompletionHandler(void * _PCompletionContext); private: // Underlying task collection where the chore is scheduled to run _TaskCollection _M_taskCollection; }; /// /// Internal maintenance structure for beacons. /// struct _Beacon_reference { volatile long _M_signals; }; typedef void (__cdecl * _UnobservedExceptionHandler)(void); _CRTIMP void __cdecl _SetUnobservedExceptionHandler(_UnobservedExceptionHandler); // Used to report unobserved task exceptions in ppltasks.h _CRTIMP void __cdecl _ReportUnobservedException(); /// /// A cancellation beacon is a flag which can be polled in an inlinable fashion using the _Is_signaled method in lieu of polling on /// the more expensive non-inlinable is_current_task_group_canceling method. /// /// /// Cancellation beacons can be used only in the same way as structured_task_group and _StructuredTaskCollection. They are intended /// as stack-based objects utilized in strictly nested RAII fashion. A beacon can *NOT* be passed to another thread or allocated on the /// heap. /// class _Cancellation_beacon { public: _CRTIMP _Cancellation_beacon(); _CRTIMP ~_Cancellation_beacon(); bool _Is_signaled() const { return (_M_pRef->_M_signals != 0); } // This method should only be called when the beacon is signaled. It confirms whether a cancellation is indeed happening and that the beacon // was not flagged due to a false-positive race. If the cancellation is not confirmed, the beacon is lowered. _CRTIMP bool _Confirm_cancel(); void _Raise() { _InterlockedIncrement(&_M_pRef->_M_signals); } void _Lower() { _InterlockedDecrement(&_M_pRef->_M_signals); } private: _Beacon_reference *_M_pRef; }; // // Internal stub class. // class _TimerStub; // // Internal wrapper around timers in order to allow timer messaging blocks to share implementation with internal ConcRT runtime // timers. // class _Timer { protected: // Constructs a new timer. // // _Ms: The duration and period of the timer in milliseconds. // _FRepeating: An indication of whether the timer is repeating (periodic) or not. _CRTIMP _Timer(unsigned int _Ms, bool _FRepeating); // Destroys the timer. _CRTIMP virtual ~_Timer(); // Starts the timer. _CRTIMP void _Start(); // Stops the timer. _CRTIMP void _Stop(); private: friend class _TimerStub; // Called when the timer fires. virtual void _Fire() = 0; // The actual timer HANDLE _M_hTimer; // The duration and period of the timer. unsigned int _M_ms; // Whether the timer is repeating (periodic by _M_ms) bool _M_fRepeating; }; // // Internal runtime structure that holds the trace flags and level for ETW events // provided by the Concurrency Runtime. // struct _CONCRT_TRACE_INFO { volatile unsigned long EnableFlags; // Determines which class of events to log volatile unsigned char EnableLevel; // Determines the severity of events to log void _EnableTrace(unsigned char level, unsigned long flags) { EnableFlags = flags; EnableLevel = level; } void _DisableTrace() { EnableLevel = 0; EnableFlags = 0; } bool _IsEnabled(unsigned char level, unsigned long flags) const { return ((level <= EnableLevel) && ((EnableFlags & flags) == flags)); } };
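// Illustrative sketch (editorial example, not part of the original header): how a call site
// might consult the trace settings before emitting an ETW event. The level value 4 is an
// arbitrary assumption; SchedulerEventFlag is declared later in this header.
//
//     const Concurrency::details::_CONCRT_TRACE_INFO * _PTraceInfo = Concurrency::details::_GetConcRTTraceInfo();
//     if (_PTraceInfo != NULL && _PTraceInfo->_IsEnabled(4, Concurrency::SchedulerEventFlag))
//     {
//         // Emit the event here.
//     }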
/// /// Retrieves a pointer to the internal trace flags and level information for /// the Concurrency Runtime ETW provider. /// /**/ _CRTIMP const _CONCRT_TRACE_INFO * _GetConcRTTraceInfo(); /// /// Registers ConcRT as an ETW Event Provider. /// /**/ void _RegisterConcRTEventTracing(); /// /// Unregisters ConcRT as an ETW Event Provider. /// /**/ void _UnregisterConcRTEventTracing(); } // namespace details /// /// Enables tracing in the Concurrency Runtime. This function is deprecated because ETW tracing is now on by default. /// /// /// If tracing was correctly initiated, S_OK is returned; otherwise, E_NOT_STARTED is returned. /// /**/ __declspec(deprecated("Concurrency::EnableTracing is a deprecated function.")) _CRTIMP HRESULT __cdecl EnableTracing(); /// /// Disables tracing in the Concurrency Runtime. This function is deprecated because ETW tracing is unregistered by default. /// /// /// If tracing was correctly disabled, S_OK is returned. If tracing was not previously initiated, /// E_NOT_STARTED is returned. /// /**/ __declspec(deprecated("Concurrency::DisableTracing is a deprecated function.")) _CRTIMP HRESULT __cdecl DisableTracing(); /// /// The types of events that can be traced using the tracing functionality offered by the Concurrency Runtime. /// /**/ enum ConcRT_EventType { /// /// An event type used for miscellaneous events. /// /**/ CONCRT_EVENT_GENERIC = 0, /// /// An event type that marks the beginning of a start/end event pair. /// /**/ CONCRT_EVENT_START = 1, /// /// An event type that marks the end of a start/end event pair. /// /**/ CONCRT_EVENT_END = 2, /// /// An event type that represents the act of a context blocking. /// /**/ CONCRT_EVENT_BLOCK = 3, /// /// An event type that represents the act of unblocking a context. /// /**/ CONCRT_EVENT_UNBLOCK = 4, /// /// An event type that represents the act of a context yielding. /// /**/ CONCRT_EVENT_YIELD = 5, /// /// An event type that represents the act of a context becoming idle. /// /**/ CONCRT_EVENT_IDLE = 6, /// /// An event type that represents the act of attaching to a scheduler. /// /**/ CONCRT_EVENT_ATTACH = 7, /// /// An event type that represents the act of detaching from a scheduler. /// /**/ CONCRT_EVENT_DETACH = 8, }; // Common trace header structure for all ConcRT diagnostic events // struct CONCRT_TRACE_EVENT_HEADER_COMMON // { // EVENT_TRACE_HEADER header; // DWORD VirtualProcessorID; // DWORD SchedulerID; // DWORD ContextID; // DWORD ScheduleGroupID; // }; /// /// The ETW provider GUID for the Concurrency Runtime. /// /**/ extern "C" const __declspec(selectany) GUID ConcRT_ProviderGuid = { 0xF7B697A3, 0x4DB5, 0x4d3b, { 0xBE, 0x71, 0xC4, 0xD2, 0x84, 0xE6, 0x59, 0x2F } }; // // GUIDS for events // /// /// A category GUID describing ETW events fired by the Concurrency Runtime that are not more specifically described by another category. /// /// /// This category of events is not currently fired by the Concurrency Runtime. /// /**/ extern "C" const __declspec(selectany) GUID ConcRTEventGuid = { 0x72B14A7D, 0x704C, 0x423e, { 0x92, 0xF8, 0x7E, 0x6D, 0x64, 0xBC, 0xB9, 0x2A } }; /// /// A category GUID describing ETW events fired by the Concurrency Runtime that are directly related to scheduler activity. /// /// /// /**/ extern "C" const __declspec(selectany) GUID SchedulerEventGuid = { 0xE2091F8A, 0x1E0A, 0x4731, { 0x84, 0xA2, 0x0D, 0xD5, 0x7C, 0x8A, 0x52, 0x61 } }; /// /// A category GUID describing ETW events fired by the Concurrency Runtime that are directly related to schedule groups. /// /// /// This category of events is not currently fired by the Concurrency Runtime.
/// /// /**/ extern "C" const __declspec(selectany) GUID ScheduleGroupEventGuid = { 0xE8A3BF1F, 0xA86B, 0x4390, { 0x9C, 0x60, 0x53, 0x90, 0xB9, 0x69, 0xD2, 0x2C } }; /// /// A category GUID describing ETW events fired by the Concurrency Runtime that are directly related to contexts. /// /// /**/ extern "C" const __declspec(selectany) GUID ContextEventGuid = { 0x5727A00F, 0x50BE, 0x4519, { 0x82, 0x56, 0xF7, 0x69, 0x98, 0x71, 0xFE, 0xCB } }; /// /// A category GUID describing ETW events fired by the Concurrency Runtime that are directly related to chores or tasks. /// /// /// This category of events is not currently fired by the Concurrency Runtime. /// /// /// /**/ extern "C" const __declspec(selectany) GUID ChoreEventGuid = { 0x7E854EC7, 0xCDC4, 0x405a, { 0xB5, 0xB2, 0xAA, 0xF7, 0xC9, 0xE7, 0xD4, 0x0C } }; /// /// A category GUID describing ETW events fired by the Concurrency Runtime that are directly related to virtual processors. /// /**/ extern "C" const __declspec(selectany) GUID VirtualProcessorEventGuid = { 0x2f27805f, 0x1676, 0x4ecc, { 0x96, 0xfa, 0x7e, 0xb0, 0x9d, 0x44, 0x30, 0x2f } }; /// /// A category GUID describing ETW events fired by the Concurrency Runtime that are directly related to locks. /// /// /// This category of events is not currently fired by the Concurrency Runtime. /// /// /// /**/ extern "C" const __declspec(selectany) GUID LockEventGuid = { 0x79A60DC6, 0x5FC8, 0x4952, { 0xA4, 0x1C, 0x11, 0x63, 0xAE, 0xEC, 0x5E, 0xB8 } }; /// /// A category GUID describing ETW events fired by the Concurrency Runtime that are directly related to the resource manager. /// /// /// This category of events is not currently fired by the Concurrency Runtime. /// /// /**/ extern "C" const __declspec(selectany) GUID ResourceManagerEventGuid = { 0x2718D25B, 0x5BF5, 0x4479, { 0x8E, 0x88, 0xBA, 0xBC, 0x64, 0xBD, 0xBF, 0xCA } }; /// /// A category GUID describing ETW events fired by the Concurrency Runtime that are directly related to usage of the parallel_invoke /// function. /// /// /**/ extern "C" const __declspec(selectany) GUID PPLParallelInvokeEventGuid = { 0xd1b5b133, 0xec3d, 0x49f4, { 0x98, 0xa3, 0x46, 0x4d, 0x1a, 0x9e, 0x46, 0x82 } }; /// /// A category GUID describing ETW events fired by the Concurrency Runtime that are directly related to usage of the parallel_for /// function. /// /// /**/ extern "C" const __declspec(selectany) GUID PPLParallelForEventGuid = { 0x31c8da6b, 0x6165, 0x4042, { 0x8b, 0x92, 0x94, 0x9e, 0x31, 0x5f, 0x4d, 0x84 } }; /// /// A category GUID describing ETW events fired by the Concurrency Runtime that are directly related to usage of the parallel_for_each /// function. /// /// /**/ extern "C" const __declspec(selectany) GUID PPLParallelForeachEventGuid = { 0x5cb7d785, 0x9d66, 0x465d, { 0xba, 0xe1, 0x46, 0x11, 0x6, 0x1b, 0x54, 0x34 } }; /// /// A category GUID ({B9B5B78C-0713-4898-A21A-C67949DCED07}) describing ETW events fired by the Agents library in the Concurrency Runtime. 
/// /**/ extern "C" const __declspec(selectany) GUID AgentEventGuid = {0xb9b5b78c, 0x713, 0x4898, { 0xa2, 0x1a, 0xc6, 0x79, 0x49, 0xdc, 0xed, 0x7 } }; // Trace an event signaling a parallel function _CRTIMP void __cdecl _Trace_ppl_function(const GUID& _Guid, unsigned char _Level, ConcRT_EventType _Type); /// /// Trace flags for the event types /// /**/ enum Concrt_TraceFlags { SchedulerEventFlag = 0x1, ContextEventFlag = 0x2, VirtualProcessorEventFlag = 0x4, ResourceManagerEventFlag = 0x8, PPLEventFlag = 0x10, AgentEventFlag = 0x20, AllEventsFlag = 0xFFFFFFFF }; /// /// The types of events that can be traced using the tracing functionality offered by the Agents Library /// /**/ enum Agents_EventType { /// /// An event type that represents the creation of an object /// /**/ AGENTS_EVENT_CREATE = 0, /// /// An event type that represents the initiation of some processing /// /**/ AGENTS_EVENT_START = 1, /// /// An event type that represents the conclusion of some processing /// /**/ AGENTS_EVENT_END = 2, /// /// An event type that represents the deletion of an object /// /**/ AGENTS_EVENT_DESTROY = 3, /// /// An event type that represents the scheduling of a process /// /**/ AGENTS_EVENT_SCHEDULE = 4, /// /// An event type that represents the linking of message blocks /// /**/ AGENTS_EVENT_LINK = 5, /// /// An event type that represents the unlinking of message blocks /// /**/ AGENTS_EVENT_UNLINK = 6, /// /// An event type that represents the name for an object /// /**/ AGENTS_EVENT_NAME = 7 }; // // Common trace payload for agents // // struct AGENTS_TRACE_PAYLOAD // { // // Identifier of the agent or message block that is emitting the event // __int64 AgentId1; // union // { // // The identifier of a target block for link/unlink event // __int64 AgentId2; // // // Count of messages processed for the end event // long Count; // // // Name of this agent for the purposes of the ETW trace // wchar_t Name[32]; // }; // }; // Emit a trace event specific to the agents library of the given type and payload _CRTIMP void __cdecl _Trace_agents(Agents_EventType _Type, __int64 agentId, ...); } namespace concurrency = Concurrency; #pragma pop_macro("new") #pragma pack(pop)