ThreadEmulation.cxx
  1. // THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF
  2. // ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO
  3. // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A
  4. // PARTICULAR PURPOSE.
  5. //
  6. // Copyright (c) Microsoft Corporation. All rights reserved.
#include "ThreadEmulation.h"

#include <assert.h>

#include <algorithm>
#include <map>
#include <mutex>
#include <set>
#include <vector>
  13. using namespace std;
  14. using namespace Platform;
  15. using namespace Windows::Foundation;
  16. using namespace Windows::System::Threading;
  17. namespace ThreadEmulation
  18. {
// Stored data for CREATE_SUSPENDED and ResumeThread.
// One record is kept per suspended thread, keyed by the event handle that
// CreateThread returned to the caller (see pendingThreads below).
struct PendingThreadInfo {
LPTHREAD_START_ROUTINE lpStartAddress; // User entry point to invoke when the thread is resumed.
LPVOID lpParameter;                    // Opaque argument forwarded to lpStartAddress.
HANDLE completionEvent;                // Internal duplicate of the thread handle; signalled and closed by the work item when the thread finishes.
int nPriority;                         // Win32-style priority (default 0); updated by SetThreadPriority while still suspended.
};
// Suspended threads awaiting ResumeThread, keyed by the handle returned from
// CreateThread. Guarded by pendingThreadsLock.
static map<HANDLE, PendingThreadInfo> pendingThreads;
static mutex pendingThreadsLock;
// Thread local storage.
typedef vector<void*> ThreadLocalData;
// Per-thread slot array; nullptr until the thread first calls TlsSetValue.
static __declspec(thread) ThreadLocalData* currentThreadData = nullptr;
// Every live thread's TLS block, so TlsFree can zero a freed slot across all
// threads. Guarded (like the three variables below) by tlsAllocationLock.
static set<ThreadLocalData*> allThreadData;
// Next never-used slot index handed out by TlsAlloc.
static DWORD nextTlsIndex = 0;
// Previously freed slot indices available for reuse (LIFO).
static vector<DWORD> freeTlsIndices;
static mutex tlsAllocationLock;
  35. // Converts a Win32 thread priority to WinRT format.
  36. static WorkItemPriority GetWorkItemPriority(int nPriority)
  37. {
  38. if (nPriority < 0) {
  39. return WorkItemPriority::Low;
  40. }
  41. else if (nPriority > 0) {
  42. return WorkItemPriority::High;
  43. }
  44. else {
  45. return WorkItemPriority::Normal;
  46. }
  47. }
// Helper shared between CreateThread and ResumeThread.
// Schedules the user's thread routine on the WinRT thread pool. Ownership of
// completionEvent transfers to the work item: the lambda captures it by value
// and is the one that signals and closes it once the routine returns.
static void StartThread(LPTHREAD_START_ROUTINE lpStartAddress, LPVOID lpParameter, HANDLE completionEvent, int nPriority)
{
auto workItemHandler = ref new WorkItemHandler([=](IAsyncAction^) {
// Run the user callback. Exceptions are swallowed deliberately so they
// cannot escape the thread-pool callback.
try {
lpStartAddress(lpParameter);
}
catch (...) { }
// Clean up any TLS allocations made by this thread.
TlsShutdown();
// Signal that the thread has completed. The caller-visible handle is a
// duplicate of this event, so waits on the CreateThread return value wake
// up here; our internal duplicate is then closed.
SetEvent(completionEvent);
CloseHandle(completionEvent);
}, CallbackContext::Any);
ThreadPool::RunAsync(workItemHandler, GetWorkItemPriority(nPriority), WorkItemOptions::TimeSliced);
}
// Emulates Win32 CreateThread on top of the WinRT thread pool.
// The returned "thread handle" is really a manual-reset event that becomes
// signalled when the thread routine finishes, so callers can wait on it as
// they would a real thread handle. Only dwCreationFlags of 0 or
// CREATE_SUSPENDED are supported; the unused* parameters must be zero/null.
_Use_decl_annotations_ HANDLE WINAPI CreateThread(LPSECURITY_ATTRIBUTES unusedThreadAttributes, SIZE_T unusedStackSize, LPTHREAD_START_ROUTINE lpStartAddress, LPVOID lpParameter, DWORD dwCreationFlags, LPDWORD unusedThreadId)
{
// Validate parameters.
assert(unusedThreadAttributes == nullptr);
assert(unusedStackSize == 0);
assert((dwCreationFlags & ~CREATE_SUSPENDED) == 0);
assert(unusedThreadId == nullptr);
// Create a handle that will be signalled when the thread has completed.
HANDLE threadHandle = CreateEventEx(nullptr, nullptr, CREATE_EVENT_MANUAL_RESET, EVENT_ALL_ACCESS);
if (!threadHandle) {
return nullptr;
}
// Make a copy of the handle for internal use. This is necessary because
// the caller is responsible for closing the handle returned by CreateThread,
// and they may do that before or after the thread has finished running.
HANDLE completionEvent;
if (!DuplicateHandle(GetCurrentProcess(), threadHandle, GetCurrentProcess(), &completionEvent, 0, false, DUPLICATE_SAME_ACCESS)) {
CloseHandle(threadHandle);
return nullptr;
}
try {
if (dwCreationFlags & CREATE_SUSPENDED) {
// Store info about a suspended thread. It will not run until ResumeThread
// looks this record up (keyed by the caller-visible handle) and calls
// StartThread with it. Priority defaults to 0 (Normal) and may be changed
// by SetThreadPriority while still suspended.
PendingThreadInfo info;
info.lpStartAddress = lpStartAddress;
info.lpParameter = lpParameter;
info.completionEvent = completionEvent;
info.nPriority = 0;
lock_guard<mutex> lock(pendingThreadsLock);
pendingThreads[threadHandle] = info;
}
else {
// Start the thread immediately. StartThread takes over completionEvent
// and closes it when the routine finishes.
StartThread(lpStartAddress, lpParameter, completionEvent, 0);
}
return threadHandle;
}
catch (...) {
// Clean up if thread creation fails. Both handles are still owned here:
// either the map insertion threw (record never stored) or RunAsync threw
// before the work item could take ownership of completionEvent.
CloseHandle(threadHandle);
CloseHandle(completionEvent);
return nullptr;
}
}
  109. _Use_decl_annotations_ DWORD WINAPI ResumeThread(HANDLE hThread)
  110. {
  111. lock_guard<mutex> lock(pendingThreadsLock);
  112. // Look up the requested thread.
  113. auto threadInfo = pendingThreads.find(hThread);
  114. if (threadInfo == pendingThreads.end()) {
  115. // Can only resume threads while they are in CREATE_SUSPENDED state.
  116. assert(false);
  117. return (DWORD)-1;
  118. }
  119. // Start the thread.
  120. try {
  121. PendingThreadInfo& info = threadInfo->second;
  122. StartThread(info.lpStartAddress, info.lpParameter, info.completionEvent, info.nPriority);
  123. }
  124. catch (...) {
  125. return (DWORD)-1;
  126. }
  127. // Remove this thread from the pending list.
  128. pendingThreads.erase(threadInfo);
  129. return 0;
  130. }
  131. _Use_decl_annotations_ BOOL WINAPI SetThreadPriority(HANDLE hThread, int nPriority)
  132. {
  133. lock_guard<mutex> lock(pendingThreadsLock);
  134. // Look up the requested thread.
  135. auto threadInfo = pendingThreads.find(hThread);
  136. if (threadInfo == pendingThreads.end()) {
  137. // Can only set priority on threads while they are in CREATE_SUSPENDED state.
  138. return false;
  139. }
  140. // Store the new priority.
  141. threadInfo->second.nPriority = nPriority;
  142. return true;
  143. }
// Emulates Win32 Sleep by waiting, with a timeout, on a manual-reset event
// that is never signalled. The event is demand-created once and shared by all
// callers; the winning handle is never closed and lives for the remainder of
// the process.
_Use_decl_annotations_ VOID WINAPI Sleep(DWORD dwMilliseconds)
{
static HANDLE singletonEvent = nullptr;
HANDLE sleepEvent = singletonEvent;
// Demand create the event.
if (!sleepEvent) {
sleepEvent = CreateEventEx(nullptr, nullptr, CREATE_EVENT_MANUAL_RESET, EVENT_ALL_ACCESS);
if (!sleepEvent) {
return;
}
// Publish our event with an interlocked compare-exchange (release ordering)
// so other threads observe a fully created handle.
HANDLE previousEvent = InterlockedCompareExchangePointerRelease(&singletonEvent, sleepEvent, nullptr);
if (previousEvent) {
// Back out if multiple threads try to demand create at the same time.
CloseHandle(sleepEvent);
sleepEvent = previousEvent;
}
}
// Emulate sleep by waiting with timeout on an event that is never signalled.
WaitForSingleObjectEx(sleepEvent, dwMilliseconds, false);
}
  164. DWORD WINAPI TlsAlloc()
  165. {
  166. lock_guard<mutex> lock(tlsAllocationLock);
  167. // Can we reuse a previously freed TLS slot?
  168. if (!freeTlsIndices.empty()) {
  169. DWORD result = freeTlsIndices.back();
  170. freeTlsIndices.pop_back();
  171. return result;
  172. }
  173. // Allocate a new TLS slot.
  174. return nextTlsIndex++;
  175. }
  176. _Use_decl_annotations_ BOOL WINAPI TlsFree(DWORD dwTlsIndex)
  177. {
  178. lock_guard<mutex> lock(tlsAllocationLock);
  179. assert(dwTlsIndex < nextTlsIndex);
  180. assert(find(freeTlsIndices.begin(), freeTlsIndices.end(), dwTlsIndex) == freeTlsIndices.end());
  181. // Store this slot for reuse by TlsAlloc.
  182. try {
  183. freeTlsIndices.push_back(dwTlsIndex);
  184. }
  185. catch (...) {
  186. return false;
  187. }
  188. // Zero the value for all threads that might be using this now freed slot.
  189. for each (auto threadData in allThreadData) {
  190. if (threadData->size() > dwTlsIndex) {
  191. threadData->at(dwTlsIndex) = nullptr;
  192. }
  193. }
  194. return true;
  195. }
  196. _Use_decl_annotations_ LPVOID WINAPI TlsGetValue(DWORD dwTlsIndex)
  197. {
  198. ThreadLocalData* threadData = currentThreadData;
  199. if (threadData && threadData->size() > dwTlsIndex) {
  200. // Return the value of an allocated TLS slot.
  201. return threadData->at(dwTlsIndex);
  202. }
  203. else {
  204. // Default value for unallocated slots.
  205. return nullptr;
  206. }
  207. }
// Stores a value in the calling thread's TLS slot, lazily creating or growing
// this thread's TLS block as needed. Returns false if the required allocation
// fails.
_Use_decl_annotations_ BOOL WINAPI TlsSetValue(DWORD dwTlsIndex, LPVOID lpTlsValue)
{
ThreadLocalData* threadData = currentThreadData;
if (!threadData) {
// First time allocation of TLS data for this thread. The block must be
// registered in allThreadData (under the lock) so TlsFree can zero freed
// slots in it.
try {
threadData = new ThreadLocalData(dwTlsIndex + 1, nullptr);
lock_guard<mutex> lock(tlsAllocationLock);
allThreadData.insert(threadData);
currentThreadData = threadData;
}
catch (...) {
// If the set insertion threw, the block was never registered, so it is
// safe to delete it here.
if (threadData) {
delete threadData;
}
return false;
}
}
else if (threadData->size() <= dwTlsIndex) {
// This thread already has a TLS data block, but it must be expanded to fit
// the specified slot. The resize happens under tlsAllocationLock so it
// cannot race TlsFree's slot-zeroing sweep over allThreadData.
try {
lock_guard<mutex> lock(tlsAllocationLock);
threadData->resize(dwTlsIndex + 1, nullptr);
}
catch (...) {
return false;
}
}
// Store the new value for this slot.
threadData->at(dwTlsIndex) = lpTlsValue;
return true;
}
  240. // Called at thread exit to clean up TLS allocations.
  241. void WINAPI TlsShutdown()
  242. {
  243. ThreadLocalData* threadData = currentThreadData;
  244. if (threadData) {
  245. {
  246. lock_guard<mutex> lock(tlsAllocationLock);
  247. allThreadData.erase(threadData);
  248. }
  249. currentThreadData = nullptr;
  250. delete threadData;
  251. }
  252. }
  253. }