#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

#define TIME_QUANTUM_USEC (10 * 1000)
#define RB_CONDATTR_CLOCK_MONOTONIC 1

#define native_thread_yield() Sleep(0)
#define remove_signal_thread_list(th)

static volatile DWORD ruby_native_thread_key = TLS_OUT_OF_INDEXES;
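/* Look up the message text for GetLastError() via FormatMessage() and abort
 * through rb_bug(); used throughout this file when a Win32 primitive fails. */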
static void
w32_error(const char *func)
{
    LPVOID lpMsgBuf;
    DWORD err = GetLastError();
    if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL, err,
                      MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US),
                      (LPTSTR)&lpMsgBuf, 0, NULL) == 0)
        FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL, err,
                      MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
                      (LPTSTR)&lpMsgBuf, 0, NULL);
    rb_bug("%s: %s", func, (char *)lpMsgBuf);
}
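/* Native mutexes are plain Win32 mutex HANDLEs.  Locking goes through
 * w32_wait_events() so that the wait can also be woken by the thread's
 * interrupt event, which shows up as WAIT_OBJECT_0 + 1 below. */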
w32_mutex_lock(HANDLE lock)

    result = w32_wait_events(&lock, 1, INFINITE, 0);

      case WAIT_OBJECT_0 + 1:

    rb_bug("win32_mutex_lock: WAIT_ABANDONED");

    rb_bug("win32_mutex_lock: unknown result (%ld)", result);
static HANDLE
w32_mutex_create(void)
{
    HANDLE lock = CreateMutex(NULL, FALSE, NULL);
    if (lock == NULL) w32_error("native_mutex_initialize");
    return lock;
}
    if (GVL_DEBUG) fprintf(stderr, "gvl acquire (%p): acquire\n", th);

    native_thread_yield();
    rb_bug("gvl_atfork() is called on win32");
    if (GVL_DEBUG) fprintf(stderr, "gvl init\n");
    vm->gvl.lock = w32_mutex_create();
    if (GVL_DEBUG) fprintf(stderr, "gvl destroy\n");
static rb_thread_t *
ruby_thread_from_native(void)
{
    return TlsGetValue(ruby_native_thread_key);
}

    return TlsSetValue(ruby_native_thread_key, th);
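/* Init_native_thread(): allocate the TLS slot that maps an OS thread back to
 * its rb_thread_t and register the main thread.  GetCurrentThread() returns
 * only a pseudo-handle, so a real handle is duplicated for later waits. */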
    ruby_native_thread_key = TlsAlloc();
    ruby_thread_set_native(th);
    DuplicateHandle(GetCurrentProcess(),
                    GetCurrentThread(),
                    GetCurrentProcess(),
                    &th->thread_id, 0, FALSE, DUPLICATE_SAME_ACCESS);

    thread_debug("initial thread (th: %p, thid: %p, event: %p)\n",
                 th, th->thread_id, th->native_thread_data.interrupt_event);
static void
w32_set_event(HANDLE handle)
{
    if (SetEvent(handle) == 0) {
        w32_error("w32_set_event");
    }
}
static void
w32_reset_event(HANDLE handle)
{
    if (ResetEvent(handle) == 0) {
        w32_error("w32_reset_event");
    }
}
    HANDLE *targets = events;

    thread_debug(" w32_wait_events events:%p, count:%d, timeout:%ld, th:%p\n",
                 events, count, timeout, th);

        gvl_acquire(th->vm, th);
            w32_reset_event(intr);

            targets = ALLOCA_N(HANDLE, count + 1);
            memcpy(targets, events, sizeof(HANDLE) * count);
            targets[count++] = intr;
            thread_debug(" * handle: %p (count: %d, intr)\n", intr, count);

    thread_debug(" WaitForMultipleObjects start (count: %d)\n", count);
    ret = WaitForMultipleObjects(count, targets, FALSE, timeout);
    thread_debug(" WaitForMultipleObjects end (ret: %lu)\n", ret);

    if (ret == (DWORD)(WAIT_OBJECT_0 + count - 1) && th) {

    for (i = 0; i < count; i++) {
                 GetHandleInformation(targets[i], &dmy) ? "OK" : "NG");
static void ubf_handle(void *ptr);
#define ubf_select ubf_handle
    return w32_wait_events(events, num, timeout, ruby_thread_from_native());

                     ubf_handle, ruby_thread_from_native(), FALSE);
static void
w32_close_handle(HANDLE handle)
{
    if (CloseHandle(handle) == 0) {
        w32_error("w32_close_handle");
    }
}
static void
w32_resume_thread(HANDLE handle)
{
    if (ResumeThread(handle) == (DWORD)-1) {
        w32_error("w32_resume_thread");
    }
}
#define HAVE__BEGINTHREADEX 1

#undef HAVE__BEGINTHREADEX

#ifdef HAVE__BEGINTHREADEX
#define start_thread (HANDLE)_beginthreadex
#define thread_errno errno
typedef unsigned long (_stdcall *w32_thread_start_func)(void*);
#else
#define start_thread CreateThread
#define thread_errno rb_w32_map_errno(GetLastError())
typedef LPTHREAD_START_ROUTINE w32_thread_start_func;
#endif
static HANDLE
w32_create_thread(DWORD stack_size, w32_thread_start_func func, void *val)
{
    return start_thread(0, stack_size, func, val, CREATE_SUSPENDED, 0);
}

    return w32_wait_events(0, 0, msec, ruby_thread_from_native());
                     ubf_handle, ruby_thread_from_native(), FALSE);

    ret = w32_wait_events(0, 0, msec, th);
    w32_mutex_lock(lock->mutex);

    EnterCriticalSection(&lock->crit);

    return ReleaseMutex(lock->mutex);

    LeaveCriticalSection(&lock->crit);
    thread_debug("native_mutex_trylock: %p\n", lock->mutex);
    result = w32_wait_events(&lock->mutex, 1, 1, 0);
    thread_debug("native_mutex_trylock result: %d\n", result);
    lock->mutex = w32_mutex_create();

    InitializeCriticalSection(&lock->crit);

    w32_close_handle(lock->mutex);

    DeleteCriticalSection(&lock->crit);
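/* Condition variables are built from a circular doubly linked list of
 * per-waiter cond_event_entry nodes.  The rb_thread_cond_t itself acts as the
 * list head; each waiter owns an auto-reset event, native_cond_signal() wakes
 * and unlinks one entry, and native_cond_broadcast() walks the whole list. */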
struct cond_event_entry {
    struct cond_event_entry* next;
    struct cond_event_entry* prev;
    HANDLE event;
};
    struct cond_event_entry *e = cond->next;
    struct cond_event_entry *head = (struct cond_event_entry*)cond;

        struct cond_event_entry *next = e->next;
        struct cond_event_entry *prev = e->prev;

        e->next = e->prev = e;

    struct cond_event_entry *e = cond->next;
    struct cond_event_entry *head = (struct cond_event_entry*)cond;

        struct cond_event_entry *next = e->next;
        struct cond_event_entry *prev = e->prev;

        e->next = e->prev = e;
    struct cond_event_entry entry;
    struct cond_event_entry *head = (struct cond_event_entry*)cond;

    entry.event = CreateEvent(0, FALSE, FALSE, 0);

    /* append this waiter to the tail of the circular list */
    entry.next = head;
    entry.prev = head->prev;
    head->prev->next = &entry;
    head->prev = &entry;

    native_mutex_unlock(mutex);

    r = WaitForSingleObject(entry.event, msec);
    if ((r != WAIT_OBJECT_0) && (r != WAIT_TIMEOUT)) {
        rb_bug("native_cond_wait: WaitForSingleObject returns %lu", r);
    }

    native_mutex_lock(mutex);

    /* unlink this waiter again (a signal may already have detached it) */
    entry.prev->next = entry.next;
    entry.next->prev = entry.prev;

    w32_close_handle(entry.event);
    return (r == WAIT_OBJECT_0) ? 0 : ETIMEDOUT;
    return native_cond_timedwait_ms(cond, mutex, INFINITE);

abs_timespec_to_timeout_ms(struct timespec *ts)

    unsigned long timeout_ms;

    timeout_ms = abs_timespec_to_timeout_ms(ts);

    return native_cond_timedwait_ms(cond, mutex, timeout_ms);
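/*
 * Callers use these the same way as pthread condition variables: hold the
 * mutex, re-check the predicate in a loop, and wait.  A minimal sketch (the
 * `lock`, `cond` and `ready` names are illustrative, not from this file):
 *
 *     native_mutex_lock(&lock);
 *     while (!ready) {
 *         native_cond_wait(&cond, &lock);  // releases the lock while blocked
 *     }
 *     native_mutex_unlock(&lock);
 */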
#if SIZEOF_TIME_T == SIZEOF_LONG
typedef unsigned long unsigned_time_t;
#elif SIZEOF_TIME_T == SIZEOF_INT
typedef unsigned int unsigned_time_t;
#elif SIZEOF_TIME_T == SIZEOF_LONG_LONG
typedef unsigned LONG_LONG unsigned_time_t;
#else
# error cannot find integer type which size is same as time_t.
#endif

#define TIMET_MAX (~(time_t)0 <= 0 ? (time_t)((~(unsigned_time_t)0) >> 1) : (time_t)(~(unsigned_time_t)0))
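/* TIMET_MAX expands to the largest value representable in time_t whether
 * time_t is signed or unsigned (for a signed 64-bit time_t, 2^63 - 1).  It is
 * used below as a clamp when adding a relative timeout to the current time
 * overflows. */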
    now.tv_nsec = tv.tv_usec * 1000;

    timeout.tv_sec = now.tv_sec;
    timeout.tv_nsec = now.tv_nsec;
    timeout.tv_sec += timeout_rel.tv_sec;
    timeout.tv_nsec += timeout_rel.tv_nsec;

    if (timeout.tv_nsec >= 1000*1000*1000) {
        timeout.tv_sec++;
        timeout.tv_nsec -= 1000*1000*1000;
    }

    if (timeout.tv_sec < now.tv_sec)
        timeout.tv_sec = TIMET_MAX;
    cond->next = (struct cond_event_entry *)cond;
    cond->prev = (struct cond_event_entry *)cond;
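/* CHECK_ERR() aborts with the failing expression and GetLastError().
 * native_thread_init_stack() uses it around VirtualQuery() on a local
 * variable to find the allocation base and currently committed region of this
 * thread's stack, capping the reserved safety space at 1 MB. */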
#define CHECK_ERR(expr) \
    {if (!(expr)) {rb_bug("err: %lu - %s", GetLastError(), #expr);}}

    MEMORY_BASIC_INFORMATION mi;

    CHECK_ERR(VirtualQuery(&mi, &mi, sizeof(mi)));
    base = mi.AllocationBase;
    end = mi.BaseAddress;
    end += mi.RegionSize;

    if (space > 1024*1024) space = 1024*1024;
#ifndef InterlockedExchangePointer
#define InterlockedExchangePointer(t, v) \
    (void *)InterlockedExchange((long *)(t), (long)(v))
#endif

    w32_close_handle(intr);
static unsigned long _stdcall
thread_start_func_1(void *th_ptr)

    volatile HANDLE thread_id = th->thread_id;

    native_thread_init_stack(th);

    thread_debug("thread created (th: %p, thid: %p, event: %p)\n", th,
                 th->thread_id, th->native_thread_data.interrupt_event);

    w32_close_handle(thread_id);
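/* native_thread_create(): each Ruby thread requests only a small 4 KB native
 * stack and is created CREATE_SUSPENDED via w32_create_thread(); it is
 * started later with w32_resume_thread() once the VM has finished its
 * bookkeeping. */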
    size_t stack_size = 4 * 1024;
    th->thread_id = w32_create_thread(stack_size, thread_start_func_1, th);
static void
native_thread_join(HANDLE th)
{
    w32_wait_events(&th, 1, INFINITE, 0);
}
#if USE_NATIVE_THREAD_PRIORITY

        priority = THREAD_PRIORITY_ABOVE_NORMAL;

        priority = THREAD_PRIORITY_BELOW_NORMAL;

        priority = THREAD_PRIORITY_NORMAL;

    SetThreadPriority(th->thread_id, priority);
    return w32_wait_events(0, 0, 0, th);

ubf_handle(void *ptr)
static HANDLE timer_thread_id = 0;
static HANDLE timer_thread_lock;
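/* Timer thread lifecycle: rb_thread_create_timer_thread() creates the thread
 * suspended and immediately resumes it; timer_thread_func() then wakes every
 * TIME_QUANTUM_USEC/1000 milliseconds (10 ms) to run the periodic timer work,
 * until native_stop_timer_thread() signals timer_thread_lock, ending the loop
 * so the thread can be joined. */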
static unsigned long _stdcall
timer_thread_func(void *dummy)

    while (WaitForSingleObject(timer_thread_lock,
                               TIME_QUANTUM_USEC/1000) == WAIT_TIMEOUT) {
static void
rb_thread_create_timer_thread(void)
{
    if (timer_thread_id == 0) {
        if (!timer_thread_lock) {
            timer_thread_lock = CreateEvent(0, TRUE, FALSE, 0);
        }
        timer_thread_id = w32_create_thread(1024 + (THREAD_DEBUG ? BUFSIZ : 0),
                                            timer_thread_func, 0);
        w32_resume_thread(timer_thread_id);
    }
}
native_stop_timer_thread(int close_anyway)

        SetEvent(timer_thread_lock);
        native_thread_join(timer_thread_id);
        CloseHandle(timer_thread_lock);
        timer_thread_lock = 0;
static void
native_reset_timer_thread(void)
{
    if (timer_thread_id) {
        CloseHandle(timer_thread_id);
        timer_thread_id = 0;
    }
}
ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
#if defined(__MINGW32__)
rb_w32_stack_overflow_handler(struct _EXCEPTION_POINTERS *exception)

    if (exception->ExceptionRecord->ExceptionCode == EXCEPTION_STACK_OVERFLOW) {

    return EXCEPTION_CONTINUE_SEARCH;
#ifdef RUBY_ALLOCA_CHKSTK

ruby_alloca_chkstk(size_t len, void *sp)