Ruby  2.1.10p492(2016-04-01revision54464)
thread.c
Go to the documentation of this file.
1 /**********************************************************************
2 
3  thread.c -
4 
5  $Author: nagachika $
6 
7  Copyright (C) 2004-2007 Koichi Sasada
8 
9 **********************************************************************/
10 
11 /*
12  YARV Thread Design
13 
14  model 1: Userlevel Thread
15  Same as traditional ruby thread.
16 
17  model 2: Native Thread with Global VM lock
18  Using pthread (or Windows thread) and Ruby threads run concurrent.
19 
20  model 3: Native Thread with fine grain lock
21  Using pthread and Ruby threads run concurrent or parallel.
22 
23 ------------------------------------------------------------------------
24 
25  model 2:
26  A thread has mutex (GVL: Global VM Lock or Giant VM Lock) can run.
27  When thread scheduling, running thread release GVL. If running thread
28  try blocking operation, this thread must release GVL and another
29  thread can continue this flow. After blocking operation, thread
30  must check interrupt (RUBY_VM_CHECK_INTS).
31 
32  Every VM can run parallel.
33 
34  Ruby threads are scheduled by OS thread scheduler.
35 
36 ------------------------------------------------------------------------
37 
38  model 3:
39  Every threads run concurrent or parallel and to access shared object
40  exclusive access control is needed. For example, to access String
41  object or Array object, fine grain lock must be locked every time.
42  */
43 
44 
45 /*
46  * FD_SET, FD_CLR and FD_ISSET have a small sanity check when using glibc
47  * 2.15 or later and set _FORTIFY_SOURCE > 0.
48  * However, the implementation is wrong. Even though Linux's select(2)
49  * supports large fd size (>FD_SETSIZE), it wrongly assumes fd is always
50  * less than FD_SETSIZE (i.e. 1024). And then when enabling HAVE_RB_FD_INIT,
51  * it doesn't work correctly and makes program abort. Therefore we need to
52  * disable FORTY_SOURCE until glibc fixes it.
53  */
54 #undef _FORTIFY_SOURCE
55 #undef __USE_FORTIFY_LEVEL
56 #define __USE_FORTIFY_LEVEL 0
57 
58 /* for model 2 */
59 
60 #include "eval_intern.h"
61 #include "gc.h"
62 #include "timev.h"
63 #include "ruby/io.h"
64 #include "ruby/thread.h"
65 #include "internal.h"
66 
67 #ifndef USE_NATIVE_THREAD_PRIORITY
68 #define USE_NATIVE_THREAD_PRIORITY 0
69 #define RUBY_THREAD_PRIORITY_MAX 3
70 #define RUBY_THREAD_PRIORITY_MIN -3
71 #endif
72 
73 #ifndef THREAD_DEBUG
74 #define THREAD_DEBUG 0
75 #endif
76 
79 
83 static ID id_locals;
84 
85 static void sleep_timeval(rb_thread_t *th, struct timeval time, int spurious_check);
86 static void sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec, int spurious_check);
87 static void sleep_forever(rb_thread_t *th, int nodeadlock, int spurious_check);
88 static double timeofday(void);
89 static int rb_threadptr_dead(rb_thread_t *th);
90 static void rb_check_deadlock(rb_vm_t *vm);
92 
93 #define eKillSignal INT2FIX(0)
94 #define eTerminateSignal INT2FIX(1)
95 static volatile int system_working = 1;
96 
97 #define closed_stream_error GET_VM()->special_exceptions[ruby_error_closed_stream]
98 
99 inline static void
101 {
102  st_delete(table, &key, 0);
103 }
104 
105 /********************************************************************************/
106 
107 #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
108 
112 };
113 
115  struct rb_unblock_callback *old, int fail_if_interrupted);
116 static void reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old);
117 
118 static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
119  rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
120 static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
121 
122 #ifdef __ia64
123 #define RB_GC_SAVE_MACHINE_REGISTER_STACK(th) \
124  do{(th)->machine.register_stack_end = rb_ia64_bsp();}while(0)
125 #else
126 #define RB_GC_SAVE_MACHINE_REGISTER_STACK(th)
127 #endif
128 #define RB_GC_SAVE_MACHINE_CONTEXT(th) \
129  do { \
130  FLUSH_REGISTER_WINDOWS; \
131  RB_GC_SAVE_MACHINE_REGISTER_STACK(th); \
132  setjmp((th)->machine.regs); \
133  SET_MACHINE_STACK_END(&(th)->machine.stack_end); \
134  } while (0)
135 
136 #define GVL_UNLOCK_BEGIN() do { \
137  rb_thread_t *_th_stored = GET_THREAD(); \
138  RB_GC_SAVE_MACHINE_CONTEXT(_th_stored); \
139  gvl_release(_th_stored->vm);
140 
141 #define GVL_UNLOCK_END() \
142  gvl_acquire(_th_stored->vm, _th_stored); \
143  rb_thread_set_current(_th_stored); \
144 } while(0)
145 
146 #ifdef __GNUC__
147 #define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
148 #else
149 #define only_if_constant(expr, notconst) notconst
150 #endif
151 #define BLOCKING_REGION(exec, ubf, ubfarg, fail_if_interrupted) do { \
152  rb_thread_t *__th = GET_THREAD(); \
153  struct rb_blocking_region_buffer __region; \
154  if (blocking_region_begin(__th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
155  /* always return true unless fail_if_interrupted */ \
156  !only_if_constant(fail_if_interrupted, TRUE)) { \
157  exec; \
158  blocking_region_end(__th, &__region); \
159  }; \
160 } while(0)
161 
162 #if THREAD_DEBUG
163 #ifdef HAVE_VA_ARGS_MACRO
164 void rb_thread_debug(const char *file, int line, const char *fmt, ...);
165 #define thread_debug(fmt, ...) rb_thread_debug(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
166 #define POSITION_FORMAT "%s:%d:"
167 #define POSITION_ARGS ,file, line
168 #else
169 void rb_thread_debug(const char *fmt, ...);
170 #define thread_debug rb_thread_debug
171 #define POSITION_FORMAT
172 #define POSITION_ARGS
173 #endif
174 
175 # if THREAD_DEBUG < 0
176 static int rb_thread_debug_enabled;
177 
178 /*
179  * call-seq:
180  * Thread.DEBUG -> num
181  *
182  * Returns the thread debug level. Available only if compiled with
183  * THREAD_DEBUG=-1.
184  */
185 
186 static VALUE
187 rb_thread_s_debug(void)
188 {
189  return INT2NUM(rb_thread_debug_enabled);
190 }
191 
192 /*
193  * call-seq:
194  * Thread.DEBUG = num
195  *
196  * Sets the thread debug level. Available only if compiled with
197  * THREAD_DEBUG=-1.
198  */
199 
200 static VALUE
201 rb_thread_s_debug_set(VALUE self, VALUE val)
202 {
203  rb_thread_debug_enabled = RTEST(val) ? NUM2INT(val) : 0;
204  return val;
205 }
206 # else
207 # define rb_thread_debug_enabled THREAD_DEBUG
208 # endif
209 #else
210 #define thread_debug if(0)printf
211 #endif
212 
213 #ifndef __ia64
214 #define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)
215 #endif
216 NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start,
217  VALUE *register_stack_start));
218 static void timer_thread_function(void *);
219 
220 #if defined(_WIN32)
221 #include "thread_win32.c"
222 
223 #define DEBUG_OUT() \
224  WaitForSingleObject(&debug_mutex, INFINITE); \
225  printf(POSITION_FORMAT"%p - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
226  fflush(stdout); \
227  ReleaseMutex(&debug_mutex);
228 
229 #elif defined(HAVE_PTHREAD_H)
230 #include "thread_pthread.c"
231 
232 #define DEBUG_OUT() \
233  pthread_mutex_lock(&debug_mutex); \
234  printf(POSITION_FORMAT"%#"PRIxVALUE" - %s" POSITION_ARGS, (VALUE)pthread_self(), buf); \
235  fflush(stdout); \
236  pthread_mutex_unlock(&debug_mutex);
237 
238 #else
239 #error "unsupported thread type"
240 #endif
241 
242 #if THREAD_DEBUG
243 static int debug_mutex_initialized = 1;
244 static rb_nativethread_lock_t debug_mutex;
245 
246 void
247 rb_thread_debug(
248 #ifdef HAVE_VA_ARGS_MACRO
249  const char *file, int line,
250 #endif
251  const char *fmt, ...)
252 {
253  va_list args;
254  char buf[BUFSIZ];
255 
256  if (!rb_thread_debug_enabled) return;
257 
258  if (debug_mutex_initialized == 1) {
259  debug_mutex_initialized = 0;
260  native_mutex_initialize(&debug_mutex);
261  }
262 
263  va_start(args, fmt);
264  vsnprintf(buf, BUFSIZ, fmt, args);
265  va_end(args);
266 
267  DEBUG_OUT();
268 }
269 #endif
270 
271 void
273 {
274  gvl_release(vm);
275  gvl_destroy(vm);
276  native_mutex_destroy(&vm->thread_destruct_lock);
277 }
278 
279 void
281 {
282  native_mutex_initialize(lock);
283 }
284 
285 void
287 {
288  native_mutex_destroy(lock);
289 }
290 
291 void
293 {
294  native_mutex_lock(lock);
295 }
296 
297 void
299 {
300  native_mutex_unlock(lock);
301 }
302 
303 static int
305  struct rb_unblock_callback *old, int fail_if_interrupted)
306 {
307  check_ints:
308  if (fail_if_interrupted) {
309  if (RUBY_VM_INTERRUPTED_ANY(th)) {
310  return FALSE;
311  }
312  }
313  else {
314  RUBY_VM_CHECK_INTS(th);
315  }
316 
317  native_mutex_lock(&th->interrupt_lock);
318  if (RUBY_VM_INTERRUPTED_ANY(th)) {
319  native_mutex_unlock(&th->interrupt_lock);
320  goto check_ints;
321  }
322  else {
323  if (old) *old = th->unblock;
324  th->unblock.func = func;
325  th->unblock.arg = arg;
326  }
327  native_mutex_unlock(&th->interrupt_lock);
328 
329  return TRUE;
330 }
331 
332 static void
334 {
335  native_mutex_lock(&th->interrupt_lock);
336  th->unblock = *old;
337  native_mutex_unlock(&th->interrupt_lock);
338 }
339 
340 static void
342 {
343  native_mutex_lock(&th->interrupt_lock);
344  if (trap)
346  else
348  if (th->unblock.func) {
349  (th->unblock.func)(th->unblock.arg);
350  }
351  else {
352  /* none */
353  }
354  native_cond_signal(&th->interrupt_cond);
355  native_mutex_unlock(&th->interrupt_lock);
356 }
357 
358 void
360 {
362 }
363 
364 void
366 {
368 }
369 
370 static int
372 {
373  VALUE thval = key;
374  rb_thread_t *th;
375  GetThreadPtr(thval, th);
376 
377  if (th != main_thread) {
378  thread_debug("terminate_i: %p\n", (void *)th);
381  }
382  else {
383  thread_debug("terminate_i: main thread (%p)\n", (void *)th);
384  }
385  return ST_CONTINUE;
386 }
387 
388 typedef struct rb_mutex_struct
389 {
392  struct rb_thread_struct volatile *th;
396 } rb_mutex_t;
397 
398 static void rb_mutex_abandon_all(rb_mutex_t *mutexes);
401 static const char* rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th);
402 
403 void
405 {
406  const char *err;
407  rb_mutex_t *mutex;
408  rb_mutex_t *mutexes = th->keeping_mutexes;
409 
410  while (mutexes) {
411  mutex = mutexes;
412  /* rb_warn("mutex #<%p> remains to be locked by terminated thread",
413  mutexes); */
414  mutexes = mutex->next_mutex;
415  err = rb_mutex_unlock_th(mutex, th);
416  if (err) rb_bug("invalid keeping_mutexes: %s", err);
417  }
418 }
419 
420 void
422 {
423  rb_thread_t *th = GET_THREAD(); /* main thread */
424  rb_vm_t *vm = th->vm;
425 
426  if (vm->main_thread != th) {
427  rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
428  (void *)vm->main_thread, (void *)th);
429  }
430 
431  /* unlock all locking mutexes */
433 
434  retry:
435  thread_debug("rb_thread_terminate_all (main thread: %p)\n", (void *)th);
437 
438  while (!rb_thread_alone()) {
439  int state;
440 
441  TH_PUSH_TAG(th);
442  if ((state = TH_EXEC_TAG()) == 0) {
443  /*
444  * Thread exiting routine in thread_start_func_2 notify
445  * me when the last sub-thread exit.
446  */
447  native_sleep(th, 0);
449  }
450  TH_POP_TAG();
451 
452  /*
453  * When caught an exception (e.g. Ctrl+C), let's broadcast
454  * kill request again to ensure killing all threads even
455  * if they are blocked on sleep, mutex, etc.
456  */
457  if (state) {
458  goto retry;
459  }
460  }
461 }
462 
463 static void
465 {
466  rb_thread_t *th = th_ptr;
469 #ifdef __ia64
470  th->machine.register_stack_start = th->machine.register_stack_end = 0;
471 #endif
472 }
473 
474 static void
475 thread_cleanup_func(void *th_ptr, int atfork)
476 {
477  rb_thread_t *th = th_ptr;
478 
481 
482  /*
483  * Unfortunately, we can't release native threading resource at fork
484  * because libc may have unstable locking state therefore touching
485  * a threading resource may cause a deadlock.
486  */
487  if (atfork)
488  return;
489 
490  native_mutex_destroy(&th->interrupt_lock);
491  native_thread_destroy(th);
492 }
493 
494 static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
495 
496 void
498 {
499  native_thread_init_stack(th);
500 }
501 
502 static int
503 thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_start)
504 {
505  int state;
506  VALUE args = th->first_args;
507  rb_proc_t *proc;
508  rb_thread_list_t *join_list;
509  rb_thread_t *main_th;
510  VALUE errinfo = Qnil;
511 # ifdef USE_SIGALTSTACK
512  void rb_register_sigaltstack(rb_thread_t *th);
513 
514  rb_register_sigaltstack(th);
515 # endif
516 
517  if (th == th->vm->main_thread)
518  rb_bug("thread_start_func_2 must not be used for main thread");
519 
520  ruby_thread_set_native(th);
521 
522  th->machine.stack_start = stack_start;
523 #ifdef __ia64
524  th->machine.register_stack_start = register_stack_start;
525 #endif
526  thread_debug("thread start: %p\n", (void *)th);
527 
528  gvl_acquire(th->vm, th);
529  {
530  thread_debug("thread start (get lock): %p\n", (void *)th);
532 
533  TH_PUSH_TAG(th);
534  if ((state = EXEC_TAG()) == 0) {
536  if (!th->first_func) {
537  GetProcPtr(th->first_proc, proc);
538  th->errinfo = Qnil;
539  th->root_lep = rb_vm_ep_local_ep(proc->block.ep);
540  th->root_svar = Qnil;
541  EXEC_EVENT_HOOK(th, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, Qundef);
542  th->value = rb_vm_invoke_proc(th, proc, (int)RARRAY_LEN(args), RARRAY_CONST_PTR(args), 0);
543  EXEC_EVENT_HOOK(th, RUBY_EVENT_THREAD_END, th->self, 0, 0, Qundef);
544  }
545  else {
546  th->value = (*th->first_func)((void *)args);
547  }
548  });
549  }
550  else {
551  errinfo = th->errinfo;
552  if (state == TAG_FATAL) {
553  /* fatal error within this thread, need to stop whole script */
554  }
555  else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
556  /* exit on main_thread. */
557  }
558  else if (th->vm->thread_abort_on_exception ||
560  /* exit on main_thread */
561  }
562  else {
563  errinfo = Qnil;
564  }
565  th->value = Qnil;
566  }
567 
569  thread_debug("thread end: %p\n", (void *)th);
570 
571  main_th = th->vm->main_thread;
572  if (main_th == th) {
573  ruby_stop(0);
574  }
575  if (RB_TYPE_P(errinfo, T_OBJECT)) {
576  /* treat with normal error object */
577  rb_threadptr_raise(main_th, 1, &errinfo);
578  }
579  TH_POP_TAG();
580 
581  /* locking_mutex must be Qfalse */
582  if (th->locking_mutex != Qfalse) {
583  rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
584  (void *)th, th->locking_mutex);
585  }
586 
587  /* delete self other than main thread from living_threads */
589  if (rb_thread_alone()) {
590  /* I'm last thread. wake up main thread from rb_thread_terminate_all */
591  rb_threadptr_interrupt(main_th);
592  }
593 
594  /* wake up joining threads */
595  join_list = th->join_list;
596  while (join_list) {
597  rb_threadptr_interrupt(join_list->th);
598  switch (join_list->th->status) {
600  join_list->th->status = THREAD_RUNNABLE;
601  default: break;
602  }
603  join_list = join_list->next;
604  }
605 
608 
609  if (!th->root_fiber) {
611  th->stack = 0;
612  }
613  }
614  native_mutex_lock(&th->vm->thread_destruct_lock);
615  /* make sure vm->running_thread never point me after this point.*/
616  th->vm->running_thread = NULL;
617  native_mutex_unlock(&th->vm->thread_destruct_lock);
619  gvl_release(th->vm);
620 
621  return 0;
622 }
623 
624 static VALUE
626 {
627  rb_thread_t *th, *current_th = GET_THREAD();
628  int err;
629 
630  if (OBJ_FROZEN(GET_THREAD()->thgroup)) {
632  "can't start a new thread (frozen ThreadGroup)");
633  }
634  GetThreadPtr(thval, th);
635 
636  /* setup thread environment */
637  th->first_func = fn;
638  th->first_proc = fn ? Qfalse : rb_block_proc();
639  th->first_args = args; /* GC: shouldn't put before above line */
640 
641  th->priority = current_th->priority;
642  th->thgroup = current_th->thgroup;
643 
648 
649  th->interrupt_mask = 0;
650 
651  native_mutex_initialize(&th->interrupt_lock);
652  native_cond_initialize(&th->interrupt_cond, RB_CONDATTR_CLOCK_MONOTONIC);
653 
654  /* kick thread */
655  err = native_thread_create(th);
656  if (err) {
658  rb_raise(rb_eThreadError, "can't create Thread: %s", strerror(err));
659  }
661  return thval;
662 }
663 
664 /*
665  * call-seq:
666  * Thread.new { ... } -> thread
667  * Thread.new(*args, &proc) -> thread
668  * Thread.new(*args) { |args| ... } -> thread
669  *
670  * Creates a new thread executing the given block.
671  *
672  * Any +args+ given to ::new will be passed to the block:
673  *
674  * arr = []
675  * a, b, c = 1, 2, 3
676  * Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
677  * arr #=> [1, 2, 3]
678  *
679  * A ThreadError exception is raised if ::new is called without a block.
680  *
681  * If you're going to subclass Thread, be sure to call super in your
682  * +initialize+ method, otherwise a ThreadError will be raised.
683  */
684 static VALUE
686 {
687  rb_thread_t *th;
688  VALUE thread = rb_thread_alloc(klass);
689 
690  if (GET_VM()->main_thread->status == THREAD_KILLED)
691  rb_raise(rb_eThreadError, "can't alloc thread");
692 
693  rb_obj_call_init(thread, argc, argv);
694  GetThreadPtr(thread, th);
695  if (!th->first_args) {
696  rb_raise(rb_eThreadError, "uninitialized thread - check `%s#initialize'",
697  rb_class2name(klass));
698  }
699  return thread;
700 }
701 
702 /*
703  * call-seq:
704  * Thread.start([args]*) {|args| block } -> thread
705  * Thread.fork([args]*) {|args| block } -> thread
706  *
707  * Basically the same as ::new. However, if class Thread is subclassed, then
708  * calling +start+ in that subclass will not invoke the subclass's
709  * +initialize+ method.
710  */
711 
712 static VALUE
714 {
715  return thread_create_core(rb_thread_alloc(klass), args, 0);
716 }
717 
718 /* :nodoc: */
719 static VALUE
721 {
722  rb_thread_t *th;
723  if (!rb_block_given_p()) {
724  rb_raise(rb_eThreadError, "must be called with a block");
725  }
726  GetThreadPtr(thread, th);
727  if (th->first_args) {
728  VALUE proc = th->first_proc, line, loc;
729  const char *file;
730  if (!proc || !RTEST(loc = rb_proc_location(proc))) {
731  rb_raise(rb_eThreadError, "already initialized thread");
732  }
733  file = RSTRING_PTR(RARRAY_AREF(loc, 0));
734  if (NIL_P(line = RARRAY_AREF(loc, 1))) {
735  rb_raise(rb_eThreadError, "already initialized thread - %s",
736  file);
737  }
738  rb_raise(rb_eThreadError, "already initialized thread - %s:%d",
739  file, NUM2INT(line));
740  }
741  return thread_create_core(thread, args, 0);
742 }
743 
744 VALUE
745 rb_thread_create(VALUE (*fn)(ANYARGS), void *arg)
746 {
748 }
749 
750 
751 /* +infty, for this purpose */
752 #define DELAY_INFTY 1E30
753 
754 struct join_arg {
756  double limit;
757  int forever;
758 };
759 
760 static VALUE
762 {
763  struct join_arg *p = (struct join_arg *)arg;
764  rb_thread_t *target_th = p->target, *th = p->waiting;
765 
766  if (target_th->status != THREAD_KILLED) {
767  rb_thread_list_t **p = &target_th->join_list;
768 
769  while (*p) {
770  if ((*p)->th == th) {
771  *p = (*p)->next;
772  break;
773  }
774  p = &(*p)->next;
775  }
776  }
777 
778  return Qnil;
779 }
780 
781 static VALUE
783 {
784  struct join_arg *p = (struct join_arg *)arg;
785  rb_thread_t *target_th = p->target, *th = p->waiting;
786  double now, limit = p->limit;
787 
788  while (target_th->status != THREAD_KILLED) {
789  if (p->forever) {
790  sleep_forever(th, 1, 0);
791  }
792  else {
793  now = timeofday();
794  if (now > limit) {
795  thread_debug("thread_join: timeout (thid: %p)\n",
796  (void *)target_th->thread_id);
797  return Qfalse;
798  }
799  sleep_wait_for_interrupt(th, limit - now, 0);
800  }
801  thread_debug("thread_join: interrupted (thid: %p)\n",
802  (void *)target_th->thread_id);
803  }
804  return Qtrue;
805 }
806 
807 static VALUE
808 thread_join(rb_thread_t *target_th, double delay)
809 {
810  rb_thread_t *th = GET_THREAD();
811  struct join_arg arg;
812 
813  if (th == target_th) {
814  rb_raise(rb_eThreadError, "Target thread must not be current thread");
815  }
816  if (GET_VM()->main_thread == target_th) {
817  rb_raise(rb_eThreadError, "Target thread must not be main thread");
818  }
819 
820  arg.target = target_th;
821  arg.waiting = th;
822  arg.limit = timeofday() + delay;
823  arg.forever = delay == DELAY_INFTY;
824 
825  thread_debug("thread_join (thid: %p)\n", (void *)target_th->thread_id);
826 
827  if (target_th->status != THREAD_KILLED) {
829  list.next = target_th->join_list;
830  list.th = th;
831  target_th->join_list = &list;
832  if (!rb_ensure(thread_join_sleep, (VALUE)&arg,
833  remove_from_join_list, (VALUE)&arg)) {
834  return Qnil;
835  }
836  }
837 
838  thread_debug("thread_join: success (thid: %p)\n",
839  (void *)target_th->thread_id);
840 
841  if (target_th->errinfo != Qnil) {
842  VALUE err = target_th->errinfo;
843 
844  if (FIXNUM_P(err)) {
845  /* */
846  }
847  else if (RB_TYPE_P(target_th->errinfo, T_NODE)) {
850  }
851  else {
852  /* normal exception */
853  rb_exc_raise(err);
854  }
855  }
856  return target_th->self;
857 }
858 
859 /*
860  * call-seq:
861  * thr.join -> thr
862  * thr.join(limit) -> thr
863  *
864  * The calling thread will suspend execution and run this +thr+.
865  *
866  * Does not return until +thr+ exits or until the given +limit+ seconds have
867  * passed.
868  *
869  * If the time limit expires, +nil+ will be returned, otherwise +thr+ is
870  * returned.
871  *
872  * Any threads not joined will be killed when the main program exits.
873  *
874  * If +thr+ had previously raised an exception and the ::abort_on_exception or
875  * $DEBUG flags are not set, (so the exception has not yet been processed), it
876  * will be processed at this time.
877  *
878  * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
879  * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
880  * x.join # Let thread x finish, thread a will be killed on exit.
881  * #=> "axyz"
882  *
883  * The following example illustrates the +limit+ parameter.
884  *
885  * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
886  * puts "Waiting" until y.join(0.15)
887  *
888  * This will produce:
889  *
890  * tick...
891  * Waiting
892  * tick...
893  * Waiting
894  * tick...
895  * tick...
896  */
897 
898 static VALUE
900 {
901  rb_thread_t *target_th;
902  double delay = DELAY_INFTY;
903  VALUE limit;
904 
905  GetThreadPtr(self, target_th);
906 
907  rb_scan_args(argc, argv, "01", &limit);
908  if (!NIL_P(limit)) {
909  delay = rb_num2dbl(limit);
910  }
911 
912  return thread_join(target_th, delay);
913 }
914 
915 /*
916  * call-seq:
917  * thr.value -> obj
918  *
919  * Waits for +thr+ to complete, using #join, and returns its value or raises
920  * the exception which terminated the thread.
921  *
922  * a = Thread.new { 2 + 2 }
923  * a.value #=> 4
924  *
925  * b = Thread.new { raise 'something went wrong' }
926  * b.value #=> RuntimeError: something went wrong
927  */
928 
929 static VALUE
931 {
932  rb_thread_t *th;
933  GetThreadPtr(self, th);
935  return th->value;
936 }
937 
938 /*
939  * Thread Scheduling
940  */
941 
942 /*
943  * The type of tv_sec in struct timeval is time_t in POSIX.
944  * But several systems violate POSIX.
945  *
946  * OpenBSD 5.2 (amd64):
947  * time_t: int (signed 32bit integer)
948  * tv_sec: long (signed 64bit integer)
949  *
950  * MinGW-w64 (x64):
951  * time_t: long long (signed 64bit integer)
952  * tv_sec: long (signed 32bit integer)
953  */
954 
955 #if SIGNEDNESS_OF_TIME_T < 0 /* signed */
956 # define TIMEVAL_SEC_MAX SIGNED_INTEGER_MAX(TYPEOF_TIMEVAL_TV_SEC)
957 # define TIMEVAL_SEC_MIN SIGNED_INTEGER_MIN(TYPEOF_TIMEVAL_TV_SEC)
958 #elif SIGNEDNESS_OF_TIME_T > 0 /* unsigned */
959 # define TIMEVAL_SEC_MAX ((TYPEOF_TIMEVAL_TV_SEC)(~(unsigned_time_t)0))
960 # define TIMEVAL_SEC_MIN ((TYPEOF_TIMEVAL_TV_SEC)0)
961 #endif
962 
963 static struct timeval
964 double2timeval(double d)
965 {
966  /* assume timeval.tv_sec has same signedness as time_t */
967  const double TIMEVAL_SEC_MAX_PLUS_ONE = (2*(double)(TIMEVAL_SEC_MAX/2+1));
968 
969  struct timeval time;
970 
971  if (TIMEVAL_SEC_MAX_PLUS_ONE <= d) {
972  time.tv_sec = TIMEVAL_SEC_MAX;
973  time.tv_usec = 999999;
974  }
975  else if (d <= TIMEVAL_SEC_MIN) {
976  time.tv_sec = TIMEVAL_SEC_MIN;
977  time.tv_usec = 0;
978  }
979  else {
980  time.tv_sec = (TYPEOF_TIMEVAL_TV_SEC)d;
981  time.tv_usec = (int)((d - (time_t)d) * 1e6);
982  if (time.tv_usec < 0) {
983  time.tv_usec += (int)1e6;
984  time.tv_sec -= 1;
985  }
986  }
987  return time;
988 }
989 
990 static void
991 sleep_forever(rb_thread_t *th, int deadlockable, int spurious_check)
992 {
993  enum rb_thread_status prev_status = th->status;
994  enum rb_thread_status status = deadlockable ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
995 
996  th->status = status;
998  while (th->status == status) {
999  if (deadlockable) {
1000  th->vm->sleeper++;
1001  rb_check_deadlock(th->vm);
1002  }
1003  native_sleep(th, 0);
1004  if (deadlockable) {
1005  th->vm->sleeper--;
1006  }
1008  if (!spurious_check)
1009  break;
1010  }
1011  th->status = prev_status;
1012 }
1013 
1014 static void
1016 {
1017 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
1018  struct timespec ts;
1019 
1020  if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
1021  tp->tv_sec = ts.tv_sec;
1022  tp->tv_usec = ts.tv_nsec / 1000;
1023  } else
1024 #endif
1025  {
1026  gettimeofday(tp, NULL);
1027  }
1028 }
1029 
1030 static void
1031 sleep_timeval(rb_thread_t *th, struct timeval tv, int spurious_check)
1032 {
1033  struct timeval to, tvn;
1034  enum rb_thread_status prev_status = th->status;
1035 
1036  getclockofday(&to);
1037  if (TIMEVAL_SEC_MAX - tv.tv_sec < to.tv_sec)
1038  to.tv_sec = TIMEVAL_SEC_MAX;
1039  else
1040  to.tv_sec += tv.tv_sec;
1041  if ((to.tv_usec += tv.tv_usec) >= 1000000) {
1042  if (to.tv_sec == TIMEVAL_SEC_MAX)
1043  to.tv_usec = 999999;
1044  else {
1045  to.tv_sec++;
1046  to.tv_usec -= 1000000;
1047  }
1048  }
1049 
1050  th->status = THREAD_STOPPED;
1052  while (th->status == THREAD_STOPPED) {
1053  native_sleep(th, &tv);
1055  getclockofday(&tvn);
1056  if (to.tv_sec < tvn.tv_sec) break;
1057  if (to.tv_sec == tvn.tv_sec && to.tv_usec <= tvn.tv_usec) break;
1058  thread_debug("sleep_timeval: %"PRI_TIMET_PREFIX"d.%.6ld > %"PRI_TIMET_PREFIX"d.%.6ld\n",
1059  (time_t)to.tv_sec, (long)to.tv_usec,
1060  (time_t)tvn.tv_sec, (long)tvn.tv_usec);
1061  tv.tv_sec = to.tv_sec - tvn.tv_sec;
1062  if ((tv.tv_usec = to.tv_usec - tvn.tv_usec) < 0) {
1063  --tv.tv_sec;
1064  tv.tv_usec += 1000000;
1065  }
1066  if (!spurious_check)
1067  break;
1068  }
1069  th->status = prev_status;
1070 }
1071 
1072 void
1074 {
1075  thread_debug("rb_thread_sleep_forever\n");
1076  sleep_forever(GET_THREAD(), 0, 1);
1077 }
1078 
1079 void
1081 {
1082  thread_debug("rb_thread_sleep_deadly\n");
1083  sleep_forever(GET_THREAD(), 1, 1);
1084 }
1085 
1086 static double
1088 {
1089 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
1090  struct timespec tp;
1091 
1092  if (clock_gettime(CLOCK_MONOTONIC, &tp) == 0) {
1093  return (double)tp.tv_sec + (double)tp.tv_nsec * 1e-9;
1094  } else
1095 #endif
1096  {
1097  struct timeval tv;
1098  gettimeofday(&tv, NULL);
1099  return (double)tv.tv_sec + (double)tv.tv_usec * 1e-6;
1100  }
1101 }
1102 
1103 static void
1104 sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec, int spurious_check)
1105 {
1106  sleep_timeval(th, double2timeval(sleepsec), spurious_check);
1107 }
1108 
1109 static void
1111 {
1112  struct timeval time;
1113  time.tv_sec = 0;
1114  time.tv_usec = 100 * 1000; /* 0.1 sec */
1115  sleep_timeval(th, time, 1);
1116 }
1117 
1118 void
1120 {
1121  rb_thread_t *th = GET_THREAD();
1122  sleep_timeval(th, time, 1);
1123 }
1124 
1125 void
1127 {
1128  if (!rb_thread_alone()) {
1129  rb_thread_t *th = GET_THREAD();
1131  sleep_for_polling(th);
1132  }
1133 }
1134 
1135 /*
1136  * CAUTION: This function causes thread switching.
1137  * rb_thread_check_ints() check ruby's interrupts.
1138  * some interrupt needs thread switching/invoke handlers,
1139  * and so on.
1140  */
1141 
1142 void
1144 {
1146 }
1147 
1148 /*
1149  * Hidden API for tcl/tk wrapper.
1150  * There is no guarantee to perpetuate it.
1151  */
1152 int
1154 {
1155  return rb_signal_buff_size() != 0;
1156 }
1157 
1158 /* This function can be called in blocking region. */
1159 int
1161 {
1162  rb_thread_t *th;
1163  GetThreadPtr(thval, th);
1164  return (int)RUBY_VM_INTERRUPTED(th);
1165 }
1166 
1167 void
1169 {
1171 }
1172 
1173 static void
1174 rb_thread_schedule_limits(unsigned long limits_us)
1175 {
1176  thread_debug("rb_thread_schedule\n");
1177  if (!rb_thread_alone()) {
1178  rb_thread_t *th = GET_THREAD();
1179 
1180  if (th->running_time_us >= limits_us) {
1181  thread_debug("rb_thread_schedule/switch start\n");
1183  gvl_yield(th->vm, th);
1185  thread_debug("rb_thread_schedule/switch done\n");
1186  }
1187  }
1188 }
1189 
1190 void
1192 {
1193  rb_thread_t *cur_th = GET_THREAD();
1195 
1196  if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(cur_th))) {
1198  }
1199 }
1200 
1201 /* blocking region */
1202 
1203 static inline int
1205  rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
1206 {
1207  region->prev_status = th->status;
1208  if (set_unblock_function(th, ubf, arg, &region->oldubf, fail_if_interrupted)) {
1209  th->blocking_region_buffer = region;
1210  th->status = THREAD_STOPPED;
1211  thread_debug("enter blocking region (%p)\n", (void *)th);
1213  gvl_release(th->vm);
1214  return TRUE;
1215  }
1216  else {
1217  return FALSE;
1218  }
1219 }
1220 
1221 static inline void
1223 {
1224  gvl_acquire(th->vm, th);
1226  thread_debug("leave blocking region (%p)\n", (void *)th);
1227  remove_signal_thread_list(th);
1228  th->blocking_region_buffer = 0;
1229  reset_unblock_function(th, &region->oldubf);
1230  if (th->status == THREAD_STOPPED) {
1231  th->status = region->prev_status;
1232  }
1233 }
1234 
1237 {
1238  rb_thread_t *th = GET_THREAD();
1240  blocking_region_begin(th, region, ubf_select, th, FALSE);
1241  return region;
1242 }
1243 
1244 void
1246 {
1247  int saved_errno = errno;
1248  rb_thread_t *th = ruby_thread_from_native();
1249  blocking_region_end(th, region);
1250  xfree(region);
1252  errno = saved_errno;
1253 }
1254 
1255 static void *
1256 call_without_gvl(void *(*func)(void *), void *data1,
1257  rb_unblock_function_t *ubf, void *data2, int fail_if_interrupted)
1258 {
1259  void *val = 0;
1260 
1261  rb_thread_t *th = GET_THREAD();
1262  int saved_errno = 0;
1263 
1264  th->waiting_fd = -1;
1265  if (ubf == RUBY_UBF_IO || ubf == RUBY_UBF_PROCESS) {
1266  ubf = ubf_select;
1267  data2 = th;
1268  }
1269 
1270  BLOCKING_REGION({
1271  val = func(data1);
1272  saved_errno = errno;
1273  }, ubf, data2, fail_if_interrupted);
1274 
1275  if (!fail_if_interrupted) {
1277  }
1278 
1279  errno = saved_errno;
1280 
1281  return val;
1282 }
1283 
1284 /*
1285  * rb_thread_call_without_gvl - permit concurrent/parallel execution.
1286  * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
1287  * without interrupt process.
1288  *
1289  * rb_thread_call_without_gvl() does:
1290  * (1) Check interrupts.
1291  * (2) release GVL.
1292  * Other Ruby threads may run in parallel.
1293  * (3) call func with data1
1294  * (4) acquire GVL.
1295  * Other Ruby threads can not run in parallel any more.
1296  * (5) Check interrupts.
1297  *
1298  * rb_thread_call_without_gvl2() does:
1299  * (1) Check interrupt and return if interrupted.
1300  * (2) release GVL.
1301  * (3) call func with data1 and a pointer to the flags.
1302  * (4) acquire GVL.
1303  *
1304  * If another thread interrupts this thread (Thread#kill, signal delivery,
1305  * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
1306  * "un-blocking function"). `ubf()' should interrupt `func()' execution by
1307  * toggling a cancellation flag, canceling the invocation of a call inside
1308  * `func()' or similar. Note that `ubf()' may not be called with the GVL.
1309  *
1310  * There are built-in ubfs and you can specify these ubfs:
1311  *
1312  * * RUBY_UBF_IO: ubf for IO operation
1313  * * RUBY_UBF_PROCESS: ubf for process operation
1314  *
1315  * However, we can not guarantee our built-in ubfs interrupt your `func()'
1316  * correctly. Be careful to use rb_thread_call_without_gvl(). If you don't
1317  * provide proper ubf(), your program will not stop for Control+C or other
1318  * shutdown events.
1319  *
1320  * "Check interrupts" on above list means checking asynchronous
1321  * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
1322  * request, and so on) and calling corresponding procedures
1323  * (such as `trap' for signals, raise an exception for Thread#raise).
1324  * If `func()' finished and received interrupts, you may skip interrupt
1325  * checking. For example, assume the following func() it reads data from file.
1326  *
1327  * read_func(...) {
1328  * // (a) before read
1329  * read(buffer); // (b) reading
1330  * // (c) after read
1331  * }
1332  *
1333  * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
1334  * `read_func()' and interrupts are checked. However, if an interrupt occurs
1335  * at (c), after *read* operation is completed, checking interrupts is harmful
1336  * because it causes irrevocable side-effect, the read data will vanish. To
1337  * avoid such problem, the `read_func()' should be used with
1338  * `rb_thread_call_without_gvl2()'.
1339  *
1340  * If `rb_thread_call_without_gvl2()' detects interrupt, it returns
1341  * immediately. This function does not show when the execution was interrupted.
1342  * For example, there are 4 possible timing (a), (b), (c) and before calling
1343  * read_func(). You need to record progress of a read_func() and check
1344  * the progress after `rb_thread_call_without_gvl2()'. You may need to call
1345  * `rb_thread_check_ints()' correctly or your program can not process proper
1346  * process such as `trap' and so on.
1347  *
1348  * NOTE: You can not execute most of Ruby C API and touch Ruby
1349  * objects in `func()' and `ubf()', including raising an
1350  * exception, because current thread doesn't acquire GVL
1351  * (it causes synchronization problems). If you need to
1352  * call ruby functions either use rb_thread_call_with_gvl()
1353  * or read source code of C APIs and confirm safety by
1354  * yourself.
1355  *
1356  * NOTE: In short, this API is difficult to use safely. I recommend you
1357  * use other ways if you have. We lack experiences to use this API.
1358  * Please report your problem related on it.
1359  *
1360  * NOTE: Releasing GVL and re-acquiring GVL may be expensive operations
1361  * for a short running `func()'. Be sure to benchmark and use this
1362  * mechanism when `func()' consumes enough time.
1363  *
1364  * Safe C API:
1365  * * rb_thread_interrupted() - check interrupt flag
1366  * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
1367  * they will work without GVL, and may acquire GVL when GC is needed.
1368  */
1369 void *
1370 rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
1371  rb_unblock_function_t *ubf, void *data2)
1372 {
1373  return call_without_gvl(func, data1, ubf, data2, TRUE);
1374 }
1375 
1376 void *
1377 rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
1378  rb_unblock_function_t *ubf, void *data2)
1379 {
1380  return call_without_gvl(func, data1, ubf, data2, FALSE);
1381 }
1382 
1383 VALUE
1385 {
1386  VALUE val = Qundef; /* shouldn't be used */
1387  rb_thread_t *th = GET_THREAD();
1388  int saved_errno = 0;
1389  int state;
1390 
1391  th->waiting_fd = fd;
1392 
1393  TH_PUSH_TAG(th);
1394  if ((state = EXEC_TAG()) == 0) {
1395  BLOCKING_REGION({
1396  val = func(data1);
1397  saved_errno = errno;
1398  }, ubf_select, th, FALSE);
1399  }
1400  TH_POP_TAG();
1401 
1402  /* clear waiting_fd anytime */
1403  th->waiting_fd = -1;
1404 
1405  if (state) {
1406  JUMP_TAG(state);
1407  }
1408  /* TODO: check func() */
1410 
1411  errno = saved_errno;
1412 
1413  return val;
1414 }
1415 
1416 VALUE
1418  rb_blocking_function_t *func, void *data1,
1419  rb_unblock_function_t *ubf, void *data2)
1420 {
1421  void *(*f)(void*) = (void *(*)(void*))func;
1422  return (VALUE)rb_thread_call_without_gvl(f, data1, ubf, data2);
1423 }
1424 
1425 /*
1426  * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
1427  *
1428  * After releasing GVL using rb_thread_blocking_region() or
1429  * rb_thread_call_without_gvl() you can not access Ruby values or invoke
1430  * methods. If you need to access Ruby you must use this function
1431  * rb_thread_call_with_gvl().
1432  *
1433  * This function rb_thread_call_with_gvl() does:
1434  * (1) acquire GVL.
1435  * (2) call passed function `func'.
1436  * (3) release GVL.
1437  * (4) return a value which is returned at (2).
1438  *
1439  * NOTE: You should not return Ruby object at (2) because such Object
1440  * will not be marked.
1441  *
1442  * NOTE: If an exception is raised in `func', this function DOES NOT
1443  * protect (catch) the exception. If you have any resources
1444  * which should free before throwing exception, you need use
1445  * rb_protect() in `func' and return a value which represents
1446  * exception was raised.
1447  *
1448  * NOTE: This function should not be called by a thread which was not
1449  * created as Ruby thread (created by Thread.new or so). In other
1450  * words, this function *DOES NOT* associate or convert a NON-Ruby
1451  * thread to a Ruby thread.
1452  */
1453 void *
1454 rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
1455 {
1456  rb_thread_t *th = ruby_thread_from_native();
1457  struct rb_blocking_region_buffer *brb;
1458  struct rb_unblock_callback prev_unblock;
1459  void *r;
1460 
1461  if (th == 0) {
1462  /* Error has occurred, but we can't use rb_bug()
1463  * because this thread is not Ruby's thread.
1464  * What should we do?
1465  */
1466 
1467  fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
1468  exit(EXIT_FAILURE);
1469  }
1470 
1472  prev_unblock = th->unblock;
1473 
1474  if (brb == 0) {
1475  rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
1476  }
1477 
1478  blocking_region_end(th, brb);
1479  /* enter to Ruby world: You can access Ruby values, methods and so on. */
1480  r = (*func)(data1);
1481  /* leave from Ruby world: You can not access Ruby values, etc. */
1482  blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
1483  return r;
1484 }
1485 
1486 /*
1487  * ruby_thread_has_gvl_p - check if current native thread has GVL.
1488  *
1489  ***
1490  *** This API is EXPERIMENTAL!
1491  *** We do not guarantee that this API remains in ruby 1.9.2 or later.
1492  ***
1493  */
1494 
1495 int
1497 {
1498  rb_thread_t *th = ruby_thread_from_native();
1499 
1500  if (th && th->blocking_region_buffer == 0) {
1501  return 1;
1502  }
1503  else {
1504  return 0;
1505  }
1506 }
1507 
1508 /*
1509  * call-seq:
1510  * Thread.pass -> nil
1511  *
1512  * Give the thread scheduler a hint to pass execution to another thread.
1513  * A running thread may or may not switch, it depends on OS and processor.
1514  */
1515 
1516 static VALUE
1518 {
1520  return Qnil;
1521 }
1522 
1523 /*****************************************************/
1524 
1525 /*
1526  * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
1527  *
1528  * Async events such as an exception thrown by Thread#raise,
1529  * Thread#kill and thread termination (after main thread termination)
1530  * will be queued to th->pending_interrupt_queue.
1531  * - clear: clear the queue.
1532  * - enque: enqueue err object into queue.
1533  * - deque: dequeue err object from queue.
1534  * - active_p: return 1 if the queue should be checked.
1535  *
1536  * All rb_threadptr_pending_interrupt_* functions are called by
1537  * a GVL acquired thread, of course.
1538  * Note that all "rb_" prefix APIs need GVL to call.
1539  */
1540 
1541 void
1543 {
1545 }
1546 
1547 void
1549 {
1552 }
1553 
1559 };
1560 
1561 static enum handle_interrupt_timing
1563 {
1564  VALUE mask;
1565  long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
1566  const VALUE *mask_stack = RARRAY_CONST_PTR(th->pending_interrupt_mask_stack);
1567  VALUE ancestors = rb_mod_ancestors(err); /* TODO: GC guard */
1568  long ancestors_len = RARRAY_LEN(ancestors);
1569  const VALUE *ancestors_ptr = RARRAY_CONST_PTR(ancestors);
1570  int i, j;
1571 
1572  for (i=0; i<mask_stack_len; i++) {
1573  mask = mask_stack[mask_stack_len-(i+1)];
1574 
1575  for (j=0; j<ancestors_len; j++) {
1576  VALUE klass = ancestors_ptr[j];
1577  VALUE sym;
1578 
1579  /* TODO: remove rb_intern() */
1580  if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
1581  if (sym == sym_immediate) {
1582  return INTERRUPT_IMMEDIATE;
1583  }
1584  else if (sym == sym_on_blocking) {
1585  return INTERRUPT_ON_BLOCKING;
1586  }
1587  else if (sym == sym_never) {
1588  return INTERRUPT_NEVER;
1589  }
1590  else {
1591  rb_raise(rb_eThreadError, "unknown mask signature");
1592  }
1593  }
1594  }
1595  /* try to next mask */
1596  }
1597  return INTERRUPT_NONE;
1598 }
1599 
1600 static int
1602 {
1603  return RARRAY_LEN(th->pending_interrupt_queue) == 0;
1604 }
1605 
1606 static int
1608 {
1609  int i;
1610  for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
1612  if (rb_class_inherited_p(e, err)) {
1613  return TRUE;
1614  }
1615  }
1616  return FALSE;
1617 }
1618 
1619 static VALUE
1621 {
1622 #if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
1623  int i;
1624 
1625  for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
1627 
1629 
1630  switch (mask_timing) {
1631  case INTERRUPT_ON_BLOCKING:
1632  if (timing != INTERRUPT_ON_BLOCKING) {
1633  break;
1634  }
1635  /* fall through */
1636  case INTERRUPT_NONE: /* default: IMMEDIATE */
1637  case INTERRUPT_IMMEDIATE:
1639  return err;
1640  case INTERRUPT_NEVER:
1641  break;
1642  }
1643  }
1644 
1646  return Qundef;
1647 #else
1651  }
1652  return err;
1653 #endif
1654 }
1655 
1656 int
1658 {
1659  /*
1660  * For optimization, we don't check async errinfo queue
1661  * if the queue and the thread interrupt mask were not changed
1662  * since last check.
1663  */
1665  return 0;
1666  }
1667 
1669  return 0;
1670  }
1671 
1672  return 1;
1673 }
1674 
1675 static int
1677 {
1678  if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
1679  rb_raise(rb_eArgError, "unknown mask signature");
1680  }
1681 
1682  return ST_CONTINUE;
1683 }
1684 
1685 /*
1686  * call-seq:
1687  * Thread.handle_interrupt(hash) { ... } -> result of the block
1688  *
1689  * Changes asynchronous interrupt timing.
1690  *
1691  * _interrupt_ means asynchronous event and corresponding procedure
1692  * by Thread#raise, Thread#kill, signal trap (not supported yet)
1693  * and main thread termination (if main thread terminates, then all
1694  * other thread will be killed).
1695  *
1696  * The given +hash+ has pairs like <code>ExceptionClass =>
1697  * :TimingSymbol</code>. Where the ExceptionClass is the interrupt handled by
1698  * the given block. The TimingSymbol can be one of the following symbols:
1699  *
1700  * [+:immediate+] Invoke interrupts immediately.
1701  * [+:on_blocking+] Invoke interrupts while _BlockingOperation_.
1702  * [+:never+] Never invoke all interrupts.
1703  *
1704  * _BlockingOperation_ means that the operation will block the calling thread,
1705  * such as read and write. On CRuby implementation, _BlockingOperation_ is any
1706  * operation executed without GVL.
1707  *
1708  * Masked asynchronous interrupts are delayed until they are enabled.
1709  * This method is similar to sigprocmask(3).
1710  *
1711  * === NOTE
1712  *
1713  * Asynchronous interrupts are difficult to use.
1714  *
1715  * If you need to communicate between threads, please consider to use another way such as Queue.
1716  *
1717  * Or use them with deep understanding about this method.
1718  *
1719  * === Usage
1720  *
1721  * In this example, we can guard from Thread#raise exceptions.
1722  *
1723  * Using the +:never+ TimingSymbol the RuntimeError exception will always be
1724  * ignored in the first block of the main thread. In the second
1725  * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
1726  *
1727  * th = Thread.new do
1728  * Thread.handle_interrupt(RuntimeError => :never) {
1729  * begin
1730  * # You can write resource allocation code safely.
1731  * Thread.handle_interrupt(RuntimeError => :immediate) {
1732  * # ...
1733  * }
1734  * ensure
1735  * # You can write resource deallocation code safely.
1736  * end
1737  * }
1738  * end
1739  * Thread.pass
1740  * # ...
1741  * th.raise "stop"
1742  *
1743  * While we are ignoring the RuntimeError exception, it's safe to write our
1744  * resource allocation code. Then, the ensure block is where we can safely
1745  * deallocate your resources.
1746  *
1747  * ==== Guarding from TimeoutError
1748  *
1749  * In the next example, we will guard from the TimeoutError exception. This
1750  * will help prevent from leaking resources when TimeoutError exceptions occur
1751  * during normal ensure clause. For this example we use the help of the
1752  * standard library Timeout, from lib/timeout.rb
1753  *
1754  * require 'timeout'
1755  * Thread.handle_interrupt(TimeoutError => :never) {
1756  * timeout(10){
1757  * # TimeoutError doesn't occur here
1758  * Thread.handle_interrupt(TimeoutError => :on_blocking) {
1759  * # possible to be killed by TimeoutError
1760  * # while blocking operation
1761  * }
1762  * # TimeoutError doesn't occur here
1763  * }
1764  * }
1765  *
1766  * In the first part of the +timeout+ block, we can rely on TimeoutError being
1767  * ignored. Then in the <code>TimeoutError => :on_blocking</code> block, any
1768  * operation that will block the calling thread is susceptible to a
1769  * TimeoutError exception being raised.
1770  *
1771  * ==== Stack control settings
1772  *
1773  * It's possible to stack multiple levels of ::handle_interrupt blocks in order
1774  * to control more than one ExceptionClass and TimingSymbol at a time.
1775  *
1776  * Thread.handle_interrupt(FooError => :never) {
1777  * Thread.handle_interrupt(BarError => :never) {
1778  * # FooError and BarError are prohibited.
1779  * }
1780  * }
1781  *
1782  * ==== Inheritance with ExceptionClass
1783  *
1784  * All exceptions inherited from the ExceptionClass parameter will be considered.
1785  *
1786  * Thread.handle_interrupt(Exception => :never) {
1787  * # all exceptions inherited from Exception are prohibited.
1788  * }
1789  *
1790  */
1791 static VALUE
1793 {
1794  VALUE mask;
1795  rb_thread_t *th = GET_THREAD();
1796  VALUE r = Qnil;
1797  int state;
1798 
1799  if (!rb_block_given_p()) {
1800  rb_raise(rb_eArgError, "block is needed.");
1801  }
1802 
1803  mask = rb_convert_type(mask_arg, T_HASH, "Hash", "to_hash");
1809  }
1810 
1811  TH_PUSH_TAG(th);
1812  if ((state = EXEC_TAG()) == 0) {
1813  r = rb_yield(Qnil);
1814  }
1815  TH_POP_TAG();
1816 
1821  }
1822 
1823  RUBY_VM_CHECK_INTS(th);
1824 
1825  if (state) {
1826  JUMP_TAG(state);
1827  }
1828 
1829  return r;
1830 }
1831 
1832 /*
1833  * call-seq:
1834  * target_thread.pending_interrupt?(error = nil) -> true/false
1835  *
1836  * Returns whether or not the asynchronous queue is empty for the target thread.
1837  *
1838  * If +error+ is given, then check only for +error+ type deferred events.
1839  *
1840  * See ::pending_interrupt? for more information.
1841  */
1842 static VALUE
1844 {
1845  rb_thread_t *target_th;
1846 
1847  GetThreadPtr(target_thread, target_th);
1848 
1849  if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
1850  return Qfalse;
1851  }
1852  else {
1853  if (argc == 1) {
1854  VALUE err;
1855  rb_scan_args(argc, argv, "01", &err);
1856  if (!rb_obj_is_kind_of(err, rb_cModule)) {
1857  rb_raise(rb_eTypeError, "class or module required for rescue clause");
1858  }
1860  return Qtrue;
1861  }
1862  else {
1863  return Qfalse;
1864  }
1865  }
1866  return Qtrue;
1867  }
1868 }
1869 
1870 /*
1871  * call-seq:
1872  * Thread.pending_interrupt?(error = nil) -> true/false
1873  *
1874  * Returns whether or not the asynchronous queue is empty.
1875  *
1876  * Since Thread::handle_interrupt can be used to defer asynchronous events,
1877  * this method can be used to determine if there are any deferred events.
1878  *
1879  * If you find this method returns true, then you may finish +:never+ blocks.
1880  *
1881  * For example, the following method processes deferred asynchronous events
1882  * immediately.
1883  *
1884  * def Thread.kick_interrupt_immediately
1885  * Thread.handle_interrupt(Object => :immediate) {
1886  * Thread.pass
1887  * }
1888  * end
1889  *
1890  * If +error+ is given, then check only for +error+ type deferred events.
1891  *
1892  * === Usage
1893  *
1894  * th = Thread.new{
1895  * Thread.handle_interrupt(RuntimeError => :on_blocking){
1896  * while true
1897  * ...
1898  * # reach safe point to invoke interrupt
1899  * if Thread.pending_interrupt?
1900  * Thread.handle_interrupt(Object => :immediate){}
1901  * end
1902  * ...
1903  * end
1904  * }
1905  * }
1906  * ...
1907  * th.raise # stop thread
1908  *
1909  * This example can also be written as the following, which you should use to
1910  * avoid asynchronous interrupts.
1911  *
1912  * flag = true
1913  * th = Thread.new{
1914  * Thread.handle_interrupt(RuntimeError => :on_blocking){
1915  * while true
1916  * ...
1917  * # reach safe point to invoke interrupt
1918  * break if flag == false
1919  * ...
1920  * end
1921  * }
1922  * }
1923  * ...
1924  * flag = false # stop thread
1925  */
1926 
1927 static VALUE
1929 {
1931 }
1932 
1933 static void
1935 {
1937  th->status = THREAD_RUNNABLE;
1938  th->to_kill = 1;
1939  th->errinfo = INT2FIX(TAG_FATAL);
1940  TH_JUMP_TAG(th, TAG_FATAL);
1941 }
1942 
1943 static inline rb_atomic_t
1945 {
1946  rb_atomic_t interrupt;
1947  rb_atomic_t old;
1948 
1949  do {
1950  interrupt = th->interrupt_flag;
1951  old = ATOMIC_CAS(th->interrupt_flag, interrupt, interrupt & th->interrupt_mask);
1952  } while (old != interrupt);
1953  return interrupt & (rb_atomic_t)~th->interrupt_mask;
1954 }
1955 
1956 void
1958 {
1959  rb_atomic_t interrupt;
1960  int postponed_job_interrupt = 0;
1961 
1962  if (th->raised_flag) return;
1963 
1964  while ((interrupt = threadptr_get_interrupts(th)) != 0) {
1965  int sig;
1966  int timer_interrupt;
1967  int pending_interrupt;
1968  int trap_interrupt;
1969 
1970  timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
1971  pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
1972  postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
1973  trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
1974 
1975  if (postponed_job_interrupt) {
1977  }
1978 
1979  /* signal handling */
1980  if (trap_interrupt && (th == th->vm->main_thread)) {
1981  enum rb_thread_status prev_status = th->status;
1982  th->status = THREAD_RUNNABLE;
1983  while ((sig = rb_get_next_signal()) != 0) {
1984  rb_signal_exec(th, sig);
1985  }
1986  th->status = prev_status;
1987  }
1988 
1989  /* exception from another thread */
1990  if (pending_interrupt && rb_threadptr_pending_interrupt_active_p(th)) {
1992  thread_debug("rb_thread_execute_interrupts: %"PRIdVALUE"\n", err);
1993 
1994  if (err == Qundef) {
1995  /* no error */
1996  }
1997  else if (err == eKillSignal /* Thread#kill received */ ||
1998  err == eTerminateSignal /* Terminate thread */ ||
1999  err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
2001  }
2002  else {
2003  /* set runnable if th was slept. */
2004  if (th->status == THREAD_STOPPED ||
2006  th->status = THREAD_RUNNABLE;
2007  rb_exc_raise(err);
2008  }
2009  }
2010 
2011  if (timer_interrupt) {
2012  unsigned long limits_us = TIME_QUANTUM_USEC;
2013 
2014  if (th->priority > 0)
2015  limits_us <<= th->priority;
2016  else
2017  limits_us >>= -th->priority;
2018 
2019  if (th->status == THREAD_RUNNABLE)
2020  th->running_time_us += TIME_QUANTUM_USEC;
2021 
2023 
2024  rb_thread_schedule_limits(limits_us);
2025  }
2026  }
2027 }
2028 
2029 void
2031 {
2032  rb_thread_t *th;
2033  GetThreadPtr(thval, th);
2035 }
2036 
2037 static void
2039 {
2041 }
2042 
2043 static VALUE
2045 {
2046  VALUE exc;
2047 
2048  if (rb_threadptr_dead(th)) {
2049  return Qnil;
2050  }
2051 
2052  if (argc == 0) {
2053  exc = rb_exc_new(rb_eRuntimeError, 0, 0);
2054  }
2055  else {
2056  exc = rb_make_exception(argc, argv);
2057  }
2060  return Qnil;
2061 }
2062 
2063 void
2065 {
2066  VALUE argv[2];
2067 
2068  argv[0] = rb_eSignal;
2069  argv[1] = INT2FIX(sig);
2071 }
2072 
2073 void
2075 {
2076  VALUE argv[2];
2077 
2078  argv[0] = rb_eSystemExit;
2079  argv[1] = rb_str_new2("exit");
2081 }
2082 
2083 #if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
2084 #define USE_SIGALTSTACK
2085 #endif
2086 
2087 void
2089 {
2090  th->raised_flag = 0;
2091 #ifdef USE_SIGALTSTACK
2093 #else
2094  th->errinfo = sysstack_error;
2095  TH_JUMP_TAG(th, TAG_RAISE);
2096 #endif
2097 }
2098 
2099 int
2101 {
2102  if (th->raised_flag & RAISED_EXCEPTION) {
2103  return 1;
2104  }
2106  return 0;
2107 }
2108 
2109 int
2111 {
2112  if (!(th->raised_flag & RAISED_EXCEPTION)) {
2113  return 0;
2114  }
2115  th->raised_flag &= ~RAISED_EXCEPTION;
2116  return 1;
2117 }
2118 
2119 static int
2121 {
2122  int fd = (int)data;
2123  rb_thread_t *th;
2124  GetThreadPtr((VALUE)key, th);
2125 
2126  if (th->waiting_fd == fd) {
2130  }
2131  return ST_CONTINUE;
2132 }
2133 
2134 void
2136 {
2137  st_foreach(GET_THREAD()->vm->living_threads, thread_fd_close_i, (st_index_t)fd);
2138 }
2139 
2140 /*
2141  * call-seq:
2142  * thr.raise
2143  * thr.raise(string)
2144  * thr.raise(exception [, string [, array]])
2145  *
2146  * Raises an exception from the given thread. The caller does not have to be
2147  * +thr+. See Kernel#raise for more information.
2148  *
2149  * Thread.abort_on_exception = true
2150  * a = Thread.new { sleep(200) }
2151  * a.raise("Gotcha")
2152  *
2153  * This will produce:
2154  *
2155  * prog.rb:3: Gotcha (RuntimeError)
2156  * from prog.rb:2:in `initialize'
2157  * from prog.rb:2:in `new'
2158  * from prog.rb:2
2159  */
2160 
2161 static VALUE
2163 {
2164  rb_thread_t *target_th;
2165  rb_thread_t *th = GET_THREAD();
2166  GetThreadPtr(self, target_th);
2167  rb_threadptr_raise(target_th, argc, argv);
2168 
2169  /* To perform Thread.current.raise as Kernel.raise */
2170  if (th == target_th) {
2171  RUBY_VM_CHECK_INTS(th);
2172  }
2173  return Qnil;
2174 }
2175 
2176 
2177 /*
2178  * call-seq:
2179  * thr.exit -> thr or nil
2180  * thr.kill -> thr or nil
2181  * thr.terminate -> thr or nil
2182  *
2183  * Terminates +thr+ and schedules another thread to be run.
2184  *
2185  * If this thread is already marked to be killed, #exit returns the Thread.
2186  *
2187  * If this is the main thread, or the last thread, exits the process.
2188  */
2189 
2190 VALUE
2192 {
2193  rb_thread_t *th;
2194 
2195  GetThreadPtr(thread, th);
2196 
2197  if (th->to_kill || th->status == THREAD_KILLED) {
2198  return thread;
2199  }
2200  if (th == th->vm->main_thread) {
2202  }
2203 
2204  thread_debug("rb_thread_kill: %p (%p)\n", (void *)th, (void *)th->thread_id);
2205 
2206  if (th == GET_THREAD()) {
2207  /* kill myself immediately */
2209  }
2210  else {
2213  }
2214  return thread;
2215 }
2216 
2217 
2218 /*
2219  * call-seq:
2220  * Thread.kill(thread) -> thread
2221  *
2222  * Causes the given +thread+ to exit, see also Thread::exit.
2223  *
2224  * count = 0
2225  * a = Thread.new { loop { count += 1 } }
2226  * sleep(0.1) #=> 0
2227  * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
2228  * count #=> 93947
2229  * a.alive? #=> false
2230  */
2231 
2232 static VALUE
2234 {
2235  return rb_thread_kill(th);
2236 }
2237 
2238 
2239 /*
2240  * call-seq:
2241  * Thread.exit -> thread
2242  *
2243  * Terminates the currently running thread and schedules another thread to be
2244  * run.
2245  *
2246  * If this thread is already marked to be killed, ::exit returns the Thread.
2247  *
2248  * If this is the main thread, or the last thread, exit the process.
2249  */
2250 
2251 static VALUE
2253 {
2254  rb_thread_t *th = GET_THREAD();
2255  return rb_thread_kill(th->self);
2256 }
2257 
2258 
2259 /*
2260  * call-seq:
2261  * thr.wakeup -> thr
2262  *
2263  * Marks a given thread as eligible for scheduling, however it may still
2264  * remain blocked on I/O.
2265  *
2266  * *Note:* This does not invoke the scheduler, see #run for more information.
2267  *
2268  * c = Thread.new { Thread.stop; puts "hey!" }
2269  * sleep 0.1 while c.status!='sleep'
2270  * c.wakeup
2271  * c.join
2272  * #=> "hey!"
2273  */
2274 
2275 VALUE
2277 {
2278  if (!RTEST(rb_thread_wakeup_alive(thread))) {
2279  rb_raise(rb_eThreadError, "killed thread");
2280  }
2281  return thread;
2282 }
2283 
2284 VALUE
2286 {
2287  rb_thread_t *th;
2288  GetThreadPtr(thread, th);
2289 
2290  if (th->status == THREAD_KILLED) {
2291  return Qnil;
2292  }
2293  rb_threadptr_ready(th);
2294  if (th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER)
2295  th->status = THREAD_RUNNABLE;
2296  return thread;
2297 }
2298 
2299 
2300 /*
2301  * call-seq:
2302  * thr.run -> thr
2303  *
2304  * Wakes up +thr+, making it eligible for scheduling.
2305  *
2306  * a = Thread.new { puts "a"; Thread.stop; puts "c" }
2307  * sleep 0.1 while a.status!='sleep'
2308  * puts "Got here"
2309  * a.run
2310  * a.join
2311  *
2312  * This will produce:
2313  *
2314  * a
2315  * Got here
2316  * c
2317  *
2318  * See also the instance method #wakeup.
2319  */
2320 
2321 VALUE
2323 {
2324  rb_thread_wakeup(thread);
2326  return thread;
2327 }
2328 
2329 
2330 /*
2331  * call-seq:
2332  * Thread.stop -> nil
2333  *
2334  * Stops execution of the current thread, putting it into a ``sleep'' state,
2335  * and schedules execution of another thread.
2336  *
2337  * a = Thread.new { print "a"; Thread.stop; print "c" }
2338  * sleep 0.1 while a.status!='sleep'
2339  * print "b"
2340  * a.run
2341  * a.join
2342  * #=> "abc"
2343  */
2344 
2345 VALUE
2347 {
2348  if (rb_thread_alone()) {
2350  "stopping only thread\n\tnote: use sleep to stop forever");
2351  }
2353  return Qnil;
2354 }
2355 
2356 static int
2358 {
2359  VALUE ary = (VALUE)data;
2360  rb_thread_t *th;
2361  GetThreadPtr((VALUE)key, th);
2362 
2363  switch (th->status) {
2364  case THREAD_RUNNABLE:
2365  case THREAD_STOPPED:
2367  rb_ary_push(ary, th->self);
2368  default:
2369  break;
2370  }
2371  return ST_CONTINUE;
2372 }
2373 
2374 /********************************************************************/
2375 
2376 /*
2377  * call-seq:
2378  * Thread.list -> array
2379  *
2380  * Returns an array of Thread objects for all threads that are either runnable
2381  * or stopped.
2382  *
2383  * Thread.new { sleep(200) }
2384  * Thread.new { 1000000.times {|i| i*i } }
2385  * Thread.new { Thread.stop }
2386  * Thread.list.each {|t| p t}
2387  *
2388  * This will produce:
2389  *
2390  * #<Thread:0x401b3e84 sleep>
2391  * #<Thread:0x401b3f38 run>
2392  * #<Thread:0x401b3fb0 sleep>
2393  * #<Thread:0x401bdf4c run>
2394  */
2395 
2396 VALUE
2398 {
2399  VALUE ary = rb_ary_new();
2400  st_foreach(GET_THREAD()->vm->living_threads, thread_list_i, ary);
2401  return ary;
2402 }
2403 
2404 VALUE
2406 {
2407  return GET_THREAD()->self;
2408 }
2409 
2410 /*
2411  * call-seq:
2412  * Thread.current -> thread
2413  *
2414  * Returns the currently executing thread.
2415  *
2416  * Thread.current #=> #<Thread:0x401bdf4c run>
2417  */
2418 
2419 static VALUE
2421 {
2422  return rb_thread_current();
2423 }
2424 
2425 VALUE
2427 {
2428  return GET_THREAD()->vm->main_thread->self;
2429 }
2430 
2431 /*
2432  * call-seq:
2433  * Thread.main -> thread
2434  *
2435  * Returns the main thread.
2436  */
2437 
2438 static VALUE
2440 {
2441  return rb_thread_main();
2442 }
2443 
2444 
2445 /*
2446  * call-seq:
2447  * Thread.abort_on_exception -> true or false
2448  *
2449  * Returns the status of the global ``abort on exception'' condition.
2450  *
2451  * The default is +false+.
2452  *
2453  * When set to +true+, all threads will abort (the process will
2454  * <code>exit(0)</code>) if an exception is raised in any thread.
2455  *
2456  * Can also be specified by the global $DEBUG flag or command line option
2457  * +-d+.
2458  *
2459  * See also ::abort_on_exception=.
2460  *
2461  * There is also an instance level method to set this for a specific thread,
2462  * see #abort_on_exception.
2463  */
2464 
2465 static VALUE
2467 {
2469 }
2470 
2471 
2472 /*
2473  * call-seq:
2474  * Thread.abort_on_exception= boolean -> true or false
2475  *
2476  * When set to +true+, all threads will abort if an exception is raised.
2477  * Returns the new state.
2478  *
2479  * Thread.abort_on_exception = true
2480  * t1 = Thread.new do
2481  * puts "In new thread"
2482  * raise "Exception from thread"
2483  * end
2484  * sleep(1)
2485  * puts "not reached"
2486  *
2487  * This will produce:
2488  *
2489  * In new thread
2490  * prog.rb:4: Exception from thread (RuntimeError)
2491  * from prog.rb:2:in `initialize'
2492  * from prog.rb:2:in `new'
2493  * from prog.rb:2
2494  *
2495  * See also ::abort_on_exception.
2496  *
2497  * There is also an instance level method to set this for a specific thread,
2498  * see #abort_on_exception=.
2499  */
2500 
2501 static VALUE
2503 {
2505  return val;
2506 }
2507 
2508 
2509 /*
2510  * call-seq:
2511  * thr.abort_on_exception -> true or false
2512  *
2513  * Returns the status of the thread-local ``abort on exception'' condition for
2514  * this +thr+.
2515  *
2516  * The default is +false+.
2517  *
2518  * See also #abort_on_exception=.
2519  *
2520  * There is also a class level method to set this for all threads, see
2521  * ::abort_on_exception.
2522  */
2523 
2524 static VALUE
2526 {
2527  rb_thread_t *th;
2528  GetThreadPtr(thread, th);
2529  return th->abort_on_exception ? Qtrue : Qfalse;
2530 }
2531 
2532 
2533 /*
2534  * call-seq:
2535  * thr.abort_on_exception= boolean -> true or false
2536  *
2537  * When set to +true+, all threads (including the main program) will abort if
2538  * an exception is raised in this +thr+.
2539  *
2540  * The process will effectively <code>exit(0)</code>.
2541  *
2542  * See also #abort_on_exception.
2543  *
2544  * There is also a class level method to set this for all threads, see
2545  * ::abort_on_exception=.
2546  */
2547 
2548 static VALUE
2550 {
2551  rb_thread_t *th;
2552 
2553  GetThreadPtr(thread, th);
2554  th->abort_on_exception = RTEST(val);
2555  return val;
2556 }
2557 
2558 
2559 /*
2560  * call-seq:
2561  * thr.group -> thgrp or nil
2562  *
2563  * Returns the ThreadGroup which contains the given thread, or returns +nil+
2564  * if +thr+ is not a member of any group.
2565  *
2566  * Thread.main.group #=> #<ThreadGroup:0x4029d914>
2567  */
2568 
2569 VALUE
2571 {
2572  rb_thread_t *th;
2573  VALUE group;
2574  GetThreadPtr(thread, th);
2575  group = th->thgroup;
2576 
2577  if (!group) {
2578  group = Qnil;
2579  }
2580  return group;
2581 }
2582 
2583 static const char *
2585 {
2586  switch (th->status) {
2587  case THREAD_RUNNABLE:
2588  if (th->to_kill)
2589  return "aborting";
2590  else
2591  return "run";
2592  case THREAD_STOPPED:
2594  return "sleep";
2595  case THREAD_KILLED:
2596  return "dead";
2597  default:
2598  return "unknown";
2599  }
2600 }
2601 
2602 static int
2604 {
2605  return th->status == THREAD_KILLED;
2606 }
2607 
2608 
2609 /*
2610  * call-seq:
2611  * thr.status -> string, false or nil
2612  *
2613  * Returns the status of +thr+.
2614  *
2615  * [<tt>"sleep"</tt>]
2616  * Returned if this thread is sleeping or waiting on I/O
2617  * [<tt>"run"</tt>]
2618  * When this thread is executing
2619  * [<tt>"aborting"</tt>]
2620  * If this thread is aborting
2621  * [+false+]
2622  * When this thread is terminated normally
2623  * [+nil+]
2624  * If terminated with an exception.
2625  *
2626  * a = Thread.new { raise("die now") }
2627  * b = Thread.new { Thread.stop }
2628  * c = Thread.new { Thread.exit }
2629  * d = Thread.new { sleep }
2630  * d.kill #=> #<Thread:0x401b3678 aborting>
2631  * a.status #=> nil
2632  * b.status #=> "sleep"
2633  * c.status #=> false
2634  * d.status #=> "aborting"
2635  * Thread.current.status #=> "run"
2636  *
2637  * See also the instance methods #alive? and #stop?
2638  */
2639 
2640 static VALUE
2642 {
2643  rb_thread_t *th;
2644  GetThreadPtr(thread, th);
2645 
2646  if (rb_threadptr_dead(th)) {
2647  if (!NIL_P(th->errinfo) && !FIXNUM_P(th->errinfo)
2648  /* TODO */ ) {
2649  return Qnil;
2650  }
2651  return Qfalse;
2652  }
2653  return rb_str_new2(thread_status_name(th));
2654 }
2655 
2656 
2657 /*
2658  * call-seq:
2659  * thr.alive? -> true or false
2660  *
2661  * Returns +true+ if +thr+ is running or sleeping.
2662  *
2663  * thr = Thread.new { }
2664  * thr.join #=> #<Thread:0x401b3fb0 dead>
2665  * Thread.current.alive? #=> true
2666  * thr.alive? #=> false
2667  *
2668  * See also #stop? and #status.
2669  */
2670 
2671 static VALUE
2673 {
2674  rb_thread_t *th;
2675  GetThreadPtr(thread, th);
2676 
2677  if (rb_threadptr_dead(th))
2678  return Qfalse;
2679  return Qtrue;
2680 }
2681 
2682 /*
2683  * call-seq:
2684  * thr.stop? -> true or false
2685  *
2686  * Returns +true+ if +thr+ is dead or sleeping.
2687  *
2688  * a = Thread.new { Thread.stop }
2689  * b = Thread.current
2690  * a.stop? #=> true
2691  * b.stop? #=> false
2692  *
2693  * See also #alive? and #status.
2694  */
2695 
2696 static VALUE
2698 {
2699  rb_thread_t *th;
2700  GetThreadPtr(thread, th);
2701 
2702  if (rb_threadptr_dead(th))
2703  return Qtrue;
2704  if (th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER)
2705  return Qtrue;
2706  return Qfalse;
2707 }
2708 
2709 /*
2710  * call-seq:
2711  * thr.safe_level -> integer
2712  *
2713  * Returns the safe level in effect for <i>thr</i>. Setting thread-local safe
2714  * levels can help when implementing sandboxes which run insecure code.
2715  *
2716  * thr = Thread.new { $SAFE = 3; sleep }
2717  * Thread.current.safe_level #=> 0
2718  * thr.safe_level #=> 3
2719  */
2720 
2721 static VALUE
2723 {
2724  rb_thread_t *th;
2725  GetThreadPtr(thread, th);
2726 
2727  return INT2NUM(th->safe_level);
2728 }
2729 
2730 /*
2731  * call-seq:
2732  * thr.inspect -> string
2733  *
2734  * Dump the name, id, and status of _thr_ to a string.
2735  */
2736 
2737 static VALUE
2739 {
2740  const char *cname = rb_obj_classname(thread);
2741  rb_thread_t *th;
2742  const char *status;
2743  VALUE str;
2744 
2745  GetThreadPtr(thread, th);
2746  status = thread_status_name(th);
2747  str = rb_sprintf("#<%s:%p %s>", cname, (void *)thread, status);
2748  OBJ_INFECT(str, thread);
2749 
2750  return str;
2751 }
2752 
2753 static VALUE
2755 {
2756  st_data_t val;
2757 
2758  if (th->local_storage && st_lookup(th->local_storage, id, &val)) {
2759  return (VALUE)val;
2760  }
2761  return Qnil;
2762 }
2763 
2764 VALUE
2766 {
2767  rb_thread_t *th;
2768  GetThreadPtr(thread, th);
2769  return threadptr_local_aref(th, id);
2770 }
2771 
2772 /*
2773  * call-seq:
2774  * thr[sym] -> obj or nil
2775  *
2776  * Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
2777  * if not explicitly inside a Fiber), using either a symbol or a string name.
2778  * If the specified variable does not exist, returns +nil+.
2779  *
2780  * [
2781  * Thread.new { Thread.current["name"] = "A" },
2782  * Thread.new { Thread.current[:name] = "B" },
2783  * Thread.new { Thread.current["name"] = "C" }
2784  * ].each do |th|
2785  * th.join
2786  * puts "#{th.inspect}: #{th[:name]}"
2787  * end
2788  *
2789  * This will produce:
2790  *
2791  * #<Thread:0x00000002a54220 dead>: A
2792  * #<Thread:0x00000002a541a8 dead>: B
2793  * #<Thread:0x00000002a54130 dead>: C
2794  *
2795  * Thread#[] and Thread#[]= are not thread-local but fiber-local.
2796  * This confusion did not exist in Ruby 1.8 because
2797  * fibers are only available since Ruby 1.9.
2798  * Ruby 1.9 chooses that the methods behaves fiber-local to save
2799  * following idiom for dynamic scope.
2800  *
2801  * def meth(newvalue)
2802  * begin
2803  * oldvalue = Thread.current[:name]
2804  * Thread.current[:name] = newvalue
2805  * yield
2806  * ensure
2807  * Thread.current[:name] = oldvalue
2808  * end
2809  * end
2810  *
2811  * The idiom may not work as dynamic scope if the methods are thread-local
2812  * and a given block switches fiber.
2813  *
2814  * f = Fiber.new {
2815  * meth(1) {
2816  * Fiber.yield
2817  * }
2818  * }
2819  * meth(2) {
2820  * f.resume
2821  * }
2822  * f.resume
2823  * p Thread.current[:name]
2824  * #=> nil if fiber-local
2825  * #=> 2 if thread-local (The value 2 is leaked to outside of meth method.)
2826  *
2827  * For thread-local variables, please see #thread_variable_get and
2828  * #thread_variable_set.
2829  *
2830  */
2831 
2832 static VALUE
2834 {
2835  ID id = rb_check_id(&key);
2836  if (!id) return Qnil;
2837  return rb_thread_local_aref(thread, id);
2838 }
2839 
2840 static VALUE
2842 {
2843  if (NIL_P(val)) {
2844  if (!th->local_storage) return Qnil;
2845  st_delete_wrap(th->local_storage, id);
2846  return Qnil;
2847  }
2848  else {
2849  if (!th->local_storage) {
2851  }
2852  st_insert(th->local_storage, id, val);
2853  return val;
2854 }
2855 }
2856 
2857 VALUE
2859 {
2860  rb_thread_t *th;
2861  GetThreadPtr(thread, th);
2862 
2863  if (OBJ_FROZEN(thread)) {
2864  rb_error_frozen("thread locals");
2865  }
2866 
2867  return threadptr_local_aset(th, id, val);
2868 }
2869 
2870 /*
2871  * call-seq:
2872  * thr[sym] = obj -> obj
2873  *
2874  * Attribute Assignment---Sets or creates the value of a fiber-local variable,
2875  * using either a symbol or a string.
2876  *
2877  * See also Thread#[].
2878  *
2879  * For thread-local variables, please see #thread_variable_set and
2880  * #thread_variable_get.
2881  */
2882 
2883 static VALUE
2885 {
2886  return rb_thread_local_aset(self, rb_to_id(id), val);
2887 }
2888 
2889 /*
2890  * call-seq:
2891  * thr.thread_variable_get(key) -> obj or nil
2892  *
2893  * Returns the value of a thread local variable that has been set. Note that
2894  * these are different than fiber local values. For fiber local values,
2895  * please see Thread#[] and Thread#[]=.
2896  *
2897  * Thread local values are carried along with threads, and do not respect
2898  * fibers. For example:
2899  *
2900  * Thread.new {
2901  * Thread.current.thread_variable_set("foo", "bar") # set a thread local
2902  * Thread.current["foo"] = "bar" # set a fiber local
2903  *
2904  * Fiber.new {
2905  * Fiber.yield [
2906  * Thread.current.thread_variable_get("foo"), # get the thread local
2907  * Thread.current["foo"], # get the fiber local
2908  * ]
2909  * }.resume
2910  * }.join.value # => ['bar', nil]
2911  *
2912  * The value "bar" is returned for the thread local, where nil is returned
2913  * for the fiber local. The fiber is executed in the same thread, so the
2914  * thread local values are available.
2915  */
2916 
2917 static VALUE
2919 {
2920  VALUE locals;
2921  ID id = rb_check_id(&key);
2922 
2923  if (!id) return Qnil;
2924  locals = rb_ivar_get(thread, id_locals);
2925  return rb_hash_aref(locals, ID2SYM(id));
2926 }
2927 
2928 /*
2929  * call-seq:
2930  * thr.thread_variable_set(key, value)
2931  *
2932  * Sets a thread local with +key+ to +value+. Note that these are local to
2933  * threads, and not to fibers. Please see Thread#thread_variable_get and
2934  * Thread#[] for more information.
2935  */
2936 
2937 static VALUE
2939 {
2940  VALUE locals;
2941 
2942  if (OBJ_FROZEN(thread)) {
2943  rb_error_frozen("thread locals");
2944  }
2945 
2946  locals = rb_ivar_get(thread, id_locals);
2947  return rb_hash_aset(locals, ID2SYM(rb_to_id(id)), val);
2948 }
2949 
2950 /*
2951  * call-seq:
2952  * thr.key?(sym) -> true or false
2953  *
2954  * Returns +true+ if the given string (or symbol) exists as a fiber-local
2955  * variable.
2956  *
2957  * me = Thread.current
2958  * me[:oliver] = "a"
2959  * me.key?(:oliver) #=> true
2960  * me.key?(:stanley) #=> false
2961  */
2962 
2963 static VALUE
2965 {
2966  rb_thread_t *th;
2967  ID id = rb_check_id(&key);
2968 
2969  GetThreadPtr(self, th);
2970 
2971  if (!id || !th->local_storage) {
2972  return Qfalse;
2973  }
2974  if (st_lookup(th->local_storage, id, 0)) {
2975  return Qtrue;
2976  }
2977  return Qfalse;
2978 }
2979 
2980 static int
2982 {
2983  rb_ary_push(ary, ID2SYM(key));
2984  return ST_CONTINUE;
2985 }
2986 
2987 static int
2989 {
2990  return (int)vm->living_threads->num_entries;
2991 }
2992 
2993 int
2995 {
2996  int num = 1;
2997  if (GET_THREAD()->vm->living_threads) {
2998  num = vm_living_thread_num(GET_THREAD()->vm);
2999  thread_debug("rb_thread_alone: %d\n", num);
3000  }
3001  return num == 1;
3002 }
3003 
3004 /*
3005  * call-seq:
3006  * thr.keys -> array
3007  *
3008  * Returns an array of the names of the fiber-local variables (as Symbols).
3009  *
3010  * thr = Thread.new do
3011  * Thread.current[:cat] = 'meow'
3012  * Thread.current["dog"] = 'woof'
3013  * end
3014  * thr.join #=> #<Thread:0x401b3f10 dead>
3015  * thr.keys #=> [:dog, :cat]
3016  */
3017 
3018 static VALUE
3020 {
3021  rb_thread_t *th;
3022  VALUE ary = rb_ary_new();
3023  GetThreadPtr(self, th);
3024 
3025  if (th->local_storage) {
3027  }
3028  return ary;
3029 }
3030 
3031 static int
3033 {
3034  rb_ary_push(ary, key);
3035  return ST_CONTINUE;
3036 }
3037 
3038 /*
3039  * call-seq:
3040  * thr.thread_variables -> array
3041  *
3042  * Returns an array of the names of the thread-local variables (as Symbols).
3043  *
3044  * thr = Thread.new do
3045  * Thread.current.thread_variable_set(:cat, 'meow')
3046  * Thread.current.thread_variable_set("dog", 'woof')
3047  * end
3048  * thr.join #=> #<Thread:0x401b3f10 dead>
3049  * thr.thread_variables #=> [:dog, :cat]
3050  *
3051  * Note that these are not fiber local variables. Please see Thread#[] and
3052  * Thread#thread_variable_get for more details.
3053  */
3054 
3055 static VALUE
3057 {
3058  VALUE locals;
3059  VALUE ary;
3060 
3061  locals = rb_ivar_get(thread, id_locals);
3062  ary = rb_ary_new();
3063  rb_hash_foreach(locals, keys_i, ary);
3064 
3065  return ary;
3066 }
3067 
3068 /*
3069  * call-seq:
3070  * thr.thread_variable?(key) -> true or false
3071  *
3072  * Returns +true+ if the given string (or symbol) exists as a thread-local
3073  * variable.
3074  *
3075  * me = Thread.current
3076  * me.thread_variable_set(:oliver, "a")
3077  * me.thread_variable?(:oliver) #=> true
3078  * me.thread_variable?(:stanley) #=> false
3079  *
3080  * Note that these are not fiber local variables. Please see Thread#[] and
3081  * Thread#thread_variable_get for more details.
3082  */
3083 
3084 static VALUE
3086 {
3087  VALUE locals;
3088  ID id = rb_check_id(&key);
3089 
3090  if (!id) return Qfalse;
3091 
3092  locals = rb_ivar_get(thread, id_locals);
3093 
3094  if (!RHASH(locals)->ntbl)
3095  return Qfalse;
3096 
3097  if (st_lookup(RHASH(locals)->ntbl, ID2SYM(id), 0)) {
3098  return Qtrue;
3099  }
3100 
3101  return Qfalse;
3102 }
3103 
3104 /*
3105  * call-seq:
3106  * thr.priority -> integer
3107  *
3108  * Returns the priority of <i>thr</i>. Default is inherited from the
3109  * current thread which creating the new thread, or zero for the
3110  * initial main thread; higher-priority thread will run more frequently
3111  * than lower-priority threads (but lower-priority threads can also run).
3112  *
3113  * This is just hint for Ruby thread scheduler. It may be ignored on some
3114  * platform.
3115  *
3116  * Thread.current.priority #=> 0
3117  */
3118 
3119 static VALUE
3121 {
3122  rb_thread_t *th;
3123  GetThreadPtr(thread, th);
3124  return INT2NUM(th->priority);
3125 }
3126 
3127 
3128 /*
3129  * call-seq:
3130  * thr.priority= integer -> thr
3131  *
3132  * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
3133  * will run more frequently than lower-priority threads (but lower-priority
3134  * threads can also run).
3135  *
3136  * This is just hint for Ruby thread scheduler. It may be ignored on some
3137  * platform.
3138  *
3139  * count1 = count2 = 0
3140  * a = Thread.new do
3141  * loop { count1 += 1 }
3142  * end
3143  * a.priority = -1
3144  *
3145  * b = Thread.new do
3146  * loop { count2 += 1 }
3147  * end
3148  * b.priority = -2
3149  * sleep 1 #=> 1
3150  * count1 #=> 622504
3151  * count2 #=> 5832
3152  */
3153 
3154 static VALUE
3156 {
3157  rb_thread_t *th;
3158  int priority;
3159  GetThreadPtr(thread, th);
3160 
3161 
3162 #if USE_NATIVE_THREAD_PRIORITY
3163  th->priority = NUM2INT(prio);
3164  native_thread_apply_priority(th);
3165 #else
3166  priority = NUM2INT(prio);
3167  if (priority > RUBY_THREAD_PRIORITY_MAX) {
3168  priority = RUBY_THREAD_PRIORITY_MAX;
3169  }
3170  else if (priority < RUBY_THREAD_PRIORITY_MIN) {
3171  priority = RUBY_THREAD_PRIORITY_MIN;
3172  }
3173  th->priority = priority;
3174 #endif
3175  return INT2NUM(th->priority);
3176 }
3177 
3178 /* for IO */
3179 
3180 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
3181 
3182 /*
3183  * several Unix platforms support file descriptors bigger than FD_SETSIZE
3184  * in select(2) system call.
3185  *
3186  * - Linux 2.2.12 (?)
3187  * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
3188  * select(2) documents how to allocate fd_set dynamically.
3189  * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
3190  * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
3191  * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
3192  * select(2) documents how to allocate fd_set dynamically.
3193  * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
3194  * - HP-UX documents how to allocate fd_set dynamically.
3195  * http://docs.hp.com/en/B2355-60105/select.2.html
3196  * - Solaris 8 has select_large_fdset
3197  * - Mac OS X 10.7 (Lion)
3198  * select(2) returns EINVAL if nfds is greater than FD_SET_SIZE and
3199  * _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
3200  * http://developer.apple.com/library/mac/#releasenotes/Darwin/SymbolVariantsRelNotes/_index.html
3201  *
3202  * When fd_set is not big enough to hold big file descriptors,
3203  * it should be allocated dynamically.
3204  * Note that this assumes fd_set is structured as bitmap.
3205  *
3206  * rb_fd_init allocates the memory.
3207  * rb_fd_term free the memory.
3208  * rb_fd_set may re-allocates bitmap.
3209  *
3210  * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
3211  */
3212 
3213 void
3214 rb_fd_init(rb_fdset_t *fds)
3215 {
3216  fds->maxfd = 0;
3217  fds->fdset = ALLOC(fd_set);
3218  FD_ZERO(fds->fdset);
3219 }
3220 
3221 void
3223 {
3224  size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3225 
3226  if (size < sizeof(fd_set))
3227  size = sizeof(fd_set);
3228  dst->maxfd = src->maxfd;
3229  dst->fdset = xmalloc(size);
3230  memcpy(dst->fdset, src->fdset, size);
3231 }
3232 
3233 void
3234 rb_fd_term(rb_fdset_t *fds)
3235 {
3236  if (fds->fdset) xfree(fds->fdset);
3237  fds->maxfd = 0;
3238  fds->fdset = 0;
3239 }
3240 
3241 void
3242 rb_fd_zero(rb_fdset_t *fds)
3243 {
3244  if (fds->fdset)
3245  MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
3246 }
3247 
3248 static void
3249 rb_fd_resize(int n, rb_fdset_t *fds)
3250 {
3251  size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
3252  size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
3253 
3254  if (m < sizeof(fd_set)) m = sizeof(fd_set);
3255  if (o < sizeof(fd_set)) o = sizeof(fd_set);
3256 
3257  if (m > o) {
3258  fds->fdset = xrealloc(fds->fdset, m);
3259  memset((char *)fds->fdset + o, 0, m - o);
3260  }
3261  if (n >= fds->maxfd) fds->maxfd = n + 1;
3262 }
3263 
3264 void
3265 rb_fd_set(int n, rb_fdset_t *fds)
3266 {
3267  rb_fd_resize(n, fds);
3268  FD_SET(n, fds->fdset);
3269 }
3270 
3271 void
3272 rb_fd_clr(int n, rb_fdset_t *fds)
3273 {
3274  if (n >= fds->maxfd) return;
3275  FD_CLR(n, fds->fdset);
3276 }
3277 
3278 int
3279 rb_fd_isset(int n, const rb_fdset_t *fds)
3280 {
3281  if (n >= fds->maxfd) return 0;
3282  return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
3283 }
3284 
3285 void
3286 rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
3287 {
3288  size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
3289 
3290  if (size < sizeof(fd_set)) size = sizeof(fd_set);
3291  dst->maxfd = max;
3292  dst->fdset = xrealloc(dst->fdset, size);
3293  memcpy(dst->fdset, src, size);
3294 }
3295 
3296 static void
3297 rb_fd_rcopy(fd_set *dst, rb_fdset_t *src)
3298 {
3299  size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3300 
3301  if (size > sizeof(fd_set)) {
3302  rb_raise(rb_eArgError, "too large fdsets");
3303  }
3304  memcpy(dst, rb_fd_ptr(src), sizeof(fd_set));
3305 }
3306 
3307 void
3308 rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
3309 {
3310  size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3311 
3312  if (size < sizeof(fd_set))
3313  size = sizeof(fd_set);
3314  dst->maxfd = src->maxfd;
3315  dst->fdset = xrealloc(dst->fdset, size);
3316  memcpy(dst->fdset, src->fdset, size);
3317 }
3318 
3319 #ifdef __native_client__
3320 int select(int nfds, fd_set *readfds, fd_set *writefds,
3321  fd_set *exceptfds, struct timeval *timeout);
3322 #endif
3323 
3324 int
3325 rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
3326 {
3327  fd_set *r = NULL, *w = NULL, *e = NULL;
3328  if (readfds) {
3329  rb_fd_resize(n - 1, readfds);
3330  r = rb_fd_ptr(readfds);
3331  }
3332  if (writefds) {
3333  rb_fd_resize(n - 1, writefds);
3334  w = rb_fd_ptr(writefds);
3335  }
3336  if (exceptfds) {
3337  rb_fd_resize(n - 1, exceptfds);
3338  e = rb_fd_ptr(exceptfds);
3339  }
3340  return select(n, r, w, e, timeout);
3341 }
3342 
3343 #undef FD_ZERO
3344 #undef FD_SET
3345 #undef FD_CLR
3346 #undef FD_ISSET
3347 
3348 #define FD_ZERO(f) rb_fd_zero(f)
3349 #define FD_SET(i, f) rb_fd_set((i), (f))
3350 #define FD_CLR(i, f) rb_fd_clr((i), (f))
3351 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
3352 
3353 #elif defined(_WIN32)
3354 
3355 void
3356 rb_fd_init(rb_fdset_t *set)
3357 {
3358  set->capa = FD_SETSIZE;
3359  set->fdset = ALLOC(fd_set);
3360  FD_ZERO(set->fdset);
3361 }
3362 
3363 void
3365 {
3366  rb_fd_init(dst);
3367  rb_fd_dup(dst, src);
3368 }
3369 
3370 static void
3371 rb_fd_rcopy(fd_set *dst, rb_fdset_t *src)
3372 {
3373  int max = rb_fd_max(src);
3374 
3375  /* we assume src is the result of select() with dst, so dst should be
3376  * larger or equal than src. */
3377  if (max > FD_SETSIZE || (UINT)max > dst->fd_count) {
3378  rb_raise(rb_eArgError, "too large fdsets");
3379  }
3380 
3381  memcpy(dst->fd_array, src->fdset->fd_array, max);
3382  dst->fd_count = max;
3383 }
3384 
3385 void
3386 rb_fd_term(rb_fdset_t *set)
3387 {
3388  xfree(set->fdset);
3389  set->fdset = NULL;
3390  set->capa = 0;
3391 }
3392 
3393 void
3394 rb_fd_set(int fd, rb_fdset_t *set)
3395 {
3396  unsigned int i;
3397  SOCKET s = rb_w32_get_osfhandle(fd);
3398 
3399  for (i = 0; i < set->fdset->fd_count; i++) {
3400  if (set->fdset->fd_array[i] == s) {
3401  return;
3402  }
3403  }
3404  if (set->fdset->fd_count >= (unsigned)set->capa) {
3405  set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
3406  set->fdset = xrealloc(set->fdset, sizeof(unsigned int) + sizeof(SOCKET) * set->capa);
3407  }
3408  set->fdset->fd_array[set->fdset->fd_count++] = s;
3409 }
3410 
3411 #undef FD_ZERO
3412 #undef FD_SET
3413 #undef FD_CLR
3414 #undef FD_ISSET
3415 
3416 #define FD_ZERO(f) rb_fd_zero(f)
3417 #define FD_SET(i, f) rb_fd_set((i), (f))
3418 #define FD_CLR(i, f) rb_fd_clr((i), (f))
3419 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
3420 
3421 #else
3422 #define rb_fd_rcopy(d, s) (*(d) = *(s))
3423 #endif
3424 
3425 static int
3426 do_select(int n, rb_fdset_t *read, rb_fdset_t *write, rb_fdset_t *except,
3427  struct timeval *timeout)
3428 {
3430  int lerrno;
3431  rb_fdset_t UNINITIALIZED_VAR(orig_read);
3432  rb_fdset_t UNINITIALIZED_VAR(orig_write);
3433  rb_fdset_t UNINITIALIZED_VAR(orig_except);
3434  double limit = 0;
3435  struct timeval wait_rest;
3436  rb_thread_t *th = GET_THREAD();
3437 
3438  if (timeout) {
3439  limit = timeofday();
3440  limit += (double)timeout->tv_sec+(double)timeout->tv_usec*1e-6;
3441  wait_rest = *timeout;
3442  timeout = &wait_rest;
3443  }
3444 
3445  if (read)
3446  rb_fd_init_copy(&orig_read, read);
3447  if (write)
3448  rb_fd_init_copy(&orig_write, write);
3449  if (except)
3450  rb_fd_init_copy(&orig_except, except);
3451 
3452  retry:
3453  lerrno = 0;
3454 
3455  BLOCKING_REGION({
3456  result = native_fd_select(n, read, write, except, timeout, th);
3457  if (result < 0) lerrno = errno;
3458  }, ubf_select, th, FALSE);
3459 
3461 
3462  errno = lerrno;
3463 
3464  if (result < 0) {
3465  switch (errno) {
3466  case EINTR:
3467 #ifdef ERESTART
3468  case ERESTART:
3469 #endif
3470  if (read)
3471  rb_fd_dup(read, &orig_read);
3472  if (write)
3473  rb_fd_dup(write, &orig_write);
3474  if (except)
3475  rb_fd_dup(except, &orig_except);
3476 
3477  if (timeout) {
3478  double d = limit - timeofday();
3479 
3480  wait_rest.tv_sec = (time_t)d;
3481  wait_rest.tv_usec = (int)((d-(double)wait_rest.tv_sec)*1e6);
3482  if (wait_rest.tv_sec < 0) wait_rest.tv_sec = 0;
3483  if (wait_rest.tv_usec < 0) wait_rest.tv_usec = 0;
3484  }
3485 
3486  goto retry;
3487  default:
3488  break;
3489  }
3490  }
3491 
3492  if (read)
3493  rb_fd_term(&orig_read);
3494  if (write)
3495  rb_fd_term(&orig_write);
3496  if (except)
3497  rb_fd_term(&orig_except);
3498 
3499  return result;
3500 }
3501 
3502 static void
3503 rb_thread_wait_fd_rw(int fd, int read)
3504 {
3505  int result = 0;
3506  int events = read ? RB_WAITFD_IN : RB_WAITFD_OUT;
3507 
3508  thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd, read ? "read" : "write");
3509 
3510  if (fd < 0) {
3511  rb_raise(rb_eIOError, "closed stream");
3512  }
3513 
3514  result = rb_wait_for_single_fd(fd, events, NULL);
3515  if (result < 0) {
3516  rb_sys_fail(0);
3517  }
3518 
3519  thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ? "read" : "write");
3520 }
3521 
3522 void
3524 {
3525  rb_thread_wait_fd_rw(fd, 1);
3526 }
3527 
3528 int
3530 {
3531  rb_thread_wait_fd_rw(fd, 0);
3532  return TRUE;
3533 }
3534 
3535 int
3536 rb_thread_select(int max, fd_set * read, fd_set * write, fd_set * except,
3537  struct timeval *timeout)
3538 {
3539  rb_fdset_t fdsets[3];
3540  rb_fdset_t *rfds = NULL;
3541  rb_fdset_t *wfds = NULL;
3542  rb_fdset_t *efds = NULL;
3543  int retval;
3544 
3545  if (read) {
3546  rfds = &fdsets[0];
3547  rb_fd_init(rfds);
3548  rb_fd_copy(rfds, read, max);
3549  }
3550  if (write) {
3551  wfds = &fdsets[1];
3552  rb_fd_init(wfds);
3553  rb_fd_copy(wfds, write, max);
3554  }
3555  if (except) {
3556  efds = &fdsets[2];
3557  rb_fd_init(efds);
3558  rb_fd_copy(efds, except, max);
3559  }
3560 
3561  retval = rb_thread_fd_select(max, rfds, wfds, efds, timeout);
3562 
3563  if (rfds) {
3564  rb_fd_rcopy(read, rfds);
3565  rb_fd_term(rfds);
3566  }
3567  if (wfds) {
3568  rb_fd_rcopy(write, wfds);
3569  rb_fd_term(wfds);
3570  }
3571  if (efds) {
3572  rb_fd_rcopy(except, efds);
3573  rb_fd_term(efds);
3574  }
3575 
3576  return retval;
3577 }
3578 
3579 int
3580 rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
3581  struct timeval *timeout)
3582 {
3583  if (!read && !write && !except) {
3584  if (!timeout) {
3586  return 0;
3587  }
3588  rb_thread_wait_for(*timeout);
3589  return 0;
3590  }
3591 
3592  if (read) {
3593  rb_fd_resize(max - 1, read);
3594  }
3595  if (write) {
3596  rb_fd_resize(max - 1, write);
3597  }
3598  if (except) {
3599  rb_fd_resize(max - 1, except);
3600  }
3601  return do_select(max, read, write, except, timeout);
3602 }
3603 
3604 /*
3605  * poll() is supported by many OSes, but so far Linux is the only
3606  * one we know of that supports using poll() in all places select()
3607  * would work.
3608  */
3609 #if defined(HAVE_POLL) && defined(__linux__)
3610 # define USE_POLL
3611 #endif
3612 
3613 #ifdef USE_POLL
3614 
3615 /* The same with linux kernel. TODO: make platform independent definition. */
3616 #define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
3617 #define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
3618 #define POLLEX_SET (POLLPRI)
3619 
3620 #ifndef HAVE_PPOLL
3621 /* TODO: don't ignore sigmask */
3622 int
3623 ppoll(struct pollfd *fds, nfds_t nfds,
3624  const struct timespec *ts, const sigset_t *sigmask)
3625 {
3626  int timeout_ms;
3627 
3628  if (ts) {
3629  int tmp, tmp2;
3630 
3631  if (ts->tv_sec > TIMET_MAX/1000)
3632  timeout_ms = -1;
3633  else {
3634  tmp = ts->tv_sec * 1000;
3635  tmp2 = ts->tv_nsec / (1000 * 1000);
3636  if (TIMET_MAX - tmp < tmp2)
3637  timeout_ms = -1;
3638  else
3639  timeout_ms = tmp + tmp2;
3640  }
3641  }
3642  else
3643  timeout_ms = -1;
3644 
3645  return poll(fds, nfds, timeout_ms);
3646 }
3647 #endif
3648 
3649 /*
3650  * returns a mask of events
3651  */
3652 int
3653 rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
3654 {
3655  struct pollfd fds;
3656  int result = 0, lerrno;
3657  double limit = 0;
3658  struct timespec ts;
3659  struct timespec *timeout = NULL;
3660  rb_thread_t *th = GET_THREAD();
3661 
3662  if (tv) {
3663  ts.tv_sec = tv->tv_sec;
3664  ts.tv_nsec = tv->tv_usec * 1000;
3665  limit = timeofday();
3666  limit += (double)tv->tv_sec + (double)tv->tv_usec * 1e-6;
3667  timeout = &ts;
3668  }
3669 
3670  fds.fd = fd;
3671  fds.events = (short)events;
3672 
3673 retry:
3674  lerrno = 0;
3675  BLOCKING_REGION({
3676  result = ppoll(&fds, 1, timeout, NULL);
3677  if (result < 0) lerrno = errno;
3678  }, ubf_select, th, FALSE);
3679 
3681 
3682  if (result < 0) {
3683  errno = lerrno;
3684  switch (errno) {
3685  case EINTR:
3686 #ifdef ERESTART
3687  case ERESTART:
3688 #endif
3689  if (timeout) {
3690  double d = limit - timeofday();
3691 
3692  ts.tv_sec = (long)d;
3693  ts.tv_nsec = (long)((d - (double)ts.tv_sec) * 1e9);
3694  if (ts.tv_sec < 0)
3695  ts.tv_sec = 0;
3696  if (ts.tv_nsec < 0)
3697  ts.tv_nsec = 0;
3698  }
3699  goto retry;
3700  }
3701  return -1;
3702  }
3703 
3704  if (fds.revents & POLLNVAL) {
3705  errno = EBADF;
3706  return -1;
3707  }
3708 
3709  /*
3710  * POLLIN, POLLOUT have a different meanings from select(2)'s read/write bit.
3711  * Therefore we need to fix it up.
3712  */
3713  result = 0;
3714  if (fds.revents & POLLIN_SET)
3715  result |= RB_WAITFD_IN;
3716  if (fds.revents & POLLOUT_SET)
3717  result |= RB_WAITFD_OUT;
3718  if (fds.revents & POLLEX_SET)
3719  result |= RB_WAITFD_PRI;
3720 
3721  return result;
3722 }
3723 #else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
3724 static rb_fdset_t *
3726 {
3727  rb_fd_init(fds);
3728  rb_fd_set(fd, fds);
3729 
3730  return fds;
3731 }
3732 
3733 struct select_args {
3734  union {
3735  int fd;
3736  int error;
3737  } as;
3741  struct timeval *tv;
3742 };
3743 
3744 static VALUE
3746 {
3747  struct select_args *args = (struct select_args *)ptr;
3748  int r;
3749 
3750  r = rb_thread_fd_select(args->as.fd + 1,
3751  args->read, args->write, args->except, args->tv);
3752  if (r == -1)
3753  args->as.error = errno;
3754  if (r > 0) {
3755  r = 0;
3756  if (args->read && rb_fd_isset(args->as.fd, args->read))
3757  r |= RB_WAITFD_IN;
3758  if (args->write && rb_fd_isset(args->as.fd, args->write))
3759  r |= RB_WAITFD_OUT;
3760  if (args->except && rb_fd_isset(args->as.fd, args->except))
3761  r |= RB_WAITFD_PRI;
3762  }
3763  return (VALUE)r;
3764 }
3765 
3766 static VALUE
3768 {
3769  struct select_args *args = (struct select_args *)ptr;
3770 
3771  if (args->read) rb_fd_term(args->read);
3772  if (args->write) rb_fd_term(args->write);
3773  if (args->except) rb_fd_term(args->except);
3774 
3775  return (VALUE)-1;
3776 }
3777 
3778 int
3779 rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
3780 {
3781  rb_fdset_t rfds, wfds, efds;
3782  struct select_args args;
3783  int r;
3784  VALUE ptr = (VALUE)&args;
3785 
3786  args.as.fd = fd;
3787  args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
3788  args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
3789  args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
3790  args.tv = tv;
3791 
3792  r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
3793  if (r == -1)
3794  errno = args.as.error;
3795 
3796  return r;
3797 }
3798 #endif /* ! USE_POLL */
3799 
3800 /*
3801  * for GC
3802  */
3803 
3804 #ifdef USE_CONSERVATIVE_STACK_END
3805 void
3807 {
3808  VALUE stack_end;
3809  *stack_end_p = &stack_end;
3810 }
3811 #endif
3812 
3813 
3814 /*
3815  *
3816  */
3817 
3818 void
3820 {
3821  /* mth must be main_thread */
3822  if (rb_signal_buff_size() > 0) {
3823  /* wakeup main thread */
3825  }
3826 }
3827 
3828 static void
3830 {
3831  rb_vm_t *vm = GET_VM(); /* TODO: fix me for Multi-VM */
3832 
3833  /*
3834  * Tricky: thread_destruct_lock doesn't close a race against
3835  * vm->running_thread switch. however it guarantees th->running_thread
3836  * point to valid pointer or NULL.
3837  */
3838  native_mutex_lock(&vm->thread_destruct_lock);
3839  /* for time slice */
3840  if (vm->running_thread)
3842  native_mutex_unlock(&vm->thread_destruct_lock);
3843 
3844  /* check signal */
3846 
3847 #if 0
3848  /* prove profiler */
3849  if (vm->prove_profile.enable) {
3850  rb_thread_t *th = vm->running_thread;
3851 
3852  if (vm->during_gc) {
3853  /* GC prove profiling */
3854  }
3855  }
3856 #endif
3857 }
3858 
3859 void
3861 {
3862  if (timer_thread_id && native_stop_timer_thread(close_anyway)) {
3863  native_reset_timer_thread();
3864  }
3865 }
3866 
3867 void
3869 {
3870  native_reset_timer_thread();
3871 }
3872 
3873 void
3875 {
3876  system_working = 1;
3877  rb_thread_create_timer_thread();
3878 }
3879 
3880 static int
3882 {
3883  int i;
3884  VALUE lines = (VALUE)val;
3885 
3886  for (i = 0; i < RARRAY_LEN(lines); i++) {
3887  if (RARRAY_AREF(lines, i) != Qnil) {
3888  RARRAY_ASET(lines, i, INT2FIX(0));
3889  }
3890  }
3891  return ST_CONTINUE;
3892 }
3893 
3894 static void
3896 {
3897  VALUE coverages = rb_get_coverages();
3898  if (RTEST(coverages)) {
3900  }
3901 }
3902 
3903 static void
3905 {
3906  rb_thread_t *th = GET_THREAD();
3907  rb_vm_t *vm = th->vm;
3908  VALUE thval = th->self;
3909  vm->main_thread = th;
3910 
3911  gvl_atfork(th->vm);
3912  st_foreach(vm->living_threads, atfork, (st_data_t)th);
3913  st_clear(vm->living_threads);
3914  st_insert(vm->living_threads, thval, (st_data_t)th->thread_id);
3915  vm->sleeper = 0;
3916  clear_coverage();
3917 }
3918 
3919 static int
3921 {
3922  VALUE thval = key;
3923  rb_thread_t *th;
3924  GetThreadPtr(thval, th);
3925 
3926  if (th != (rb_thread_t *)current_th) {
3930  }
3931  return ST_CONTINUE;
3932 }
3933 
3934 void
3936 {
3938  GET_THREAD()->join_list = NULL;
3939 
3940  /* We don't want reproduce CVE-2003-0900. */
3942 }
3943 
3944 static int
3946 {
3947  VALUE thval = key;
3948  rb_thread_t *th;
3949  GetThreadPtr(thval, th);
3950 
3951  if (th != (rb_thread_t *)current_th) {
3953  }
3954  return ST_CONTINUE;
3955 }
3956 
3957 void
3959 {
3961 }
3962 
3963 struct thgroup {
3966 };
3967 
3968 static size_t
3969 thgroup_memsize(const void *ptr)
3970 {
3971  return ptr ? sizeof(struct thgroup) : 0;
3972 }
3973 
3975  "thgroup",
3978 };
3979 
3980 /*
3981  * Document-class: ThreadGroup
3982  *
3983  * ThreadGroup provides a means of keeping track of a number of threads as a
3984  * group.
3985  *
3986  * A given Thread object can only belong to one ThreadGroup at a time; adding
3987  * a thread to a new group will remove it from any previous group.
3988  *
3989  * Newly created threads belong to the same group as the thread from which they
3990  * were created.
3991  */
3992 
3993 /*
3994  * Document-const: Default
3995  *
3996  * The default ThreadGroup created when Ruby starts; all Threads belong to it
3997  * by default.
3998  */
3999 static VALUE
4001 {
4002  VALUE group;
4003  struct thgroup *data;
4004 
4005  group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
4006  data->enclosed = 0;
4007  data->group = group;
4008 
4009  return group;
4010 }
4011 
4015 };
4016 
4017 static int
4019 {
4020  VALUE thread = (VALUE)key;
4021  VALUE ary = ((struct thgroup_list_params *)data)->ary;
4022  VALUE group = ((struct thgroup_list_params *)data)->group;
4023  rb_thread_t *th;
4024  GetThreadPtr(thread, th);
4025 
4026  if (th->thgroup == group) {
4027  rb_ary_push(ary, thread);
4028  }
4029  return ST_CONTINUE;
4030 }
4031 
4032 /*
4033  * call-seq:
4034  * thgrp.list -> array
4035  *
4036  * Returns an array of all existing Thread objects that belong to this group.
4037  *
4038  * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
4039  */
4040 
4041 static VALUE
4043 {
4044  VALUE ary = rb_ary_new();
4045  struct thgroup_list_params param;
4046 
4047  param.ary = ary;
4048  param.group = group;
4049  st_foreach(GET_THREAD()->vm->living_threads, thgroup_list_i, (st_data_t) & param);
4050  return ary;
4051 }
4052 
4053 
4054 /*
4055  * call-seq:
4056  * thgrp.enclose -> thgrp
4057  *
4058  * Prevents threads from being added to or removed from the receiving
4059  * ThreadGroup.
4060  *
4061  * New threads can still be started in an enclosed ThreadGroup.
4062  *
4063  * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
4064  * thr = Thread::new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
4065  * tg = ThreadGroup::new #=> #<ThreadGroup:0x402752d4>
4066  * tg.add thr
4067  * #=> ThreadError: can't move from the enclosed thread group
4068  */
4069 
4070 static VALUE
4072 {
4073  struct thgroup *data;
4074 
4076  data->enclosed = 1;
4077 
4078  return group;
4079 }
4080 
4081 
4082 /*
4083  * call-seq:
4084  * thgrp.enclosed? -> true or false
4085  *
4086  * Returns +true+ if the +thgrp+ is enclosed. See also ThreadGroup#enclose.
4087  */
4088 
4089 static VALUE
4091 {
4092  struct thgroup *data;
4093 
4095  if (data->enclosed)
4096  return Qtrue;
4097  return Qfalse;
4098 }
4099 
4100 
4101 /*
4102  * call-seq:
4103  * thgrp.add(thread) -> thgrp
4104  *
4105  * Adds the given +thread+ to this group, removing it from any other
4106  * group to which it may have previously been a member.
4107  *
4108  * puts "Initial group is #{ThreadGroup::Default.list}"
4109  * tg = ThreadGroup.new
4110  * t1 = Thread.new { sleep }
4111  * t2 = Thread.new { sleep }
4112  * puts "t1 is #{t1}"
4113  * puts "t2 is #{t2}"
4114  * tg.add(t1)
4115  * puts "Initial group now #{ThreadGroup::Default.list}"
4116  * puts "tg group now #{tg.list}"
4117  *
4118  * This will produce:
4119  *
4120  * Initial group is #<Thread:0x401bdf4c>
4121  * t1 is #<Thread:0x401b3c90>
4122  * t2 is #<Thread:0x401b3c18>
4123  * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
4124  * tg group now #<Thread:0x401b3c90>
4125  */
4126 
4127 static VALUE
4129 {
4130  rb_thread_t *th;
4131  struct thgroup *data;
4132 
4133  GetThreadPtr(thread, th);
4134 
4135  if (OBJ_FROZEN(group)) {
4136  rb_raise(rb_eThreadError, "can't move to the frozen thread group");
4137  }
4139  if (data->enclosed) {
4140  rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
4141  }
4142 
4143  if (!th->thgroup) {
4144  return Qnil;
4145  }
4146 
4147  if (OBJ_FROZEN(th->thgroup)) {
4148  rb_raise(rb_eThreadError, "can't move from the frozen thread group");
4149  }
4151  if (data->enclosed) {
4153  "can't move from the enclosed thread group");
4154  }
4155 
4156  th->thgroup = group;
4157  return group;
4158 }
4159 
4160 
4161 /*
4162  * Document-class: Mutex
4163  *
4164  * Mutex implements a simple semaphore that can be used to coordinate access to
4165  * shared data from multiple concurrent threads.
4166  *
4167  * Example:
4168  *
4169  * require 'thread'
4170  * semaphore = Mutex.new
4171  *
4172  * a = Thread.new {
4173  * semaphore.synchronize {
4174  * # access shared resource
4175  * }
4176  * }
4177  *
4178  * b = Thread.new {
4179  * semaphore.synchronize {
4180  * # access shared resource
4181  * }
4182  * }
4183  *
4184  */
4185 
4186 #define GetMutexPtr(obj, tobj) \
4187  TypedData_Get_Struct((obj), rb_mutex_t, &mutex_data_type, (tobj))
4188 
4189 #define mutex_mark NULL
4190 
4191 static void
4192 mutex_free(void *ptr)
4193 {
4194  if (ptr) {
4195  rb_mutex_t *mutex = ptr;
4196  if (mutex->th) {
4197  /* rb_warn("free locked mutex"); */
4198  const char *err = rb_mutex_unlock_th(mutex, mutex->th);
4199  if (err) rb_bug("%s", err);
4200  }
4201  native_mutex_destroy(&mutex->lock);
4202  native_cond_destroy(&mutex->cond);
4203  }
4204  ruby_xfree(ptr);
4205 }
4206 
4207 static size_t
4208 mutex_memsize(const void *ptr)
4209 {
4210  return ptr ? sizeof(rb_mutex_t) : 0;
4211 }
4212 
4214  "mutex",
4217 };
4218 
4219 VALUE
4221 {
4223  return Qtrue;
4224  }
4225  else {
4226  return Qfalse;
4227  }
4228 }
4229 
4230 static VALUE
4232 {
4233  VALUE volatile obj;
4234  rb_mutex_t *mutex;
4235 
4236  obj = TypedData_Make_Struct(klass, rb_mutex_t, &mutex_data_type, mutex);
4237  native_mutex_initialize(&mutex->lock);
4238  native_cond_initialize(&mutex->cond, RB_CONDATTR_CLOCK_MONOTONIC);
4239  return obj;
4240 }
4241 
4242 /*
4243  * call-seq:
4244  * Mutex.new -> mutex
4245  *
4246  * Creates a new Mutex
4247  */
4248 static VALUE
4250 {
4251  return self;
4252 }
4253 
4254 VALUE
4256 {
4257  return mutex_alloc(rb_cMutex);
4258 }
4259 
4260 /*
4261  * call-seq:
4262  * mutex.locked? -> true or false
4263  *
4264  * Returns +true+ if this lock is currently held by some thread.
4265  */
4266 VALUE
4268 {
4269  rb_mutex_t *mutex;
4270  GetMutexPtr(self, mutex);
4271  return mutex->th ? Qtrue : Qfalse;
4272 }
4273 
4274 static void
4276 {
4277  rb_mutex_t *mutex;
4278  GetMutexPtr(self, mutex);
4279 
4280  if (th->keeping_mutexes) {
4281  mutex->next_mutex = th->keeping_mutexes;
4282  }
4283  th->keeping_mutexes = mutex;
4284 }
4285 
4286 /*
4287  * call-seq:
4288  * mutex.try_lock -> true or false
4289  *
4290  * Attempts to obtain the lock and returns immediately. Returns +true+ if the
4291  * lock was granted.
4292  */
4293 VALUE
4295 {
4296  rb_mutex_t *mutex;
4297  VALUE locked = Qfalse;
4298  GetMutexPtr(self, mutex);
4299 
4300  native_mutex_lock(&mutex->lock);
4301  if (mutex->th == 0) {
4302  mutex->th = GET_THREAD();
4303  locked = Qtrue;
4304 
4305  mutex_locked(GET_THREAD(), self);
4306  }
4307  native_mutex_unlock(&mutex->lock);
4308 
4309  return locked;
4310 }
4311 
4312 static int
4313 lock_func(rb_thread_t *th, rb_mutex_t *mutex, int timeout_ms)
4314 {
4315  int interrupted = 0;
4316  int err = 0;
4317 
4318  mutex->cond_waiting++;
4319  for (;;) {
4320  if (!mutex->th) {
4321  mutex->th = th;
4322  break;
4323  }
4324  if (RUBY_VM_INTERRUPTED(th)) {
4325  interrupted = 1;
4326  break;
4327  }
4328  if (err == ETIMEDOUT) {
4329  interrupted = 2;
4330  break;
4331  }
4332 
4333  if (timeout_ms) {
4334  struct timespec timeout_rel;
4335  struct timespec timeout;
4336 
4337  timeout_rel.tv_sec = 0;
4338  timeout_rel.tv_nsec = timeout_ms * 1000 * 1000;
4339  timeout = native_cond_timeout(&mutex->cond, timeout_rel);
4340  err = native_cond_timedwait(&mutex->cond, &mutex->lock, &timeout);
4341  }
4342  else {
4343  native_cond_wait(&mutex->cond, &mutex->lock);
4344  err = 0;
4345  }
4346  }
4347  mutex->cond_waiting--;
4348 
4349  return interrupted;
4350 }
4351 
4352 static void
4353 lock_interrupt(void *ptr)
4354 {
4355  rb_mutex_t *mutex = (rb_mutex_t *)ptr;
4356  native_mutex_lock(&mutex->lock);
4357  if (mutex->cond_waiting > 0)
4358  native_cond_broadcast(&mutex->cond);
4359  native_mutex_unlock(&mutex->lock);
4360 }
4361 
4362 /*
4363  * At maximum, only one thread can use cond_timedwait and watch deadlock
4364  * periodically. Multiple polling thread (i.e. concurrent deadlock check)
4365  * introduces new race conditions. [Bug #6278] [ruby-core:44275]
4366  */
4368 
4369 /*
4370  * call-seq:
4371  * mutex.lock -> self
4372  *
4373  * Attempts to grab the lock and waits if it isn't available.
4374  * Raises +ThreadError+ if +mutex+ was locked by the current thread.
4375  */
4376 VALUE
4378 {
4379  rb_thread_t *th = GET_THREAD();
4380  rb_mutex_t *mutex;
4381  GetMutexPtr(self, mutex);
4382 
4383  /* When running trap handler */
4384  if (!mutex->allow_trap && th->interrupt_mask & TRAP_INTERRUPT_MASK) {
4385  rb_raise(rb_eThreadError, "can't be called from trap context");
4386  }
4387 
4388  if (rb_mutex_trylock(self) == Qfalse) {
4389  if (mutex->th == GET_THREAD()) {
4390  rb_raise(rb_eThreadError, "deadlock; recursive locking");
4391  }
4392 
4393  while (mutex->th != th) {
4394  int interrupted;
4395  enum rb_thread_status prev_status = th->status;
4396  volatile int timeout_ms = 0;
4397  struct rb_unblock_callback oldubf;
4398 
4399  set_unblock_function(th, lock_interrupt, mutex, &oldubf, FALSE);
4401  th->locking_mutex = self;
4402 
4403  native_mutex_lock(&mutex->lock);
4404  th->vm->sleeper++;
4405  /*
4406  * Carefully! while some contended threads are in lock_func(),
4407  * vm->sleepr is unstable value. we have to avoid both deadlock
4408  * and busy loop.
4409  */
4410  if ((vm_living_thread_num(th->vm) == th->vm->sleeper) &&
4411  !patrol_thread) {
4412  timeout_ms = 100;
4413  patrol_thread = th;
4414  }
4415 
4416  GVL_UNLOCK_BEGIN();
4417  interrupted = lock_func(th, mutex, (int)timeout_ms);
4418  native_mutex_unlock(&mutex->lock);
4419  GVL_UNLOCK_END();
4420 
4421  if (patrol_thread == th)
4422  patrol_thread = NULL;
4423 
4424  reset_unblock_function(th, &oldubf);
4425 
4426  th->locking_mutex = Qfalse;
4427  if (mutex->th && interrupted == 2) {
4428  rb_check_deadlock(th->vm);
4429  }
4430  if (th->status == THREAD_STOPPED_FOREVER) {
4431  th->status = prev_status;
4432  }
4433  th->vm->sleeper--;
4434 
4435  if (mutex->th == th) mutex_locked(th, self);
4436 
4437  if (interrupted) {
4439  }
4440  }
4441  }
4442  return self;
4443 }
4444 
4445 /*
4446  * call-seq:
4447  * mutex.owned? -> true or false
4448  *
4449  * Returns +true+ if this lock is currently held by current thread.
4450  * <em>This API is experimental, and subject to change.</em>
4451  */
4452 VALUE
4454 {
4455  VALUE owned = Qfalse;
4456  rb_thread_t *th = GET_THREAD();
4457  rb_mutex_t *mutex;
4458 
4459  GetMutexPtr(self, mutex);
4460 
4461  if (mutex->th == th)
4462  owned = Qtrue;
4463 
4464  return owned;
4465 }
4466 
4467 static const char *
4469 {
4470  const char *err = NULL;
4471 
4472  native_mutex_lock(&mutex->lock);
4473 
4474  if (mutex->th == 0) {
4475  err = "Attempt to unlock a mutex which is not locked";
4476  }
4477  else if (mutex->th != th) {
4478  err = "Attempt to unlock a mutex which is locked by another thread";
4479  }
4480  else {
4481  mutex->th = 0;
4482  if (mutex->cond_waiting > 0)
4483  native_cond_signal(&mutex->cond);
4484  }
4485 
4486  native_mutex_unlock(&mutex->lock);
4487 
4488  if (!err) {
4489  rb_mutex_t *volatile *th_mutex = &th->keeping_mutexes;
4490  while (*th_mutex != mutex) {
4491  th_mutex = &(*th_mutex)->next_mutex;
4492  }
4493  *th_mutex = mutex->next_mutex;
4494  mutex->next_mutex = NULL;
4495  }
4496 
4497  return err;
4498 }
4499 
4500 /*
4501  * call-seq:
4502  * mutex.unlock -> self
4503  *
4504  * Releases the lock.
4505  * Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
4506  */
4507 VALUE
4509 {
4510  const char *err;
4511  rb_mutex_t *mutex;
4512  GetMutexPtr(self, mutex);
4513 
4514  err = rb_mutex_unlock_th(mutex, GET_THREAD());
4515  if (err) rb_raise(rb_eThreadError, "%s", err);
4516 
4517  return self;
4518 }
4519 
4520 static void
4522 {
4523  if (th->keeping_mutexes) {
4525  }
4526  th->keeping_mutexes = NULL;
4527 }
4528 
4529 static void
4531 {
4532  rb_mutex_t *mutex;
4533 
4534  if (!th->locking_mutex) return;
4535 
4536  GetMutexPtr(th->locking_mutex, mutex);
4537  if (mutex->th == th)
4538  rb_mutex_abandon_all(mutex);
4539  th->locking_mutex = Qfalse;
4540 }
4541 
4542 static void
4544 {
4545  rb_mutex_t *mutex;
4546 
4547  while (mutexes) {
4548  mutex = mutexes;
4549  mutexes = mutex->next_mutex;
4550  mutex->th = 0;
4551  mutex->next_mutex = 0;
4552  }
4553 }
4554 
4555 static VALUE
4557 {
4558  sleep_forever(GET_THREAD(), 1, 0); /* permit spurious check */
4559  return Qnil;
4560 }
4561 
4562 static VALUE
4564 {
4565  struct timeval *t = (struct timeval *)time;
4566  sleep_timeval(GET_THREAD(), *t, 0); /* permit spurious check */
4567  return Qnil;
4568 }
4569 
4570 VALUE
4572 {
4573  time_t beg, end;
4574  struct timeval t;
4575 
4576  if (!NIL_P(timeout)) {
4577  t = rb_time_interval(timeout);
4578  }
4579  rb_mutex_unlock(self);
4580  beg = time(0);
4581  if (NIL_P(timeout)) {
4583  }
4584  else {
4586  }
4587  end = time(0) - beg;
4588  return INT2FIX(end);
4589 }
4590 
4591 /*
4592  * call-seq:
4593  * mutex.sleep(timeout = nil) -> number
4594  *
4595  * Releases the lock and sleeps +timeout+ seconds if it is given and
4596  * non-nil or forever. Raises +ThreadError+ if +mutex+ wasn't locked by
4597  * the current thread.
4598  *
4599  * When the thread is next woken up, it will attempt to reacquire
4600  * the lock.
4601  *
4602  * Note that this method can wakeup without explicit Thread#wakeup call.
4603  * For example, receiving signal and so on.
4604  */
4605 static VALUE
4607 {
4608  VALUE timeout;
4609 
4610  rb_scan_args(argc, argv, "01", &timeout);
4611  return rb_mutex_sleep(self, timeout);
4612 }
4613 
4614 /*
4615  * call-seq:
4616  * mutex.synchronize { ... } -> result of the block
4617  *
4618  * Obtains a lock, runs the block, and releases the lock when the block
4619  * completes. See the example under +Mutex+.
4620  */
4621 
4622 VALUE
4624 {
4625  rb_mutex_lock(mutex);
4626  return rb_ensure(func, arg, rb_mutex_unlock, mutex);
4627 }
4628 
4629 /*
4630  * call-seq:
4631  * mutex.synchronize { ... } -> result of the block
4632  *
4633  * Obtains a lock, runs the block, and releases the lock when the block
4634  * completes. See the example under +Mutex+.
4635  */
4636 static VALUE
4638 {
4639  if (!rb_block_given_p()) {
4640  rb_raise(rb_eThreadError, "must be called with a block");
4641  }
4642 
4643  return rb_mutex_synchronize(self, rb_yield, Qundef);
4644 }
4645 
4647 {
4648  rb_mutex_t *m;
4649  GetMutexPtr(self, m);
4650 
4651  m->allow_trap = val;
4652 }
4653 
4654 /*
4655  * Document-class: ThreadShield
4656  */
4657 static void
4659 {
4660  rb_gc_mark((VALUE)ptr);
4661 }
4662 
4664  "thread_shield",
4665  {thread_shield_mark, 0, 0,},
4667 };
4668 
4669 static VALUE
4671 {
4672  return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
4673 }
4674 
4675 #define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
4676 #define THREAD_SHIELD_WAITING_MASK (FL_USER0|FL_USER1|FL_USER2|FL_USER3|FL_USER4|FL_USER5|FL_USER6|FL_USER7|FL_USER8|FL_USER9|FL_USER10|FL_USER11|FL_USER12|FL_USER13|FL_USER14|FL_USER15|FL_USER16|FL_USER17|FL_USER18|FL_USER19)
4677 #define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
4678 #define rb_thread_shield_waiting(b) (int)((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT)
4679 
4680 static inline void
4682 {
4683  unsigned int w = rb_thread_shield_waiting(b);
4684  w++;
4686  rb_raise(rb_eRuntimeError, "waiting count overflow");
4687  RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
4688  RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
4689 }
4690 
4691 static inline void
4693 {
4694  unsigned int w = rb_thread_shield_waiting(b);
4695  if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
4696  w--;
4697  RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
4698  RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
4699 }
4700 
4701 VALUE
4703 {
4704  VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
4705  rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
4706  return thread_shield;
4707 }
4708 
4709 /*
4710  * Wait a thread shield.
4711  *
4712  * Returns
4713  * true: acquired the thread shield
4714  * false: the thread shield was destroyed and no other threads waiting
4715  * nil: the thread shield was destroyed but still in use
4716  */
4717 VALUE
4719 {
4720  VALUE mutex = GetThreadShieldPtr(self);
4721  rb_mutex_t *m;
4722 
4723  if (!mutex) return Qfalse;
4724  GetMutexPtr(mutex, m);
4725  if (m->th == GET_THREAD()) return Qnil;
4727  rb_mutex_lock(mutex);
4729  if (DATA_PTR(self)) return Qtrue;
4730  rb_mutex_unlock(mutex);
4731  return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
4732 }
4733 
4734 /*
4735  * Release a thread shield, and return true if it has waiting threads.
4736  */
4737 VALUE
4739 {
4740  VALUE mutex = GetThreadShieldPtr(self);
4741  rb_mutex_unlock(mutex);
4742  return rb_thread_shield_waiting(self) > 0 ? Qtrue : Qfalse;
4743 }
4744 
4745 /*
4746  * Release and destroy a thread shield, and return true if it has waiting threads.
4747  */
4748 VALUE
4750 {
4751  VALUE mutex = GetThreadShieldPtr(self);
4752  DATA_PTR(self) = 0;
4753  rb_mutex_unlock(mutex);
4754  return rb_thread_shield_waiting(self) > 0 ? Qtrue : Qfalse;
4755 }
4756 
4757 /* variables for recursive traversals */
4759 
4760 extern const struct st_hash_type st_hashtype_num;
4761 
4762 static VALUE
4764 {
4765  VALUE hash = rb_hash_new();
4767  return hash;
4768 }
4769 
4770 ID rb_frame_last_func(void);
4771 
4772 /*
4773  * Returns the current "recursive list" used to detect recursion.
4774  * This list is a hash table, unique for the current thread and for
4775  * the current __callee__.
4776  */
4777 
4778 static VALUE
4780 {
4782  VALUE list;
4783  if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
4784  hash = ident_hash_new();
4786  list = Qnil;
4787  }
4788  else {
4789  list = rb_hash_aref(hash, sym);
4790  }
4791  if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
4792  list = ident_hash_new();
4794  }
4795  return list;
4796 }
4797 
4798 VALUE
4800 {
4803  return old;
4804 }
4805 
4806 void
4808 {
4810 }
4811 
4812 /*
4813  * Returns Qtrue iff obj_id (or the pair <obj, paired_obj>) is already
4814  * in the recursion list.
4815  * Assumes the recursion list is valid.
4816  */
4817 
4818 static VALUE
4819 recursive_check(VALUE list, VALUE obj_id, VALUE paired_obj_id)
4820 {
4821 #if SIZEOF_LONG == SIZEOF_VOIDP
4822  #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
4823 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
4824  #define OBJ_ID_EQL(obj_id, other) (RB_TYPE_P((obj_id), T_BIGNUM) ? \
4825  rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
4826 #endif
4827 
4828  VALUE pair_list = rb_hash_lookup2(list, obj_id, Qundef);
4829  if (pair_list == Qundef)
4830  return Qfalse;
4831  if (paired_obj_id) {
4832  if (!RB_TYPE_P(pair_list, T_HASH)) {
4833  if (!OBJ_ID_EQL(paired_obj_id, pair_list))
4834  return Qfalse;
4835  }
4836  else {
4837  if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
4838  return Qfalse;
4839  }
4840  }
4841  return Qtrue;
4842 }
4843 
4844 /*
4845  * Pushes obj_id (or the pair <obj_id, paired_obj_id>) in the recursion list.
4846  * For a single obj_id, it sets list[obj_id] to Qtrue.
4847  * For a pair, it sets list[obj_id] to paired_obj_id if possible,
4848  * otherwise list[obj_id] becomes a hash like:
4849  * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
4850  * Assumes the recursion list is valid.
4851  */
4852 
4853 static void
4855 {
4856  VALUE pair_list;
4857 
4858  if (!paired_obj) {
4859  rb_hash_aset(list, obj, Qtrue);
4860  }
4861  else if ((pair_list = rb_hash_lookup2(list, obj, Qundef)) == Qundef) {
4862  rb_hash_aset(list, obj, paired_obj);
4863  }
4864  else {
4865  if (!RB_TYPE_P(pair_list, T_HASH)){
4866  VALUE other_paired_obj = pair_list;
4867  pair_list = rb_hash_new();
4868  rb_hash_aset(pair_list, other_paired_obj, Qtrue);
4869  rb_hash_aset(list, obj, pair_list);
4870  }
4871  rb_hash_aset(pair_list, paired_obj, Qtrue);
4872  }
4873 }
4874 
4875 /*
4876  * Pops obj_id (or the pair <obj_id, paired_obj_id>) from the recursion list.
4877  * For a pair, if list[obj_id] is a hash, then paired_obj_id is
4878  * removed from the hash and no attempt is made to simplify
4879  * list[obj_id] from {only_one_paired_id => true} to only_one_paired_id
4880  * Assumes the recursion list is valid.
4881  */
4882 
4883 static int
4885 {
4886  if (paired_obj) {
4887  VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
4888  if (pair_list == Qundef) {
4889  return 0;
4890  }
4891  if (RB_TYPE_P(pair_list, T_HASH)) {
4892  rb_hash_delete(pair_list, paired_obj);
4893  if (!RHASH_EMPTY_P(pair_list)) {
4894  return 1; /* keep hash until is empty */
4895  }
4896  }
4897  }
4898  rb_hash_delete(list, obj);
4899  return 1;
4900 }
4901 
4903  VALUE (*func) (VALUE, VALUE, int);
4909 };
4910 
4911 static VALUE
4913 {
4914  struct exec_recursive_params *p = (void *)data;
4915  return (*p->func)(p->obj, p->arg, FALSE);
4916 }
4917 
4918 /*
4919  * Calls func(obj, arg, recursive), where recursive is non-zero if the
4920  * current method is called recursively on obj, or on the pair <obj, pairid>
4921  * If outer is 0, then the innermost func will be called with recursive set
4922  * to Qtrue, otherwise the outermost func will be called. In the latter case,
4923  * all inner func are short-circuited by throw.
4924  * Implementation details: the value thrown is the recursive list which is
4925  * proper to the current method and unlikely to be caught anywhere else.
4926  * list[recursive_key] is used as a flag for the outermost call.
4927  */
4928 
4929 static VALUE
4931 {
4932  VALUE result = Qundef;
4933  const ID mid = rb_frame_last_func();
4934  const VALUE sym = mid ? ID2SYM(mid) : ID2SYM(idNULL);
4935  struct exec_recursive_params p;
4936  int outermost;
4938  p.objid = rb_obj_id(obj);
4939  p.obj = obj;
4940  p.pairid = pairid;
4941  p.arg = arg;
4942  outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);
4943 
4944  if (recursive_check(p.list, p.objid, pairid)) {
4945  if (outer && !outermost) {
4946  rb_throw_obj(p.list, p.list);
4947  }
4948  return (*func)(obj, arg, TRUE);
4949  }
4950  else {
4951  int state;
4952 
4953  p.func = func;
4954 
4955  if (outermost) {
4957  recursive_push(p.list, p.objid, p.pairid);
4958  result = rb_catch_protect(p.list, exec_recursive_i, (VALUE)&p, &state);
4959  if (!recursive_pop(p.list, p.objid, p.pairid)) goto invalid;
4960  if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
4961  if (state) JUMP_TAG(state);
4962  if (result == p.list) {
4963  result = (*func)(obj, arg, TRUE);
4964  }
4965  }
4966  else {
4967  recursive_push(p.list, p.objid, p.pairid);
4968  PUSH_TAG();
4969  if ((state = EXEC_TAG()) == 0) {
4970  result = (*func)(obj, arg, FALSE);
4971  }
4972  POP_TAG();
4973  if (!recursive_pop(p.list, p.objid, p.pairid)) {
4974  invalid:
4975  rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list "
4976  "for %+"PRIsVALUE" in %+"PRIsVALUE,
4977  sym, rb_thread_current());
4978  }
4979  if (state) JUMP_TAG(state);
4980  }
4981  }
4982  *(volatile struct exec_recursive_params *)&p;
4983  return result;
4984 }
4985 
4986 /*
4987  * Calls func(obj, arg, recursive), where recursive is non-zero if the
4988  * current method is called recursively on obj
4989  */
4990 
4991 VALUE
4993 {
4994  return exec_recursive(func, obj, 0, arg, 0);
4995 }
4996 
4997 /*
4998  * Calls func(obj, arg, recursive), where recursive is non-zero if the
4999  * current method is called recursively on the ordered pair <obj, paired_obj>
5000  */
5001 
5002 VALUE
5004 {
5005  return exec_recursive(func, obj, rb_obj_id(paired_obj), arg, 0);
5006 }
5007 
5008 /*
5009  * If recursion is detected on the current method and obj, the outermost
5010  * func will be called with (obj, arg, Qtrue). All inner func will be
5011  * short-circuited using throw.
5012  */
5013 
5014 VALUE
5016 {
5017  return exec_recursive(func, obj, 0, arg, 1);
5018 }
5019 
5020 /*
5021  * If recursion is detected on the current method, obj and paired_obj,
5022  * the outermost func will be called with (obj, arg, Qtrue). All inner
5023  * func will be short-circuited using throw.
5024  */
5025 
5026 VALUE
5028 {
5029  return exec_recursive(func, obj, rb_obj_id(paired_obj), arg, 1);
5030 }
5031 
5032 /*
5033  * call-seq:
5034  * thread.backtrace -> array
5035  *
5036  * Returns the current backtrace of the target thread.
5037  *
5038  */
5039 
5040 static VALUE
5042 {
5043  return rb_vm_thread_backtrace(argc, argv, thval);
5044 }
5045 
5046 /* call-seq:
5047  * thread.backtrace_locations(*args) -> array or nil
5048  *
5049  * Returns the execution stack for the target thread---an array containing
5050  * backtrace location objects.
5051  *
5052  * See Thread::Backtrace::Location for more information.
5053  *
5054  * This method behaves similarly to Kernel#caller_locations except it applies
5055  * to a specific thread.
5056  */
5057 static VALUE
5059 {
5060  return rb_vm_thread_backtrace_locations(argc, argv, thval);
5061 }
5062 
5063 /*
5064  * Document-class: ThreadError
5065  *
5066  * Raised when an invalid operation is attempted on a thread.
5067  *
5068  * For example, when no other thread has been started:
5069  *
5070  * Thread.stop
5071  *
5072  * This will raises the following exception:
5073  *
5074  * ThreadError: stopping only thread
5075  * note: use sleep to stop forever
5076  */
5077 
5078 void
5080 {
5081 #undef rb_intern
5082 #define rb_intern(str) rb_intern_const(str)
5083 
5084  VALUE cThGroup;
5085  rb_thread_t *th = GET_THREAD();
5086 
5087  sym_never = ID2SYM(rb_intern("never"));
5088  sym_immediate = ID2SYM(rb_intern("immediate"));
5089  sym_on_blocking = ID2SYM(rb_intern("on_blocking"));
5090  id_locals = rb_intern("locals");
5091 
5102  rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
5104 #if THREAD_DEBUG < 0
5105  rb_define_singleton_method(rb_cThread, "DEBUG", rb_thread_s_debug, 0);
5106  rb_define_singleton_method(rb_cThread, "DEBUG=", rb_thread_s_debug_set, 1);
5107 #endif
5110  rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);
5111 
5112  rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
5117  rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
5128  rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
5129  rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
5130  rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
5131  rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
5134  rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
5135  rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
5139  rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
5140 
5142 
5143  closed_stream_error = rb_exc_new2(rb_eIOError, "stream closed");
5146 
5147  cThGroup = rb_define_class("ThreadGroup", rb_cObject);
5149  rb_define_method(cThGroup, "list", thgroup_list, 0);
5150  rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
5151  rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
5152  rb_define_method(cThGroup, "add", thgroup_add, 1);
5153 
5154  {
5155  th->thgroup = th->vm->thgroup_default = rb_obj_alloc(cThGroup);
5156  rb_define_const(cThGroup, "Default", th->thgroup);
5157  }
5158 
5159  rb_cMutex = rb_define_class("Mutex", rb_cObject);
5161  rb_define_method(rb_cMutex, "initialize", mutex_initialize, 0);
5163  rb_define_method(rb_cMutex, "try_lock", rb_mutex_trylock, 0);
5166  rb_define_method(rb_cMutex, "sleep", mutex_sleep, -1);
5169 
5170  recursive_key = rb_intern("__recursive_key__");
5172 
5173  /* init thread core */
5174  {
5175  /* main thread setting */
5176  {
5177  /* acquire global vm lock */
5178  gvl_init(th->vm);
5179  gvl_acquire(th->vm, th);
5180  native_mutex_initialize(&th->vm->thread_destruct_lock);
5181  native_mutex_initialize(&th->interrupt_lock);
5182  native_cond_initialize(&th->interrupt_cond,
5183  RB_CONDATTR_CLOCK_MONOTONIC);
5184 
5188 
5189  th->interrupt_mask = 0;
5190  }
5191  }
5192 
5193  rb_thread_create_timer_thread();
5194 
5195  /* suppress warnings on cygwin, mingw and mswin.*/
5196  (void)native_mutex_trylock;
5197 }
5198 
5199 int
5201 {
5202  rb_thread_t *th = ruby_thread_from_native();
5203 
5204  return th != 0;
5205 }
5206 
5207 static int
5209 {
5210  VALUE thval = key;
5211  rb_thread_t *th;
5212  GetThreadPtr(thval, th);
5213 
5215  *found = 1;
5216  }
5217  else if (th->locking_mutex) {
5218  rb_mutex_t *mutex;
5219  GetMutexPtr(th->locking_mutex, mutex);
5220 
5221  native_mutex_lock(&mutex->lock);
5222  if (mutex->th == th || (!mutex->th && mutex->cond_waiting)) {
5223  *found = 1;
5224  }
5225  native_mutex_unlock(&mutex->lock);
5226  }
5227 
5228  return (*found) ? ST_STOP : ST_CONTINUE;
5229 }
5230 
5231 #ifdef DEBUG_DEADLOCK_CHECK
5232 static int
5233 debug_i(st_data_t key, st_data_t val, int *found)
5234 {
5235  VALUE thval = key;
5236  rb_thread_t *th;
5237  GetThreadPtr(thval, th);
5238 
5239  printf("th:%p %d %d", th, th->status, th->interrupt_flag);
5240  if (th->locking_mutex) {
5241  rb_mutex_t *mutex;
5242  GetMutexPtr(th->locking_mutex, mutex);
5243 
5244  native_mutex_lock(&mutex->lock);
5245  printf(" %p %d\n", mutex->th, mutex->cond_waiting);
5246  native_mutex_unlock(&mutex->lock);
5247  }
5248  else
5249  puts("");
5250 
5251  return ST_CONTINUE;
5252 }
5253 #endif
5254 
5255 static void
5257 {
5258  int found = 0;
5259 
5260  if (vm_living_thread_num(vm) > vm->sleeper) return;
5261  if (vm_living_thread_num(vm) < vm->sleeper) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
5262  if (patrol_thread && patrol_thread != GET_THREAD()) return;
5263 
5265 
5266  if (!found) {
5267  VALUE argv[2];
5268  argv[0] = rb_eFatal;
5269  argv[1] = rb_str_new2("No live threads left. Deadlock?");
5270 #ifdef DEBUG_DEADLOCK_CHECK
5271  printf("%d %d %p %p\n", vm->living_threads->num_entries, vm->sleeper, GET_THREAD(), vm->main_thread);
5272  st_foreach(vm->living_threads, debug_i, (st_data_t)0);
5273 #endif
5274  vm->sleeper--;
5276  }
5277 }
5278 
5279 static void
5280 update_coverage(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
5281 {
5283  if (coverage && RBASIC(coverage)->klass == 0) {
5284  long line = rb_sourceline() - 1;
5285  long count;
5286  if (RARRAY_AREF(coverage, line) == Qnil) {
5287  return;
5288  }
5289  count = FIX2LONG(RARRAY_AREF(coverage, line)) + 1;
5290  if (POSFIXABLE(count)) {
5292  }
5293  }
5294 }
5295 
5296 VALUE
5298 {
5299  return GET_VM()->coverages;
5300 }
5301 
5302 void
5304 {
5305  GET_VM()->coverages = coverages;
5307 }
5308 
5309 void
5311 {
5312  GET_VM()->coverages = Qfalse;
5314 }
5315 
5316 VALUE
5318 {
5319  VALUE interrupt_mask = rb_hash_new();
5320  rb_thread_t *cur_th = GET_THREAD();
5321 
5322  rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
5323  rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
5324 
5325  return rb_ensure(b_proc, data, rb_ary_pop, cur_th->pending_interrupt_mask_stack);
5326 }
5327 
5328 void
5329 ruby_kill(rb_pid_t pid, int sig)
5330 {
5331  int err;
5332  rb_thread_t *th = GET_THREAD();
5333 
5334  /*
5335  * When target pid is self, many caller assume signal will be
5336  * delivered immediately and synchronously.
5337  */
5338  {
5339  GVL_UNLOCK_BEGIN();
5340  native_mutex_lock(&th->interrupt_lock);
5341  err = kill(pid, sig);
5342  native_cond_wait(&th->interrupt_cond, &th->interrupt_lock);
5343  native_mutex_unlock(&th->interrupt_lock);
5344  GVL_UNLOCK_END();
5345  }
5346  if (err < 0) {
5347  rb_sys_fail(0);
5348  }
5349 }
#define RBASIC_CLEAR_CLASS(obj)
Definition: internal.h:609
static int vm_living_thread_num(rb_vm_t *vm)
Definition: thread.c:2988
struct timeval rb_time_interval(VALUE num)
Definition: time.c:2411
rb_control_frame_t * cfp
Definition: vm_core.h:531
void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
Definition: thread.c:404
rb_thread_list_t * join_list
Definition: vm_core.h:613
#define T_OBJECT
Definition: ruby.h:477
static VALUE sym_never
Definition: thread.c:82
static VALUE thgroup_enclose(VALUE group)
Definition: thread.c:4071
VALUE rb_eStandardError
Definition: error.c:546
rb_nativethread_cond_t cond
Definition: thread.c:391
static VALUE rb_thread_variable_p(VALUE thread, VALUE key)
Definition: thread.c:3085
#define eKillSignal
Definition: thread.c:93
VALUE * stack_end
Definition: vm_core.h:622
#define RUBY_VM_CHECK_INTS(th)
Definition: vm_core.h:991
unsigned long running_time_us
Definition: vm_core.h:654
rb_vm_t * vm
Definition: vm_core.h:526
VALUE rb_exc_new(VALUE etype, const char *ptr, long len)
Definition: error.c:573
static VALUE thgroup_add(VALUE group, VALUE thread)
Definition: thread.c:4128
void ruby_kill(rb_pid_t pid, int sig)
Definition: thread.c:5329
static int check_deadlock_i(st_data_t key, st_data_t val, int *found)
Definition: thread.c:5208
#define rb_exc_new2
Definition: intern.h:247
VALUE rb_mutex_sleep(VALUE self, VALUE timeout)
Definition: thread.c:4571
int ruby_thread_has_gvl_p(void)
Definition: thread.c:1496
VALUE rb_ary_pop(VALUE ary)
Definition: array.c:944
static VALUE rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
Definition: thread.c:1792
static const rb_thread_t * patrol_thread
Definition: thread.c:4367
struct rb_mutex_struct * next_mutex
Definition: thread.c:393
void ruby_thread_stack_overflow(rb_thread_t *th)
Definition: thread.c:2088
#define RARRAY_LEN(a)
Definition: ruby.h:878
void rb_bug(const char *fmt,...)
Definition: error.c:327
static VALUE rb_thread_priority(VALUE thread)
Definition: thread.c:3120
int gettimeofday(struct timeval *, struct timezone *)
Definition: win32.c:4313
void rb_postponed_job_flush(rb_vm_t *vm)
Definition: vm_trace.c:1514
#define FALSE
Definition: nkf.h:174
#define RUBY_TYPED_FREE_IMMEDIATELY
Definition: ruby.h:1015
#define rb_hash_lookup
Definition: tcltklib.c:269
#define mutex_mark
Definition: thread.c:4189
static int lock_func(rb_thread_t *th, rb_mutex_t *mutex, int timeout_ms)
Definition: thread.c:4313
static const char * rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th)
Definition: thread.c:4468
VALUE rb_obj_id(VALUE obj)
Definition: gc.c:2376
static void thread_cleanup_func_before_exec(void *th_ptr)
Definition: thread.c:464
#define INT2NUM(x)
Definition: ruby.h:1296
static VALUE trap(int sig, sighandler_t func, VALUE command)
Definition: signal.c:1061
struct rb_thread_struct * running_thread
Definition: vm_core.h:355
VALUE rb_make_exception(int argc, VALUE *argv)
Definition: eval.c:682
void rb_mutex_allow_trap(VALUE self, int val)
Definition: thread.c:4646
struct timeval * tv
Definition: thread.c:3741
Definition: st.h:69
void rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
Definition: thread.c:298
#define RUBY_VM_SET_INTERRUPT(th)
Definition: vm_core.h:962
static VALUE rb_thread_abort_exc_set(VALUE thread, VALUE val)
Definition: thread.c:2549
VALUE rb_mutex_owned_p(VALUE self)
Definition: thread.c:4453
st_table * local_storage
Definition: vm_core.h:611
double limit
Definition: thread.c:756
Definition: st.h:100
int pending_interrupt_queue_checked
Definition: vm_core.h:582
VALUE rb_eSignal
Definition: error.c:544
static void rb_mutex_abandon_all(rb_mutex_t *mutexes)
Definition: thread.c:4543
struct rb_blocking_region_buffer * rb_thread_blocking_region_begin(void)
Definition: thread.c:1236
rb_fdset_t * read
Definition: thread.c:3738
VALUE rb_exec_recursive_outer(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE arg)
Definition: thread.c:5015
#define NUM2INT(x)
Definition: ruby.h:630
int count
Definition: encoding.c:48
static int max(int a, int b)
Definition: strftime.c:141
void rb_define_singleton_method(VALUE obj, const char *name, VALUE(*func)(ANYARGS), int argc)
Defines a singleton method for obj.
Definition: class.c:1646
static VALUE thgroup_enclosed_p(VALUE group)
Definition: thread.c:4090
int rb_thread_check_trap_pending(void)
Definition: thread.c:1153
void rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
Definition: thread.c:1957
VALUE rb_thread_list(void)
Definition: thread.c:2397
static VALUE thread_join_sleep(VALUE arg)
Definition: thread.c:782
const VALUE coverage
Definition: vm_core.h:229
VALUE rb_exec_recursive(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE arg)
Definition: thread.c:4992
#define CLASS_OF(v)
Definition: ruby.h:440
static int terminate_atfork_before_exec_i(st_data_t key, st_data_t val, st_data_t current_th)
Definition: thread.c:3945
void rb_thread_blocking_region_end(struct rb_blocking_region_buffer *region)
Definition: thread.c:1245
static VALUE rb_thread_variables(VALUE thread)
Definition: thread.c:3056
struct rb_thread_struct * th
Definition: vm_core.h:509
void rb_unblock_function_t(void *)
Definition: intern.h:864
VALUE rb_ary_delete_at(VALUE ary, long pos)
Definition: array.c:2962
Definition: id.h:94
rb_unblock_function_t * func
Definition: vm_core.h:500
#define Qtrue
Definition: ruby.h:426
int rb_remove_event_hook(rb_event_hook_func_t func)
Definition: vm_trace.c:204
int st_insert(st_table *, st_data_t, st_data_t)
static void update_coverage(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
Definition: thread.c:5280
static VALUE thread_s_new(int argc, VALUE *argv, VALUE klass)
Definition: thread.c:685
void rb_error_frozen(const char *what)
Definition: error.c:2077
#define TypedData_Wrap_Struct(klass, data_type, sval)
Definition: ruby.h:1027
#define CLOCK_MONOTONIC
Definition: win32.h:129
VALUE pending_interrupt_mask_stack
Definition: vm_core.h:583
VALUE rb_ary_shift(VALUE ary)
Definition: array.c:995
static VALUE threadptr_local_aref(rb_thread_t *th, ID id)
Definition: thread.c:2754
#define TypedData_Get_Struct(obj, type, data_type, sval)
Definition: ruby.h:1041
VALUE rb_mod_ancestors(VALUE mod)
Definition: class.c:1037
static VALUE mutex_initialize(VALUE self)
Definition: thread.c:4249
static void rb_mutex_abandon_keeping_mutexes(rb_thread_t *th)
Definition: thread.c:4521
void rb_threadptr_signal_raise(rb_thread_t *th, int sig)
Definition: thread.c:2064
long tv_sec
Definition: ossl_asn1.c:17
struct rb_thread_struct volatile * th
Definition: thread.c:392
static struct timeval double2timeval(double d)
Definition: thread.c:964
#define sysstack_error
Definition: vm_core.h:901
SOCKET rb_w32_get_osfhandle(int)
Definition: win32.c:988
VALUE rb_eTypeError
Definition: error.c:548
VALUE rb_thread_stop(void)
Definition: thread.c:2346
#define TH_JUMP_TAG(th, st)
Definition: eval_intern.h:171
static VALUE mutex_alloc(VALUE klass)
Definition: thread.c:4231
VALUE rb_mutex_synchronize(VALUE mutex, VALUE(*func)(VALUE arg), VALUE arg)
Definition: thread.c:4623
static const rb_data_type_t mutex_data_type
Definition: thread.c:4213
VALUE rb_ary_push(VALUE ary, VALUE item)
Definition: array.c:900
void rb_thread_wait_for(struct timeval time)
Definition: thread.c:1119
SSL_METHOD *(* func)(void)
Definition: ossl_ssl.c:113
st_table * living_threads
Definition: vm_core.h:357
void rb_signal_exec(rb_thread_t *th, int sig)
Definition: signal.c:873
static int handle_interrupt_arg_check_i(VALUE key, VALUE val)
Definition: thread.c:1676
struct st_table * rb_hash_tbl_raw(VALUE hash)
Definition: hash.c:360
VALUE rb_ary_tmp_new(long capa)
Definition: array.c:538
#define rb_fd_zero(f)
Definition: intern.h:349
int kill(int, int)
Definition: win32.c:4445
static VALUE rb_thread_safe_level(VALUE thread)
Definition: thread.c:2722
static VALUE rb_thread_aset(VALUE self, VALUE id, VALUE val)
Definition: thread.c:2884
VALUE rb_thread_current(void)
Definition: thread.c:2405
if((ID)(DISPID) nameid !=nameid)
Definition: win32ole.c:770
#define PRIxVALUE
Definition: ruby.h:135
void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
Definition: thread.c:1548
#define OBJ_ID_EQL(obj_id, other)
void rb_raise(VALUE exc, const char *fmt,...)
Definition: error.c:1857
VALUE rb_thread_alloc(VALUE klass)
Definition: vm.c:2233
static VALUE rb_mutex_sleep_forever(VALUE time)
Definition: thread.c:4556
static VALUE rb_thread_abort_exc(VALUE thread)
Definition: thread.c:2525
static void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
Definition: thread.c:1222
VALUE rb_ivar_get(VALUE, ID)
Definition: variable.c:1115
void rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
Definition: thread.c:280
void * rb_thread_call_without_gvl(void *(*func)(void *data), void *data1, rb_unblock_function_t *ubf, void *data2)
Definition: thread.c:1377
VALUE rb_ary_clear(VALUE ary)
Definition: array.c:3392
pthread_mutex_t rb_nativethread_lock_t
static void clear_coverage(void)
Definition: thread.c:3895
int rb_thread_alone(void)
Definition: thread.c:2994
VALUE rb_convert_type(VALUE, int, const char *, const char *)
Definition: object.c:2637
#define TH_EXEC_TAG()
Definition: eval_intern.h:165
void rb_define_alloc_func(VALUE, rb_alloc_func_t)
VALUE rb_obj_is_kind_of(VALUE, VALUE)
Definition: object.c:646
#define T_HASH
Definition: ruby.h:485
static void sleep_forever(rb_thread_t *th, int nodeadlock, int spurious_check)
Definition: thread.c:991
VALUE rb_thread_local_aref(VALUE thread, ID id)
Definition: thread.c:2765
rb_nativethread_lock_t lock
Definition: thread.c:390
#define DATA_PTR(dta)
Definition: ruby.h:992
VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, int *stateptr)
Definition: vm_eval.c:1842
#define RUBY_VM_SET_TRAP_INTERRUPT(th)
Definition: vm_core.h:964
static size_t thgroup_memsize(const void *ptr)
Definition: thread.c:3969
static void rb_mutex_abandon_locking_mutex(rb_thread_t *th)
Definition: thread.c:4530
static VALUE sym_immediate
Definition: thread.c:80
static int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region, rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
Definition: thread.c:1204
void rb_gc_mark(VALUE ptr)
Definition: gc.c:3607
static void thread_shield_mark(void *ptr)
Definition: thread.c:4658
st_data_t st_index_t
Definition: st.h:48
#define TAG_RAISE
Definition: eval_intern.h:193
#define PUSH_TAG()
Definition: eval_intern.h:141
static size_t mutex_memsize(const void *ptr)
Definition: thread.c:4208
static volatile int system_working
Definition: thread.c:95
static VALUE thread_join(rb_thread_t *target_th, double delay)
Definition: thread.c:808
static VALUE remove_from_join_list(VALUE arg)
Definition: thread.c:761
union select_args::@154 as
VALUE rb_thread_kill(VALUE thread)
Definition: thread.c:2191
VALUE rb_mutex_locked_p(VALUE self)
Definition: thread.c:4267
static int rb_threadptr_dead(rb_thread_t *th)
Definition: thread.c:2603
ID rb_check_id(volatile VALUE *namep)
Returns ID for the given name if it is interned already, or 0.
Definition: ripper.c:17365
#define FIXNUM_P(f)
Definition: ruby.h:347
static VALUE rb_thread_alive_p(VALUE thread)
Definition: thread.c:2672
rb_fdset_t * write
Definition: thread.c:3739
VALUE rb_exec_recursive_paired(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
Definition: thread.c:5003
#define rb_fd_rcopy(d, s)
Definition: thread.c:3422
void rb_thread_start_timer_thread(void)
Definition: thread.c:3874
struct rb_thread_struct::@169 machine
static rb_fdset_t * init_set_fd(int fd, rb_fdset_t *fds)
Definition: thread.c:3725
const char * rb_obj_classname(VALUE)
Definition: variable.c:406
VALUE rb_cMutex
Definition: thread.c:77
int allow_trap
Definition: thread.c:395
#define RB_WAITFD_OUT
Definition: io.h:49
VALUE thgroup_default
Definition: vm_core.h:358
#define rb_fd_set(n, f)
Definition: intern.h:350
time_t tv_sec
Definition: missing.h:51
#define sym(x)
Definition: date_core.c:3695
static VALUE rb_thread_stop_p(VALUE thread)
Definition: thread.c:2697
static void thread_cleanup_func(void *th_ptr, int atfork)
Definition: thread.c:475
static double timeofday(void)
Definition: thread.c:1087
RUBY_SYMBOL_EXPORT_BEGIN typedef unsigned long st_data_t
Definition: st.h:20
#define TAG_FATAL
Definition: eval_intern.h:195
int ruby_native_thread_p(void)
Definition: thread.c:5200
static VALUE rb_thread_s_abort_exc_set(VALUE self, VALUE val)
Definition: thread.c:2502
static rb_atomic_t threadptr_get_interrupts(rb_thread_t *th)
Definition: thread.c:1944
#define rb_fd_isset(n, f)
Definition: intern.h:352
#define RB_BLOCK_CALL_FUNC_ARGLIST(yielded_arg, callback_arg)
Definition: ruby.h:1519
void rb_hash_foreach(VALUE hash, int(*func)(ANYARGS), VALUE farg)
Definition: hash.c:273
VALUE rb_thread_wakeup(VALUE thread)
Definition: thread.c:2276
static VALUE rb_thread_s_main(VALUE klass)
Definition: thread.c:2439
void rb_exc_raise(VALUE mesg)
Definition: eval.c:567
static void rb_thread_wait_fd_rw(int fd, int read)
Definition: thread.c:3503
static VALUE sym_on_blocking
Definition: thread.c:81
VALUE * stack
Definition: vm_core.h:529
static void rb_thread_schedule_limits(unsigned long limits_us)
Definition: thread.c:1174
#define RB_TYPE_P(obj, type)
Definition: ruby.h:1672
void rb_reset_random_seed(void)
Definition: random.c:1323
int rb_thread_fd_writable(int fd)
Definition: thread.c:3529
#define RHASH(obj)
Definition: ruby.h:1124
static void rb_threadptr_interrupt_common(rb_thread_t *th, int trap)
Definition: thread.c:341
static VALUE thgroup_s_alloc(VALUE klass)
Definition: thread.c:4000
#define POSFIXABLE(f)
Definition: ruby.h:348
#define RUBY_VM_INTERRUPTED_ANY(th)
Definition: vm_core.h:966
#define TH_POP_TAG()
Definition: eval_intern.h:128
int st_lookup(st_table *, st_data_t, st_data_t *)
static int thread_list_i(st_data_t key, st_data_t val, void *data)
Definition: thread.c:2357
#define MEMZERO(p, type, n)
Definition: ruby.h:1359
#define PRI_TIMET_PREFIX
Definition: ruby.h:145
static VALUE coverage(VALUE fname, int n)
Definition: ripper.c:11922
#define closed_stream_error
Definition: thread.c:97
VALUE rb_vm_thread_backtrace(int argc, VALUE *argv, VALUE thval)
Definition: vm_backtrace.c:906
static const char * thread_status_name(rb_thread_t *th)
Definition: thread.c:2584
rb_thread_t * target
Definition: thread.c:755
#define RUBY_THREAD_PRIORITY_MAX
Definition: thread.c:69
fd_set rb_fdset_t
Definition: intern.h:348
#define rb_fd_term(f)
Definition: intern.h:359
int t(void)
Definition: conftest.c:13
static VALUE rb_thread_priority_set(VALUE thread, VALUE prio)
Definition: thread.c:3155
double rb_num2dbl(VALUE)
Definition: object.c:2987
static int do_select(int n, rb_fdset_t *read, rb_fdset_t *write, rb_fdset_t *except, struct timeval *timeout)
Definition: thread.c:3426
static void sleep_for_polling(rb_thread_t *th)
Definition: thread.c:1110
int rb_block_given_p(void)
Definition: eval.c:712
VALUE rb_hash_aset(VALUE hash, VALUE key, VALUE val)
Definition: hash.c:1402
#define EXEC_TAG()
Definition: eval_intern.h:168
VALUE locking_mutex
Definition: vm_core.h:590
static const rb_data_type_t thread_shield_data_type
Definition: thread.c:4663
#define val
long tv_usec
Definition: ossl_asn1.c:18
RUBY_EXTERN VALUE rb_cObject
Definition: ruby.h:1561
VALUE rb_eRuntimeError
Definition: error.c:547
static VALUE rb_thread_inspect(VALUE thread)
Definition: thread.c:2738
#define RB_WAITFD_PRI
Definition: io.h:48
static ID id_locals
Definition: thread.c:83
#define PRIdVALUE
Definition: ruby.h:132
static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *)
Definition: thread.c:2044
#define rb_fd_ptr(f)
Definition: intern.h:356
VALUE rb_mutex_trylock(VALUE self)
Definition: thread.c:4294
int rb_typeddata_is_kind_of(VALUE obj, const rb_data_type_t *data_type)
Definition: error.c:510
VALUE rb_ary_new(void)
Definition: array.c:499
void * blocking_region_buffer
Definition: vm_core.h:567
static VALUE exec_recursive(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer)
Definition: thread.c:4930
static VALUE thread_create_core(VALUE thval, VALUE args, VALUE(*fn)(ANYARGS))
Definition: thread.c:625
void Init_Thread(void)
Definition: thread.c:5079
#define JUMP_TAG(st)
Definition: eval_intern.h:173
rb_iseq_t * iseq
Definition: vm_core.h:448
#define NIL_P(v)
Definition: ruby.h:438
static int rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
Definition: thread.c:1607
long tv_nsec
Definition: missing.h:52
void rb_thread_stop_timer_thread(int close_anyway)
Definition: thread.c:3860
#define UNLIKELY(x)
Definition: vm_core.h:109
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
Definition: class.c:611
static void rb_threadptr_ready(rb_thread_t *th)
Definition: thread.c:2038
int st_delete(st_table *, st_data_t *, st_data_t *)
int enclosed
Definition: thread.c:3964
#define rb_intern(str)
void rb_define_const(VALUE, const char *, VALUE)
Definition: variable.c:2228
void rb_thread_atfork_before_exec(void)
Definition: thread.c:3958
#define thread_debug
Definition: thread.c:210
static int rb_threadptr_pending_interrupt_empty_p(rb_thread_t *th)
Definition: thread.c:1601
#define OBJ_FROZEN(x)
Definition: ruby.h:1193
void rb_threadptr_check_signal(rb_thread_t *mth)
Definition: thread.c:3819
VALUE rb_class_inherited_p(VALUE, VALUE)
Definition: object.c:1560
int thread_abort_on_exception
Definition: vm_core.h:361
int argc
Definition: ruby.c:131
rb_thread_status
Definition: vm_core.h:475
static VALUE rb_thread_variable_get(VALUE thread, VALUE key)
Definition: thread.c:2918
#define Qfalse
Definition: ruby.h:425
void ruby_thread_init_stack(rb_thread_t *th)
Definition: thread.c:497
VALUE rb_proc_location(VALUE self)
Definition: proc.c:939
static VALUE rb_thread_exit(void)
Definition: thread.c:2252
RUBY_EXTERN VALUE rb_cModule
Definition: ruby.h:1580
void rb_thread_check_ints(void)
Definition: thread.c:1143
#define RUBY_UBF_PROCESS
Definition: intern.h:873
void rb_exit(int status)
Definition: process.c:3656
void rb_thread_fd_close(int fd)
Definition: thread.c:2135
#define T_NODE
Definition: ruby.h:498
VALUE rb_thread_shield_new(void)
Definition: thread.c:4702
volatile int sleeper
Definition: vm_core.h:363
void ruby_stop(int ex)
Calls ruby_cleanup() and exits the process.
Definition: eval.c:264
#define rb_str_new2
Definition: intern.h:840
VALUE rb_obj_alloc(VALUE)
Definition: object.c:1804
int err
Definition: win32.c:114
#define OBJ_FREEZE(x)
Definition: ruby.h:1194
#define EXIT_FAILURE
Definition: eval_intern.h:24
VALUE rb_thread_shield_release(VALUE self)
Definition: thread.c:4738
void rb_thread_atfork(void)
Definition: thread.c:3935
#define POP_TAG()
Definition: eval_intern.h:142
#define GVL_UNLOCK_BEGIN()
Definition: thread.c:136
static const rb_data_type_t thgroup_data_type
Definition: thread.c:3974
VALUE rb_thread_create(VALUE(*fn)(ANYARGS), void *arg)
Definition: thread.c:745
void rb_throw_obj(VALUE tag, VALUE value)
Definition: vm_eval.c:1740
static VALUE thread_s_current(VALUE klass)
Definition: thread.c:2420
#define FD_SET(fd, set)
Definition: win32.h:629
VALUE rb_cThreadShield
Definition: thread.c:78
static void sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec, int spurious_check)
Definition: thread.c:1104
#define ATOMIC_CAS(var, oldval, newval)
Definition: ruby_atomic.h:132
#define ALLOC(type)
Definition: ruby.h:1342
void rb_thread_polling(void)
Definition: thread.c:1126
VALUE read
Definition: io.c:8344
#define GetMutexPtr(obj, tobj)
Definition: thread.c:4186
static VALUE rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
Definition: thread.c:5058
int rb_thread_select(int max, fd_set *read, fd_set *write, fd_set *except, struct timeval *timeout)
Definition: thread.c:3536
VALUE rb_yield(VALUE)
Definition: vm_eval.c:948
static int recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
Definition: thread.c:4884
#define RARRAY_CONST_PTR(a)
Definition: ruby.h:886
void * rb_thread_call_without_gvl2(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2)
Definition: thread.c:1370
struct rb_unblock_callback oldubf
Definition: thread.c:111
#define rb_thread_set_current(th)
Definition: vm_core.h:942
int errno
#define TRUE
Definition: nkf.h:175
VALUE rb_uninterruptible(VALUE(*b_proc)(ANYARGS), VALUE data)
Definition: thread.c:5317
static int thgroup_list_i(st_data_t key, st_data_t val, st_data_t data)
Definition: thread.c:4018
#define EXIT_SUCCESS
Definition: error.c:29
VALUE special_exceptions[ruby_special_error_count]
Definition: vm_core.h:368
struct rb_mutex_struct * keeping_mutexes
Definition: vm_core.h:591
VALUE rb_thread_shield_wait(VALUE self)
Definition: thread.c:4718
VALUE rb_sprintf(const char *format,...)
Definition: sprintf.c:1250
int rb_get_next_signal(void)
Definition: signal.c:696
VALUE rb_hash_delete(VALUE hash, VALUE key)
Definition: hash.c:1005
int rb_thread_fd_select(int max, rb_fdset_t *read, rb_fdset_t *write, rb_fdset_t *except, struct timeval *timeout)
Definition: thread.c:3580
#define rb_fd_copy(d, s, n)
Definition: intern.h:353
static int set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg, struct rb_unblock_callback *old, int fail_if_interrupted)
Definition: thread.c:304
#define const
Definition: strftime.c:102
static int thread_fd_close_i(st_data_t key, st_data_t val, st_data_t data)
Definition: thread.c:2120
VALUE rb_hash_new(void)
Definition: hash.c:307
void ruby_xfree(void *x)
Definition: gc.c:6245
#define DELAY_INFTY
Definition: thread.c:752
int rb_threadptr_reset_raised(rb_thread_t *th)
Definition: thread.c:2110
int rb_scan_args(int argc, const VALUE *argv, const char *fmt,...)
Definition: class.c:1719
unsigned char buf[MIME_BUF_SIZE]
Definition: nkf.c:4308
#define RUBY_VM_SET_TIMER_INTERRUPT(th)
Definition: vm_core.h:961
#define PRIsVALUE
Definition: ruby.h:137
void rb_add_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
Definition: vm_trace.c:145
unsigned long ID
Definition: ruby.h:89
static VALUE thread_initialize(VALUE thread, VALUE args)
Definition: thread.c:720
handle_interrupt_timing
Definition: thread.c:1554
static void rb_check_deadlock(rb_vm_t *vm)
Definition: thread.c:5256
static VALUE rb_mutex_synchronize_m(VALUE self, VALUE args)
Definition: thread.c:4637
#define GVL_UNLOCK_END()
Definition: thread.c:141
#define Qnil
Definition: ruby.h:427
void rb_thread_sleep_forever(void)
Definition: thread.c:1073
static VALUE thread_shield_alloc(VALUE klass)
Definition: thread.c:4670
VALUE group
Definition: thread.c:3965
#define OBJ_TAINT(x)
Definition: ruby.h:1184
unsigned long VALUE
Definition: ruby.h:88
#define THREAD_SHIELD_WAITING_MASK
Definition: thread.c:4676
#define SAVE_ROOT_JMPBUF(th, stmt)
Definition: eval_intern.h:112
static VALUE result
Definition: nkf.c:40
RUBY_EXTERN VALUE rb_cThread
Definition: ruby.h:1594
int rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
Definition: thread.c:3779
static int keys_i(VALUE key, VALUE value, VALUE ary)
Definition: thread.c:3032
#define UNINITIALIZED_VAR(x)
Definition: vm_core.h:121
#define RBASIC(obj)
Definition: ruby.h:1116
const char * rb_class2name(VALUE)
Definition: variable.c:397
struct rb_thread_struct * main_thread
Definition: vm_core.h:354
static int clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
Definition: thread.c:3881
int error
Definition: thread.c:3736
static VALUE rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
Definition: thread.c:1928
VALUE first_proc
Definition: vm_core.h:615
void rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
Definition: thread.c:1542
void rb_gc_set_stack_end(VALUE **stack_end_p)
Definition: thread.c:3806
static void rb_thread_shield_waiting_dec(VALUE b)
Definition: thread.c:4692
#define TH_PUSH_TAG(th)
Definition: eval_intern.h:122
int clock_gettime(clockid_t, struct timespec *)
Definition: win32.c:4325
void rb_thread_schedule(void)
Definition: thread.c:1191
VALUE rb_mutex_new(void)
Definition: thread.c:4255
VALUE rb_ensure(VALUE(*b_proc)(ANYARGS), VALUE data1, VALUE(*e_proc)(ANYARGS), VALUE data2)
Definition: eval.c:839
VALUE rb_exec_recursive_paired_outer(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
Definition: thread.c:5027
static VALUE thread_value(VALUE self)
Definition: thread.c:930
static void reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old)
Definition: thread.c:333
rb_atomic_t interrupt_flag
Definition: vm_core.h:585
rb_nativethread_cond_t interrupt_cond
Definition: vm_core.h:588
static void timer_thread_function(void *)
Definition: thread.c:3829
void rb_thread_wait_fd(int fd)
Definition: thread.c:3523
st_table * st_init_numtable(void)
Definition: st.c:272
VALUE rb_blocking_function_t(void *)
Definition: intern.h:865
void rb_sys_fail(const char *mesg)
Definition: error.c:1976
VALUE rb_thread_main(void)
Definition: thread.c:2426
void rb_threadptr_restore_recursive_data(rb_thread_t *th, VALUE old)
Definition: thread.c:4807
static VALUE rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
Definition: thread.c:5041
int abort_on_exception
Definition: vm_core.h:650
static VALUE rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
Definition: thread.c:1843
VALUE(* first_func)(ANYARGS)
Definition: vm_core.h:617
enum rb_thread_status status
Definition: vm_core.h:562
static void st_delete_wrap(st_table *table, st_data_t key)
Definition: thread.c:100
void rb_thread_sleep(int sec)
Definition: thread.c:1168
#define rb_fd_max(f)
Definition: intern.h:360
static VALUE thread_s_pass(VALUE klass)
Definition: thread.c:1517
VALUE rb_threadptr_reset_recursive_data(rb_thread_t *th)
Definition: thread.c:4799
static VALUE thread_join_m(int argc, VALUE *argv, VALUE self)
Definition: thread.c:899
#define RSTRING_PTR(str)
Definition: ruby.h:845
#define thread_start_func_2(th, st, rst)
Definition: thread.c:214
void rb_thread_sleep_deadly(void)
Definition: thread.c:1080
enum rb_thread_status prev_status
Definition: thread.c:110
static VALUE mutex_sleep(int argc, VALUE *argv, VALUE self)
Definition: thread.c:4606
#define RARRAY_ASET(a, i, v)
Definition: ruby.h:902
VALUE first_args
Definition: vm_core.h:616
void rb_thread_recycle_stack_release(VALUE *)
Definition: vm.c:1996
void rb_thread_terminate_all(void)
Definition: thread.c:421
#define THREAD_SHIELD_WAITING_SHIFT
Definition: thread.c:4677
static void rb_threadptr_to_kill(rb_thread_t *th)
Definition: thread.c:1934
int size
Definition: encoding.c:49
void rb_reset_coverages(void)
Definition: thread.c:5310
#define f
VALUE rb_hash_lookup2(VALUE hash, VALUE key, VALUE def)
Definition: hash.c:717
#define INT2FIX(i)
Definition: ruby.h:231
void rb_thread_execute_interrupts(VALUE thval)
Definition: thread.c:2030
VALUE(* func)(VALUE, VALUE, int)
Definition: thread.c:4903
int rb_sourceline(void)
Definition: vm.c:1001
ID rb_frame_last_func(void)
Definition: eval.c:982
static VALUE thgroup_list(VALUE group)
Definition: thread.c:4042
#define RARRAY_AREF(a, i)
Definition: ruby.h:901
VALUE * stack_start
Definition: vm_core.h:621
#define RUBY_INTERNAL_EVENT_SWITCH
Definition: ruby.h:1737
unsigned long interrupt_mask
Definition: vm_core.h:586
VALUE rb_block_proc(void)
Definition: proc.c:620
#define xmalloc
Definition: defines.h:108
#define RUBY_THREAD_PRIORITY_MIN
Definition: thread.c:70
#define ANYARGS
Definition: defines.h:98
VALUE rb_thread_group(VALUE thread)
Definition: thread.c:2570
struct rb_unblock_callback unblock
Definition: vm_core.h:589
unsigned long rb_event_flag_t
Definition: ruby.h:1748
#define BLOCKING_REGION(exec, ubf, ubfarg, fail_if_interrupted)
Definition: thread.c:151
VALUE rb_hash_aref(VALUE hash, VALUE key)
Definition: hash.c:706
static VALUE recursive_list_access(VALUE sym)
Definition: thread.c:4779
#define rb_fd_select(n, rfds, wfds, efds, timeout)
Definition: intern.h:361
#define RB_GC_SAVE_MACHINE_CONTEXT(th)
Definition: thread.c:128
void rb_thread_reset_timer_thread(void)
Definition: thread.c:3868
rb_nativethread_id_t thread_id
Definition: vm_core.h:561
static VALUE rb_thread_status(VALUE thread)
Definition: thread.c:2641
rb_nativethread_lock_t thread_destruct_lock
Definition: vm_core.h:352
int rb_signal_buff_size(void)
Definition: signal.c:666
static void rb_thread_shield_waiting_inc(VALUE b)
Definition: thread.c:4681
uint8_t key[16]
Definition: random.c:1250
#define rb_fd_clr(n, f)
Definition: intern.h:351
RUBY_EXTERN char * strerror(int)
Definition: strerror.c:11
#define LONG2FIX(i)
Definition: ruby.h:232
#define RTEST(v)
Definition: ruby.h:437
#define FD_CLR(f, s)
Definition: win32.h:647
VALUE root_fiber
Definition: vm_core.h:642
rb_thread_t * waiting
Definition: thread.c:755
#define OBJ_INFECT(x, s)
Definition: ruby.h:1188
struct rb_encoding_entry * list
Definition: encoding.c:47
#define ETIMEDOUT
Definition: win32.h:584
VALUE rb_thread_shield_destroy(VALUE self)
Definition: thread.c:4749
static VALUE rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
Definition: thread.c:1620
static void recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
Definition: thread.c:4854
static VALUE thread_start(VALUE klass, VALUE args)
Definition: thread.c:713
static VALUE threadptr_local_aset(rb_thread_t *th, ID id, VALUE val)
Definition: thread.c:2841
#define TypedData_Make_Struct(klass, type, data_type, sval)
Definition: ruby.h:1030
void rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
Definition: thread.c:292
static VALUE rb_mutex_wait_for(VALUE time)
Definition: thread.c:4563
VALUE rb_ary_dup(VALUE ary)
Definition: array.c:1899
const struct st_hash_type * type
Definition: st.h:70
#define GetThreadPtr(obj, ptr)
Definition: vm_core.h:472
static unsigned int hash(const char *str, unsigned int len)
Definition: lex.c:56
int rb_atomic_t
Definition: ruby_atomic.h:120
static VALUE thread_raise_m(int argc, VALUE *argv, VALUE self)
Definition: thread.c:2162
#define rb_fd_resize(n, f)
Definition: intern.h:355
#define rb_thread_shield_waiting(b)
Definition: thread.c:4678
#define EXEC_EVENT_HOOK(th_, flag_, self_, id_, klass_, data_)
Definition: vm_core.h:1036
static void sleep_timeval(rb_thread_t *th, struct timeval time, int spurious_check)
Definition: thread.c:1031
#define ruby_debug
Definition: ruby.h:1484
#define RUBY_EVENT_COVERAGE
Definition: ruby.h:1734
#define xrealloc
Definition: defines.h:111
RUBY_EXTERN VALUE rb_eIOError
Definition: ruby.h:1611
#define ID2SYM(x)
Definition: ruby.h:355
VALUE rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
Definition: thread.c:1384
void rb_threadptr_trap_interrupt(rb_thread_t *th)
Definition: thread.c:365
static VALUE exec_recursive_i(RB_BLOCK_CALL_FUNC_ARGLIST(tag, data))
Definition: thread.c:4912
VALUE rb_eFatal
Definition: error.c:545
int forever
Definition: thread.c:757
#define rb_fd_init_copy(d, s)
Definition: intern.h:358
static int terminate_i(st_data_t key, st_data_t val, rb_thread_t *main_thread)
Definition: thread.c:371
struct rb_thread_list_struct * next
Definition: vm_core.h:508
#define RUBY_VM_INTERRUPTED(th)
Definition: vm_core.h:965
const struct st_hash_type st_hashtype_num
#define rb_fd_init(f)
Definition: intern.h:357
static VALUE rb_thread_s_abort_exc(void)
Definition: thread.c:2466
VALUE rb_thread_local_aset(VALUE thread, ID id, VALUE val)
Definition: thread.c:2858
#define rb_fd_dup(d, s)
Definition: intern.h:354
void rb_vm_gvl_destroy(rb_vm_t *vm)
Definition: thread.c:272
int rb_threadptr_pending_interrupt_active_p(rb_thread_t *th)
Definition: thread.c:1657
static VALUE rb_thread_aref(VALUE thread, VALUE key)
Definition: thread.c:2833
void st_clear(st_table *)
Definition: st.c:308
rb_fdset_t * except
Definition: thread.c:3740
static void mutex_locked(rb_thread_t *th, VALUE self)
Definition: thread.c:4275
static VALUE ident_hash_new(void)
Definition: thread.c:4763
VALUE th
Definition: thread.c:257
#define FD_ISSET(f, s)
Definition: win32.h:650
#define RUBY_TYPED_DEFAULT_FREE
Definition: ruby.h:1011
NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_start))
static VALUE rb_thread_keys(VALUE self)
Definition: thread.c:3019
#define GetThreadShieldPtr(obj)
Definition: thread.c:4675
#define vsnprintf
Definition: subst.h:7
rb_nativethread_lock_t interrupt_lock
Definition: vm_core.h:587
void void xfree(void *)
#define RB_WAITFD_IN
Definition: io.h:47
VALUE pending_interrupt_queue
Definition: vm_core.h:581
#define RHASH_EMPTY_P(h)
Definition: ruby.h:931
VALUE write
Definition: io.c:8344
void * rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
Definition: thread.c:1454
static void getclockofday(struct timeval *tp)
Definition: thread.c:1015
static VALUE select_single_cleanup(VALUE ptr)
Definition: thread.c:3767
VALUE rb_vm_make_jump_tag_but_local_jump(int state, VALUE val)
Definition: vm.c:1101
static VALUE select_single(VALUE ptr)
Definition: thread.c:3745
struct rb_mutex_struct rb_mutex_t
#define eTerminateSignal
Definition: thread.c:94
int cond_waiting
Definition: thread.c:394
VALUE rb_get_coverages(void)
Definition: thread.c:5297
VALUE except
Definition: io.c:8344
VALUE rb_vm_thread_backtrace_locations(int argc, VALUE *argv, VALUE thval)
Definition: vm_backtrace.c:912
VALUE rb_eSystemExit
Definition: error.c:542
#define NULL
Definition: _sdbm.c:102
#define FIX2LONG(x)
Definition: ruby.h:345
#define Qundef
Definition: ruby.h:428
static int thread_keys_i(ID key, VALUE value, VALUE ary)
Definition: thread.c:2981
static void * call_without_gvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2, int fail_if_interrupted)
Definition: thread.c:1256
#define GET_THROWOBJ_STATE(obj)
Definition: eval_intern.h:207
static VALUE rb_thread_variable_set(VALUE thread, VALUE id, VALUE val)
Definition: thread.c:2938
void rb_threadptr_interrupt(rb_thread_t *th)
Definition: thread.c:359
st_index_t num_entries
Definition: st.h:85
VALUE rb_thread_wakeup_alive(VALUE thread)
Definition: thread.c:2285
VALUE rb_thread_blocking_region(rb_blocking_function_t *func, void *data1, rb_unblock_function_t *ubf, void *data2)
Definition: thread.c:1417
void rb_obj_call_init(VALUE obj, int argc, VALUE *argv)
Definition: eval.c:1311
static void mutex_free(void *ptr)
Definition: thread.c:4192
VALUE rb_mutex_unlock(VALUE self)
Definition: thread.c:4508
static rb_thread_t * GET_THREAD(void)
Definition: vm_core.h:929
void rb_define_method(VALUE klass, const char *name, VALUE(*func)(ANYARGS), int argc)
Definition: class.c:1479
int st_foreach(st_table *, int(*)(ANYARGS), st_data_t)
Definition: st.c:1034
#define GET_THROWOBJ_VAL(obj)
Definition: eval_intern.h:205
void rb_set_coverages(VALUE coverages)
Definition: thread.c:5303
ID rb_to_id(VALUE)
Definition: string.c:8734
int select(int num_fds, fd_set *in_fds, fd_set *out_fds, fd_set *ex_fds, struct timeval *timeout)
VALUE rb_eThreadError
Definition: eval.c:730
static VALUE rb_thread_key_p(VALUE self, VALUE key)
Definition: thread.c:2964
VALUE rb_eArgError
Definition: error.c:549
#define RUBY_VM_CHECK_INTS_BLOCKING(th)
Definition: vm_core.h:980
VALUE rb_obj_is_mutex(VALUE obj)
Definition: thread.c:4220
static VALUE rb_thread_s_kill(VALUE obj, VALUE th)
Definition: thread.c:2233
static VALUE recursive_check(VALUE list, VALUE obj_id, VALUE paired_obj_id)
Definition: thread.c:4819
VALUE rb_thread_run(VALUE thread)
Definition: thread.c:2322
void rb_threadptr_signal_exit(rb_thread_t *th)
Definition: thread.c:2074
static void lock_interrupt(void *ptr)
Definition: thread.c:4353
static void rb_thread_atfork_internal(int(*atfork)(st_data_t, st_data_t, st_data_t))
Definition: thread.c:3904
#define TYPEOF_TIMEVAL_TV_SEC
Definition: timev.h:22
char ** argv
Definition: ruby.c:132
int rb_thread_interrupted(VALUE thval)
Definition: thread.c:1160
struct timeval rb_time_timeval(VALUE)
Definition: time.c:2417
VALUE rb_mutex_lock(VALUE self)
Definition: thread.c:4377
int rb_threadptr_set_raised(rb_thread_t *th)
Definition: thread.c:2100
#define RUBY_UBF_IO
Definition: intern.h:872
static enum handle_interrupt_timing rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
Definition: thread.c:1562
static int terminate_atfork_i(st_data_t key, st_data_t val, st_data_t current_th)
Definition: thread.c:3920
#define GET_VM()
Definition: vm_core.h:922
void rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
Definition: thread.c:286
static ID recursive_key
Definition: thread.c:4758