#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif
#ifdef HAVE_THR_STKSEGMENT
#include <thread.h>
#endif
#if HAVE_FCNTL_H
#include <fcntl.h>
#elif HAVE_SYS_FCNTL_H
#include <sys/fcntl.h>
#endif
#ifdef HAVE_SYS_PRCTL_H
#include <sys/prctl.h>
#endif
#if defined(__native_client__) && defined(NACL_NEWLIB)
# include "nacl/select.h"
#endif
#if defined(HAVE_SYS_TIME_H)
#include <sys/time.h>
#endif

static void native_mutex_lock(pthread_mutex_t *lock);
static void native_mutex_unlock(pthread_mutex_t *lock);
static int native_mutex_trylock(pthread_mutex_t *lock);
static void native_mutex_initialize(pthread_mutex_t *lock);
static void native_mutex_destroy(pthread_mutex_t *lock);
/* ... condition-variable wrapper declarations elided in this excerpt ... */
static void rb_thread_wakeup_timer_thread_low(void);
static pthread_t timer_thread_id;

#define RB_CONDATTR_CLOCK_MONOTONIC 1

#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined(HAVE_CLOCKID_T) && \
    defined(CLOCK_REALTIME) && defined(CLOCK_MONOTONIC) && \
    defined(HAVE_CLOCK_GETTIME) && defined(HAVE_PTHREAD_CONDATTR_INIT)
#define USE_MONOTONIC_COND 1
#else
#define USE_MONOTONIC_COND 0
#endif

#if defined(HAVE_POLL) && defined(HAVE_FCNTL) && defined(F_GETFL) && defined(F_SETFL) && defined(O_NONBLOCK) && !defined(__native_client__)
/* The timer thread sleeps while only one Ruby thread is running. */
# define USE_SLEEPY_TIMER_THREAD 1
#else
# define USE_SLEEPY_TIMER_THREAD 0
#endif

static void
gvl_acquire_common(rb_vm_t *vm)
{
    if (vm->gvl.acquired) {
        vm->gvl.waiting++;
        if (vm->gvl.waiting == 1) {
            /*
             * Wake up the timer thread only when it is sleeping; in
             * polling mode an extra wakeup would disturb its interval.
             */
            rb_thread_wakeup_timer_thread_low();
        }
        /* ... wait on vm->gvl.cond until the holder releases the GVL ... */
    }
    vm->gvl.acquired = 1;
}

static void
gvl_acquire(rb_vm_t *vm, rb_thread_t *th)
{
    native_mutex_lock(&vm->gvl.lock);
    gvl_acquire_common(vm);
    native_mutex_unlock(&vm->gvl.lock);
}
static void
gvl_release_common(rb_vm_t *vm)
{
    vm->gvl.acquired = 0;
    if (vm->gvl.waiting > 0)
        native_cond_signal(&vm->gvl.cond);
}

static void
gvl_release(rb_vm_t *vm)
{
    native_mutex_lock(&vm->gvl.lock);
    gvl_release_common(vm);
    native_mutex_unlock(&vm->gvl.lock);
}
static void
gvl_yield(rb_vm_t *vm, rb_thread_t *th)
{
    native_mutex_lock(&vm->gvl.lock);

    gvl_release_common(vm);

    /* ... if another thread is already mid-yield, wait on
       vm->gvl.switch_wait_cond and jump straight to acquire ... */

    if (vm->gvl.waiting > 0) {
        /* ... wait on vm->gvl.switch_cond until a queued thread takes the GVL ... */
    }
    else {
        /* nobody is waiting: yield the CPU, then retake the lock */
        native_mutex_unlock(&vm->gvl.lock);
        sched_yield();
        native_mutex_lock(&vm->gvl.lock);
    }

  acquire:
    gvl_acquire_common(vm);
    native_mutex_unlock(&vm->gvl.lock);
}
static void
gvl_init(rb_vm_t *vm)
{
    native_mutex_initialize(&vm->gvl.lock);
    native_cond_initialize(&vm->gvl.cond, RB_CONDATTR_CLOCK_MONOTONIC);
    native_cond_initialize(&vm->gvl.switch_cond, RB_CONDATTR_CLOCK_MONOTONIC);
    native_cond_initialize(&vm->gvl.switch_wait_cond, RB_CONDATTR_CLOCK_MONOTONIC);
    /* ... reset the acquired/waiting/yield bookkeeping fields ... */
}
static void
gvl_destroy(rb_vm_t *vm)
{
    /* ... destroy switch_wait_cond and switch_cond first ... */
    native_cond_destroy(&vm->gvl.cond);
    native_mutex_destroy(&vm->gvl.lock);
}
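/*
 * Illustrative sketch (added; not part of the original source): how the
 * GVL primitives above bracket a blocking region.  blocking_io_call() is
 * a hypothetical stand-in for work performed without the GVL held; the
 * block is kept under "#if 0" so it is never compiled.
 */
#if 0
static void
example_blocking_region(rb_vm_t *vm, rb_thread_t *th)
{
    gvl_release(vm);      /* signal gvl.cond so a waiting thread can run */
    blocking_io_call();   /* proceed in parallel with other Ruby threads */
    gvl_acquire(vm, th);  /* queue on gvl.cond until the GVL is free again */
}
#endif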
#define NATIVE_MUTEX_LOCK_DEBUG 0

static void
mutex_debug(const char *msg, pthread_mutex_t *lock)
{
    if (NATIVE_MUTEX_LOCK_DEBUG) {
        int r;
        static pthread_mutex_t dbglock = PTHREAD_MUTEX_INITIALIZER;

        if ((r = pthread_mutex_lock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
        fprintf(stdout, "%s: %p\n", msg, (void *)lock);
        if ((r = pthread_mutex_unlock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
    }
}
static void
native_mutex_lock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("lock", lock);
    if ((r = pthread_mutex_lock(lock)) != 0) {
        rb_bug_errno("pthread_mutex_lock", r);
    }
}

static void
native_mutex_unlock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("unlock", lock);
    if ((r = pthread_mutex_unlock(lock)) != 0) {
        rb_bug_errno("pthread_mutex_unlock", r);
    }
}

static int
native_mutex_trylock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("trylock", lock);
    if ((r = pthread_mutex_trylock(lock)) != 0) {
        if (r == EBUSY) {
            return EBUSY;
        }
        else {
            rb_bug_errno("pthread_mutex_trylock", r);
        }
    }
    return 0;
}
static void
native_mutex_initialize(pthread_mutex_t *lock)
{
    int r = pthread_mutex_init(lock, 0);
    mutex_debug("init", lock);
    if (r != 0) {
        rb_bug_errno("pthread_mutex_init", r);
    }
}

static void
native_mutex_destroy(pthread_mutex_t *lock)
{
    int r = pthread_mutex_destroy(lock);
    mutex_debug("destroy", lock);
    if (r != 0) {
        rb_bug_errno("pthread_mutex_destroy", r);
    }
}
static void
native_cond_initialize(rb_nativethread_cond_t *cond, int flags)
{
#ifdef HAVE_PTHREAD_COND_INIT
    int r;
# if USE_MONOTONIC_COND
    pthread_condattr_t attr;

    pthread_condattr_init(&attr);

    cond->clockid = CLOCK_REALTIME;
    if (flags & RB_CONDATTR_CLOCK_MONOTONIC) {
        r = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
        if (r == 0) {
            cond->clockid = CLOCK_MONOTONIC;
        }
    }

    r = pthread_cond_init(&cond->cond, &attr);
    pthread_condattr_destroy(&attr);
# else
    r = pthread_cond_init(&cond->cond, NULL);
# endif
    if (r != 0) {
        rb_bug_errno("pthread_cond_init", r);
    }
#endif
}
static void
native_cond_destroy(rb_nativethread_cond_t *cond)
{
#ifdef HAVE_PTHREAD_COND_INIT
    int r = pthread_cond_destroy(&cond->cond);
    if (r != 0) {
        rb_bug_errno("pthread_cond_destroy", r);
    }
#endif
}
static void
native_cond_signal(rb_nativethread_cond_t *cond)
{
    int r;
    do {
        r = pthread_cond_signal(&cond->cond);
    } while (r == EAGAIN); /* some systems (e.g. OS X 10.7) can return EAGAIN */
    if (r != 0) {
        rb_bug_errno("pthread_cond_signal", r);
    }
}

static void
native_cond_broadcast(rb_nativethread_cond_t *cond)
{
    int r;
    do {
        r = pthread_cond_broadcast(&cond->cond);
    } while (r == EAGAIN);
    if (r != 0) {
        rb_bug_errno("native_cond_broadcast", r);
    }
}
static void
native_cond_wait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex)
{
    int r = pthread_cond_wait(&cond->cond, mutex);
    if (r != 0) {
        rb_bug_errno("pthread_cond_wait", r);
    }
}

static int
native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, struct timespec *ts)
{
    int r;

    /*
     * An old Linux may return EINTR, even though POSIX says these
     * functions "shall not return an error code of [EINTR]".
     */
    do {
        r = pthread_cond_timedwait(&cond->cond, mutex, ts);
    } while (r == EINTR);

    if (r != 0 && r != ETIMEDOUT) {
        rb_bug_errno("pthread_cond_timedwait", r);
    }

    return r;
}
static struct timespec
native_cond_timeout(rb_nativethread_cond_t *cond, struct timespec timeout_rel)
{
    struct timeval tv;
    struct timespec now, timeout;

#if USE_MONOTONIC_COND
    /* ... when the condvar uses CLOCK_MONOTONIC, read `now' with
       clock_gettime() and skip the gettimeofday() path below ... */
#endif
    if (gettimeofday(&tv, 0) != 0)
        rb_sys_fail(0);
    now.tv_sec = tv.tv_sec;
    now.tv_nsec = tv.tv_usec * 1000;

    timeout.tv_sec = now.tv_sec;
    timeout.tv_nsec = now.tv_nsec;
    timeout.tv_sec += timeout_rel.tv_sec;
    timeout.tv_nsec += timeout_rel.tv_nsec;

    if (timeout.tv_nsec >= 1000*1000*1000) {
        timeout.tv_sec++;
        timeout.tv_nsec -= 1000*1000*1000;
    }

    /* guard against time_t overflow for huge relative timeouts */
    if (timeout.tv_sec < now.tv_sec)
        timeout.tv_sec = TIMET_MAX;

    return timeout;
}
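/*
 * Illustrative sketch (added; not part of the original source): callers
 * turn a relative timeout into an absolute deadline with
 * native_cond_timeout() and hand it to native_cond_timedwait(), which
 * returns 0 or ETIMEDOUT.  Hypothetical helper, never compiled.
 */
#if 0
static int
example_wait_for(rb_nativethread_cond_t *cond, pthread_mutex_t *lock,
                 time_t rel_sec, long rel_nsec)
{
    struct timespec rel, deadline;
    int r;

    rel.tv_sec = rel_sec;
    rel.tv_nsec = rel_nsec;
    deadline = native_cond_timeout(cond, rel); /* now + rel, normalized */

    native_mutex_lock(lock);
    r = native_cond_timedwait(cond, lock, &deadline);
    native_mutex_unlock(lock);
    return r; /* 0 when signaled, ETIMEDOUT when the deadline passed */
}
#endif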
#define native_cleanup_push pthread_cleanup_push
#define native_cleanup_pop  pthread_cleanup_pop
#ifdef HAVE_SCHED_YIELD
#define native_thread_yield() (void)sched_yield()
#else
#define native_thread_yield() ((void)0)
#endif

#if defined(SIGVTALRM) && !defined(__CYGWIN__) && !defined(__SYMBIAN32__)
#define USE_SIGNAL_THREAD_LIST 1
#endif
#ifdef USE_SIGNAL_THREAD_LIST
static void add_signal_thread_list(rb_thread_t *th);
static void remove_signal_thread_list(rb_thread_t *th);
static rb_nativethread_lock_t signal_thread_list_lock;
#endif

static pthread_key_t ruby_native_thread_key;

/* ... */

static rb_thread_t *
ruby_thread_from_native(void)
{
    return pthread_getspecific(ruby_native_thread_key);
}

static int
ruby_thread_set_native(rb_thread_t *th)
{
    return pthread_setspecific(ruby_native_thread_key, th) == 0;
}

static void native_thread_init(rb_thread_t *th);

void
Init_native_thread(void)
{
    rb_thread_t *th = GET_THREAD();

    pthread_key_create(&ruby_native_thread_key, NULL);
    th->thread_id = pthread_self();
    native_thread_init(th);
#ifdef USE_SIGNAL_THREAD_LIST
    native_mutex_initialize(&signal_thread_list_lock);
#endif
#ifndef __native_client__
    posix_signal(SIGVTALRM, null_func); /* null_func is defined elsewhere in this file */
#endif
}

static void
native_thread_init(rb_thread_t *th)
{
    native_thread_data_t *nd = &th->native_thread_data;

#ifdef USE_SIGNAL_THREAD_LIST
    nd->signal_thread_list = 0;
#endif
    native_cond_initialize(&nd->sleep_cond, RB_CONDATTR_CLOCK_MONOTONIC);
    ruby_thread_set_native(th);
}

#ifndef USE_THREAD_CACHE
#define USE_THREAD_CACHE 0
#endif

#if USE_THREAD_CACHE
static rb_thread_t *register_cached_thread_and_wait(void);
#endif
#if defined HAVE_PTHREAD_GETATTR_NP || defined HAVE_PTHREAD_ATTR_GET_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP
#define STACKADDR_AVAILABLE 1
#undef MAINSTACKADDR_AVAILABLE
#define MAINSTACKADDR_AVAILABLE 1
void *pthread_get_stackaddr_np(pthread_t);
size_t pthread_get_stacksize_np(pthread_t);
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GETTHRDS_NP
#define STACKADDR_AVAILABLE 1
#elif defined __ia64 && defined _HPUX_SOURCE
#include <sys/dyntune.h>

#define STACKADDR_AVAILABLE 1

/* ... */
#undef PTHREAD_STACK_MIN

#define HAVE_PTHREAD_ATTR_GET_NP 1
#undef HAVE_PTHREAD_ATTR_GETSTACK

/* ... */
#define pthread_attr_get_np(thid, attr) 0

static int
hpux_attr_getstackaddr(const pthread_attr_t *attr, void **addr)
{
    static uint64_t pagesize;
    size_t size;

    if (!pagesize) {
        if (gettune("vps_pagesize", &pagesize)) {
            /* ... fall back to a default page size ... */
        }
        /* ... */
    }
    pthread_attr_getstacksize(attr, &size);
    *addr = (void *)((size_t)((char *)_Asm_get_sp() - size) & ~(pagesize - 1));
    return 0;
}
#define pthread_attr_getstackaddr(attr, addr) hpux_attr_getstackaddr(attr, addr)
#endif

#ifndef MAINSTACKADDR_AVAILABLE
# ifdef STACKADDR_AVAILABLE
#   define MAINSTACKADDR_AVAILABLE 1
# else
#   define MAINSTACKADDR_AVAILABLE 0
# endif
#endif
#if MAINSTACKADDR_AVAILABLE && !defined(get_main_stack)
# define get_main_stack(addr, size) get_stack(addr, size)
#endif

#ifdef STACKADDR_AVAILABLE
/*
 * Get the initial address and size of current thread's stack
 */
static int
get_stack(void **addr, size_t *size)
{
#define CHECK_ERR(expr) \
    {int err = (expr); if (err) return err;}
#ifdef HAVE_PTHREAD_GETATTR_NP /* Linux */
    pthread_attr_t attr;
    size_t guard = 0;

    CHECK_ERR(pthread_getattr_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
# else
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
# endif
    CHECK_ERR(pthread_attr_getguardsize(&attr, &guard));
    *size -= guard;
    pthread_attr_destroy(&attr);
#elif defined HAVE_PTHREAD_ATTR_GET_NP /* FreeBSD, DragonFly BSD, NetBSD */
    pthread_attr_t attr;
    CHECK_ERR(pthread_attr_init(&attr));
    CHECK_ERR(pthread_attr_get_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
# else
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
# endif
    pthread_attr_destroy(&attr);
#elif (defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP) /* MacOS X */
    pthread_t th = pthread_self();
    *addr = pthread_get_stackaddr_np(th);
    *size = pthread_get_stacksize_np(th);
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
    stack_t stk;
# if defined HAVE_THR_STKSEGMENT /* Solaris */
    CHECK_ERR(thr_stksegment(&stk));
# else /* OpenBSD */
    CHECK_ERR(pthread_stackseg_np(pthread_self(), &stk));
# endif
    *addr = stk.ss_sp;
    *size = stk.ss_size;
#elif defined HAVE_PTHREAD_GETTHRDS_NP /* AIX */
    pthread_t th = pthread_self();
    struct __pthrdsinfo thinfo;
    char reg[256];
    int regsiz = sizeof(reg);
    CHECK_ERR(pthread_getthrds_np(&th, PTHRDSINFO_QUERY_ALL,
                                  &thinfo, sizeof(thinfo),
                                  &reg, &regsiz));
    *addr = thinfo.__pi_stackaddr;
    /* __pi_stacksize is unreliable: compute the size from the bounds */
    *size = thinfo.__pi_stackend - thinfo.__pi_stackaddr;
#else
#error STACKADDR_AVAILABLE is defined but not implemented.
#endif
    return 0;
#undef CHECK_ERR
}
#endif /* STACKADDR_AVAILABLE */

static struct {
    rb_nativethread_id_t id;
    size_t stack_maxsize;
    VALUE *stack_start;
#ifdef __ia64
    VALUE *register_stack_start;
#endif
} native_main_thread;
#ifdef STACK_END_ADDRESS
extern void *STACK_END_ADDRESS;
#endif

enum {
    RUBY_STACK_SPACE_LIMIT = 1024 * 1024, /* 1 MB */
    RUBY_STACK_SPACE_RATIO = 5
};

static size_t
space_size(size_t stack_size)
{
    size_t space_size = stack_size / RUBY_STACK_SPACE_RATIO;
    if (space_size > RUBY_STACK_SPACE_LIMIT) {
        return RUBY_STACK_SPACE_LIMIT;
    }
    else {
        return space_size;
    }
}
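/*
 * Worked example (added note): with the constants above, a 1 MiB thread
 * stack keeps 1048576 / 5 = 209715 bytes as guard space; only stacks
 * larger than 5 MiB hit the 1 MiB RUBY_STACK_SPACE_LIMIT cap.
 */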
#if defined(HAVE_GETRLIMIT) /* ... full platform guard elided in this excerpt ... */
static __attribute__((noinline)) void
reserve_stack(volatile char *limit, size_t size)
{
# ifdef C_ALLOCA
#  error needs alloca()
# endif
    struct rlimit rl;
    volatile char buf[0x100];
    enum {stack_check_margin = 0x1000}; /* for -fstack-check */

    STACK_GROW_DIR_DETECTION;

    if (!getrlimit(RLIMIT_STACK, &rl) && rl.rlim_cur == RLIM_INFINITY)
        return;

    if (size < stack_check_margin) return;
    size -= stack_check_margin;

    size -= sizeof(buf); /* margin */
    if (IS_STACK_DIR_UPPER()) {
        const volatile char *end = buf + sizeof(buf);
        limit += size;
        if (limit > end) {
            size_t sz = limit - end;
            limit = alloca(sz);
            limit[sz-1] = 0;
        }
    }
    else {
        limit -= size;
        if (buf > limit) {
            size_t sz = buf - limit;
            limit = alloca(sz);
            limit[0] = 0;
        }
    }
}
#else
# define reserve_stack(limit, size) ((void)(limit), (void)(size))
#endif

#undef ruby_init_stack
/* Set the stack bottom of the Ruby implementation. */
void
ruby_init_stack(volatile VALUE *addr
#ifdef __ia64
    , void *bsp
#endif
    )
{
    native_main_thread.id = pthread_self();
#if MAINSTACKADDR_AVAILABLE
    if (native_main_thread.stack_maxsize) return;
    {
        void *stackaddr;
        size_t size;
        if (get_main_stack(&stackaddr, &size) == 0) {
            native_main_thread.stack_maxsize = size;
            native_main_thread.stack_start = stackaddr;
            reserve_stack(stackaddr, size);
            return;
        }
    }
#endif
#ifdef STACK_END_ADDRESS
    native_main_thread.stack_start = STACK_END_ADDRESS;
#else
    if (!native_main_thread.stack_start ||
        STACK_UPPER((VALUE *)(void *)&addr,
                    native_main_thread.stack_start > addr,
                    native_main_thread.stack_start < addr)) {
        native_main_thread.stack_start = (VALUE *)addr;
    }
#endif
#ifdef __ia64
    if (!native_main_thread.register_stack_start ||
        (VALUE*)bsp < native_main_thread.register_stack_start) {
        native_main_thread.register_stack_start = (VALUE*)bsp;
    }
#endif
    {
#if defined(HAVE_GETRLIMIT)
#if defined(PTHREAD_STACK_DEFAULT)
# if PTHREAD_STACK_DEFAULT < RUBY_STACK_SPACE*5
#  error "PTHREAD_STACK_DEFAULT is too small"
# endif
        size_t size = PTHREAD_STACK_DEFAULT;
#else
        size_t size = RUBY_VM_THREAD_VM_STACK_SIZE;
#endif
        size_t space;
        int pagesize = getpagesize();
        struct rlimit rlim;
        STACK_GROW_DIR_DETECTION;
        if (getrlimit(RLIMIT_STACK, &rlim) == 0) {
            size = (size_t)rlim.rlim_cur;
        }
        addr = native_main_thread.stack_start;
        if (IS_STACK_DIR_UPPER()) {
            space = ((size_t)((char *)addr + size) / pagesize) * pagesize - (size_t)addr;
        }
        else {
            space = (size_t)addr - ((size_t)((char *)addr - size) / pagesize + 1) * pagesize;
        }
        native_main_thread.stack_maxsize = space;
#endif
    }
    /* If addr is out of the estimated main-thread stack range, it is on a
     * co-routine (alternative stack). [Feature #2294] */
    {
        void *start, *end;
        STACK_GROW_DIR_DETECTION;

        if (IS_STACK_DIR_UPPER()) {
            start = native_main_thread.stack_start;
            end = (char *)native_main_thread.stack_start + native_main_thread.stack_maxsize;
        }
        else {
            start = (char *)native_main_thread.stack_start - native_main_thread.stack_maxsize;
            end = native_main_thread.stack_start;
        }

        if ((void *)addr < start || (void *)addr > end) {
            /* out of range */
            native_main_thread.stack_start = (VALUE *)addr;
            native_main_thread.stack_maxsize = 0; /* unknown */
        }
    }
}

#define CHECK_ERR(expr) \
    {int err = (expr); if (err) {rb_bug_errno(#expr, err);}}

static int
native_thread_init_stack(rb_thread_t *th)
{
    rb_nativethread_id_t curr = pthread_self();

    if (pthread_equal(curr, native_main_thread.id)) {
        th->machine.stack_start = native_main_thread.stack_start;
        th->machine.stack_maxsize = native_main_thread.stack_maxsize;
    }
    else {
#ifdef STACKADDR_AVAILABLE
        void *start;
        size_t size;

        if (get_stack(&start, &size) == 0) {
            th->machine.stack_start = start;
            th->machine.stack_maxsize = size;
        }
#elif defined get_stack_of
        /* ... */
#else
        rb_raise(rb_eNotImpError, "ruby engine can initialize only in the main thread");
#endif
    }
#ifdef __ia64
    th->machine.register_stack_start = native_main_thread.register_stack_start;
    /* ... split stack_maxsize between the two ia64 stacks ... */
#endif
    return 0;
}
#if defined(__MACH__) /* ... full platform guard elided in this excerpt ... */
#define USE_NATIVE_THREAD_INIT 1
#endif

static void *
thread_start_func_1(void *th_ptr)
{
#if USE_THREAD_CACHE
  thread_start:
#endif
    {
        rb_thread_t *th = th_ptr;
#if !defined USE_NATIVE_THREAD_INIT
        VALUE stack_start;
#endif

#if defined USE_NATIVE_THREAD_INIT
        native_thread_init_stack(th);
#endif
        native_thread_init(th);
        /* run */
#if defined USE_NATIVE_THREAD_INIT
        thread_start_func_2(th, th->machine.stack_start, rb_ia64_bsp());
#else
        thread_start_func_2(th, &stack_start, rb_ia64_bsp());
#endif
    }
#if USE_THREAD_CACHE
    /* park this pthread for reuse */
    {
        rb_thread_t *th;
        if ((th = register_cached_thread_and_wait()) != 0) {
            th_ptr = (void *)th;
            th->thread_id = pthread_self();
            goto thread_start;
        }
    }
#endif
    return 0;
}
struct cached_thread_entry {
    volatile rb_thread_t **th_area;
    rb_nativethread_cond_t *cond;
    struct cached_thread_entry *next;
};

#if USE_THREAD_CACHE
static pthread_mutex_t thread_cache_lock = PTHREAD_MUTEX_INITIALIZER;
struct cached_thread_entry *cached_thread_root;

static rb_thread_t *
register_cached_thread_and_wait(void)
{
    rb_nativethread_cond_t cond = { PTHREAD_COND_INITIALIZER, };
    volatile rb_thread_t *th_area = 0;
    struct timeval tv;
    struct timespec ts;
    struct cached_thread_entry *entry =
        (struct cached_thread_entry *)malloc(sizeof(struct cached_thread_entry));

    if (entry == 0) {
        return 0; /* cannot allocate: let this pthread terminate */
    }

    gettimeofday(&tv, 0);
    ts.tv_sec = tv.tv_sec + 60;
    ts.tv_nsec = tv.tv_usec * 1000;

    pthread_mutex_lock(&thread_cache_lock);
    {
        entry->th_area = &th_area;
        entry->cond = &cond;
        entry->next = cached_thread_root;
        cached_thread_root = entry;

        native_cond_timedwait(&cond, &thread_cache_lock, &ts);

        {
            struct cached_thread_entry *e, **prev = &cached_thread_root;

            while ((e = *prev) != 0) {
                if (e == entry) {
                    *prev = e->next;
                    break;
                }
                prev = &e->next;
            }
        }

        free(entry);
        native_cond_destroy(&cond);
    }
    pthread_mutex_unlock(&thread_cache_lock);

    return (rb_thread_t *)th_area;
}
#endif
static int
use_cached_thread(rb_thread_t *th)
{
    int result = 0;
#if USE_THREAD_CACHE
    struct cached_thread_entry *entry;

    if (cached_thread_root) {
        pthread_mutex_lock(&thread_cache_lock);
        entry = cached_thread_root;
        {
            if (cached_thread_root) {
                cached_thread_root = entry->next;
                *entry->th_area = th;
                result = 1;
            }
        }
        if (result) {
            native_cond_signal(entry->cond);
        }
        pthread_mutex_unlock(&thread_cache_lock);
    }
#endif
    return result;
}
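/*
 * Added note: these two functions implement the thread cache protocol.
 * A pthread whose Ruby thread has finished parks for up to 60 seconds in
 * register_cached_thread_and_wait(); if use_cached_thread() stores a new
 * rb_thread_t through *th_area and signals the entry's condvar before
 * the timeout, the pthread is reused instead of creating a fresh one.
 */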
static int
native_thread_create(rb_thread_t *th)
{
    int err = 0;

    if (use_cached_thread(th)) {
        thread_debug("create (use cached thread): %p\n", (void *)th);
    }
    else {
#ifdef HAVE_PTHREAD_ATTR_INIT
        pthread_attr_t attr;
        pthread_attr_t *const attrp = &attr;
#else
        pthread_attr_t *const attrp = NULL;
#endif
        const size_t stack_size = th->vm->default_params.thread_machine_stack_size;
        const size_t space = space_size(stack_size);

        th->machine.stack_maxsize = stack_size - space;
#ifdef __ia64
        /* ... split stack_maxsize between the two ia64 stacks ... */
#endif

#ifdef HAVE_PTHREAD_ATTR_INIT
        CHECK_ERR(pthread_attr_init(&attr));

# ifdef PTHREAD_STACK_MIN
        thread_debug("create - stack size: %lu\n", (unsigned long)stack_size);
        CHECK_ERR(pthread_attr_setstacksize(&attr, stack_size));
# endif

# ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
        CHECK_ERR(pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
# endif
        CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
#endif

        err = pthread_create(&th->thread_id, attrp, thread_start_func_1, th);
        thread_debug("create: %p (%d)\n", (void *)th, err);
#ifdef HAVE_PTHREAD_ATTR_INIT
        CHECK_ERR(pthread_attr_destroy(&attr));
#endif
    }
    return err;
}
static void
native_thread_join(pthread_t th)
{
    int err = pthread_join(th, 0);
    if (err) {
        rb_raise(rb_eThreadError, "native_thread_join() failed (%d)", err);
    }
}
#if USE_NATIVE_THREAD_PRIORITY

static void
native_thread_apply_priority(rb_thread_t *th)
{
#if defined(_POSIX_PRIORITY_SCHEDULING) && (_POSIX_PRIORITY_SCHEDULING > 0)
    struct sched_param sp;
    int policy;
    int priority = 0 - th->priority;
    int max, min;
    pthread_getschedparam(th->thread_id, &policy, &sp);
    max = sched_get_priority_max(policy);
    min = sched_get_priority_min(policy);

    if (min > priority) {
        priority = min;
    }
    else if (max < priority) {
        priority = max;
    }

    sp.sched_priority = priority;
    pthread_setschedparam(th->thread_id, policy, &sp);
#else
    /* not touched */
#endif
}

#endif /* USE_NATIVE_THREAD_PRIORITY */
static int
native_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout, rb_thread_t *th)
{
    return rb_fd_select(n, readfds, writefds, exceptfds, timeout);
}
static void
ubf_pthread_cond_signal(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    thread_debug("ubf_pthread_cond_signal (%p)\n", (void *)th);
    native_cond_signal(&th->native_thread_data.sleep_cond);
}
static void
native_sleep(rb_thread_t *th, struct timeval *timeout_tv)
{
    struct timespec timeout;
    rb_nativethread_lock_t *lock = &th->interrupt_lock;
    rb_nativethread_cond_t *cond = &th->native_thread_data.sleep_cond;

    if (timeout_tv) {
        struct timespec timeout_rel;

        timeout_rel.tv_sec = timeout_tv->tv_sec;
        timeout_rel.tv_nsec = timeout_tv->tv_usec * 1000;

        /* Cap the relative timeout so native_cond_timeout() cannot
         * overflow; about three years is longer than any caller needs. */
        if (timeout_rel.tv_sec > 100000000) {
            timeout_rel.tv_sec = 100000000;
            timeout_rel.tv_nsec = 0;
        }

        timeout = native_cond_timeout(cond, timeout_rel);
    }

    GVL_UNLOCK_BEGIN();
    {
        pthread_mutex_lock(lock);
        th->unblock.func = ubf_pthread_cond_signal;
        th->unblock.arg = th;

        if (RUBY_VM_INTERRUPTED(th)) {
            /* interrupted: return immediately */
            thread_debug("native_sleep: interrupted before sleep\n");
        }
        else {
            if (!timeout_tv)
                native_cond_wait(cond, lock);
            else
                native_cond_timedwait(cond, lock, &timeout);
        }
        th->unblock.func = 0;

        pthread_mutex_unlock(lock);
    }
    GVL_UNLOCK_END();
}
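/*
 * Added note: th->unblock.func is Ruby's "unblocking function" (UBF)
 * hook.  While this thread sleeps on sleep_cond, an interrupt
 * (Thread#wakeup, Thread#kill, signal delivery) invokes
 * ubf_pthread_cond_signal() to signal the condvar, so the sleeper wakes
 * and rechecks RUBY_VM_INTERRUPTED() instead of blocking forever.
 */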
#ifdef USE_SIGNAL_THREAD_LIST
struct signal_thread_list {
    rb_thread_t *th;
    struct signal_thread_list *prev;
    struct signal_thread_list *next;
};

static struct signal_thread_list signal_thread_list_anchor = {
    0, 0, 0,
};

#define FGLOCK(lock, body) do { \
    native_mutex_lock(lock); \
    { \
        body; \
    } \
    native_mutex_unlock(lock); \
} while (0)

#if 0 /* for debug */
static void
print_signal_list(char *str)
{
    struct signal_thread_list *list =
        signal_thread_list_anchor.next;
    /* ... walk the list and dump each entry ... */
}
#endif
static void
add_signal_thread_list(rb_thread_t *th)
{
    if (!th->native_thread_data.signal_thread_list) {
        FGLOCK(&signal_thread_list_lock, {
            struct signal_thread_list *list =
                malloc(sizeof(struct signal_thread_list));

            if (list == 0) {
                fprintf(stderr, "[FATAL] failed to allocate memory\n");
                exit(EXIT_FAILURE);
            }

            list->th = th;

            list->prev = &signal_thread_list_anchor;
            list->next = signal_thread_list_anchor.next;
            if (list->next) {
                list->next->prev = list;
            }
            signal_thread_list_anchor.next = list;
            th->native_thread_data.signal_thread_list = list;
        });
    }
}
static void
remove_signal_thread_list(rb_thread_t *th)
{
    if (th->native_thread_data.signal_thread_list) {
        FGLOCK(&signal_thread_list_lock, {
            struct signal_thread_list *list =
                (struct signal_thread_list *)
                    th->native_thread_data.signal_thread_list;
            /* ... unlink the entry, clear the pointer, free(list) ... */
        });
    }
}
static void
ubf_select(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    add_signal_thread_list(th);

    /*
     * ubf_select_each() does not guarantee that the target thread wakes
     * up, so the timer thread is activated too, unless we are the timer
     * thread ourselves.
     */
    if (pthread_self() != timer_thread_id)
        rb_thread_wakeup_timer_thread();
    ubf_select_each(th);
}
static void
ping_signal_thread_list(void)
{
    if (signal_thread_list_anchor.next) {
        FGLOCK(&signal_thread_list_lock, {
            struct signal_thread_list *list;

            list = signal_thread_list_anchor.next;
            while (list) {
                ubf_select_each(list->th);
                list = list->next;
            }
        });
    }
}
static int
check_signal_thread_list(void)
{
    if (signal_thread_list_anchor.next)
        return 1;
    else
        return 0;
}
#else /* USE_SIGNAL_THREAD_LIST */
#define add_signal_thread_list(th) (void)(th)
#define remove_signal_thread_list(th) (void)(th)
#define ubf_select 0
static void ping_signal_thread_list(void) { return; }
static int check_signal_thread_list(void) { return 0; }
#endif /* USE_SIGNAL_THREAD_LIST */
#define WRITE_CONST(fd, str) (void)(write((fd),(str),sizeof(str)-1)<0)

/* 100ms.  10ms is too small for user-level thread scheduling
 * on recent Linux. */
#define TIME_QUANTUM_USEC (100 * 1000)

#if USE_SLEEPY_TIMER_THREAD
static int timer_thread_pipe[2] = {-1, -1};
static int timer_thread_pipe_low[2] = {-1, -1}; /* low priority */
static int timer_thread_pipe_owner_process;

/* only async-signal-safe system calls may be used here */
static void
rb_thread_wakeup_timer_thread_fd(int fd)
{
    ssize_t result;

    /* already opened */
    if (timer_thread_pipe_owner_process == getpid()) {
        const char *buff = "!";
      retry:
        if ((result = write(fd, buff, 1)) <= 0) {
            switch (errno) {
              case EINTR: goto retry;
              case EAGAIN:
#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
              case EWOULDBLOCK:
#endif
                break;
              default:
                rb_async_bug_errno("rb_thread_wakeup_timer_thread - write", errno);
            }
        }
        if (TT_DEBUG) WRITE_CONST(2, "rb_thread_wakeup_timer_thread: write\n");
    }
    else {
        /* the pipe belongs to another process: ignore this wakeup */
    }
}

void
rb_thread_wakeup_timer_thread(void)
{
    rb_thread_wakeup_timer_thread_fd(timer_thread_pipe[1]);
}
static void
rb_thread_wakeup_timer_thread_low(void)
{
    rb_thread_wakeup_timer_thread_fd(timer_thread_pipe_low[1]);
}
/* VM-dependent API is not available for this function */
static void
consume_communication_pipe(int fd)
{
#define CCP_READ_BUFF_SIZE 1024
    /* the buffer can be shared: its contents are never examined */
    static char buff[CCP_READ_BUFF_SIZE];
    ssize_t result;

  retry:
    result = read(fd, buff, sizeof(buff));
    if (result < 0) {
        switch (errno) {
          case EINTR:
            goto retry;
          /* ... EAGAIN and fatal-error handling elided in this excerpt ... */
        }
    }
}
static void
close_communication_pipe(int pipes[2])
{
    if (close(pipes[0]) < 0) {
        rb_bug_errno("native_stop_timer_thread - close(ttp[0])", errno);
    }
    if (close(pipes[1]) < 0) {
        rb_bug_errno("native_stop_timer_thread - close(ttp[1])", errno);
    }
    pipes[0] = pipes[1] = -1;
}
static void
set_nonblock(int fd)
{
    int oflags;
    int err;

    oflags = fcntl(fd, F_GETFL);
    if (oflags == -1)
        rb_sys_fail(0);
    oflags |= O_NONBLOCK;
    err = fcntl(fd, F_SETFL, oflags);
    if (err == -1)
        rb_sys_fail(0);
}
static void
setup_communication_pipe_internal(int pipes[2])
{
    int err;

    if (pipes[0] != -1) {
        /* close the pipe inherited from the parent process */
        close_communication_pipe(pipes);
    }

    err = rb_cloexec_pipe(pipes);
    if (err != 0) {
        rb_bug_errno("setup_communication_pipe: Failed to create communication pipe for timer thread", errno);
    }
    rb_update_max_fd(pipes[0]);
    rb_update_max_fd(pipes[1]);
    set_nonblock(pipes[0]);
    set_nonblock(pipes[1]);
}
/* communication pipe with timer thread and signal handler */
static void
setup_communication_pipe(void)
{
    if (timer_thread_pipe_owner_process == getpid()) {
        /* already set up for this process */
        return;
    }
    setup_communication_pipe_internal(timer_thread_pipe);
    setup_communication_pipe_internal(timer_thread_pipe_low);

    /* validate the pipes on this process */
    timer_thread_pipe_owner_process = getpid();
}
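/*
 * Illustrative sketch (added; not part of the original source): the
 * self-pipe pattern used above, reduced to plain POSIX calls.  A waker
 * write()s one byte; the sleeper poll()s the read end and drains it.
 * All names here are hypothetical; the block is never compiled.
 */
#if 0
#include <poll.h>
#include <unistd.h>
#include <fcntl.h>

static int demo_pipe[2];

static void demo_setup(void) {
    pipe(demo_pipe);
    fcntl(demo_pipe[0], F_SETFL, O_NONBLOCK);
    fcntl(demo_pipe[1], F_SETFL, O_NONBLOCK);
}

/* async-signal-safe: only calls write(2) */
static void demo_wakeup(void) { (void)write(demo_pipe[1], "!", 1); }

static void demo_sleep(int timeout_msec) {
    struct pollfd pfd;
    char buf[64];
    pfd.fd = demo_pipe[0];
    pfd.events = POLLIN;
    if (poll(&pfd, 1, timeout_msec) > 0)
        while (read(demo_pipe[0], buf, sizeof(buf)) > 0)
            ; /* drain every queued wakeup byte */
}
#endif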
/* Let the timer thread sleep until it is woken through the pipes. */
static void
timer_thread_sleep(rb_global_vm_lock_t* gvl)
{
    int result;
    int need_polling;
    struct pollfd pollfds[2];

    pollfds[0].fd = timer_thread_pipe[0];
    pollfds[0].events = POLLIN;
    pollfds[1].fd = timer_thread_pipe_low[0];
    pollfds[1].events = POLLIN;

    need_polling = check_signal_thread_list();

    if (gvl->waiting > 0 || need_polling) {
        /* polling (TIME_QUANTUM_USEC usec) */
        result = poll(pollfds, 1, TIME_QUANTUM_USEC/1000);
    }
    else {
        /* wait (infinite) */
        result = poll(pollfds, numberof(pollfds), -1);
    }

    if (result > 0) {
        consume_communication_pipe(timer_thread_pipe[0]);
        consume_communication_pipe(timer_thread_pipe_low[0]);
    }
    /* ... timeout and error cases elided in this excerpt ... */
}
#else /* USE_SLEEPY_TIMER_THREAD */
# define PER_NANO 1000000000
void rb_thread_wakeup_timer_thread(void) {}
static void rb_thread_wakeup_timer_thread_low(void) {}

static pthread_mutex_t timer_thread_lock;
static rb_nativethread_cond_t timer_thread_cond;

static inline void
timer_thread_sleep(rb_global_vm_lock_t* unused)
{
    struct timespec ts;
    ts.tv_sec = 0;
    ts.tv_nsec = TIME_QUANTUM_USEC * 1000;
    ts = native_cond_timeout(&timer_thread_cond, ts);

    native_cond_timedwait(&timer_thread_cond, &timer_thread_lock, &ts);
}
#endif /* USE_SLEEPY_TIMER_THREAD */
#if defined(__linux__) && defined(PR_SET_NAME)
# undef SET_THREAD_NAME
# define SET_THREAD_NAME(name) prctl(PR_SET_NAME, name)
#elif !defined(SET_THREAD_NAME)
# define SET_THREAD_NAME(name) (void)0
#endif

static void *
thread_timer(void *p)
{
    rb_global_vm_lock_t *gvl = (rb_global_vm_lock_t *)p;

    if (TT_DEBUG) WRITE_CONST(2, "start timer thread\n");

    SET_THREAD_NAME("ruby-timer-thr");

#if !USE_SLEEPY_TIMER_THREAD
    native_mutex_initialize(&timer_thread_lock);
    native_cond_initialize(&timer_thread_cond, RB_CONDATTR_CLOCK_MONOTONIC);
    native_mutex_lock(&timer_thread_lock);
#endif
    while (system_working > 0) {
        /* timer function */
        ping_signal_thread_list();
        timer_thread_function(0);

        /* wait */
        timer_thread_sleep(gvl);
    }
#if !USE_SLEEPY_TIMER_THREAD
    native_mutex_unlock(&timer_thread_lock);
    native_cond_destroy(&timer_thread_cond);
    native_mutex_destroy(&timer_thread_lock);
#endif

    if (TT_DEBUG) WRITE_CONST(2, "finish timer thread\n");
    return NULL;
}
static void
rb_thread_create_timer_thread(void)
{
    if (!timer_thread_id) {
        int err;
#ifdef HAVE_PTHREAD_ATTR_INIT
        pthread_attr_t attr;

        err = pthread_attr_init(&attr);
        if (err != 0) {
            fprintf(stderr, "[FATAL] Failed to initialize pthread attr: %s\n", strerror(err));
            exit(EXIT_FAILURE);
        }
# ifdef PTHREAD_STACK_MIN
        {
            /* use a machine stack of at least 16KB (4 pages) */
            const size_t min_size = (4096 * 4);
            size_t stack_size = PTHREAD_STACK_MIN;
            if (stack_size < min_size) stack_size = min_size;
            pthread_attr_setstacksize(&attr, stack_size);
        }
# endif
#endif

#if USE_SLEEPY_TIMER_THREAD
        setup_communication_pipe();
#endif /* USE_SLEEPY_TIMER_THREAD */

        /* create the timer thread */
        if (timer_thread_id) {
            rb_bug("rb_thread_create_timer_thread: Timer thread was already created\n");
        }
#ifdef HAVE_PTHREAD_ATTR_INIT
        err = pthread_create(&timer_thread_id, &attr, thread_timer, &GET_VM()->gvl);
#else
        err = pthread_create(&timer_thread_id, NULL, thread_timer, &GET_VM()->gvl);
#endif
        if (err != 0) {
            fprintf(stderr, "[FATAL] Failed to create timer thread: %s\n", strerror(err));
            exit(EXIT_FAILURE);
        }
#ifdef HAVE_PTHREAD_ATTR_INIT
        pthread_attr_destroy(&attr);
#endif
    }
}
static int
native_stop_timer_thread(int close_anyway)
{
    int stopped;
    stopped = --system_working <= 0;

    if (TT_DEBUG) fprintf(stderr, "stop timer thread\n");
    if (stopped) {
        /* join */
        rb_thread_wakeup_timer_thread();
        native_thread_join(timer_thread_id);
        if (TT_DEBUG) fprintf(stderr, "joined timer thread\n");
        timer_thread_id = 0;

        /* ... close the communication pipes here when close_anyway is
           set; otherwise keep them open, since dying threads may still
           write to them ... */
    }
    return stopped;
}
static void
native_reset_timer_thread(void)
{
    if (TT_DEBUG) fprintf(stderr, "reset timer thread\n");
}
#ifdef HAVE_SIGALTSTACK
int
ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
{
    void *base;
    size_t size;
    const size_t water_mark = 1024 * 1024;
    STACK_GROW_DIR_DETECTION;

#ifdef STACKADDR_AVAILABLE
    if (get_stack(&base, &size) == 0) {
# ifdef __APPLE__
        if (pthread_equal(th->thread_id, native_main_thread.id)) {
            struct rlimit rlim;
            if (getrlimit(RLIMIT_STACK, &rlim) == 0 && rlim.rlim_cur > size) {
                size = (size_t)rlim.rlim_cur;
            }
        }
# endif
        base = (char *)base + STACK_DIR_UPPER(+size, -size);
    }
    else
#endif
    if (th) {
        size = th->machine.stack_maxsize;
        base = (char *)th->machine.stack_start - STACK_DIR_UPPER(0, size);
    }
    else {
        return 0;
    }

    size /= RUBY_STACK_SPACE_RATIO;
    if (size > water_mark) size = water_mark;

    if (IS_STACK_DIR_UPPER()) {
        if (size > ~(size_t)base+1) size = ~(size_t)base+1;
        if (addr > base && addr <= (void *)((char *)base + size)) return 1;
    }
    else {
        if (size > (size_t)base) size = (size_t)base;
        if (addr > (void *)((char *)base - size) && addr <= base) return 1;
    }
    return 0;
}
#endif
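/*
 * Worked example (added note): with an 8 MiB stack, the margin checked
 * above is 8 MiB / RUBY_STACK_SPACE_RATIO = 1.6 MiB, clamped to the
 * 1 MiB water_mark; a faulting address inside that band at the stack's
 * end is reported as a stack overflow.
 */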
int
rb_reserved_fd_p(int fd)
{
#if USE_SLEEPY_TIMER_THREAD
    if (fd == timer_thread_pipe[0] ||
        fd == timer_thread_pipe[1] ||
        fd == timer_thread_pipe_low[0] ||
        fd == timer_thread_pipe_low[1]) {
        return 1;
    }
    else {
        return 0;
    }
#else
    return 0;
#endif
}

rb_nativethread_id_t
rb_nativethread_self(void)
{
    return pthread_self();
}

#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */