Ruby 2.7.7p221 (2022-11-24 revision 168ec2b1e5ad0e4688e963d9de019557c78feed9)
thread.c
/**********************************************************************

  thread.c -

  $Author$

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/

/*
  YARV Thread Design

  model 1: Userlevel Thread
    Same as a traditional ruby thread.

  model 2: Native Thread with Global VM lock
    Using pthread (or Windows threads); Ruby threads run concurrently.

  model 3: Native Thread with fine grain lock
    Using pthread; Ruby threads run concurrently or in parallel.

  model 4: M:N User:Native threads with Global VM lock
    Combination of models 1 and 2.

  model 5: M:N User:Native threads with fine grain lock
    Combination of models 1 and 3.

------------------------------------------------------------------------

  model 2:
    Only the thread that holds the mutex (GVL: Global VM Lock, or Giant
    VM Lock) can run.  When scheduling, the running thread releases the
    GVL.  If the running thread attempts a blocking operation, it must
    release the GVL so that another thread can continue.  After the
    blocking operation, the thread must check for interrupts
    (RUBY_VM_CHECK_INTS).

    Every VM can run in parallel.

    Ruby threads are scheduled by the OS thread scheduler.

------------------------------------------------------------------------

  model 3:
    Every thread runs concurrently or in parallel, so exclusive access
    control is needed for shared objects.  For example, a fine grain
    lock must be taken every time a String or Array object is accessed.
 */

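/*
 * Illustration (added for exposition; not part of the original source):
 * under model 2, a C-level blocking call is bracketed so the GVL is
 * dropped for its duration and pending interrupts are honored
 * afterwards.  A minimal sketch using the GVL_UNLOCK_BEGIN/GVL_UNLOCK_END
 * macros defined later in this file:
 *
 *     ssize_t n;
 *     GVL_UNLOCK_BEGIN(th);
 *     n = read(fd, buf, len);        // blocking syscall; GVL released
 *     GVL_UNLOCK_END(th);
 *     RUBY_VM_CHECK_INTS(th->ec);    // run deferred interrupt handlers
 */
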
/*
 * FD_SET, FD_CLR and FD_ISSET have a small sanity check when using glibc
 * 2.15 or later with _FORTIFY_SOURCE > 0.
 * However, the implementation is wrong.  Even though Linux's select(2)
 * supports large fd sizes (>FD_SETSIZE), it wrongly assumes fd is always
 * less than FD_SETSIZE (i.e. 1024).  And then, when HAVE_RB_FD_INIT is
 * enabled, it doesn't work correctly and makes the program abort.
 * Therefore we need to disable FORTIFY_SOURCE until glibc fixes it.
 */
#undef _FORTIFY_SOURCE
#undef __USE_FORTIFY_LEVEL
#define __USE_FORTIFY_LEVEL 0

/* for model 2 */

#include "ruby/config.h"
#include "ruby/io.h"
#include "eval_intern.h"
#include "timev.h"
#include "ruby/thread.h"
#include "ruby/thread_native.h"
#include "ruby/debug.h"
#include "gc.h"
#include "internal.h"
#include "iseq.h"
#include "vm_core.h"
#include "mjit.h"
#include "hrtime.h"

#ifdef __linux__
// Normally, gcc(1) translates calls to alloca() with inlined code.  This is
// not done when either the -ansi, -std=c89, -std=c99, or -std=c11 option is
// given and the header <alloca.h> is not included.
#include <alloca.h>
#endif

#ifndef USE_NATIVE_THREAD_PRIORITY
#define USE_NATIVE_THREAD_PRIORITY 0
#define RUBY_THREAD_PRIORITY_MAX 3
#define RUBY_THREAD_PRIORITY_MIN -3
#endif

#ifndef THREAD_DEBUG
#define THREAD_DEBUG 0
#endif

static VALUE rb_cThreadShield;

static VALUE sym_immediate;
static VALUE sym_on_blocking;
static VALUE sym_never;

enum SLEEP_FLAGS {
    SLEEP_DEADLOCKABLE = 0x1,
    SLEEP_SPURIOUS_CHECK = 0x2
};

#define THREAD_LOCAL_STORAGE_INITIALISED FL_USER13
#define THREAD_LOCAL_STORAGE_INITIALISED_P(th) RB_FL_TEST_RAW((th), THREAD_LOCAL_STORAGE_INITIALISED)

static inline VALUE
rb_thread_local_storage(VALUE thread)
{
    if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
        rb_ivar_set(thread, idLocals, rb_hash_new());
        RB_FL_SET_RAW(thread, THREAD_LOCAL_STORAGE_INITIALISED);
    }
    return rb_ivar_get(thread, idLocals);
}

static void sleep_hrtime(rb_thread_t *, rb_hrtime_t, unsigned int fl);
static void sleep_forever(rb_thread_t *th, unsigned int fl);
static void rb_thread_sleep_deadly_allow_spurious_wakeup(void);
static int rb_threadptr_dead(rb_thread_t *th);
static void rb_check_deadlock(rb_vm_t *vm);
static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
static const char *thread_status_name(rb_thread_t *th, int detail);
static int hrtime_update_expire(rb_hrtime_t *, const rb_hrtime_t);
NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
static int consume_communication_pipe(int fd);
static int check_signals_nogvl(rb_thread_t *, int sigwait_fd);
void rb_sigwait_fd_migrate(rb_vm_t *); /* process.c */

#define eKillSignal INT2FIX(0)
#define eTerminateSignal INT2FIX(1)
static volatile int system_working = 1;

struct waiting_fd {
    struct list_node wfd_node; /* <=> vm.waiting_fds */
    rb_thread_t *th;
    int fd;
};

inline static void
st_delete_wrap(st_table *table, st_data_t key)
{
    st_delete(table, &key, 0);
}

/********************************************************************************/

#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

struct rb_blocking_region_buffer {
    enum rb_thread_status prev_status;
};

static int unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted);
static void unblock_function_clear(rb_thread_t *th);

static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
                                        rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);

#define RB_GC_SAVE_MACHINE_CONTEXT(th)                          \
    do {                                                        \
        FLUSH_REGISTER_WINDOWS;                                 \
        setjmp((th)->ec->machine.regs);                         \
        SET_MACHINE_STACK_END(&(th)->ec->machine.stack_end);    \
    } while (0)

#define GVL_UNLOCK_BEGIN(th) do { \
    RB_GC_SAVE_MACHINE_CONTEXT(th); \
    gvl_release(th->vm);

#define GVL_UNLOCK_END(th) \
    gvl_acquire(th->vm, th); \
    rb_thread_set_current(th); \
} while(0)

#ifdef __GNUC__
#ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
#define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
#else
#define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
#endif
#else
#define only_if_constant(expr, notconst) notconst
#endif
#define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted) do { \
    struct rb_blocking_region_buffer __region; \
    if (blocking_region_begin(th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
        /* always return true unless fail_if_interrupted */ \
        !only_if_constant(fail_if_interrupted, TRUE)) { \
        exec; \
        blocking_region_end(th, &__region); \
    }; \
} while(0)

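/*
 * Usage sketch (added for exposition; not part of the original source):
 * BLOCKING_REGION is how this file wraps C calls that may block.  The
 * macro saves the machine context, installs `ubf' as the unblocking
 * function, releases the GVL, runs `exec', and re-acquires the GVL.
 * A typical call, as seen later in rb_nogvl():
 *
 *     BLOCKING_REGION(th, {
 *         val = func(data1);          // runs without the GVL
 *         saved_errno = errno;        // capture errno before re-acquiring
 *     }, ubf, data2, FALSE);
 */
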
/*
 * Returns true if this thread was spuriously interrupted, false otherwise
 * (e.g. hit by Thread#run or ran a Ruby-level Signal.trap handler).
 */
#define RUBY_VM_CHECK_INTS_BLOCKING(ec) vm_check_ints_blocking(ec)
static inline int
vm_check_ints_blocking(rb_execution_context_t *ec)
{
    rb_thread_t *th = rb_ec_thread_ptr(ec);

    if (LIKELY(rb_threadptr_pending_interrupt_empty_p(th))) {
        if (LIKELY(!RUBY_VM_INTERRUPTED_ANY(ec))) return FALSE;
    }
    else {
        th->pending_interrupt_queue_checked = 0;
        RUBY_VM_SET_INTERRUPT(ec);
    }
    return rb_threadptr_execute_interrupts(th, 1);
}

static int
vm_living_thread_num(const rb_vm_t *vm)
{
    return vm->living_thread_num;
}

/*
 * poll() is supported by many OSes, but so far Linux is the only
 * one we know of that supports using poll() in all places select()
 * would work.
 */
#if defined(HAVE_POLL)
# if defined(__linux__)
#  define USE_POLL
# endif
# if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000
#  define USE_POLL
   /* FreeBSD does not set POLLOUT when POLLHUP happens */
#  define POLLERR_SET (POLLHUP | POLLERR)
# endif
#endif

static void
timeout_prepare(rb_hrtime_t **to, rb_hrtime_t *rel, rb_hrtime_t *end,
                const struct timeval *timeout)
{
    if (timeout) {
        *rel = rb_timeval2hrtime(timeout);
        *end = rb_hrtime_add(rb_hrtime_now(), *rel);
        *to = rel;
    }
    else {
        *to = 0;
    }
}

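/*
 * Pattern sketch (added for exposition; not part of the original source):
 * timeout_prepare() pairs with hrtime_update_expire() (defined further
 * below) to implement restartable waits.  The relative timeout is
 * recomputed from the absolute deadline after every wakeup, so spurious
 * wakeups do not extend the total wait:
 *
 *     rb_hrtime_t *to, rel, end;
 *     timeout_prepare(&to, &rel, &end, timeout);
 *     do {
 *         // ... wait for readiness using rel as the remaining time ...
 *     } while (to && !hrtime_update_expire(&rel, end));
 */
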
#if THREAD_DEBUG
#ifdef HAVE_VA_ARGS_MACRO
void rb_thread_debug(const char *file, int line, const char *fmt, ...);
#define thread_debug(...) rb_thread_debug(__FILE__, __LINE__, __VA_ARGS__)
#define POSITION_FORMAT "%s:%d:"
#define POSITION_ARGS ,file, line
#else
void rb_thread_debug(const char *fmt, ...);
#define thread_debug rb_thread_debug
#define POSITION_FORMAT
#define POSITION_ARGS
#endif

# ifdef NON_SCALAR_THREAD_ID
#define fill_thread_id_string ruby_fill_thread_id_string
const char *
ruby_fill_thread_id_string(rb_nativethread_id_t thid, rb_thread_id_string_t buf)
{
    extern const char ruby_digitmap[];
    size_t i;

    buf[0] = '0';
    buf[1] = 'x';
    for (i = 0; i < sizeof(thid); i++) {
# ifdef LITTLE_ENDIAN
        size_t j = sizeof(thid) - i - 1;
# else
        size_t j = i;
# endif
        unsigned char c = (unsigned char)((char *)&thid)[j];
        buf[2 + i * 2] = ruby_digitmap[(c >> 4) & 0xf];
        buf[3 + i * 2] = ruby_digitmap[c & 0xf];
    }
    buf[sizeof(rb_thread_id_string_t)-1] = '\0';
    return buf;
}
# define fill_thread_id_str(th) fill_thread_id_string((th)->thread_id, (th)->thread_id_string)
# define thread_id_str(th) ((th)->thread_id_string)
# define PRI_THREAD_ID "s"
# endif

# if THREAD_DEBUG < 0
static int rb_thread_debug_enabled;

/*
 * call-seq:
 *   Thread.DEBUG -> num
 *
 * Returns the thread debug level.  Available only if compiled with
 * THREAD_DEBUG=-1.
 */

static VALUE
rb_thread_s_debug(void)
{
    return INT2NUM(rb_thread_debug_enabled);
}

/*
 * call-seq:
 *   Thread.DEBUG = num
 *
 * Sets the thread debug level.  Available only if compiled with
 * THREAD_DEBUG=-1.
 */

static VALUE
rb_thread_s_debug_set(VALUE self, VALUE val)
{
    rb_thread_debug_enabled = RTEST(val) ? NUM2INT(val) : 0;
    return val;
}
# else
# define rb_thread_debug_enabled THREAD_DEBUG
# endif
#else
#define thread_debug if(0)printf
#endif

#ifndef fill_thread_id_str
# define fill_thread_id_string(thid, buf) ((void *)(uintptr_t)(thid))
# define fill_thread_id_str(th) (void)0
# define thread_id_str(th) ((void *)(uintptr_t)(th)->thread_id)
# define PRI_THREAD_ID "p"
#endif

NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start));
static void timer_thread_function(void);
void ruby_sigchld_handler(rb_vm_t *); /* signal.c */

static void
ubf_sigwait(void *ignore)
{
    rb_thread_wakeup_timer_thread(0);
}

#if defined(_WIN32)
#include "thread_win32.c"

#define DEBUG_OUT() \
  WaitForSingleObject(&debug_mutex, INFINITE); \
  printf(POSITION_FORMAT"%#lx - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
  fflush(stdout); \
  ReleaseMutex(&debug_mutex);

#elif defined(HAVE_PTHREAD_H)
#include "thread_pthread.c"

#define DEBUG_OUT() \
  pthread_mutex_lock(&debug_mutex); \
  printf(POSITION_FORMAT"%"PRI_THREAD_ID" - %s" POSITION_ARGS, \
         fill_thread_id_string(pthread_self(), thread_id_string), buf); \
  fflush(stdout); \
  pthread_mutex_unlock(&debug_mutex);

#else
#error "unsupported thread type"
#endif

/*
 * TODO: somebody with win32 knowledge should be able to get rid of
 * the timer-thread by busy-waiting on signals.  And it should be possible
 * to make the GVL in thread_pthread.c platform-independent.
 */
#ifndef BUSY_WAIT_SIGNALS
# define BUSY_WAIT_SIGNALS (0)
#endif

#ifndef USE_EVENTFD
# define USE_EVENTFD (0)
#endif

#if THREAD_DEBUG
static int debug_mutex_initialized = 1;
static rb_nativethread_lock_t debug_mutex;

void
rb_thread_debug(
#ifdef HAVE_VA_ARGS_MACRO
    const char *file, int line,
#endif
    const char *fmt, ...)
{
    va_list args;
    char buf[BUFSIZ];
#ifdef NON_SCALAR_THREAD_ID
    rb_thread_id_string_t thread_id_string;
#endif

    if (!rb_thread_debug_enabled) return;

    if (debug_mutex_initialized == 1) {
        debug_mutex_initialized = 0;
        rb_native_mutex_initialize(&debug_mutex);
    }

    va_start(args, fmt);
    vsnprintf(buf, BUFSIZ, fmt, args);
    va_end(args);

    DEBUG_OUT();
}
#endif

#include "thread_sync.c"

void
rb_vm_gvl_destroy(rb_vm_t *vm)
{
    gvl_release(vm);
    gvl_destroy(vm);
}

void
rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_initialize(lock);
}

void
rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_destroy(lock);
}

void
rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_lock(lock);
}

void
rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_unlock(lock);
}

static int
unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted)
{
    do {
        if (fail_if_interrupted) {
            if (RUBY_VM_INTERRUPTED_ANY(th->ec)) {
                return FALSE;
            }
        }
        else {
            RUBY_VM_CHECK_INTS(th->ec);
        }

        rb_native_mutex_lock(&th->interrupt_lock);
    } while (!th->ec->raised_flag && RUBY_VM_INTERRUPTED_ANY(th->ec) &&
             (rb_native_mutex_unlock(&th->interrupt_lock), TRUE));

    VM_ASSERT(th->unblock.func == NULL);

    th->unblock.func = func;
    th->unblock.arg = arg;
    rb_native_mutex_unlock(&th->interrupt_lock);

    return TRUE;
}

static void
unblock_function_clear(rb_thread_t *th)
{
    rb_native_mutex_lock(&th->interrupt_lock);
    th->unblock.func = NULL;
    rb_native_mutex_unlock(&th->interrupt_lock);
}

static void
rb_threadptr_interrupt_common(rb_thread_t *th, int trap)
{
    rb_native_mutex_lock(&th->interrupt_lock);
    if (trap) {
        RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
    }
    else {
        RUBY_VM_SET_INTERRUPT(th->ec);
    }
    if (th->unblock.func != NULL) {
        (th->unblock.func)(th->unblock.arg);
    }
    else {
        /* none */
    }
    rb_native_mutex_unlock(&th->interrupt_lock);
}

void
rb_threadptr_interrupt(rb_thread_t *th)
{
    rb_threadptr_interrupt_common(th, 0);
}

static void
threadptr_trap_interrupt(rb_thread_t *th)
{
    rb_threadptr_interrupt_common(th, 1);
}

static void
terminate_all(rb_vm_t *vm, const rb_thread_t *main_thread)
{
    rb_thread_t *th = 0;

    list_for_each(&vm->living_threads, th, vmlt_node) {
        if (th != main_thread) {
            thread_debug("terminate_all: begin (thid: %"PRI_THREAD_ID", status: %s)\n",
                         thread_id_str(th), thread_status_name(th, TRUE));
            rb_threadptr_pending_interrupt_enque(th, eTerminateSignal);
            rb_threadptr_interrupt(th);
            thread_debug("terminate_all: end (thid: %"PRI_THREAD_ID", status: %s)\n",
                         thread_id_str(th), thread_status_name(th, TRUE));
        }
        else {
            thread_debug("terminate_all: main thread (%p)\n", (void *)th);
        }
    }
}

void
rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
{
    const char *err;
    rb_mutex_t *mutex;
    rb_mutex_t *mutexes = th->keeping_mutexes;

    while (mutexes) {
        mutex = mutexes;
        /* rb_warn("mutex #<%p> remains to be locked by terminated thread",
                   (void *)mutexes); */
        mutexes = mutex->next_mutex;
        err = rb_mutex_unlock_th(mutex, th);
        if (err) rb_bug("invalid keeping_mutexes: %s", err);
    }
}

void
rb_thread_terminate_all(void)
{
    rb_thread_t *volatile th = GET_THREAD(); /* main thread */
    rb_execution_context_t * volatile ec = th->ec;
    rb_vm_t *volatile vm = th->vm;
    volatile int sleeping = 0;

    if (vm->main_thread != th) {
        rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
               (void *)vm->main_thread, (void *)th);
    }

    /* unlock all locking mutexes */
    rb_threadptr_unlock_all_locking_mutexes(th);

    EC_PUSH_TAG(ec);
    if (EC_EXEC_TAG() == TAG_NONE) {
      retry:
        thread_debug("rb_thread_terminate_all (main thread: %p)\n", (void *)th);
        terminate_all(vm, th);

        while (vm_living_thread_num(vm) > 1) {
            rb_hrtime_t rel = RB_HRTIME_PER_SEC;
            /*
             * The thread exiting routine in thread_start_func_2 notifies
             * me when the last sub-thread exits.
             */
            sleeping = 1;
            native_sleep(th, &rel);
            RUBY_VM_CHECK_INTS_BLOCKING(ec);
            sleeping = 0;
        }
    }
    else {
        /*
         * When an exception is caught (e.g. Ctrl+C), broadcast the
         * kill request again to ensure all threads are killed even
         * if they are blocked on sleep, mutex, etc.
         */
        if (sleeping) {
            sleeping = 0;
            goto retry;
        }
    }
    EC_POP_TAG();
}

static void
thread_cleanup_func_before_exec(void *th_ptr)
{
    rb_thread_t *th = th_ptr;
    th->status = THREAD_KILLED;
    // The thread stack doesn't exist in the forked process:
    th->ec->machine.stack_start = th->ec->machine.stack_end = NULL;

    rb_threadptr_pending_interrupt_clear(th);
}

static void
thread_cleanup_func(void *th_ptr, int atfork)
{
    rb_thread_t *th = th_ptr;

    th->locking_mutex = Qfalse;
    thread_cleanup_func_before_exec(th_ptr);

    /*
     * Unfortunately, we can't release the native threading resource at fork
     * because libc may have an unstable locking state; therefore, touching
     * a threading resource may cause a deadlock.
     *
     * FIXME: Skipping native_mutex_destroy(pthread_mutex_destroy) is safe
     * with NPTL, but native_thread_destroy calls pthread_cond_destroy
     * which calls free(3), so there is a small memory leak atfork, here.
     */
    if (atfork)
        return;

    rb_native_mutex_destroy(&th->interrupt_lock);
    native_thread_destroy(th);
}

static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
static VALUE rb_thread_to_s(VALUE thread);

void
ruby_thread_init_stack(rb_thread_t *th)
{
    native_thread_init_stack(th);
}

const VALUE *
rb_vm_proc_local_ep(VALUE proc)
{
    const VALUE *ep = vm_proc_ep(proc);

    if (ep) {
        return rb_vm_ep_local_ep(ep);
    }
    else {
        return NULL;
    }
}

static void
thread_do_start(rb_thread_t *th)
{
    native_set_thread_name(th);

    if (th->invoke_type == thread_invoke_type_proc) {
        VALUE args = th->invoke_arg.proc.args;
        int args_len = (int)RARRAY_LEN(args);
        int kw_splat = th->invoke_arg.proc.kw_splat;
        const VALUE *args_ptr;
        VALUE procval = th->invoke_arg.proc.proc;
        rb_proc_t *proc;
        GetProcPtr(procval, proc);

        th->ec->errinfo = Qnil;
        th->ec->root_lep = rb_vm_proc_local_ep(procval);
        th->ec->root_svar = Qfalse;

        EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, 0, Qundef);
        vm_check_ints_blocking(th->ec);

        if (args_len < 8) {
            /* free proc.args if the length is small enough */
            args_ptr = ALLOCA_N(VALUE, args_len);
            MEMCPY((VALUE *)args_ptr, RARRAY_CONST_PTR_TRANSIENT(args), VALUE, args_len);
            th->invoke_arg.proc.args = Qnil;
        }
        else {
            args_ptr = RARRAY_CONST_PTR(args);
        }

        rb_adjust_argv_kw_splat(&args_len, &args_ptr, &kw_splat);
        th->value = rb_vm_invoke_proc(th->ec, proc,
                                      args_len, args_ptr,
                                      kw_splat, VM_BLOCK_HANDLER_NONE);

        EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_END, th->self, 0, 0, 0, Qundef);
    }
    else {
        th->value = (*th->invoke_arg.func.func)(th->invoke_arg.func.arg);
    }
}

void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec);

static int
thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
{
    STACK_GROW_DIR_DETECTION;
    enum ruby_tag_type state;
    rb_thread_list_t *join_list;
    rb_thread_t *main_th;
    VALUE errinfo = Qnil;
    size_t size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
    VALUE * vm_stack = NULL;

    if (th == th->vm->main_thread) {
        rb_bug("thread_start_func_2 must not be used for main thread");
    }

    thread_debug("thread start: %p\n", (void *)th);
    VM_ASSERT((size * sizeof(VALUE)) <= th->ec->machine.stack_maxsize);

    vm_stack = alloca(size * sizeof(VALUE));
    VM_ASSERT(vm_stack);

    gvl_acquire(th->vm, th);

    rb_ec_initialize_vm_stack(th->ec, vm_stack, size);
    th->ec->machine.stack_start = STACK_DIR_UPPER(vm_stack + size, vm_stack);
    th->ec->machine.stack_maxsize -= size * sizeof(VALUE);

    ruby_thread_set_native(th);

    {
        thread_debug("thread start (get lock): %p\n", (void *)th);
        rb_thread_set_current(th);

        EC_PUSH_TAG(th->ec);
        if ((state = EC_EXEC_TAG()) == TAG_NONE) {
            SAVE_ROOT_JMPBUF(th, thread_do_start(th));
        }
        else {
            errinfo = th->ec->errinfo;

            VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
            if (!NIL_P(exc)) errinfo = exc;

            if (state == TAG_FATAL) {
                /* fatal error within this thread, need to stop whole script */
            }
            else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
                /* exit on main_thread. */
            }
            else {
                if (th->report_on_exception) {
                    VALUE mesg = rb_thread_to_s(th->self);
                    rb_str_cat_cstr(mesg, " terminated with exception (report_on_exception is true):\n");
                    rb_write_error_str(mesg);
                    rb_ec_error_print(th->ec, errinfo);
                }
                if (th->vm->thread_abort_on_exception ||
                    th->abort_on_exception || RTEST(ruby_debug)) {
                    /* exit on main_thread */
                }
                else {
                    errinfo = Qnil;
                }
            }
            th->value = Qnil;
        }

        th->status = THREAD_KILLED;
        thread_debug("thread end: %p\n", (void *)th);

        main_th = th->vm->main_thread;
        if (main_th == th) {
            ruby_stop(0);
        }
        if (RB_TYPE_P(errinfo, T_OBJECT)) {
            /* treat with normal error object */
            rb_threadptr_raise(main_th, 1, &errinfo);
        }
        EC_POP_TAG();

        rb_ec_clear_current_thread_trace_func(th->ec);

        /* locking_mutex must be Qfalse */
        if (th->locking_mutex != Qfalse) {
            rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
                   (void *)th, th->locking_mutex);
        }

        /* delete self other than main thread from living_threads */
        rb_vm_living_threads_remove(th->vm, th);
        if (main_th->status == THREAD_KILLED && rb_thread_alone()) {
            /* I'm the last thread; wake up main thread from rb_thread_terminate_all */
            rb_threadptr_interrupt(main_th);
        }

        /* wake up joining threads */
        join_list = th->join_list;
        while (join_list) {
            rb_threadptr_interrupt(join_list->th);
            switch (join_list->th->status) {
              case THREAD_STOPPED: case THREAD_STOPPED_FOREVER:
                join_list->th->status = THREAD_RUNNABLE;
              default: break;
            }
            join_list = join_list->next;
        }

        rb_threadptr_unlock_all_locking_mutexes(th);
        rb_check_deadlock(th->vm);

        rb_fiber_close(th->ec->fiber_ptr);
    }

    thread_cleanup_func(th, FALSE);
    VM_ASSERT(th->ec->vm_stack == NULL);

    gvl_release(th->vm);

    return 0;
}

static VALUE
thread_create_core(VALUE thval, VALUE args, VALUE (*fn)(void *))
{
    rb_thread_t *th = rb_thread_ptr(thval), *current_th = GET_THREAD();
    int err;

    if (OBJ_FROZEN(current_th->thgroup)) {
        rb_raise(rb_eThreadError,
                 "can't start a new thread (frozen ThreadGroup)");
    }

    if (fn) {
        th->invoke_type = thread_invoke_type_func;
        th->invoke_arg.func.func = fn;
        th->invoke_arg.func.arg = (void *)args;
    }
    else {
        (void)RARRAY_LENINT(args);
        th->invoke_type = thread_invoke_type_proc;
        th->invoke_arg.proc.proc = rb_block_proc();
        th->invoke_arg.proc.args = args;
        th->invoke_arg.proc.kw_splat = rb_empty_keyword_given_p() ?
            RB_PASS_EMPTY_KEYWORDS :
            rb_keyword_given_p();
    }

    th->priority = current_th->priority;
    th->thgroup = current_th->thgroup;

    th->pending_interrupt_queue = rb_ary_tmp_new(0);
    th->pending_interrupt_queue_checked = 0;
    th->pending_interrupt_mask_stack = rb_ary_dup(current_th->pending_interrupt_mask_stack);
    RBASIC_CLEAR_CLASS(th->pending_interrupt_mask_stack);

    rb_native_mutex_initialize(&th->interrupt_lock);

    /* kick thread */
    err = native_thread_create(th);
    if (err) {
        th->status = THREAD_KILLED;
        rb_raise(rb_eThreadError, "can't create Thread: %s", strerror(err));
    }
    rb_vm_living_threads_insert(th->vm, th);
    return thval;
}

#define threadptr_initialized(th) ((th)->invoke_type != thread_invoke_type_none)

/*
 * call-seq:
 *   Thread.new { ... } -> thread
 *   Thread.new(*args, &proc) -> thread
 *   Thread.new(*args) { |args| ... } -> thread
 *
 * Creates a new thread executing the given block.
 *
 * Any +args+ given to ::new will be passed to the block:
 *
 *   arr = []
 *   a, b, c = 1, 2, 3
 *   Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
 *   arr #=> [1, 2, 3]
 *
 * A ThreadError exception is raised if ::new is called without a block.
 *
 * If you're going to subclass Thread, be sure to call super in your
 * +initialize+ method, otherwise a ThreadError will be raised.
 */
static VALUE
thread_s_new(int argc, VALUE *argv, VALUE klass)
{
    rb_thread_t *th;
    VALUE thread = rb_thread_alloc(klass);

    if (GET_VM()->main_thread->status == THREAD_KILLED)
        rb_raise(rb_eThreadError, "can't alloc thread");

    rb_obj_call_init_kw(thread, argc, argv, RB_PASS_CALLED_KEYWORDS);
    th = rb_thread_ptr(thread);
    if (!threadptr_initialized(th)) {
        rb_raise(rb_eThreadError, "uninitialized thread - check `%"PRIsVALUE"#initialize'",
                 klass);
    }
    return thread;
}

/*
 * call-seq:
 *   Thread.start([args]*) {|args| block } -> thread
 *   Thread.fork([args]*) {|args| block } -> thread
 *
 * Basically the same as ::new.  However, if class Thread is subclassed, then
 * calling +start+ in that subclass will not invoke the subclass's
 * +initialize+ method.
 */

static VALUE
thread_start(VALUE klass, VALUE args)
{
    return thread_create_core(rb_thread_alloc(klass), args, 0);
}

static VALUE
threadptr_invoke_proc_location(rb_thread_t *th)
{
    if (th->invoke_type == thread_invoke_type_proc) {
        return rb_proc_location(th->invoke_arg.proc.proc);
    }
    else {
        return Qnil;
    }
}

/* :nodoc: */
static VALUE
thread_initialize(VALUE thread, VALUE args)
{
    rb_thread_t *th = rb_thread_ptr(thread);

    if (!rb_block_given_p()) {
        rb_raise(rb_eThreadError, "must be called with a block");
    }
    else if (th->invoke_type != thread_invoke_type_none) {
        VALUE loc = threadptr_invoke_proc_location(th);
        if (!NIL_P(loc)) {
            rb_raise(rb_eThreadError,
                     "already initialized thread - %"PRIsVALUE":%"PRIsVALUE,
                     RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
        }
        else {
            rb_raise(rb_eThreadError, "already initialized thread");
        }
    }
    else {
        return thread_create_core(thread, args, NULL);
    }
}

VALUE
rb_thread_create(VALUE (*fn)(void *), void *arg)
{
    return thread_create_core(rb_thread_alloc(rb_cThread), (VALUE)arg, fn);
}

struct join_arg {
    rb_thread_t *target, *waiting;
    rb_hrtime_t *limit;
};

static VALUE
remove_from_join_list(VALUE arg)
{
    struct join_arg *p = (struct join_arg *)arg;
    rb_thread_t *target_th = p->target, *th = p->waiting;

    if (target_th->status != THREAD_KILLED) {
        rb_thread_list_t **p = &target_th->join_list;

        while (*p) {
            if ((*p)->th == th) {
                *p = (*p)->next;
                break;
            }
            p = &(*p)->next;
        }
    }

    return Qnil;
}

static VALUE
thread_join_sleep(VALUE arg)
{
    struct join_arg *p = (struct join_arg *)arg;
    rb_thread_t *target_th = p->target, *th = p->waiting;
    rb_hrtime_t end = 0;

    if (p->limit) {
        end = rb_hrtime_add(*p->limit, rb_hrtime_now());
    }

    while (target_th->status != THREAD_KILLED) {
        if (!p->limit) {
            th->status = THREAD_STOPPED_FOREVER;
            th->vm->sleeper++;
            rb_check_deadlock(th->vm);
            native_sleep(th, 0);
            th->vm->sleeper--;
        }
        else {
            if (hrtime_update_expire(p->limit, end)) {
                thread_debug("thread_join: timeout (thid: %"PRI_THREAD_ID")\n",
                             thread_id_str(target_th));
                return Qfalse;
            }
            th->status = THREAD_STOPPED;
            native_sleep(th, p->limit);
        }
        RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
        th->status = THREAD_RUNNABLE;
        thread_debug("thread_join: interrupted (thid: %"PRI_THREAD_ID", status: %s)\n",
                     thread_id_str(target_th), thread_status_name(target_th, TRUE));
    }
    return Qtrue;
}

static VALUE
thread_join(rb_thread_t *target_th, rb_hrtime_t *rel)
{
    rb_thread_t *th = GET_THREAD();
    struct join_arg arg;

    if (th == target_th) {
        rb_raise(rb_eThreadError, "Target thread must not be current thread");
    }
    if (GET_VM()->main_thread == target_th) {
        rb_raise(rb_eThreadError, "Target thread must not be main thread");
    }

    arg.target = target_th;
    arg.waiting = th;
    arg.limit = rel;

    thread_debug("thread_join (thid: %"PRI_THREAD_ID", status: %s)\n",
                 thread_id_str(target_th), thread_status_name(target_th, TRUE));

    if (target_th->status != THREAD_KILLED) {
        rb_thread_list_t list;
        list.next = target_th->join_list;
        list.th = th;
        target_th->join_list = &list;
        if (!rb_ensure(thread_join_sleep, (VALUE)&arg,
                       remove_from_join_list, (VALUE)&arg)) {
            return Qnil;
        }
    }

    thread_debug("thread_join: success (thid: %"PRI_THREAD_ID", status: %s)\n",
                 thread_id_str(target_th), thread_status_name(target_th, TRUE));

    if (target_th->ec->errinfo != Qnil) {
        VALUE err = target_th->ec->errinfo;

        if (FIXNUM_P(err)) {
            switch (err) {
              case INT2FIX(TAG_FATAL):
                thread_debug("thread_join: terminated (thid: %"PRI_THREAD_ID", status: %s)\n",
                             thread_id_str(target_th), thread_status_name(target_th, TRUE));

                /* OK. killed. */
                break;
              default:
                rb_bug("thread_join: Fixnum (%d) should not reach here.", FIX2INT(err));
            }
        }
        else if (THROW_DATA_P(target_th->ec->errinfo)) {
            rb_bug("thread_join: THROW_DATA should not reach here.");
        }
        else {
            /* normal exception */
            rb_exc_raise(err);
        }
    }
    return target_th->self;
}

static rb_hrtime_t *double2hrtime(rb_hrtime_t *, double);

/*
 * call-seq:
 *   thr.join -> thr
 *   thr.join(limit) -> thr
 *
 * The calling thread will suspend execution and run this +thr+.
 *
 * Does not return until +thr+ exits or until the given +limit+ seconds have
 * passed.
 *
 * If the time limit expires, +nil+ will be returned, otherwise +thr+ is
 * returned.
 *
 * Any threads not joined will be killed when the main program exits.
 *
 * If +thr+ had previously raised an exception and the ::abort_on_exception or
 * $DEBUG flags are not set (so the exception has not yet been processed), it
 * will be processed at this time.
 *
 *   a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
 *   x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
 *   x.join # Let thread x finish, thread a will be killed on exit.
 *   #=> "axyz"
 *
 * The following example illustrates the +limit+ parameter.
 *
 *   y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
 *   puts "Waiting" until y.join(0.15)
 *
 * This will produce:
 *
 *   tick...
 *   Waiting
 *   tick...
 *   Waiting
 *   tick...
 *   tick...
 */

static VALUE
thread_join_m(int argc, VALUE *argv, VALUE self)
{
    VALUE limit;
    rb_hrtime_t rel, *to = 0;

    /*
     * This supports INFINITY and negative values, so we can't use
     * rb_time_interval right now...
     */
    if (!rb_check_arity(argc, 0, 1) || NIL_P(argv[0])) {
        /* unlimited */
    }
    else if (FIXNUM_P(limit = argv[0])) {
        rel = rb_sec2hrtime(NUM2TIMET(limit));
        to = &rel;
    }
    else {
        to = double2hrtime(&rel, rb_num2dbl(limit));
    }

    return thread_join(rb_thread_ptr(self), to);
}

/*
 * call-seq:
 *   thr.value -> obj
 *
 * Waits for +thr+ to complete, using #join, and returns its value or raises
 * the exception which terminated the thread.
 *
 *   a = Thread.new { 2 + 2 }
 *   a.value #=> 4
 *
 *   b = Thread.new { raise 'something went wrong' }
 *   b.value #=> RuntimeError: something went wrong
 */

static VALUE
thread_value(VALUE self)
{
    rb_thread_t *th = rb_thread_ptr(self);
    thread_join(th, 0);
    return th->value;
}

/*
 * Thread Scheduling
 */

/*
 * Back when we used "struct timeval", not all platforms implemented
 * tv_sec as time_t.  Nowadays we use "struct timespec" and tv_sec
 * seems to be implemented more consistently across platforms.
 * At least other parts of our code haven't had to deal with non-time_t
 * tv_sec in timespec...
 */
#define TIMESPEC_SEC_MAX TIMET_MAX
#define TIMESPEC_SEC_MIN TIMET_MIN

static rb_hrtime_t *
double2hrtime(rb_hrtime_t *hrt, double d)
{
    /* assume timespec.tv_sec has same signedness as time_t */
    const double TIMESPEC_SEC_MAX_PLUS_ONE = TIMET_MAX_PLUS_ONE;

    if (TIMESPEC_SEC_MAX_PLUS_ONE <= d) {
        return NULL;
    }
    else if (d <= 0) {
        *hrt = 0;
    }
    else {
        *hrt = (rb_hrtime_t)(d * (double)RB_HRTIME_PER_SEC);
    }
    return hrt;
}

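/*
 * Worked example (added for exposition; not part of the original source):
 * rb_hrtime_t counts nanoseconds, so with RB_HRTIME_PER_SEC == 10^9 a
 * call double2hrtime(&hrt, 1.5) stores 1500000000 in hrt; d <= 0 clamps
 * to 0, and a value at or beyond TIMET_MAX+1 seconds returns NULL to
 * signal "no representable timeout".
 */
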
static void
getclockofday(struct timespec *ts)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)
        return;
#endif
    rb_timespec_now(ts);
}

/*
 * Don't inline this, since the library call is already time consuming
 * and we don't want "struct timespec" on the stack too long for GC.
 */
NOINLINE(rb_hrtime_t rb_hrtime_now(void));
rb_hrtime_t
rb_hrtime_now(void)
{
    struct timespec ts;

    getclockofday(&ts);
    return rb_timespec2hrtime(&ts);
}

static void
sleep_forever(rb_thread_t *th, unsigned int fl)
{
    enum rb_thread_status prev_status = th->status;
    enum rb_thread_status status;
    int woke;

    status = fl & SLEEP_DEADLOCKABLE ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
    th->status = status;
    RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
    while (th->status == status) {
        if (fl & SLEEP_DEADLOCKABLE) {
            th->vm->sleeper++;
            rb_check_deadlock(th->vm);
        }
        native_sleep(th, 0);
        if (fl & SLEEP_DEADLOCKABLE) {
            th->vm->sleeper--;
        }
        woke = vm_check_ints_blocking(th->ec);
        if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
            break;
    }
    th->status = prev_status;
}

/*
 * at least gcc 7.2 and 7.3 complain about "rb_hrtime_t end"
 * being uninitialized, maybe other versions, too.
 */
COMPILER_WARNING_PUSH
#if defined(__GNUC__) && __GNUC__ == 7 && __GNUC_MINOR__ <= 3
COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
#endif
#ifndef PRIu64
#define PRIu64 PRI_64_PREFIX "u"
#endif
/*
 * @end is the absolute time when @ts is set to expire.
 * Returns true if @end has passed;
 * updates @ts and returns false otherwise.
 */
static int
hrtime_update_expire(rb_hrtime_t *timeout, const rb_hrtime_t end)
{
    rb_hrtime_t now = rb_hrtime_now();

    if (now > end) return 1;
    thread_debug("hrtime_update_expire: "
                 "%"PRIu64" > %"PRIu64"\n",
                 (uint64_t)end, (uint64_t)now);
    *timeout = end - now;
    return 0;
}
COMPILER_WARNING_POP

static void
sleep_hrtime(rb_thread_t *th, rb_hrtime_t rel, unsigned int fl)
{
    enum rb_thread_status prev_status = th->status;
    int woke;
    rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);

    th->status = THREAD_STOPPED;
    RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
    while (th->status == THREAD_STOPPED) {
        native_sleep(th, &rel);
        woke = vm_check_ints_blocking(th->ec);
        if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
            break;
        if (hrtime_update_expire(&rel, end))
            break;
    }
    th->status = prev_status;
}

void
rb_thread_sleep_forever(void)
{
    thread_debug("rb_thread_sleep_forever\n");
    sleep_forever(GET_THREAD(), SLEEP_SPURIOUS_CHECK);
}

void
rb_thread_sleep_deadly(void)
{
    thread_debug("rb_thread_sleep_deadly\n");
    sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE|SLEEP_SPURIOUS_CHECK);
}

void
rb_thread_sleep_interruptible(void)
{
    rb_thread_t *th = GET_THREAD();
    enum rb_thread_status prev_status = th->status;

    th->status = THREAD_STOPPED;
    native_sleep(th, 0);
    RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
    th->status = prev_status;
}

static void
rb_thread_sleep_deadly_allow_spurious_wakeup(void)
{
    thread_debug("rb_thread_sleep_deadly_allow_spurious_wakeup\n");
    sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE);
}

void
rb_thread_wait_for(struct timeval time)
{
    rb_thread_t *th = GET_THREAD();

    sleep_hrtime(th, rb_timeval2hrtime(&time), SLEEP_SPURIOUS_CHECK);
}

/*
 * CAUTION: This function causes thread switching.
 *          rb_thread_check_ints() checks ruby's interrupts.
 *          Some interrupts need thread switching, invoking handlers,
 *          and so on.
 */

void
rb_thread_check_ints(void)
{
    RUBY_VM_CHECK_INTS_BLOCKING(GET_EC());
}

/*
 * Hidden API for the tcl/tk wrapper.
 * There is no guarantee that it will be kept.
 */
int
rb_thread_check_trap_pending(void)
{
    return rb_signal_buff_size() != 0;
}

/* This function can be called in a blocking region. */
int
rb_thread_interrupted(VALUE thval)
{
    return (int)RUBY_VM_INTERRUPTED(rb_thread_ptr(thval)->ec);
}

void
rb_thread_sleep(int sec)
{
    rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
}

static void
rb_thread_schedule_limits(uint32_t limits_us)
{
    thread_debug("rb_thread_schedule\n");
    if (!rb_thread_alone()) {
        rb_thread_t *th = GET_THREAD();

        if (th->running_time_us >= limits_us) {
            thread_debug("rb_thread_schedule/switch start\n");
            RB_GC_SAVE_MACHINE_CONTEXT(th);
            gvl_yield(th->vm, th);
            rb_thread_set_current(th);
            thread_debug("rb_thread_schedule/switch done\n");
        }
    }
}

void
rb_thread_schedule(void)
{
    rb_thread_schedule_limits(0);
    RUBY_VM_CHECK_INTS(GET_EC());
}

/* blocking region */

static inline int
blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
                      rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
{
    region->prev_status = th->status;
    if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
        th->blocking_region_buffer = region;
        th->status = THREAD_STOPPED;
        thread_debug("enter blocking region (%p)\n", (void *)th);
        RB_GC_SAVE_MACHINE_CONTEXT(th);
        gvl_release(th->vm);
        return TRUE;
    }
    else {
        return FALSE;
    }
}

static inline void
blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
{
    /* entry to ubf_list still permitted at this point, make it impossible: */
    unblock_function_clear(th);
    /* entry to ubf_list impossible at this point, so unregister is safe: */
    unregister_ubf_list(th);

    gvl_acquire(th->vm, th);
    rb_thread_set_current(th);
    thread_debug("leave blocking region (%p)\n", (void *)th);
    th->blocking_region_buffer = 0;
    if (th->status == THREAD_STOPPED) {
        th->status = region->prev_status;
    }
}

void *
rb_nogvl(void *(*func)(void *), void *data1,
         rb_unblock_function_t *ubf, void *data2,
         int flags)
{
    void *val = 0;
    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *th = rb_ec_thread_ptr(ec);
    int saved_errno = 0;
    VALUE ubf_th = Qfalse;

    if (ubf == RUBY_UBF_IO || ubf == RUBY_UBF_PROCESS) {
        ubf = ubf_select;
        data2 = th;
    }
    else if (ubf && vm_living_thread_num(th->vm) == 1) {
        if (flags & RB_NOGVL_UBF_ASYNC_SAFE) {
            th->vm->ubf_async_safe = 1;
        }
        else {
            ubf_th = rb_thread_start_unblock_thread();
        }
    }

    BLOCKING_REGION(th, {
        val = func(data1);
        saved_errno = errno;
    }, ubf, data2, flags & RB_NOGVL_INTR_FAIL);

    th->vm->ubf_async_safe = 0;

    if ((flags & RB_NOGVL_INTR_FAIL) == 0) {
        RUBY_VM_CHECK_INTS_BLOCKING(ec);
    }

    if (ubf_th != Qfalse) {
        thread_value(rb_thread_kill(ubf_th));
    }

    errno = saved_errno;

    return val;
}

/*
 * rb_thread_call_without_gvl - permit concurrent/parallel execution.
 * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
 *                               without interrupt processing.
 *
 * rb_thread_call_without_gvl() does:
 *   (1) Check interrupts.
 *   (2) release GVL.
 *       Other Ruby threads may run in parallel.
 *   (3) call func with data1
 *   (4) acquire GVL.
 *       Other Ruby threads can not run in parallel any more.
 *   (5) Check interrupts.
 *
 * rb_thread_call_without_gvl2() does:
 *   (1) Check interrupt and return if interrupted.
 *   (2) release GVL.
 *   (3) call func with data1 and a pointer to the flags.
 *   (4) acquire GVL.
 *
 * If another thread interrupts this thread (Thread#kill, signal delivery,
 * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
 * "un-blocking function").  `ubf()' should interrupt `func()' execution by
 * toggling a cancellation flag, canceling the invocation of a call inside
 * `func()' or similar.  Note that `ubf()' may not be called with the GVL.
 *
 * There are built-in ubfs and you can specify these ubfs:
 *
 * * RUBY_UBF_IO: ubf for IO operations
 * * RUBY_UBF_PROCESS: ubf for process operations
 *
 * However, we can not guarantee our built-in ubfs interrupt your `func()'
 * correctly.  Be careful to use rb_thread_call_without_gvl().  If you don't
 * provide a proper ubf(), your program will not stop for Control+C or other
 * shutdown events.
 *
 * "Check interrupts" on the above list means checking asynchronous
 * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
 * request, and so on) and calling corresponding procedures
 * (such as `trap' for signals, raising an exception for Thread#raise).
 * If `func()' finished and received interrupts, you may skip interrupt
 * checking.  For example, assume the following func(), which reads data
 * from a file.
 *
 *   read_func(...) {
 *                   // (a) before read
 *     read(buffer); // (b) reading
 *                   // (c) after read
 *   }
 *
 * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
 * `read_func()' and interrupts are checked.  However, if an interrupt occurs
 * at (c), after the *read* operation is completed, checking interrupts is
 * harmful because it causes an irrevocable side-effect: the read data will
 * vanish.  To avoid such a problem, the `read_func()' should be used with
 * `rb_thread_call_without_gvl2()'.
 *
 * If `rb_thread_call_without_gvl2()' detects an interrupt, it returns
 * immediately.  This function does not show when the execution was
 * interrupted.  For example, there are 4 possible timings: (a), (b), (c),
 * and before calling read_func().  You need to record the progress of
 * read_func() and check it after `rb_thread_call_without_gvl2()'.  You may
 * need to call `rb_thread_check_ints()' correctly, or your program can not
 * process interrupts such as `trap' and so on.
 *
 * NOTE: You can not execute most of the Ruby C API and touch Ruby
 *       objects in `func()' and `ubf()', including raising an
 *       exception, because the current thread doesn't acquire the GVL
 *       (it causes synchronization problems).  If you need to
 *       call Ruby functions, either use rb_thread_call_with_gvl()
 *       or read the source code of the C APIs and confirm safety by
 *       yourself.
 *
 * NOTE: In short, this API is difficult to use safely.  I recommend you
 *       use other ways if you have them.  We lack experience using this
 *       API.  Please report your problems related to it.
 *
 * NOTE: Releasing and re-acquiring the GVL may be expensive operations
 *       for a short-running `func()'.  Be sure to benchmark, and use this
 *       mechanism only when `func()' consumes enough time.
 *
 * Safe C API:
 * * rb_thread_interrupted() - check interrupt flag
 * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
 *   they will work without the GVL, and may acquire the GVL when GC is needed.
 */
void *
rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
                            rb_unblock_function_t *ubf, void *data2)
{
    return rb_nogvl(func, data1, ubf, data2, RB_NOGVL_INTR_FAIL);
}

void *
rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
                           rb_unblock_function_t *ubf, void *data2)
{
    return rb_nogvl(func, data1, ubf, data2, 0);
}

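/*
 * Usage sketch (added for exposition; not part of the original source):
 * a C extension releasing the GVL around a blocking syscall.  The names
 * `blocking_read', `cancel_read' and `struct read_args' are hypothetical.
 *
 *     struct read_args { int fd; char *buf; size_t len; ssize_t ret; };
 *
 *     static void *
 *     blocking_read(void *p)          // runs WITHOUT the GVL
 *     {
 *         struct read_args *a = p;
 *         a->ret = read(a->fd, a->buf, a->len);
 *         return NULL;
 *     }
 *
 *     static void
 *     cancel_read(void *p)            // ubf: make blocking_read return
 *     {
 *         struct read_args *a = p;
 *         close(a->fd);               // or write to a self-pipe, etc.
 *     }
 *
 *     // from a method with the GVL held:
 *     rb_thread_call_without_gvl(blocking_read, &args, cancel_read, &args);
 */
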
VALUE
rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
{
    volatile VALUE val = Qundef; /* shouldn't be used */
    rb_execution_context_t * volatile ec = GET_EC();
    volatile int saved_errno = 0;
    enum ruby_tag_type state;
    struct waiting_fd wfd;

    wfd.fd = fd;
    wfd.th = rb_ec_thread_ptr(ec);
    list_add(&rb_ec_vm_ptr(ec)->waiting_fds, &wfd.wfd_node);

    EC_PUSH_TAG(ec);
    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        BLOCKING_REGION(wfd.th, {
            val = func(data1);
            saved_errno = errno;
        }, ubf_select, wfd.th, FALSE);
    }
    EC_POP_TAG();

    /*
     * must be deleted before jump
     * this will delete either from waiting_fds or an on-stack LIST_HEAD(busy)
     */
    list_del(&wfd.wfd_node);

    if (state) {
        EC_JUMP_TAG(ec, state);
    }
    /* TODO: check func() */
    RUBY_VM_CHECK_INTS_BLOCKING(ec);

    errno = saved_errno;

    return val;
}

/*
 * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
 *
 * After releasing the GVL using
 * rb_thread_call_without_gvl() you can not access Ruby values or invoke
 * methods.  If you need to access Ruby, you must use this function
 * rb_thread_call_with_gvl().
 *
 * This function rb_thread_call_with_gvl() does:
 * (1) acquire GVL.
 * (2) call passed function `func'.
 * (3) release GVL.
 * (4) return a value which is returned at (2).
 *
 * NOTE: You should not return a Ruby object at (2) because such an object
 *       will not be marked.
 *
 * NOTE: If an exception is raised in `func', this function DOES NOT
 *       protect (catch) the exception.  If you have any resources
 *       which should be freed before throwing an exception, you need to
 *       use rb_protect() in `func' and return a value which indicates
 *       that an exception was raised.
 *
 * NOTE: This function should not be called by a thread which was not
 *       created as a Ruby thread (created by Thread.new or so).  In other
 *       words, this function *DOES NOT* associate or convert a NON-Ruby
 *       thread to a Ruby thread.
 */
void *
rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
{
    rb_thread_t *th = ruby_thread_from_native();
    struct rb_blocking_region_buffer *brb;
    struct rb_unblock_callback prev_unblock;
    void *r;

    if (th == 0) {
        /* Error has occurred, but we can't use rb_bug()
         * because this thread is not Ruby's thread.
         * What should we do?
         */
        fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
        exit(EXIT_FAILURE);
    }

    brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
    prev_unblock = th->unblock;

    if (brb == 0) {
        rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
    }

    blocking_region_end(th, brb);
    /* enter the Ruby world: you can access Ruby values, methods and so on. */
    r = (*func)(data1);
    /* leave the Ruby world: you can not access Ruby values, etc. */
    int released = blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
    RUBY_ASSERT_ALWAYS(released);
    return r;
}

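/*
 * Usage sketch (added for exposition; not part of the original source):
 * re-entering Ruby from a callback that runs while the GVL is released.
 * `make_event' is a hypothetical helper returning a VALUE.
 *
 *     static void *
 *     with_gvl_callback(void *p)      // runs WITH the GVL
 *     {
 *         rb_funcall((VALUE)p, rb_intern("call"), 1, make_event());
 *         return NULL;
 *     }
 *
 *     // from code running without the GVL (e.g. inside
 *     // rb_thread_call_without_gvl's func):
 *     rb_thread_call_with_gvl(with_gvl_callback, (void *)ruby_proc);
 */
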
/*
 * ruby_thread_has_gvl_p - check if the current native thread has the GVL.
 *
 ***
 *** This API is EXPERIMENTAL!
 *** We do not guarantee that this API remains in ruby 1.9.2 or later.
 ***
 */

int
ruby_thread_has_gvl_p(void)
{
    rb_thread_t *th = ruby_thread_from_native();

    if (th && th->blocking_region_buffer == 0) {
        return 1;
    }
    else {
        return 0;
    }
}

/*
 * call-seq:
 *   Thread.pass -> nil
 *
 * Give the thread scheduler a hint to pass execution to another thread.
 * A running thread may or may not switch; it depends on the OS and processor.
 */

static VALUE
thread_s_pass(VALUE klass)
{
    rb_thread_schedule();
    return Qnil;
}

/*****************************************************/

/*
 * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
 *
 * Async events such as an exception thrown by Thread#raise,
 * Thread#kill and thread termination (after main thread termination)
 * will be queued to th->pending_interrupt_queue.
 * - clear: clear the queue.
 * - enque: enqueue an err object into the queue.
 * - deque: dequeue an err object from the queue.
 * - active_p: return 1 if the queue should be checked.
 *
 * All rb_threadptr_pending_interrupt_* functions are called by
 * a thread which has the GVL, of course.
 * Note that all "rb_"-prefixed APIs need the GVL to call.
 */

void
rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
{
    rb_ary_clear(th->pending_interrupt_queue);
}

void
rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
{
    rb_ary_push(th->pending_interrupt_queue, v);
    th->pending_interrupt_queue_checked = 0;
}

static void
threadptr_check_pending_interrupt_queue(rb_thread_t *th)
{
    if (!th->pending_interrupt_queue) {
        rb_raise(rb_eThreadError, "uninitialized thread");
    }
}

enum handle_interrupt_timing {
    INTERRUPT_NONE,
    INTERRUPT_IMMEDIATE,
    INTERRUPT_ON_BLOCKING,
    INTERRUPT_NEVER
};

static enum handle_interrupt_timing
rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
{
    VALUE mask;
    long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
    const VALUE *mask_stack = RARRAY_CONST_PTR(th->pending_interrupt_mask_stack);
    VALUE mod;
    long i;

    for (i=0; i<mask_stack_len; i++) {
        mask = mask_stack[mask_stack_len-(i+1)];

        for (mod = err; mod; mod = RCLASS_SUPER(mod)) {
            VALUE klass = mod;
            VALUE sym;

            if (BUILTIN_TYPE(mod) == T_ICLASS) {
                klass = RBASIC(mod)->klass;
            }
            else if (mod != RCLASS_ORIGIN(mod)) {
                continue;
            }

            if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
                if (sym == sym_immediate) {
                    return INTERRUPT_IMMEDIATE;
                }
                else if (sym == sym_on_blocking) {
                    return INTERRUPT_ON_BLOCKING;
                }
                else if (sym == sym_never) {
                    return INTERRUPT_NEVER;
                }
                else {
                    rb_raise(rb_eThreadError, "unknown mask signature");
                }
            }
        }
        /* try next mask */
    }
    return INTERRUPT_NONE;
}

static int
rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th)
{
    return RARRAY_LEN(th->pending_interrupt_queue) == 0;
}

static int
rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
{
    int i;
    for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
        VALUE e = RARRAY_AREF(th->pending_interrupt_queue, i);
        if (rb_class_inherited_p(e, err)) {
            return TRUE;
        }
    }
    return FALSE;
}

static VALUE
rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
{
#if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
    int i;

    for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
        VALUE err = RARRAY_AREF(th->pending_interrupt_queue, i);

        enum handle_interrupt_timing mask_timing = rb_threadptr_pending_interrupt_check_mask(th, CLASS_OF(err));

        switch (mask_timing) {
          case INTERRUPT_ON_BLOCKING:
            if (timing != INTERRUPT_ON_BLOCKING) {
                break;
            }
            /* fall through */
          case INTERRUPT_NONE: /* default: IMMEDIATE */
          case INTERRUPT_IMMEDIATE:
            rb_ary_delete_at(th->pending_interrupt_queue, i);
            return err;
          case INTERRUPT_NEVER:
            break;
        }
    }

    th->pending_interrupt_queue_checked = 1;
    return Qundef;
#else
    VALUE err = rb_ary_shift(th->pending_interrupt_queue);
    if (rb_threadptr_pending_interrupt_empty_p(th)) {
        th->pending_interrupt_queue_checked = 1;
    }
    return err;
#endif
}

static int
threadptr_pending_interrupt_active_p(rb_thread_t *th)
{
    /*
     * For optimization, we don't check the async errinfo queue
     * if the queue and the thread interrupt mask have not changed
     * since the last check.
     */
    if (th->pending_interrupt_queue_checked) {
        return 0;
    }

    if (rb_threadptr_pending_interrupt_empty_p(th)) {
        return 0;
    }

    return 1;
}

static int
handle_interrupt_arg_check_i(VALUE key, VALUE val, VALUE args)
{
    VALUE *maskp = (VALUE *)args;

    if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
        rb_raise(rb_eArgError, "unknown mask signature");
    }

    if (!*maskp) {
        *maskp = rb_ident_hash_new();
    }
    rb_hash_aset(*maskp, key, val);

    return ST_CONTINUE;
}

/*
 * call-seq:
 *   Thread.handle_interrupt(hash) { ... } -> result of the block
 *
 * Changes asynchronous interrupt timing.
 *
 * _interrupt_ means an asynchronous event and its corresponding procedure,
 * triggered by Thread#raise, Thread#kill, signal trap (not supported yet)
 * and main thread termination (if the main thread terminates, then all
 * other threads will be killed).
 *
 * The given +hash+ has pairs like <code>ExceptionClass =>
 * :TimingSymbol</code>, where ExceptionClass is the interrupt handled by
 * the given block.  The TimingSymbol can be one of the following symbols:
 *
 * [+:immediate+]   Invoke interrupts immediately.
 * [+:on_blocking+] Invoke interrupts while _BlockingOperation_.
 * [+:never+]       Never invoke interrupts.
 *
 * _BlockingOperation_ means that the operation will block the calling thread,
 * such as read and write.  On the CRuby implementation, _BlockingOperation_ is
 * any operation executed without the GVL.
 *
 * Masked asynchronous interrupts are delayed until they are enabled.
 * This method is similar to sigprocmask(3).
 *
 * === NOTE
 *
 * Asynchronous interrupts are difficult to use.
 *
 * If you need to communicate between threads, please consider using another
 * way, such as Queue.
 *
 * Or use them with a deep understanding of this method.
 *
 * === Usage
 *
 * In this example, we can guard from Thread#raise exceptions.
 *
 * Using the +:never+ TimingSymbol, the RuntimeError exception will always be
 * ignored in the first block of the main thread.  In the second
 * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
 *
 *   th = Thread.new do
 *     Thread.handle_interrupt(RuntimeError => :never) {
 *       begin
 *         # You can write resource allocation code safely.
 *         Thread.handle_interrupt(RuntimeError => :immediate) {
 *           # ...
 *         }
 *       ensure
 *         # You can write resource deallocation code safely.
 *       end
 *     }
 *   end
 *   Thread.pass
 *   # ...
 *   th.raise "stop"
 *
 * While we are ignoring the RuntimeError exception, it's safe to write our
 * resource allocation code.  Then, the ensure block is where we can safely
 * deallocate your resources.
 *
 * ==== Guarding from Timeout::Error
 *
 * In the next example, we will guard from the Timeout::Error exception.  This
 * will help prevent leaking resources when Timeout::Error exceptions occur
 * during a normal ensure clause.  For this example we use the help of the
 * standard library Timeout, from lib/timeout.rb
 *
 *   require 'timeout'
 *   Thread.handle_interrupt(Timeout::Error => :never) {
 *     timeout(10){
 *       # Timeout::Error doesn't occur here
 *       Thread.handle_interrupt(Timeout::Error => :on_blocking) {
 *         # possible to be killed by Timeout::Error
 *         # while blocking operation
 *       }
 *       # Timeout::Error doesn't occur here
 *     }
 *   }
 *
 * In the first part of the +timeout+ block, we can rely on Timeout::Error being
 * ignored.  Then in the <code>Timeout::Error => :on_blocking</code> block, any
 * operation that will block the calling thread is susceptible to a
 * Timeout::Error exception being raised.
 *
 * ==== Stack control settings
 *
 * It's possible to stack multiple levels of ::handle_interrupt blocks in order
 * to control more than one ExceptionClass and TimingSymbol at a time.
 *
 *   Thread.handle_interrupt(FooError => :never) {
 *     Thread.handle_interrupt(BarError => :never) {
 *       # FooError and BarError are prohibited.
 *     }
 *   }
 *
 * ==== Inheritance with ExceptionClass
 *
 * All exceptions inherited from the ExceptionClass parameter will be considered.
 *
 *   Thread.handle_interrupt(Exception => :never) {
 *     # all exceptions inherited from Exception are prohibited.
 *   }
 *
 */
static VALUE
rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
{
    VALUE mask;
    rb_execution_context_t * volatile ec = GET_EC();
    rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
    volatile VALUE r = Qnil;
    enum ruby_tag_type state;

    if (!rb_block_given_p()) {
        rb_raise(rb_eArgError, "block is needed.");
    }

    mask = 0;
    mask_arg = rb_to_hash_type(mask_arg);
    rb_hash_foreach(mask_arg, handle_interrupt_arg_check_i, (VALUE)&mask);
    if (!mask) {
        return rb_yield(Qnil);
    }
    OBJ_FREEZE_RAW(mask);
    rb_ary_push(th->pending_interrupt_mask_stack, mask);
    if (!rb_threadptr_pending_interrupt_empty_p(th)) {
        th->pending_interrupt_queue_checked = 0;
        RUBY_VM_SET_INTERRUPT(th->ec);
    }

    EC_PUSH_TAG(th->ec);
    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        r = rb_yield(Qnil);
    }
    EC_POP_TAG();

    rb_ary_pop(th->pending_interrupt_mask_stack);
    if (!rb_threadptr_pending_interrupt_empty_p(th)) {
        th->pending_interrupt_queue_checked = 0;
        RUBY_VM_SET_INTERRUPT(th->ec);
    }

    RUBY_VM_CHECK_INTS(th->ec);

    if (state) {
        EC_JUMP_TAG(th->ec, state);
    }

    return r;
}

/*
 * call-seq:
 *   target_thread.pending_interrupt?(error = nil) -> true/false
 *
 * Returns whether or not the asynchronous queue is empty for the target thread.
 *
 * If +error+ is given, then check only for +error+ type deferred events.
 *
 * See ::pending_interrupt? for more information.
 */
static VALUE
rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
{
    rb_thread_t *target_th = rb_thread_ptr(target_thread);

    if (!target_th->pending_interrupt_queue) {
        return Qfalse;
    }
    if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
        return Qfalse;
    }
    if (rb_check_arity(argc, 0, 1)) {
        VALUE err = argv[0];
        if (!rb_obj_is_kind_of(err, rb_cModule)) {
            rb_raise(rb_eTypeError, "class or module required for rescue clause");
        }
        if (rb_threadptr_pending_interrupt_include_p(target_th, err)) {
            return Qtrue;
        }
        else {
            return Qfalse;
        }
    }
    else {
        return Qtrue;
    }
}

/*
 * call-seq:
 *   Thread.pending_interrupt?(error = nil) -> true/false
 *
 * Returns whether or not the asynchronous queue is empty.
 *
 * Since Thread::handle_interrupt can be used to defer asynchronous events,
 * this method can be used to determine if there are any deferred events.
 *
 * If you find this method returns true, then you may finish +:never+ blocks.
 *
 * For example, the following method processes deferred asynchronous events
 * immediately.
 *
 *   def Thread.kick_interrupt_immediately
 *     Thread.handle_interrupt(Object => :immediate) {
 *       Thread.pass
 *     }
 *   end
 *
 * If +error+ is given, then check only for +error+ type deferred events.
 *
 * === Usage
 *
 *   th = Thread.new{
 *     Thread.handle_interrupt(RuntimeError => :on_blocking){
 *       while true
 *         ...
 *         # reach safe point to invoke interrupt
 *         if Thread.pending_interrupt?
 *           Thread.handle_interrupt(Object => :immediate){}
 *         end
 *         ...
 *       end
 *     }
 *   }
 *   ...
 *   th.raise # stop thread
 *
 * This example can also be written as the following, which you should use to
 * avoid asynchronous interrupts.
 *
 *   flag = true
 *   th = Thread.new{
 *     Thread.handle_interrupt(RuntimeError => :on_blocking){
 *       while true
 *         ...
 *         # reach safe point to invoke interrupt
 *         break if flag == false
 *         ...
 *       end
 *     }
 *   }
 *   ...
 *   flag = false # stop thread
 */

static VALUE
rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
{
    return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
}

2166NORETURN(static void rb_threadptr_to_kill(rb_thread_t *th));
2167
2168static void
2169rb_threadptr_to_kill(rb_thread_t *th)
2170{
2171 rb_threadptr_pending_interrupt_clear(th);
2172 th->status = THREAD_RUNNABLE;
2173 th->to_kill = 1;
2174 th->ec->errinfo = INT2FIX(TAG_FATAL);
2175 EC_JUMP_TAG(th->ec, TAG_FATAL);
2176}
2177
2178static inline rb_atomic_t
2179threadptr_get_interrupts(rb_thread_t *th)
2180{
2181 rb_execution_context_t *ec = th->ec;
2182 rb_atomic_t interrupt;
2183 rb_atomic_t old;
2184
2185 do {
2186 interrupt = ec->interrupt_flag;
2187 old = ATOMIC_CAS(ec->interrupt_flag, interrupt, interrupt & ec->interrupt_mask);
2188 } while (old != interrupt);
2189 return interrupt & (rb_atomic_t)~ec->interrupt_mask;
2190}
2191
2192 MJIT_FUNC_EXPORTED int
2193 rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
2194{
2195 rb_atomic_t interrupt;
2196 int postponed_job_interrupt = 0;
2197 int ret = FALSE;
2198
2199 if (th->ec->raised_flag) return ret;
2200
2201 while ((interrupt = threadptr_get_interrupts(th)) != 0) {
2202 int sig;
2203 int timer_interrupt;
2204 int pending_interrupt;
2205 int trap_interrupt;
2206
2207 timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
2208 pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
2209 postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
2210 trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
2211
2212 if (postponed_job_interrupt) {
2213 rb_postponed_job_flush(th->vm);
2214 }
2215
2216 /* signal handling */
2217 if (trap_interrupt && (th == th->vm->main_thread)) {
2218 enum rb_thread_status prev_status = th->status;
2219 int sigwait_fd = rb_sigwait_fd_get(th);
2220
2221 if (sigwait_fd >= 0) {
2222 (void)consume_communication_pipe(sigwait_fd);
2223 ruby_sigchld_handler(th->vm);
2224 rb_sigwait_fd_put(th, sigwait_fd);
2225 rb_sigwait_fd_migrate(th->vm);
2226 }
2227 th->status = THREAD_RUNNABLE;
2228 while ((sig = rb_get_next_signal()) != 0) {
2229 ret |= rb_signal_exec(th, sig);
2230 }
2231 th->status = prev_status;
2232 }
2233
2234 /* exception from another thread */
2235 if (pending_interrupt && threadptr_pending_interrupt_active_p(th)) {
2236 VALUE err = rb_threadptr_pending_interrupt_deque(th, blocking_timing ? INTERRUPT_ON_BLOCKING : INTERRUPT_NONE);
2237 thread_debug("rb_thread_execute_interrupts: %"PRIdVALUE"\n", err);
2238 ret = TRUE;
2239
2240 if (err == Qundef) {
2241 /* no error */
2242 }
2243 else if (err == eKillSignal /* Thread#kill received */ ||
2244 err == eTerminateSignal /* Terminate thread */ ||
2245 err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
2246 rb_threadptr_to_kill(th);
2247 }
2248 else {
2249 if (err == th->vm->special_exceptions[ruby_error_stream_closed]) {
2250 /* the only special exception to be queued across threads */
2251 err = rb_vm_special_exception_copy(err);
2252 }
2253 /* set runnable if th was sleeping. */
2254 if (th->status == THREAD_STOPPED ||
2255 th->status == THREAD_STOPPED_FOREVER)
2256 th->status = THREAD_RUNNABLE;
2257 rb_exc_raise(err);
2258 }
2259 }
2260
2261 if (timer_interrupt) {
2262 uint32_t limits_us = TIME_QUANTUM_USEC;
2263
2264 if (th->priority > 0)
2265 limits_us <<= th->priority;
2266 else
2267 limits_us >>= -th->priority;
2268
2269 if (th->status == THREAD_RUNNABLE)
2270 th->running_time_us += TIME_QUANTUM_USEC;
2271
2272 VM_ASSERT(th->ec->cfp);
2273 EXEC_EVENT_HOOK(th->ec, RUBY_INTERNAL_EVENT_SWITCH, th->ec->cfp->self,
2274 0, 0, 0, Qundef);
2275
2276 rb_thread_schedule_limits(limits_us);
2277 }
2278 }
2279 return ret;
2280}
2281
2282void
2283 rb_thread_execute_interrupts(VALUE thval)
2284{
2285 rb_threadptr_execute_interrupts(rb_thread_ptr(thval), 1);
2286}
2287
2288static void
2289rb_threadptr_ready(rb_thread_t *th)
2290{
2291 rb_threadptr_interrupt(th);
2292}
2293
2294static VALUE
2295rb_threadptr_raise(rb_thread_t *target_th, int argc, VALUE *argv)
2296{
2297 VALUE exc;
2298
2299 if (rb_threadptr_dead(target_th)) {
2300 return Qnil;
2301 }
2302
2303 if (argc == 0) {
2304 exc = rb_exc_new(rb_eRuntimeError, 0, 0);
2305 }
2306 else {
2307 exc = rb_make_exception(argc, argv);
2308 }
2309
2310 /* making an exception object can switch thread,
2311 so we need to check thread deadness again */
2312 if (rb_threadptr_dead(target_th)) {
2313 return Qnil;
2314 }
2315
2316 rb_ec_setup_exception(GET_EC(), exc, Qundef);
2317 rb_threadptr_pending_interrupt_enque(target_th, exc);
2318 rb_threadptr_interrupt(target_th);
2319 return Qnil;
2320}
2321
2322void
2323 rb_threadptr_signal_raise(rb_thread_t *th, int sig)
2324{
2325 VALUE argv[2];
2326
2327 argv[0] = rb_eSignal;
2328 argv[1] = INT2FIX(sig);
2329 rb_threadptr_raise(th->vm->main_thread, 2, argv);
2330}
2331
2332void
2333 rb_threadptr_signal_exit(rb_thread_t *th)
2334{
2335 VALUE argv[2];
2336
2337 argv[0] = rb_eSystemExit;
2338 argv[1] = rb_str_new2("exit");
2339 rb_threadptr_raise(th->vm->main_thread, 2, argv);
2340}
2341
2342int
2343 rb_ec_set_raised(rb_execution_context_t *ec)
2344{
2345 if (ec->raised_flag & RAISED_EXCEPTION) {
2346 return 1;
2347 }
2348 ec->raised_flag |= RAISED_EXCEPTION;
2349 return 0;
2350}
2351
2352int
2353 rb_ec_reset_raised(rb_execution_context_t *ec)
2354{
2355 if (!(ec->raised_flag & RAISED_EXCEPTION)) {
2356 return 0;
2357 }
2358 ec->raised_flag &= ~RAISED_EXCEPTION;
2359 return 1;
2360}
2361
2362int
2363rb_notify_fd_close(int fd, struct list_head *busy)
2364{
2365 rb_vm_t *vm = GET_THREAD()->vm;
2366 struct waiting_fd *wfd = 0, *next;
2367
2368 list_for_each_safe(&vm->waiting_fds, wfd, next, wfd_node) {
2369 if (wfd->fd == fd) {
2370 rb_thread_t *th = wfd->th;
2371 VALUE err;
2372
2373 list_del(&wfd->wfd_node);
2374 list_add(busy, &wfd->wfd_node);
2375
2376 err = th->vm->special_exceptions[ruby_error_stream_closed];
2377 rb_threadptr_pending_interrupt_enque(th, err);
2378 rb_threadptr_interrupt(th);
2379 }
2380 }
2381 return !list_empty(busy);
2382}
2383
2384void
2385 rb_thread_fd_close(int fd)
2386{
2387 struct list_head busy;
2388
2389 list_head_init(&busy);
2390 if (rb_notify_fd_close(fd, &busy)) {
2391 do rb_thread_schedule(); while (!list_empty(&busy));
2392 }
2393}
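/*
 * The effect of this machinery is visible from Ruby: closing an IO that
 * another thread is blocked on wakes that thread with an IOError. A rough
 * sketch (the exact message may vary between versions):
 *
 *    r, w = IO.pipe
 *    th = Thread.new { r.read }   # blocks waiting on the pipe
 *    sleep 0.1
 *    r.close                      # wakes the reader via rb_thread_fd_close
 *    begin
 *      th.join
 *    rescue IOError => e
 *      e.message                  #=> "stream closed in another thread"
 *    end
 */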
2394
2395/*
2396 * call-seq:
2397 * thr.raise
2398 * thr.raise(string)
2399 * thr.raise(exception [, string [, array]])
2400 *
2401 * Raises an exception from the given thread. The caller does not have to be
2402 * +thr+. See Kernel#raise for more information.
2403 *
2404 * Thread.abort_on_exception = true
2405 * a = Thread.new { sleep(200) }
2406 * a.raise("Gotcha")
2407 *
2408 * This will produce:
2409 *
2410 * prog.rb:3: Gotcha (RuntimeError)
2411 * from prog.rb:2:in `initialize'
2412 * from prog.rb:2:in `new'
2413 * from prog.rb:2
2414 */
2415
2416static VALUE
2417thread_raise_m(int argc, VALUE *argv, VALUE self)
2418{
2419 rb_thread_t *target_th = rb_thread_ptr(self);
2420 const rb_thread_t *current_th = GET_THREAD();
2421
2422 threadptr_check_pending_interrupt_queue(target_th);
2423 rb_threadptr_raise(target_th, argc, argv);
2424
2425 /* To perform Thread.current.raise as Kernel.raise */
2426 if (current_th == target_th) {
2427 RUBY_VM_CHECK_INTS(target_th->ec);
2428 }
2429 return Qnil;
2430}
2431
2432
2433/*
2434 * call-seq:
2435 * thr.exit -> thr
2436 * thr.kill -> thr
2437 * thr.terminate -> thr
2438 *
2439 * Terminates +thr+ and schedules another thread to be run, returning
2440 * the terminated Thread. If this is the main thread, or the last
2441 * thread, exits the process.
2442 */
2443
2444VALUE
2445 rb_thread_kill(VALUE thread)
2446{
2447 rb_thread_t *th = rb_thread_ptr(thread);
2448
2449 if (th->to_kill || th->status == THREAD_KILLED) {
2450 return thread;
2451 }
2452 if (th == th->vm->main_thread) {
2453 rb_exit(EXIT_SUCCESS);
2454 }
2455
2456 thread_debug("rb_thread_kill: %p (%"PRI_THREAD_ID")\n", (void *)th, thread_id_str(th));
2457
2458 if (th == GET_THREAD()) {
2459 /* kill myself immediately */
2460 rb_threadptr_to_kill(th);
2461 }
2462 else {
2463 threadptr_check_pending_interrupt_queue(th);
2464 rb_threadptr_pending_interrupt_enque(th, eKillSignal);
2465 rb_threadptr_interrupt(th);
2466 }
2467 return thread;
2468}
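/*
 * A small Ruby usage sketch for Thread#kill and friends:
 *
 *    th = Thread.new { sleep }
 *    th.kill                      #=> the (now dying) Thread
 *    th.join
 *    th.alive?                    #=> false
 */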
2469
2470int
2471 rb_thread_to_be_killed(VALUE thread)
2472{
2473 rb_thread_t *th = rb_thread_ptr(thread);
2474
2475 if (th->to_kill || th->status == THREAD_KILLED) {
2476 return TRUE;
2477 }
2478 return FALSE;
2479}
2480
2481/*
2482 * call-seq:
2483 * Thread.kill(thread) -> thread
2484 *
2485 * Causes the given +thread+ to exit, see also Thread::exit.
2486 *
2487 * count = 0
2488 * a = Thread.new { loop { count += 1 } }
2489 * sleep(0.1) #=> 0
2490 * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
2491 * count #=> 93947
2492 * a.alive? #=> false
2493 */
2494
2495static VALUE
2496rb_thread_s_kill(VALUE obj, VALUE th)
2497{
2498 return rb_thread_kill(th);
2499}
2500
2501
2502/*
2503 * call-seq:
2504 * Thread.exit -> thread
2505 *
2506 * Terminates the currently running thread and schedules another thread to be
2507 * run.
2508 *
2509 * If this thread is already marked to be killed, ::exit returns the Thread.
2510 *
2511 * If this is the main thread, or the last thread, exit the process.
2512 */
2513
2514static VALUE
2515rb_thread_exit(VALUE _)
2516{
2517 rb_thread_t *th = GET_THREAD();
2518 return rb_thread_kill(th->self);
2519}
2520
2521
2522/*
2523 * call-seq:
2524 * thr.wakeup -> thr
2525 *
2526 * Marks a given thread as eligible for scheduling; it may still
2527 * remain blocked on I/O, however.
2528 *
2529 * *Note:* This does not invoke the scheduler, see #run for more information.
2530 *
2531 * c = Thread.new { Thread.stop; puts "hey!" }
2532 * sleep 0.1 while c.status!='sleep'
2533 * c.wakeup
2534 * c.join
2535 * #=> "hey!"
2536 */
2537
2538VALUE
2539 rb_thread_wakeup(VALUE thread)
2540{
2541 if (!RTEST(rb_thread_wakeup_alive(thread))) {
2542 rb_raise(rb_eThreadError, "killed thread");
2543 }
2544 return thread;
2545}
2546
2547VALUE
2548 rb_thread_wakeup_alive(VALUE thread)
2549{
2550 rb_thread_t *target_th = rb_thread_ptr(thread);
2551 if (target_th->status == THREAD_KILLED) return Qnil;
2552
2553 rb_threadptr_ready(target_th);
2554
2555 if (target_th->status == THREAD_STOPPED ||
2556 target_th->status == THREAD_STOPPED_FOREVER) {
2557 target_th->status = THREAD_RUNNABLE;
2558 }
2559
2560 return thread;
2561}
2562
2563
2564/*
2565 * call-seq:
2566 * thr.run -> thr
2567 *
2568 * Wakes up +thr+, making it eligible for scheduling.
2569 *
2570 * a = Thread.new { puts "a"; Thread.stop; puts "c" }
2571 * sleep 0.1 while a.status!='sleep'
2572 * puts "Got here"
2573 * a.run
2574 * a.join
2575 *
2576 * This will produce:
2577 *
2578 * a
2579 * Got here
2580 * c
2581 *
2582 * See also the instance method #wakeup.
2583 */
2584
2585VALUE
2586 rb_thread_run(VALUE thread)
2587{
2588 rb_thread_wakeup(thread);
2589 rb_thread_schedule();
2590 return thread;
2591}
2592
2593
2594VALUE
2595 rb_thread_stop(void)
2596{
2597 if (rb_thread_alone()) {
2598 rb_raise(rb_eThreadError,
2599 "stopping only thread\n\tnote: use sleep to stop forever");
2600 }
2601 rb_thread_sleep_deadly();
2602 return Qnil;
2603}
2604
2605/*
2606 * call-seq:
2607 * Thread.stop -> nil
2608 *
2609 * Stops execution of the current thread, putting it into a ``sleep'' state,
2610 * and schedules execution of another thread.
2611 *
2612 * a = Thread.new { print "a"; Thread.stop; print "c" }
2613 * sleep 0.1 while a.status!='sleep'
2614 * print "b"
2615 * a.run
2616 * a.join
2617 * #=> "abc"
2618 */
2619
2620static VALUE
2621thread_stop(VALUE _)
2622{
2623 return rb_thread_stop();
2624}
2625
2626/********************************************************************/
2627
2628VALUE
2629 rb_thread_list(void)
2630{
2631 VALUE ary = rb_ary_new();
2632 rb_vm_t *vm = GET_THREAD()->vm;
2633 rb_thread_t *th = 0;
2634
2635 list_for_each(&vm->living_threads, th, vmlt_node) {
2636 switch (th->status) {
2637 case THREAD_RUNNABLE:
2638 case THREAD_STOPPED:
2639 case THREAD_STOPPED_FOREVER:
2640 rb_ary_push(ary, th->self);
2641 default:
2642 break;
2643 }
2644 }
2645 return ary;
2646}
2647
2648/*
2649 * call-seq:
2650 * Thread.list -> array
2651 *
2652 * Returns an array of Thread objects for all threads that are either runnable
2653 * or stopped.
2654 *
2655 * Thread.new { sleep(200) }
2656 * Thread.new { 1000000.times {|i| i*i } }
2657 * Thread.new { Thread.stop }
2658 * Thread.list.each {|t| p t}
2659 *
2660 * This will produce:
2661 *
2662 * #<Thread:0x401b3e84 sleep>
2663 * #<Thread:0x401b3f38 run>
2664 * #<Thread:0x401b3fb0 sleep>
2665 * #<Thread:0x401bdf4c run>
2666 */
2667
2668static VALUE
2669thread_list(VALUE _)
2670{
2671 return rb_thread_list();
2672}
2673
2674VALUE
2675 rb_thread_current(void)
2676{
2677 return GET_THREAD()->self;
2678}
2679
2680/*
2681 * call-seq:
2682 * Thread.current -> thread
2683 *
2684 * Returns the currently executing thread.
2685 *
2686 * Thread.current #=> #<Thread:0x401bdf4c run>
2687 */
2688
2689static VALUE
2690thread_s_current(VALUE klass)
2691{
2692 return rb_thread_current();
2693}
2694
2695VALUE
2696 rb_thread_main(void)
2697{
2698 return GET_THREAD()->vm->main_thread->self;
2699}
2700
2701/*
2702 * call-seq:
2703 * Thread.main -> thread
2704 *
2705 * Returns the main thread.
2706 */
2707
2708static VALUE
2709rb_thread_s_main(VALUE klass)
2710{
2711 return rb_thread_main();
2712}
2713
2714
2715/*
2716 * call-seq:
2717 * Thread.abort_on_exception -> true or false
2718 *
2719 * Returns the status of the global ``abort on exception'' condition.
2720 *
2721 * The default is +false+.
2722 *
2723 * When set to +true+, if any thread is aborted by an exception, the
2724 * raised exception will be re-raised in the main thread.
2725 *
2726 * Can also be specified by the global $DEBUG flag or command line option
2727 * +-d+.
2728 *
2729 * See also ::abort_on_exception=.
2730 *
2731 * There is also an instance level method to set this for a specific thread,
2732 * see #abort_on_exception.
2733 */
2734
2735static VALUE
2736rb_thread_s_abort_exc(VALUE _)
2737{
2738 return GET_THREAD()->vm->thread_abort_on_exception ? Qtrue : Qfalse;
2739}
2740
2741
2742/*
2743 * call-seq:
2744 * Thread.abort_on_exception= boolean -> true or false
2745 *
2746 * When set to +true+, if any thread is aborted by an exception, the
2747 * raised exception will be re-raised in the main thread.
2748 * Returns the new state.
2749 *
2750 * Thread.abort_on_exception = true
2751 * t1 = Thread.new do
2752 * puts "In new thread"
2753 * raise "Exception from thread"
2754 * end
2755 * sleep(1)
2756 * puts "not reached"
2757 *
2758 * This will produce:
2759 *
2760 * In new thread
2761 * prog.rb:4: Exception from thread (RuntimeError)
2762 * from prog.rb:2:in `initialize'
2763 * from prog.rb:2:in `new'
2764 * from prog.rb:2
2765 *
2766 * See also ::abort_on_exception.
2767 *
2768 * There is also an instance level method to set this for a specific thread,
2769 * see #abort_on_exception=.
2770 */
2771
2772static VALUE
2773rb_thread_s_abort_exc_set(VALUE self, VALUE val)
2774{
2775 GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
2776 return val;
2777}
2778
2779
2780/*
2781 * call-seq:
2782 * thr.abort_on_exception -> true or false
2783 *
2784 * Returns the status of the thread-local ``abort on exception'' condition for
2785 * this +thr+.
2786 *
2787 * The default is +false+.
2788 *
2789 * See also #abort_on_exception=.
2790 *
2791 * There is also a class level method to set this for all threads, see
2792 * ::abort_on_exception.
2793 */
2794
2795static VALUE
2796rb_thread_abort_exc(VALUE thread)
2797{
2798 return rb_thread_ptr(thread)->abort_on_exception ? Qtrue : Qfalse;
2799}
2800
2801
2802/*
2803 * call-seq:
2804 * thr.abort_on_exception= boolean -> true or false
2805 *
2806 * When set to +true+, if this +thr+ is aborted by an exception, the
2807 * raised exception will be re-raised in the main thread.
2808 *
2809 * See also #abort_on_exception.
2810 *
2811 * There is also a class level method to set this for all threads, see
2812 * ::abort_on_exception=.
2813 */
2814
2815static VALUE
2816rb_thread_abort_exc_set(VALUE thread, VALUE val)
2817{
2818 rb_thread_ptr(thread)->abort_on_exception = RTEST(val);
2819 return val;
2820}
2821
2822
2823/*
2824 * call-seq:
2825 * Thread.report_on_exception -> true or false
2826 *
2827 * Returns the status of the global ``report on exception'' condition.
2828 *
2829 * The default is +true+ since Ruby 2.5.
2830 *
2831 * All threads created when this flag is true will report
2832 * a message on $stderr if an exception kills the thread.
2833 *
2834 * Thread.new { 1.times { raise } }
2835 *
2836 * will produce this output on $stderr:
2837 *
2838 * #<Thread:...> terminated with exception (report_on_exception is true):
2839 * Traceback (most recent call last):
2840 * 2: from -e:1:in `block in <main>'
2841 * 1: from -e:1:in `times'
2842 *
2843 * This is done to catch errors in threads early.
2844 * In some cases, you might not want this output.
2845 * There are multiple ways to avoid the extra output:
2846 *
2847 * * If the exception is not intended, the best approach is to fix the cause
2848 * of the exception so it does not happen anymore.
2849 * * If the exception is intended, it might be better to rescue it closer to
2850 * where it is raised rather than let it kill the Thread.
2851 * * If it is guaranteed the Thread will be joined with Thread#join or
2852 * Thread#value, then it is safe to disable this report with
2853 * <code>Thread.current.report_on_exception = false</code>
2854 * when starting the Thread.
2855 * However, this might handle the exception much later, or not at all
2856 * if the Thread is never joined due to the parent thread being blocked, etc.
2857 *
2858 * See also ::report_on_exception=.
2859 *
2860 * There is also an instance level method to set this for a specific thread,
2861 * see #report_on_exception=.
2862 *
2863 */
2864
2865static VALUE
2866rb_thread_s_report_exc(VALUE _)
2867{
2868 return GET_THREAD()->vm->thread_report_on_exception ? Qtrue : Qfalse;
2869}
2870
2871
2872/*
2873 * call-seq:
2874 * Thread.report_on_exception= boolean -> true or false
2875 *
2876 * Returns the new state.
2877 * When set to +true+, all threads created afterwards will inherit the
2878 * condition and report a message on $stderr if an exception kills a thread:
2879 *
2880 * Thread.report_on_exception = true
2881 * t1 = Thread.new do
2882 * puts "In new thread"
2883 * raise "Exception from thread"
2884 * end
2885 * sleep(1)
2886 * puts "In the main thread"
2887 *
2888 * This will produce:
2889 *
2890 * In new thread
2891 * #<Thread:...prog.rb:2> terminated with exception (report_on_exception is true):
2892 * Traceback (most recent call last):
2893 * prog.rb:4:in `block in <main>': Exception from thread (RuntimeError)
2894 * In the main thread
2895 *
2896 * See also ::report_on_exception.
2897 *
2898 * There is also an instance level method to set this for a specific thread,
2899 * see #report_on_exception=.
2900 */
2901
2902static VALUE
2903rb_thread_s_report_exc_set(VALUE self, VALUE val)
2904{
2905 GET_THREAD()->vm->thread_report_on_exception = RTEST(val);
2906 return val;
2907}
2908
2909
2910/*
2911 * call-seq:
2912 * thr.report_on_exception -> true or false
2913 *
2914 * Returns the status of the thread-local ``report on exception'' condition for
2915 * this +thr+.
2916 *
2917 * The default value when creating a Thread is the value of
2918 * the global flag Thread.report_on_exception.
2919 *
2920 * See also #report_on_exception=.
2921 *
2922 * There is also a class level method to set this for all new threads, see
2923 * ::report_on_exception=.
2924 */
2925
2926static VALUE
2927rb_thread_report_exc(VALUE thread)
2928{
2929 return rb_thread_ptr(thread)->report_on_exception ? Qtrue : Qfalse;
2930}
2931
2932
2933/*
2934 * call-seq:
2935 * thr.report_on_exception= boolean -> true or false
2936 *
2937 * When set to +true+, a message is printed on $stderr if an exception
2938 * kills this +thr+. See ::report_on_exception for details.
2939 *
2940 * See also #report_on_exception.
2941 *
2942 * There is also a class level method to set this for all new threads, see
2943 * ::report_on_exception=.
2944 */
2945
2946static VALUE
2947rb_thread_report_exc_set(VALUE thread, VALUE val)
2948{
2949 rb_thread_ptr(thread)->report_on_exception = RTEST(val);
2950 return val;
2951}
2952
2953
2954/*
2955 * call-seq:
2956 * thr.group -> thgrp or nil
2957 *
2958 * Returns the ThreadGroup which contains the given thread, or returns +nil+
2959 * if +thr+ is not a member of any group.
2960 *
2961 * Thread.main.group #=> #<ThreadGroup:0x4029d914>
2962 */
2963
2964VALUE
2965 rb_thread_group(VALUE thread)
2966{
2967 VALUE group = rb_thread_ptr(thread)->thgroup;
2968 return group == 0 ? Qnil : group;
2969}
2970
2971static const char *
2972thread_status_name(rb_thread_t *th, int detail)
2973{
2974 switch (th->status) {
2975 case THREAD_RUNNABLE:
2976 return th->to_kill ? "aborting" : "run";
2977 case THREAD_STOPPED_FOREVER:
2978 if (detail) return "sleep_forever";
2979 case THREAD_STOPPED:
2980 return "sleep";
2981 case THREAD_KILLED:
2982 return "dead";
2983 default:
2984 return "unknown";
2985 }
2986}
2987
2988static int
2989rb_threadptr_dead(rb_thread_t *th)
2990{
2991 return th->status == THREAD_KILLED;
2992}
2993
2994
2995/*
2996 * call-seq:
2997 * thr.status -> string, false or nil
2998 *
2999 * Returns the status of +thr+.
3000 *
3001 * [<tt>"sleep"</tt>]
3002 * Returned if this thread is sleeping or waiting on I/O
3003 * [<tt>"run"</tt>]
3004 * When this thread is executing
3005 * [<tt>"aborting"</tt>]
3006 * If this thread is aborting
3007 * [+false+]
3008 * When this thread is terminated normally
3009 * [+nil+]
3010 * If terminated with an exception.
3011 *
3012 * a = Thread.new { raise("die now") }
3013 * b = Thread.new { Thread.stop }
3014 * c = Thread.new { Thread.exit }
3015 * d = Thread.new { sleep }
3016 * d.kill #=> #<Thread:0x401b3678 aborting>
3017 * a.status #=> nil
3018 * b.status #=> "sleep"
3019 * c.status #=> false
3020 * d.status #=> "aborting"
3021 * Thread.current.status #=> "run"
3022 *
3023 * See also the instance methods #alive? and #stop?
3024 */
3025
3026static VALUE
3027rb_thread_status(VALUE thread)
3028{
3029 rb_thread_t *target_th = rb_thread_ptr(thread);
3030
3031 if (rb_threadptr_dead(target_th)) {
3032 if (!NIL_P(target_th->ec->errinfo) &&
3033 !FIXNUM_P(target_th->ec->errinfo)) {
3034 return Qnil;
3035 }
3036 else {
3037 return Qfalse;
3038 }
3039 }
3040 else {
3041 return rb_str_new2(thread_status_name(target_th, FALSE));
3042 }
3043}
3044
3045
3046/*
3047 * call-seq:
3048 * thr.alive? -> true or false
3049 *
3050 * Returns +true+ if +thr+ is running or sleeping.
3051 *
3052 * thr = Thread.new { }
3053 * thr.join #=> #<Thread:0x401b3fb0 dead>
3054 * Thread.current.alive? #=> true
3055 * thr.alive? #=> false
3056 *
3057 * See also #stop? and #status.
3058 */
3059
3060static VALUE
3061rb_thread_alive_p(VALUE thread)
3062{
3063 if (rb_threadptr_dead(rb_thread_ptr(thread))) {
3064 return Qfalse;
3065 }
3066 else {
3067 return Qtrue;
3068 }
3069}
3070
3071/*
3072 * call-seq:
3073 * thr.stop? -> true or false
3074 *
3075 * Returns +true+ if +thr+ is dead or sleeping.
3076 *
3077 * a = Thread.new { Thread.stop }
3078 * b = Thread.current
3079 * a.stop? #=> true
3080 * b.stop? #=> false
3081 *
3082 * See also #alive? and #status.
3083 */
3084
3085static VALUE
3086rb_thread_stop_p(VALUE thread)
3087{
3088 rb_thread_t *th = rb_thread_ptr(thread);
3089
3090 if (rb_threadptr_dead(th)) {
3091 return Qtrue;
3092 }
3093 else if (th->status == THREAD_STOPPED ||
3094 th->status == THREAD_STOPPED_FOREVER) {
3095 return Qtrue;
3096 }
3097 else {
3098 return Qfalse;
3099 }
3100}
3101
3102/*
3103 * call-seq:
3104 * thr.safe_level -> integer
3105 *
3106 * Returns the safe level.
3107 *
3108 * This method is obsolete because $SAFE is a process global state.
3109 * Simply check $SAFE.
3110 */
3111
3112static VALUE
3113rb_thread_safe_level(VALUE thread)
3114{
3115 rb_warn("Thread#safe_level will be removed in Ruby 3.0");
3116 return UINT2NUM(GET_VM()->safe_level_);
3117}
3118
3119/*
3120 * call-seq:
3121 * thr.name -> string
3122 *
3123 * Returns the name of the thread, or +nil+ if no name has been set.
3124 */
3125
3126static VALUE
3127rb_thread_getname(VALUE thread)
3128{
3129 return rb_thread_ptr(thread)->name;
3130}
3131
3132/*
3133 * call-seq:
3134 * thr.name=(name) -> string
3135 *
3136 * Sets the name of the thread to the given string. On some platforms,
3137 * the name may also be applied to the underlying pthread and/or made visible to the kernel.
3138 */
3139
3140static VALUE
3141rb_thread_setname(VALUE thread, VALUE name)
3142{
3143 rb_thread_t *target_th = rb_thread_ptr(thread);
3144
3145 if (!NIL_P(name)) {
3146 rb_encoding *enc;
3147 StringValueCStr(name);
3148 enc = rb_enc_get(name);
3149 if (!rb_enc_asciicompat(enc)) {
3150 rb_raise(rb_eArgError, "ASCII incompatible encoding (%s)",
3151 rb_enc_name(enc));
3152 }
3153 name = rb_str_new_frozen(name);
3154 }
3155 target_th->name = name;
3156 if (threadptr_initialized(target_th)) {
3157 native_set_another_thread_name(target_th->thread_id, name);
3158 }
3159 return name;
3160}
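/*
 * A short Ruby sketch; whether the name also reaches the native thread is
 * platform-dependent, as noted above:
 *
 *    th = Thread.new { sleep }
 *    th.name = "worker-1"
 *    th.name                      #=> "worker-1"
 *    th.to_s                      #=> "#<Thread:0x...@worker-1 ... sleep_forever>"
 */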
3161
3162/*
3163 * call-seq:
3164 * thr.to_s -> string
3165 *
3166 * Dump the name, id, and status of _thr_ to a string.
3167 */
3168
3169static VALUE
3170rb_thread_to_s(VALUE thread)
3171{
3172 VALUE cname = rb_class_path(rb_obj_class(thread));
3173 rb_thread_t *target_th = rb_thread_ptr(thread);
3174 const char *status;
3175 VALUE str, loc;
3176
3177 status = thread_status_name(target_th, TRUE);
3178 str = rb_sprintf("#<%"PRIsVALUE":%p", cname, (void *)thread);
3179 if (!NIL_P(target_th->name)) {
3180 rb_str_catf(str, "@%"PRIsVALUE, target_th->name);
3181 }
3182 if ((loc = threadptr_invoke_proc_location(target_th)) != Qnil) {
3183 rb_str_catf(str, " %"PRIsVALUE":%"PRIsVALUE,
3184 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
3185 rb_gc_force_recycle(loc);
3186 }
3187 rb_str_catf(str, " %s>", status);
3188
3189 return str;
3190}
3191
3192/* variables for recursive traversals */
3193static ID recursive_key;
3194
3195static VALUE
3196threadptr_local_aref(rb_thread_t *th, ID id)
3197{
3198 if (id == recursive_key) {
3199 return th->ec->local_storage_recursive_hash;
3200 }
3201 else {
3202 st_data_t val;
3203 st_table *local_storage = th->ec->local_storage;
3204
3205 if (local_storage != NULL && st_lookup(local_storage, id, &val)) {
3206 return (VALUE)val;
3207 }
3208 else {
3209 return Qnil;
3210 }
3211 }
3212}
3213
3214VALUE
3215 rb_thread_local_aref(VALUE thread, ID id)
3216{
3217 return threadptr_local_aref(rb_thread_ptr(thread), id);
3218}
3219
3220/*
3221 * call-seq:
3222 * thr[sym] -> obj or nil
3223 *
3224 * Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
3225 * if not explicitly inside a Fiber), using either a symbol or a string name.
3226 * If the specified variable does not exist, returns +nil+.
3227 *
3228 * [
3229 * Thread.new { Thread.current["name"] = "A" },
3230 * Thread.new { Thread.current[:name] = "B" },
3231 * Thread.new { Thread.current["name"] = "C" }
3232 * ].each do |th|
3233 * th.join
3234 * puts "#{th.inspect}: #{th[:name]}"
3235 * end
3236 *
3237 * This will produce:
3238 *
3239 * #<Thread:0x00000002a54220 dead>: A
3240 * #<Thread:0x00000002a541a8 dead>: B
3241 * #<Thread:0x00000002a54130 dead>: C
3242 *
3243 * Thread#[] and Thread#[]= are not thread-local but fiber-local.
3244 * This confusion did not exist in Ruby 1.8 because
3245 * fibers are only available since Ruby 1.9.
3246 * Ruby 1.9 made these methods fiber-local in order to preserve the
3247 * following idiom for dynamic scope.
3248 *
3249 * def meth(newvalue)
3250 * begin
3251 * oldvalue = Thread.current[:name]
3252 * Thread.current[:name] = newvalue
3253 * yield
3254 * ensure
3255 * Thread.current[:name] = oldvalue
3256 * end
3257 * end
3258 *
3259 * The idiom may not work as dynamic scope if the methods are thread-local
3260 * and a given block switches fiber.
3261 *
3262 * f = Fiber.new {
3263 * meth(1) {
3264 * Fiber.yield
3265 * }
3266 * }
3267 * meth(2) {
3268 * f.resume
3269 * }
3270 * f.resume
3271 * p Thread.current[:name]
3272 * #=> nil if fiber-local
3273 * #=> 2 if thread-local (The value 2 is leaked to outside of meth method.)
3274 *
3275 * For thread-local variables, please see #thread_variable_get and
3276 * #thread_variable_set.
3277 *
3278 */
3279
3280static VALUE
3281rb_thread_aref(VALUE thread, VALUE key)
3282{
3283 ID id = rb_check_id(&key);
3284 if (!id) return Qnil;
3285 return rb_thread_local_aref(thread, id);
3286}
3287
3288/*
3289 * call-seq:
3290 * thr.fetch(sym) -> obj
3291 * thr.fetch(sym) { } -> obj
3292 * thr.fetch(sym, default) -> obj
3293 *
3294 * Returns a fiber-local for the given key. If the key can't be
3295 * found, there are several options: With no other arguments, it will
3296 * raise a KeyError exception; if <i>default</i> is given, then that
3297 * will be returned; if the optional code block is specified, then
3298 * that will be run and its result returned. See Thread#[] and
3299 * Hash#fetch.
3300 */
3301static VALUE
3302rb_thread_fetch(int argc, VALUE *argv, VALUE self)
3303{
3304 VALUE key, val;
3305 ID id;
3306 rb_thread_t *target_th = rb_thread_ptr(self);
3307 int block_given;
3308
3309 rb_check_arity(argc, 1, 2);
3310 key = argv[0];
3311
3312 block_given = rb_block_given_p();
3313 if (block_given && argc == 2) {
3314 rb_warn("block supersedes default value argument");
3315 }
3316
3317 id = rb_check_id(&key);
3318
3319 if (id == recursive_key) {
3320 return target_th->ec->local_storage_recursive_hash;
3321 }
3322 else if (id && target_th->ec->local_storage &&
3323 st_lookup(target_th->ec->local_storage, id, &val)) {
3324 return val;
3325 }
3326 else if (block_given) {
3327 return rb_yield(key);
3328 }
3329 else if (argc == 1) {
3330 rb_key_err_raise(rb_sprintf("key not found: %+"PRIsVALUE, key), self, key);
3331 }
3332 else {
3333 return argv[1];
3334 }
3335}
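/*
 * A compact Ruby sketch of the three lookup modes (the key names are
 * arbitrary):
 *
 *    th = Thread.new { Thread.current[:answer] = 42 }.join
 *    th.fetch(:answer)                 #=> 42
 *    th.fetch(:missing, :default)      #=> :default
 *    th.fetch(:missing) { |key| key }  #=> :missing
 *    th.fetch(:missing)                # raises KeyError
 */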
3336
3337static VALUE
3338threadptr_local_aset(rb_thread_t *th, ID id, VALUE val)
3339{
3340 if (id == recursive_key) {
3341 th->ec->local_storage_recursive_hash = val;
3342 return val;
3343 }
3344 else {
3345 st_table *local_storage = th->ec->local_storage;
3346
3347 if (NIL_P(val)) {
3348 if (!local_storage) return Qnil;
3349 st_delete_wrap(local_storage, id);
3350 return Qnil;
3351 }
3352 else {
3353 if (local_storage == NULL) {
3354 th->ec->local_storage = local_storage = st_init_numtable();
3355 }
3356 st_insert(local_storage, id, val);
3357 return val;
3358 }
3359 }
3360}
3361
3362VALUE
3363 rb_thread_local_aset(VALUE thread, ID id, VALUE val)
3364{
3365 if (OBJ_FROZEN(thread)) {
3366 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3367 }
3368
3369 return threadptr_local_aset(rb_thread_ptr(thread), id, val);
3370}
3371
3372/*
3373 * call-seq:
3374 * thr[sym] = obj -> obj
3375 *
3376 * Attribute Assignment---Sets or creates the value of a fiber-local variable,
3377 * using either a symbol or a string.
3378 *
3379 * See also Thread#[].
3380 *
3381 * For thread-local variables, please see #thread_variable_set and
3382 * #thread_variable_get.
3383 */
3384
3385static VALUE
3386rb_thread_aset(VALUE self, VALUE id, VALUE val)
3387{
3388 return rb_thread_local_aset(self, rb_to_id(id), val);
3389}
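/*
 * A one-line Ruby sketch of fiber-local assignment:
 *
 *    th = Thread.new { Thread.current[:state] = "done" }.join
 *    th[:state]                   #=> "done"
 */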
3390
3391/*
3392 * call-seq:
3393 * thr.thread_variable_get(key) -> obj or nil
3394 *
3395 * Returns the value of a thread local variable that has been set. Note that
3396 * these are different than fiber local values. For fiber local values,
3397 * please see Thread#[] and Thread#[]=.
3398 *
3399 * Thread local values are carried along with threads, and do not respect
3400 * fibers. For example:
3401 *
3402 * Thread.new {
3403 * Thread.current.thread_variable_set("foo", "bar") # set a thread local
3404 * Thread.current["foo"] = "bar" # set a fiber local
3405 *
3406 * Fiber.new {
3407 * Fiber.yield [
3408 * Thread.current.thread_variable_get("foo"), # get the thread local
3409 * Thread.current["foo"], # get the fiber local
3410 * ]
3411 * }.resume
3412 * }.join.value # => ['bar', nil]
3413 *
3414 * The value "bar" is returned for the thread local, whereas nil is returned
3415 * for the fiber local. The fiber is executed in the same thread, so the
3416 * thread local values are available.
3417 */
3418
3419static VALUE
3420rb_thread_variable_get(VALUE thread, VALUE key)
3421{
3422 VALUE locals;
3423
3424 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3425 return Qnil;
3426 }
3427 locals = rb_thread_local_storage(thread);
3428 return rb_hash_aref(locals, rb_to_symbol(key));
3429}
3430
3431/*
3432 * call-seq:
3433 * thr.thread_variable_set(key, value)
3434 *
3435 * Sets a thread local with +key+ to +value+. Note that these are local to
3436 * threads, and not to fibers. Please see Thread#thread_variable_get and
3437 * Thread#[] for more information.
3438 */
3439
3440static VALUE
3441rb_thread_variable_set(VALUE thread, VALUE id, VALUE val)
3442{
3443 VALUE locals;
3444
3445 if (OBJ_FROZEN(thread)) {
3446 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3447 }
3448
3449 locals = rb_thread_local_storage(thread);
3450 return rb_hash_aset(locals, rb_to_symbol(id), val);
3451}
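/*
 * A short Ruby sketch showing that thread variables are shared across the
 * fibers of one thread (the key name is arbitrary):
 *
 *    th = Thread.new do
 *      Thread.current.thread_variable_set(:count, 1)
 *      Fiber.new {
 *        Thread.current.thread_variable_set(:count, 2)  # same storage
 *      }.resume
 *      Thread.current.thread_variable_get(:count)
 *    end
 *    th.value                     #=> 2
 */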
3452
3453/*
3454 * call-seq:
3455 * thr.key?(sym) -> true or false
3456 *
3457 * Returns +true+ if the given string (or symbol) exists as a fiber-local
3458 * variable.
3459 *
3460 * me = Thread.current
3461 * me[:oliver] = "a"
3462 * me.key?(:oliver) #=> true
3463 * me.key?(:stanley) #=> false
3464 */
3465
3466static VALUE
3467rb_thread_key_p(VALUE self, VALUE key)
3468{
3469 ID id = rb_check_id(&key);
3470 st_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3471
3472 if (!id || local_storage == NULL) {
3473 return Qfalse;
3474 }
3475 else if (st_is_member(local_storage, id)) {
3476 return Qtrue;
3477 }
3478 else {
3479 return Qfalse;
3480 }
3481}
3482
3483static int
3484thread_keys_i(ID key, VALUE value, VALUE ary)
3485{
3486 rb_ary_push(ary, ID2SYM(key));
3487 return ST_CONTINUE;
3488}
3489
3490int
3491 rb_thread_alone(void)
3492{
3493 return vm_living_thread_num(GET_VM()) == 1;
3494}
3495
3496/*
3497 * call-seq:
3498 * thr.keys -> array
3499 *
3500 * Returns an array of the names of the fiber-local variables (as Symbols).
3501 *
3502 * thr = Thread.new do
3503 * Thread.current[:cat] = 'meow'
3504 * Thread.current["dog"] = 'woof'
3505 * end
3506 * thr.join #=> #<Thread:0x401b3f10 dead>
3507 * thr.keys #=> [:dog, :cat]
3508 */
3509
3510static VALUE
3511rb_thread_keys(VALUE self)
3512{
3513 st_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3514 VALUE ary = rb_ary_new();
3515
3516 if (local_storage) {
3517 st_foreach(local_storage, thread_keys_i, ary);
3518 }
3519 return ary;
3520}
3521
3522static int
3523keys_i(VALUE key, VALUE value, VALUE ary)
3524{
3525 rb_ary_push(ary, key);
3526 return ST_CONTINUE;
3527}
3528
3529/*
3530 * call-seq:
3531 * thr.thread_variables -> array
3532 *
3533 * Returns an array of the names of the thread-local variables (as Symbols).
3534 *
3535 * thr = Thread.new do
3536 * Thread.current.thread_variable_set(:cat, 'meow')
3537 * Thread.current.thread_variable_set("dog", 'woof')
3538 * end
3539 * thr.join #=> #<Thread:0x401b3f10 dead>
3540 * thr.thread_variables #=> [:dog, :cat]
3541 *
3542 * Note that these are not fiber local variables. Please see Thread#[] and
3543 * Thread#thread_variable_get for more details.
3544 */
3545
3546static VALUE
3547rb_thread_variables(VALUE thread)
3548{
3549 VALUE locals;
3550 VALUE ary;
3551
3552 ary = rb_ary_new();
3553 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3554 return ary;
3555 }
3556 locals = rb_thread_local_storage(thread);
3557 rb_hash_foreach(locals, keys_i, ary);
3558
3559 return ary;
3560}
3561
3562/*
3563 * call-seq:
3564 * thr.thread_variable?(key) -> true or false
3565 *
3566 * Returns +true+ if the given string (or symbol) exists as a thread-local
3567 * variable.
3568 *
3569 * me = Thread.current
3570 * me.thread_variable_set(:oliver, "a")
3571 * me.thread_variable?(:oliver) #=> true
3572 * me.thread_variable?(:stanley) #=> false
3573 *
3574 * Note that these are not fiber local variables. Please see Thread#[] and
3575 * Thread#thread_variable_get for more details.
3576 */
3577
3578static VALUE
3579rb_thread_variable_p(VALUE thread, VALUE key)
3580{
3581 VALUE locals;
3582 ID id = rb_check_id(&key);
3583
3584 if (!id) return Qfalse;
3585
3586 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3587 return Qfalse;
3588 }
3589 locals = rb_thread_local_storage(thread);
3590
3591 if (rb_hash_lookup(locals, ID2SYM(id)) != Qnil) {
3592 return Qtrue;
3593 }
3594 else {
3595 return Qfalse;
3596 }
3597
3598 return Qfalse;
3599}
3600
3601/*
3602 * call-seq:
3603 * thr.priority -> integer
3604 *
3605 * Returns the priority of <i>thr</i>. The default value is inherited from
3606 * the thread that created the new thread, or zero for the
3607 * initial main thread; higher-priority threads will run more frequently
3608 * than lower-priority threads (but lower-priority threads can also run).
3609 *
3610 * This is just a hint for the Ruby thread scheduler. It may be ignored on
3611 * some platforms.
3612 *
3613 * Thread.current.priority #=> 0
3614 */
3615
3616static VALUE
3617rb_thread_priority(VALUE thread)
3618{
3619 return INT2NUM(rb_thread_ptr(thread)->priority);
3620}
3621
3622
3623/*
3624 * call-seq:
3625 * thr.priority= integer -> thr
3626 *
3627 * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
3628 * will run more frequently than lower-priority threads (but lower-priority
3629 * threads can also run).
3630 *
3631 * This is just a hint for the Ruby thread scheduler. It may be ignored on
3632 * some platforms.
3633 *
3634 * count1 = count2 = 0
3635 * a = Thread.new do
3636 * loop { count1 += 1 }
3637 * end
3638 * a.priority = -1
3639 *
3640 * b = Thread.new do
3641 * loop { count2 += 1 }
3642 * end
3643 * b.priority = -2
3644 * sleep 1 #=> 1
3645 * count1 #=> 622504
3646 * count2 #=> 5832
3647 */
3648
3649static VALUE
3650rb_thread_priority_set(VALUE thread, VALUE prio)
3651{
3652 rb_thread_t *target_th = rb_thread_ptr(thread);
3653 int priority;
3654
3655#if USE_NATIVE_THREAD_PRIORITY
3656 target_th->priority = NUM2INT(prio);
3657 native_thread_apply_priority(th);
3658#else
3659 priority = NUM2INT(prio);
3660 if (priority > RUBY_THREAD_PRIORITY_MAX) {
3661 priority = RUBY_THREAD_PRIORITY_MAX;
3662 }
3663 else if (priority < RUBY_THREAD_PRIORITY_MIN) {
3664 priority = RUBY_THREAD_PRIORITY_MIN;
3665 }
3666 target_th->priority = (int8_t)priority;
3667#endif
3668 return INT2NUM(target_th->priority);
3669}
3670
3671/* for IO */
3672
3673#if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
3674
3675/*
3676 * several Unix platforms support file descriptors bigger than FD_SETSIZE
3677 * in select(2) system call.
3678 *
3679 * - Linux 2.2.12 (?)
3680 * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
3681 * select(2) documents how to allocate fd_set dynamically.
3682 * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
3683 * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
3684 * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
3685 * select(2) documents how to allocate fd_set dynamically.
3686 * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
3687 * - HP-UX documents how to allocate fd_set dynamically.
3688 * http://docs.hp.com/en/B2355-60105/select.2.html
3689 * - Solaris 8 has select_large_fdset
3690 * - Mac OS X 10.7 (Lion)
3691 * select(2) returns EINVAL if nfds is greater than FD_SET_SIZE and
3692 * _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
3693 * http://developer.apple.com/library/mac/#releasenotes/Darwin/SymbolVariantsRelNotes/_index.html
3694 *
3695 * When fd_set is not big enough to hold big file descriptors,
3696 * it should be allocated dynamically.
3697 * Note that this assumes fd_set is structured as a bitmap.
3698 *
3699 * rb_fd_init allocates the memory.
3700 * rb_fd_term frees the memory.
3701 * rb_fd_set may re-allocate the bitmap.
3702 *
3703 * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
3704 */
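/*
 * The practical consequence at the Ruby level: IO.select keeps working for
 * descriptors above FD_SETSIZE (commonly 1024), provided the process rlimit
 * permits that many open files. A rough sketch (the numbers are illustrative):
 *
 *    Process.setrlimit(:NOFILE, 4096)   # assumes the hard limit allows this
 *    pipes = 2000.times.map { IO.pipe }
 *    r, w = pipes.last
 *    w.write("x")
 *    ready, = IO.select([r], nil, nil, 1)
 *    ready.first == r             #=> true, even though r.fileno > 1024
 */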
3705
3706void
3707 rb_fd_init(rb_fdset_t *fds)
3708{
3709 fds->maxfd = 0;
3710 fds->fdset = ALLOC(fd_set);
3711 FD_ZERO(fds->fdset);
3712}
3713
3714void
3715 rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
3716{
3717 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3718
3719 if (size < sizeof(fd_set))
3720 size = sizeof(fd_set);
3721 dst->maxfd = src->maxfd;
3722 dst->fdset = xmalloc(size);
3723 memcpy(dst->fdset, src->fdset, size);
3724}
3725
3726void
3727 rb_fd_term(rb_fdset_t *fds)
3728{
3729 if (fds->fdset) xfree(fds->fdset);
3730 fds->maxfd = 0;
3731 fds->fdset = 0;
3732}
3733
3734void
3735 rb_fd_zero(rb_fdset_t *fds)
3736{
3737 if (fds->fdset)
3738 MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
3739}
3740
3741static void
3742rb_fd_resize(int n, rb_fdset_t *fds)
3743{
3744 size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
3745 size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
3746
3747 if (m < sizeof(fd_set)) m = sizeof(fd_set);
3748 if (o < sizeof(fd_set)) o = sizeof(fd_set);
3749
3750 if (m > o) {
3751 fds->fdset = xrealloc(fds->fdset, m);
3752 memset((char *)fds->fdset + o, 0, m - o);
3753 }
3754 if (n >= fds->maxfd) fds->maxfd = n + 1;
3755}
3756
3757void
3758rb_fd_set(int n, rb_fdset_t *fds)
3759{
3760 rb_fd_resize(n, fds);
3761 FD_SET(n, fds->fdset);
3762}
3763
3764void
3765rb_fd_clr(int n, rb_fdset_t *fds)
3766{
3767 if (n >= fds->maxfd) return;
3768 FD_CLR(n, fds->fdset);
3769}
3770
3771int
3772rb_fd_isset(int n, const rb_fdset_t *fds)
3773{
3774 if (n >= fds->maxfd) return 0;
3775 return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
3776}
3777
3778void
3779rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
3780{
3781 size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
3782
3783 if (size < sizeof(fd_set)) size = sizeof(fd_set);
3784 dst->maxfd = max;
3785 dst->fdset = xrealloc(dst->fdset, size);
3786 memcpy(dst->fdset, src, size);
3787}
3788
3789void
3790rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
3791{
3792 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3793
3794 if (size < sizeof(fd_set))
3795 size = sizeof(fd_set);
3796 dst->maxfd = src->maxfd;
3797 dst->fdset = xrealloc(dst->fdset, size);
3798 memcpy(dst->fdset, src->fdset, size);
3799}
3800
3801int
3802rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
3803{
3804 fd_set *r = NULL, *w = NULL, *e = NULL;
3805 if (readfds) {
3806 rb_fd_resize(n - 1, readfds);
3807 r = rb_fd_ptr(readfds);
3808 }
3809 if (writefds) {
3810 rb_fd_resize(n - 1, writefds);
3811 w = rb_fd_ptr(writefds);
3812 }
3813 if (exceptfds) {
3814 rb_fd_resize(n - 1, exceptfds);
3815 e = rb_fd_ptr(exceptfds);
3816 }
3817 return select(n, r, w, e, timeout);
3818}
3819
3820#define rb_fd_no_init(fds) ((void)((fds)->fdset = 0), (void)((fds)->maxfd = 0))
3821
3822#undef FD_ZERO
3823#undef FD_SET
3824#undef FD_CLR
3825#undef FD_ISSET
3826
3827#define FD_ZERO(f) rb_fd_zero(f)
3828#define FD_SET(i, f) rb_fd_set((i), (f))
3829#define FD_CLR(i, f) rb_fd_clr((i), (f))
3830#define FD_ISSET(i, f) rb_fd_isset((i), (f))
3831
3832#elif defined(_WIN32)
3833
3834void
3835 rb_fd_init(rb_fdset_t *set)
3836{
3837 set->capa = FD_SETSIZE;
3838 set->fdset = ALLOC(fd_set);
3839 FD_ZERO(set->fdset);
3840}
3841
3842void
3843 rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
3844{
3845 rb_fd_init(dst);
3846 rb_fd_dup(dst, src);
3847}
3848
3849void
3850 rb_fd_term(rb_fdset_t *set)
3851{
3852 xfree(set->fdset);
3853 set->fdset = NULL;
3854 set->capa = 0;
3855}
3856
3857void
3858rb_fd_set(int fd, rb_fdset_t *set)
3859{
3860 unsigned int i;
3861 SOCKET s = rb_w32_get_osfhandle(fd);
3862
3863 for (i = 0; i < set->fdset->fd_count; i++) {
3864 if (set->fdset->fd_array[i] == s) {
3865 return;
3866 }
3867 }
3868 if (set->fdset->fd_count >= (unsigned)set->capa) {
3869 set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
3870 set->fdset =
3871 rb_xrealloc_mul_add(
3872 set->fdset, set->capa, sizeof(SOCKET), sizeof(unsigned int));
3873 }
3874 set->fdset->fd_array[set->fdset->fd_count++] = s;
3875}
3876
3877#undef FD_ZERO
3878#undef FD_SET
3879#undef FD_CLR
3880#undef FD_ISSET
3881
3882#define FD_ZERO(f) rb_fd_zero(f)
3883#define FD_SET(i, f) rb_fd_set((i), (f))
3884#define FD_CLR(i, f) rb_fd_clr((i), (f))
3885#define FD_ISSET(i, f) rb_fd_isset((i), (f))
3886
3887#define rb_fd_no_init(fds) (void)((fds)->fdset = 0)
3888
3889#endif
3890
3891#ifndef rb_fd_no_init
3892#define rb_fd_no_init(fds) (void)(fds)
3893#endif
3894
3895static int
3896wait_retryable(int *result, int errnum, rb_hrtime_t *rel, rb_hrtime_t end)
3897{
3898 if (*result < 0) {
3899 switch (errnum) {
3900 case EINTR:
3901#ifdef ERESTART
3902 case ERESTART:
3903#endif
3904 *result = 0;
3905 if (rel && hrtime_update_expire(rel, end)) {
3906 *rel = 0;
3907 }
3908 return TRUE;
3909 }
3910 return FALSE;
3911 }
3912 else if (*result == 0) {
3913 /* check for spurious wakeup */
3914 if (rel) {
3915 return !hrtime_update_expire(rel, end);
3916 }
3917 return TRUE;
3918 }
3919 return FALSE;
3920}
3921
3922 struct select_set {
3923 int max;
3924 int sigwait_fd;
3925 rb_thread_t *th;
3926 rb_fdset_t *rset;
3927 rb_fdset_t *wset;
3928 rb_fdset_t *eset;
3929 rb_fdset_t orig_rset;
3930 rb_fdset_t orig_wset;
3931 rb_fdset_t orig_eset;
3932 struct timeval *timeout;
3933 };
3934
3935static VALUE
3936select_set_free(VALUE p)
3937{
3938 struct select_set *set = (struct select_set *)p;
3939
3940 if (set->sigwait_fd >= 0) {
3941 rb_sigwait_fd_put(set->th, set->sigwait_fd);
3942 rb_sigwait_fd_migrate(set->th->vm);
3943 }
3944
3945 rb_fd_term(&set->orig_rset);
3946 rb_fd_term(&set->orig_wset);
3947 rb_fd_term(&set->orig_eset);
3948
3949 return Qfalse;
3950}
3951
3952static const rb_hrtime_t *
3953sigwait_timeout(rb_thread_t *th, int sigwait_fd, const rb_hrtime_t *orig,
3954 int *drained_p)
3955{
3956 static const rb_hrtime_t quantum = TIME_QUANTUM_USEC * 1000;
3957
3958 if (sigwait_fd >= 0 && (!ubf_threads_empty() || BUSY_WAIT_SIGNALS)) {
3959 *drained_p = check_signals_nogvl(th, sigwait_fd);
3960 if (!orig || *orig > quantum)
3961 return &quantum;
3962 }
3963
3964 return orig;
3965}
3966
3967static VALUE
3968do_select(VALUE p)
3969{
3970 struct select_set *set = (struct select_set *)p;
3971 int result = 0;
3972 int lerrno;
3973 rb_hrtime_t *to, rel, end = 0;
3974
3975 timeout_prepare(&to, &rel, &end, set->timeout);
3976#define restore_fdset(dst, src) \
3977 ((dst) ? rb_fd_dup(dst, src) : (void)0)
3978#define do_select_update() \
3979 (restore_fdset(set->rset, &set->orig_rset), \
3980 restore_fdset(set->wset, &set->orig_wset), \
3981 restore_fdset(set->eset, &set->orig_eset), \
3982 TRUE)
3983
3984 do {
3985 int drained;
3986 lerrno = 0;
3987
3988 BLOCKING_REGION(set->th, {
3989 const rb_hrtime_t *sto;
3990 struct timeval tv;
3991
3992 sto = sigwait_timeout(set->th, set->sigwait_fd, to, &drained);
3993 if (!RUBY_VM_INTERRUPTED(set->th->ec)) {
3994 result = native_fd_select(set->max, set->rset, set->wset,
3995 set->eset,
3996 rb_hrtime2timeval(&tv, sto), set->th);
3997 if (result < 0) lerrno = errno;
3998 }
3999 }, set->sigwait_fd >= 0 ? ubf_sigwait : ubf_select, set->th, TRUE);
4000
4001 if (set->sigwait_fd >= 0) {
4002 if (result > 0 && rb_fd_isset(set->sigwait_fd, set->rset)) {
4003 result--;
4004 (void)check_signals_nogvl(set->th, set->sigwait_fd);
4005 } else {
4006 (void)check_signals_nogvl(set->th, -1);
4007 }
4008 }
4009
4010 RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec); /* may raise */
4011 } while (wait_retryable(&result, lerrno, to, end) && do_select_update());
4012
4013 if (result < 0) {
4014 errno = lerrno;
4015 }
4016
4017 return (VALUE)result;
4018}
4019
4020static void
4021rb_thread_wait_fd_rw(int fd, int read)
4022{
4023 int result = 0;
4024 int events = read ? RB_WAITFD_IN : RB_WAITFD_OUT;
4025
4026 thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd, read ? "read" : "write");
4027
4028 if (fd < 0) {
4029 rb_raise(rb_eIOError, "closed stream");
4030 }
4031
4032 result = rb_wait_for_single_fd(fd, events, NULL);
4033 if (result < 0) {
4034 rb_sys_fail(0);
4035 }
4036
4037 thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ? "read" : "write");
4038}
4039
4040void
4041 rb_thread_wait_fd(int fd)
4042{
4043 rb_thread_wait_fd_rw(fd, 1);
4044}
4045
4046int
4047 rb_thread_fd_writable(int fd)
4048{
4049 rb_thread_wait_fd_rw(fd, 0);
4050 return TRUE;
4051}
4052
4053static rb_fdset_t *
4054init_set_fd(int fd, rb_fdset_t *fds)
4055{
4056 if (fd < 0) {
4057 return 0;
4058 }
4059 rb_fd_init(fds);
4060 rb_fd_set(fd, fds);
4061
4062 return fds;
4063}
4064
4065int
4066 rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
4067 struct timeval *timeout)
4068{
4069 struct select_set set;
4070
4071 set.th = GET_THREAD();
4072 RUBY_VM_CHECK_INTS_BLOCKING(set.th->ec);
4073 set.max = max;
4074 set.rset = read;
4075 set.wset = write;
4076 set.eset = except;
4077 set.timeout = timeout;
4078
4079 if (!set.rset && !set.wset && !set.eset) {
4080 if (!timeout) {
4081 rb_thread_sleep_forever();
4082 return 0;
4083 }
4084 rb_thread_wait_for(*timeout);
4085 return 0;
4086 }
4087
4088 set.sigwait_fd = rb_sigwait_fd_get(set.th);
4089 if (set.sigwait_fd >= 0) {
4090 if (set.rset)
4091 rb_fd_set(set.sigwait_fd, set.rset);
4092 else
4093 set.rset = init_set_fd(set.sigwait_fd, &set.orig_rset);
4094 if (set.sigwait_fd >= set.max) {
4095 set.max = set.sigwait_fd + 1;
4096 }
4097 }
4098#define fd_init_copy(f) do { \
4099 if (set.f) { \
4100 rb_fd_resize(set.max - 1, set.f); \
4101 if (&set.orig_##f != set.f) { /* sigwait_fd */ \
4102 rb_fd_init_copy(&set.orig_##f, set.f); \
4103 } \
4104 } \
4105 else { \
4106 rb_fd_no_init(&set.orig_##f); \
4107 } \
4108 } while (0)
4109 fd_init_copy(rset);
4110 fd_init_copy(wset);
4111 fd_init_copy(eset);
4112 #undef fd_init_copy
4113
4114 return (int)rb_ensure(do_select, (VALUE)&set, select_set_free, (VALUE)&set);
4115}
4116
4117#ifdef USE_POLL
4118
4119/* The same as the Linux kernel. TODO: make a platform-independent definition. */
4120#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
4121#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
4122#define POLLEX_SET (POLLPRI)
4123
4124#ifndef POLLERR_SET /* defined for FreeBSD for now */
4125# define POLLERR_SET (0)
4126#endif
4127
4128/*
4129 * returns a mask of events
4130 */
4131int
4132rb_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4133{
4134 struct pollfd fds[2];
4135 int result = 0, lerrno;
4136 rb_hrtime_t *to, rel, end = 0;
4137 int drained;
4138 nfds_t nfds;
4139 rb_unblock_function_t *ubf;
4140 struct waiting_fd wfd;
4141 int state;
4142
4143 wfd.th = GET_THREAD();
4144 wfd.fd = fd;
4145 list_add(&wfd.th->vm->waiting_fds, &wfd.wfd_node);
4146 EC_PUSH_TAG(wfd.th->ec);
4147 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
4148 RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4149 timeout_prepare(&to, &rel, &end, timeout);
4150 fds[0].fd = fd;
4151 fds[0].events = (short)events;
4152 fds[0].revents = 0;
4153 do {
4154 fds[1].fd = rb_sigwait_fd_get(wfd.th);
4155
4156 if (fds[1].fd >= 0) {
4157 fds[1].events = POLLIN;
4158 fds[1].revents = 0;
4159 nfds = 2;
4160 ubf = ubf_sigwait;
4161 }
4162 else {
4163 nfds = 1;
4164 ubf = ubf_select;
4165 }
4166
4167 lerrno = 0;
4168 BLOCKING_REGION(wfd.th, {
4169 const rb_hrtime_t *sto;
4170 struct timespec ts;
4171
4172 sto = sigwait_timeout(wfd.th, fds[1].fd, to, &drained);
4173 if (!RUBY_VM_INTERRUPTED(wfd.th->ec)) {
4174 result = ppoll(fds, nfds, rb_hrtime2timespec(&ts, sto), 0);
4175 if (result < 0) lerrno = errno;
4176 }
4177 }, ubf, wfd.th, TRUE);
4178
4179 if (fds[1].fd >= 0) {
4180 if (result > 0 && fds[1].revents) {
4181 result--;
4182 (void)check_signals_nogvl(wfd.th, fds[1].fd);
4183 } else {
4184 (void)check_signals_nogvl(wfd.th, -1);
4185 }
4186 rb_sigwait_fd_put(wfd.th, fds[1].fd);
4187 rb_sigwait_fd_migrate(wfd.th->vm);
4188 }
4189 RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4190 } while (wait_retryable(&result, lerrno, to, end));
4191 }
4192 EC_POP_TAG();
4193 list_del(&wfd.wfd_node);
4194 if (state) {
4195 EC_JUMP_TAG(wfd.th->ec, state);
4196 }
4197
4198 if (result < 0) {
4199 errno = lerrno;
4200 return -1;
4201 }
4202
4203 if (fds[0].revents & POLLNVAL) {
4204 errno = EBADF;
4205 return -1;
4206 }
4207
4208 /*
4209 * POLLIN and POLLOUT have different meanings from select(2)'s read/write bits.
4210 * Therefore we need to fix them up.
4211 */
4212 result = 0;
4213 if (fds[0].revents & POLLIN_SET)
4214 result |= RB_WAITFD_IN;
4215 if (fds[0].revents & POLLOUT_SET)
4216 result |= RB_WAITFD_OUT;
4217 if (fds[0].revents & POLLEX_SET)
4218 result |= RB_WAITFD_PRI;
4219
4220 /* all requested events are ready if there is an error */
4221 if (fds[0].revents & POLLERR_SET)
4222 result |= events;
4223
4224 return result;
4225}
4226#else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
4227struct select_args {
4228 union {
4229 int fd;
4230 int error;
4231 } as;
4232 rb_fdset_t *read;
4233 rb_fdset_t *write;
4234 rb_fdset_t *except;
4235 struct waiting_fd wfd;
4236 struct timeval *tv;
4237};
4238
4239static VALUE
4240select_single(VALUE ptr)
4241{
4242 struct select_args *args = (struct select_args *)ptr;
4243 int r;
4244
4245 r = rb_thread_fd_select(args->as.fd + 1,
4246 args->read, args->write, args->except, args->tv);
4247 if (r == -1)
4248 args->as.error = errno;
4249 if (r > 0) {
4250 r = 0;
4251 if (args->read && rb_fd_isset(args->as.fd, args->read))
4252 r |= RB_WAITFD_IN;
4253 if (args->write && rb_fd_isset(args->as.fd, args->write))
4254 r |= RB_WAITFD_OUT;
4255 if (args->except && rb_fd_isset(args->as.fd, args->except))
4256 r |= RB_WAITFD_PRI;
4257 }
4258 return (VALUE)r;
4259}
4260
4261static VALUE
4262select_single_cleanup(VALUE ptr)
4263{
4264 struct select_args *args = (struct select_args *)ptr;
4265
4266 list_del(&args->wfd.wfd_node);
4267 if (args->read) rb_fd_term(args->read);
4268 if (args->write) rb_fd_term(args->write);
4269 if (args->except) rb_fd_term(args->except);
4270
4271 return (VALUE)-1;
4272}
4273
4274int
4275rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
4276{
4277 rb_fdset_t rfds, wfds, efds;
4278 struct select_args args;
4279 int r;
4280 VALUE ptr = (VALUE)&args;
4281
4282 args.as.fd = fd;
4283 args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
4284 args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
4285 args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
4286 args.tv = tv;
4287 args.wfd.fd = fd;
4288 args.wfd.th = GET_THREAD();
4289
4290 list_add(&args.wfd.th->vm->waiting_fds, &args.wfd.wfd_node);
4291 r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
4292 if (r == -1)
4293 errno = args.as.error;
4294
4295 return r;
4296}
4297#endif /* ! USE_POLL */
4298
4299/*
4300 * for GC
4301 */
4302
4303#ifdef USE_CONSERVATIVE_STACK_END
4304void
4305 rb_gc_set_stack_end(VALUE **stack_end_p)
4306{
4307 VALUE stack_end;
4308 *stack_end_p = &stack_end;
4309}
4310#endif
4311
4312/*
4313 *
4314 */
4315
4316void
4317 rb_threadptr_check_signal(rb_thread_t *mth)
4318{
4319 /* mth must be main_thread */
4320 if (rb_signal_buff_size() > 0) {
4321 /* wakeup main thread */
4322 threadptr_trap_interrupt(mth);
4323 }
4324}
4325
4326static void
4327timer_thread_function(void)
4328{
4329 volatile rb_execution_context_t *ec;
4330
4331 /* for time slice */
4332 ec = ACCESS_ONCE(rb_execution_context_t *,
4333 ruby_current_execution_context_ptr);
4334 if (ec) RUBY_VM_SET_TIMER_INTERRUPT(ec);
4335}
4336
4337static void
4338async_bug_fd(const char *mesg, int errno_arg, int fd)
4339{
4340 char buff[64];
4341 size_t n = strlcpy(buff, mesg, sizeof(buff));
4342 if (n < sizeof(buff)-3) {
4343 ruby_snprintf(buff+n, sizeof(buff)-n, "(%d)", fd);
4344 }
4345 rb_async_bug_errno(buff, errno_arg);
4346}
4347
4348/* VM-dependent API is not available for this function */
4349static int
4350consume_communication_pipe(int fd)
4351{
4352#if USE_EVENTFD
4353 uint64_t buff[1];
4354#else
4355 /* the buffer can be shared because no one else refers to it. */
4356 static char buff[1024];
4357#endif
4358 ssize_t result;
4359 int ret = FALSE; /* for rb_sigwait_sleep */
4360
4361 /*
4362 * disarm UBF_TIMER before we read, because it can become
4363 * re-armed at any time via sighandler and the pipe will refill
4364 * We can disarm it because this thread is now processing signals
4365 * and we do not want unnecessary SIGVTALRM
4366 */
4367 ubf_timer_disarm();
4368
4369 while (1) {
4370 result = read(fd, buff, sizeof(buff));
4371 if (result > 0) {
4372 ret = TRUE;
4373 if (USE_EVENTFD || result < (ssize_t)sizeof(buff)) {
4374 return ret;
4375 }
4376 }
4377 else if (result == 0) {
4378 return ret;
4379 }
4380 else if (result < 0) {
4381 int e = errno;
4382 switch (e) {
4383 case EINTR:
4384 continue; /* retry */
4385 case EAGAIN:
4386#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
4387 case EWOULDBLOCK:
4388#endif
4389 return ret;
4390 default:
4391 async_bug_fd("consume_communication_pipe: read", e, fd);
4392 }
4393 }
4394 }
4395}
4396
4397static int
4398check_signals_nogvl(rb_thread_t *th, int sigwait_fd)
4399{
4400 rb_vm_t *vm = GET_VM(); /* th may be 0 */
4401 int ret = sigwait_fd >= 0 ? consume_communication_pipe(sigwait_fd) : FALSE;
4402 ubf_wakeup_all_threads();
4403 ruby_sigchld_handler(vm);
4404 if (rb_signal_buff_size()) {
4405 if (th == vm->main_thread)
4406 /* no need to lock + wakeup if already in main thread */
4407 RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
4408 else
4409 threadptr_trap_interrupt(vm->main_thread);
4410 ret = TRUE; /* for SIGCHLD_LOSSY && rb_sigwait_sleep */
4411 }
4412 return ret;
4413}
4414
4415void
4416 rb_thread_stop_timer_thread(void)
4417{
4418 if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
4419 native_reset_timer_thread();
4420 }
4421}
4422
4423void
4424 rb_thread_reset_timer_thread(void)
4425{
4426 native_reset_timer_thread();
4427}
4428
4429void
4430 rb_thread_start_timer_thread(void)
4431{
4432 system_working = 1;
4433 rb_thread_create_timer_thread();
4434}
4435
4436static int
4437clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
4438{
4439 int i;
4440 VALUE coverage = (VALUE)val;
4441 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
4442 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
4443
4444 if (lines) {
4445 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
4446 rb_ary_clear(lines);
4447 }
4448 else {
4449 int i;
4450 for (i = 0; i < RARRAY_LEN(lines); i++) {
4451 if (RARRAY_AREF(lines, i) != Qnil)
4452 RARRAY_ASET(lines, i, INT2FIX(0));
4453 }
4454 }
4455 }
4456 if (branches) {
4457 VALUE counters = RARRAY_AREF(branches, 1);
4458 for (i = 0; i < RARRAY_LEN(counters); i++) {
4459 RARRAY_ASET(counters, i, INT2FIX(0));
4460 }
4461 }
4462
4463 return ST_CONTINUE;
4464}
4465
4466void
4467 rb_clear_coverages(void)
4468{
4469 VALUE coverages = rb_get_coverages();
4470 if (RTEST(coverages)) {
4471 rb_hash_foreach(coverages, clear_coverage_i, 0);
4472 }
4473}
4474
4475#if defined(HAVE_WORKING_FORK)
4476static void
4477rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const rb_thread_t *))
4478{
4479 rb_thread_t *i = 0;
4480 rb_vm_t *vm = th->vm;
4481 vm->main_thread = th;
4482
4483 gvl_atfork(th->vm);
4484 ubf_list_atfork();
4485
4486 list_for_each(&vm->living_threads, i, vmlt_node) {
4487 atfork(i, th);
4488 }
4489 rb_vm_living_threads_init(vm);
4490 rb_vm_living_threads_insert(vm, th);
4491
4492 /* may be held by MJIT threads in parent */
4493 rb_native_mutex_initialize(&vm->waitpid_lock);
4494 rb_native_mutex_initialize(&vm->workqueue_lock);
4495
4496 /* may be held by any thread in parent */
4497 rb_native_mutex_initialize(&th->interrupt_lock);
4498
4499 vm->fork_gen++;
4500
4501 vm->sleeper = 0;
4502 rb_clear_coverages();
4503}
4504
4505static void
4506terminate_atfork_i(rb_thread_t *th, const rb_thread_t *current_th)
4507{
4508 if (th != current_th) {
4509 rb_mutex_abandon_keeping_mutexes(th);
4510 rb_mutex_abandon_locking_mutex(th);
4511 thread_cleanup_func(th, TRUE);
4512 }
4513}
4514
4515void rb_fiber_atfork(rb_thread_t *);
4516void
4517rb_thread_atfork(void)
4518{
4519 rb_thread_t *th = GET_THREAD();
4520 rb_thread_atfork_internal(th, terminate_atfork_i);
4521 th->join_list = NULL;
4522 rb_fiber_atfork(th);
4523
4524 /* We don't want to reproduce CVE-2003-0900. */
4525 rb_reset_random_seed();
4526
4527 /* For the child, start the MJIT worker thread here: it is safer than starting it immediately after `after_fork_ruby`. */
4528 mjit_child_after_fork();
4529}
4530
4531static void
4532terminate_atfork_before_exec_i(rb_thread_t *th, const rb_thread_t *current_th)
4533{
4534 if (th != current_th) {
4535 thread_cleanup_func_before_exec(th);
4536 }
4537}
4538
4539void
4540 rb_thread_atfork_before_exec(void)
4541 {
4542 rb_thread_t *th = GET_THREAD();
4543 rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);
4544}
4545#else
4546void
4547 rb_thread_atfork(void)
4548 {
4549}
4550
4551void
4552 rb_thread_atfork_before_exec(void)
4553 {
4554}
4555#endif
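/*
 * Usage sketch (illustrative): C code that calls fork(2) directly must let
 * the child VM forget the parent's other threads and reinitialize the
 * locks they may have held, which is what rb_thread_atfork() does:
 *
 *     pid_t pid = fork();
 *     if (pid == 0) {
 *         rb_thread_atfork();  // child: current thread is now the only living thread
 *         // ... continue running Ruby in the child ...
 *     }
 */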
4556
4557struct thgroup {
4558 int enclosed;
4559 VALUE group;
4560 };
4561
4562static size_t
4563thgroup_memsize(const void *ptr)
4564{
4565 return sizeof(struct thgroup);
4566}
4567
4568static const rb_data_type_t thgroup_data_type = {
4569 "thgroup",
4570 {NULL, RUBY_TYPED_DEFAULT_FREE, thgroup_memsize,},
4571 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
4572 };
4573
4574/*
4575 * Document-class: ThreadGroup
4576 *
4577 * ThreadGroup provides a means of keeping track of a number of threads as a
4578 * group.
4579 *
4580 * A given Thread object can only belong to one ThreadGroup at a time; adding
4581 * a thread to a new group will remove it from any previous group.
4582 *
4583 * Newly created threads belong to the same group as the thread from which they
4584 * were created.
4585 */
4586
4587/*
4588 * Document-const: Default
4589 *
4590 * The default ThreadGroup created when Ruby starts; all Threads belong to it
4591 * by default.
4592 */
4593static VALUE
4594thgroup_s_alloc(VALUE klass)
4595{
4596 VALUE group;
4597 struct thgroup *data;
4598
4599 group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
4600 data->enclosed = 0;
4601 data->group = group;
4602
4603 return group;
4604}
4605
4606/*
4607 * call-seq:
4608 * thgrp.list -> array
4609 *
4610 * Returns an array of all existing Thread objects that belong to this group.
4611 *
4612 * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
4613 */
4614
4615static VALUE
4616thgroup_list(VALUE group)
4617{
4618 VALUE ary = rb_ary_new();
4619 rb_vm_t *vm = GET_THREAD()->vm;
4620 rb_thread_t *th = 0;
4621
4622 list_for_each(&vm->living_threads, th, vmlt_node) {
4623 if (th->thgroup == group) {
4624 rb_ary_push(ary, th->self);
4625 }
4626 }
4627 return ary;
4628}
4629
4630
4631/*
4632 * call-seq:
4633 * thgrp.enclose -> thgrp
4634 *
4635 * Prevents threads from being added to or removed from the receiving
4636 * ThreadGroup.
4637 *
4638 * New threads can still be started in an enclosed ThreadGroup.
4639 *
4640 * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
4641 * thr = Thread.new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
4642 * tg = ThreadGroup.new #=> #<ThreadGroup:0x402752d4>
4643 * tg.add thr
4644 * #=> ThreadError: can't move from the enclosed thread group
4645 */
4646
4647static VALUE
4648thgroup_enclose(VALUE group)
4649{
4650 struct thgroup *data;
4651
4652 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4653 data->enclosed = 1;
4654
4655 return group;
4656}
4657
4658
4659/*
4660 * call-seq:
4661 * thgrp.enclosed? -> true or false
4662 *
4663 * Returns +true+ if the +thgrp+ is enclosed. See also ThreadGroup#enclose.
4664 */
4665
4666static VALUE
4667thgroup_enclosed_p(VALUE group)
4668{
4669 struct thgroup *data;
4670
4671 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4672 if (data->enclosed)
4673 return Qtrue;
4674 return Qfalse;
4675}
4676
4677
4678/*
4679 * call-seq:
4680 * thgrp.add(thread) -> thgrp
4681 *
4682 * Adds the given +thread+ to this group, removing it from any other
4683 * group to which it may have previously been a member.
4684 *
4685 * puts "Initial group is #{ThreadGroup::Default.list}"
4686 * tg = ThreadGroup.new
4687 * t1 = Thread.new { sleep }
4688 * t2 = Thread.new { sleep }
4689 * puts "t1 is #{t1}"
4690 * puts "t2 is #{t2}"
4691 * tg.add(t1)
4692 * puts "Initial group now #{ThreadGroup::Default.list}"
4693 * puts "tg group now #{tg.list}"
4694 *
4695 * This will produce:
4696 *
4697 * Initial group is #<Thread:0x401bdf4c>
4698 * t1 is #<Thread:0x401b3c90>
4699 * t2 is #<Thread:0x401b3c18>
4700 * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
4701 * tg group now #<Thread:0x401b3c90>
4702 */
4703
4704static VALUE
4705thgroup_add(VALUE group, VALUE thread)
4706{
4707 rb_thread_t *target_th = rb_thread_ptr(thread);
4708 struct thgroup *data;
4709
4710 if (OBJ_FROZEN(group)) {
4711 rb_raise(rb_eThreadError, "can't move to the frozen thread group");
4712 }
4713 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4714 if (data->enclosed) {
4715 rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
4716 }
4717
4718 if (!target_th->thgroup) {
4719 return Qnil;
4720 }
4721
4722 if (OBJ_FROZEN(target_th->thgroup)) {
4723 rb_raise(rb_eThreadError, "can't move from the frozen thread group");
4724 }
4725 TypedData_Get_Struct(target_th->thgroup, struct thgroup, &thgroup_data_type, data);
4726 if (data->enclosed) {
4727 rb_raise(rb_eThreadError,
4728 "can't move from the enclosed thread group");
4729 }
4730
4731 target_th->thgroup = group;
4732 return group;
4733}
4734
4735/*
4736 * Document-class: ThreadShield
4737 */
4738static void
4739thread_shield_mark(void *ptr)
4740{
4741 rb_gc_mark((VALUE)ptr);
4742 }
4743
4744static const rb_data_type_t thread_shield_data_type = {
4745 "thread_shield",
4746 {thread_shield_mark, 0, 0,},
4747 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
4748 };
4749
4750static VALUE
4751thread_shield_alloc(VALUE klass)
4752{
4753 return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
4754}
4755
4756#define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
4757#define THREAD_SHIELD_WAITING_MASK (((FL_USER19-1)&~(FL_USER0-1))|FL_USER19)
4758#define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
4759#define THREAD_SHIELD_WAITING_MAX (THREAD_SHIELD_WAITING_MASK>>THREAD_SHIELD_WAITING_SHIFT)
4760 STATIC_ASSERT(THREAD_SHIELD_WAITING_MAX, THREAD_SHIELD_WAITING_MAX <= UINT_MAX);
4761 static inline unsigned int
4762rb_thread_shield_waiting(VALUE b)
4763{
4764 return ((RBASIC(b)->flags & THREAD_SHIELD_WAITING_MASK) >> THREAD_SHIELD_WAITING_SHIFT);
4765 }
4766
4767static inline void
4768rb_thread_shield_waiting_inc(VALUE b)
4769{
4770 unsigned int w = rb_thread_shield_waiting(b);
4771 w++;
4772 if (w > THREAD_SHIELD_WAITING_MAX)
4773 rb_raise(rb_eRuntimeError, "waiting count overflow");
4774 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
4775 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
4776}
4777
4778static inline void
4779rb_thread_shield_waiting_dec(VALUE b)
4780{
4781 unsigned int w = rb_thread_shield_waiting(b);
4782 if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
4783 w--;
4784 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
4785 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
4786}
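/*
 * The waiting count is a small bit-field packed into the shield's RBASIC
 * flags; inc/dec above are plain read-modify-write sequences (safe because
 * callers run with the GVL held). A minimal sketch of the packing:
 *
 *     w = (RBASIC(b)->flags & THREAD_SHIELD_WAITING_MASK)
 *             >> THREAD_SHIELD_WAITING_SHIFT;          // extract field
 *     RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK; // clear field
 *     RBASIC(b)->flags |= (VALUE)(w + 1) << THREAD_SHIELD_WAITING_SHIFT;
 */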
4787
4788VALUE
4789 rb_thread_shield_new(void)
4790 {
4791 VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
4792 rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
4793 return thread_shield;
4794}
4795
4796/*
4797 * Wait on a thread shield.
4798 *
4799 * Returns
4800 * true: acquired the thread shield
4801 * false: the thread shield was destroyed and no other threads are waiting
4802 * nil: the thread shield was destroyed but still in use
4803 */
4804VALUE
4805 rb_thread_shield_wait(VALUE self)
4806 {
4807 VALUE mutex = GetThreadShieldPtr(self);
4808 rb_mutex_t *m;
4809
4810 if (!mutex) return Qfalse;
4811 m = mutex_ptr(mutex);
4812 if (m->th == GET_THREAD()) return Qnil;
4813 rb_thread_shield_waiting_inc(self);
4814 rb_mutex_lock(mutex);
4815 rb_thread_shield_waiting_dec(self);
4816 if (DATA_PTR(self)) return Qtrue;
4817 rb_mutex_unlock(mutex);
4818 return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
4819}
4820
4821static VALUE
4822thread_shield_get_mutex(VALUE self)
4823{
4824 VALUE mutex = GetThreadShieldPtr(self);
4825 if (!mutex)
4826 rb_raise(rb_eThreadError, "destroyed thread shield - %p", (void *)self);
4827 return mutex;
4828}
4829
4830/*
4831 * Release a thread shield, and return true if it has waiting threads.
4832 */
4833VALUE
4834 rb_thread_shield_release(VALUE self)
4835 {
4836 VALUE mutex = thread_shield_get_mutex(self);
4837 rb_mutex_unlock(mutex);
4838 return rb_thread_shield_waiting(self) > 0 ? Qtrue : Qfalse;
4839}
4840
4841/*
4842 * Release and destroy a thread shield, and return true if it has waiting threads.
4843 */
4844VALUE
4845 rb_thread_shield_destroy(VALUE self)
4846 {
4847 VALUE mutex = thread_shield_get_mutex(self);
4848 DATA_PTR(self) = 0;
4849 rb_mutex_unlock(mutex);
4850 return rb_thread_shield_waiting(self) > 0 ? Qtrue : Qfalse;
4851}
4852
4853static VALUE
4854threadptr_recursive_hash(rb_thread_t *th)
4855{
4856 return th->ec->local_storage_recursive_hash;
4857}
4858
4859static void
4860threadptr_recursive_hash_set(rb_thread_t *th, VALUE hash)
4861{
4862 th->ec->local_storage_recursive_hash = hash;
4863}
4864
4865 ID rb_frame_last_func(void);
4866
4867/*
4868 * Returns the current "recursive list" used to detect recursion.
4869 * This list is a hash table, unique for the current thread and for
4870 * the current __callee__.
4871 */
4872
4873static VALUE
4874recursive_list_access(VALUE sym)
4875{
4876 rb_thread_t *th = GET_THREAD();
4877 VALUE hash = threadptr_recursive_hash(th);
4878 VALUE list;
4879 if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
4880 hash = rb_ident_hash_new();
4881 threadptr_recursive_hash_set(th, hash);
4882 list = Qnil;
4883 }
4884 else {
4885 list = rb_hash_aref(hash, sym);
4886 }
4887 if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
4888 list = rb_ident_hash_new();
4889 rb_hash_aset(hash, sym, list);
4890 }
4891 return list;
4892}
4893
4894/*
4895 * Returns Qtrue iff obj (or the pair <obj, paired_obj>) is already
4896 * in the recursion list.
4897 * Assumes the recursion list is valid.
4898 */
4899
4900static VALUE
4901recursive_check(VALUE list, VALUE obj, VALUE paired_obj_id)
4902{
4903#if SIZEOF_LONG == SIZEOF_VOIDP
4904 #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
4905#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
4906 #define OBJ_ID_EQL(obj_id, other) (RB_TYPE_P((obj_id), T_BIGNUM) ? \
4907 rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
4908#endif
4909
4910 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
4911 if (pair_list == Qundef)
4912 return Qfalse;
4913 if (paired_obj_id) {
4914 if (!RB_TYPE_P(pair_list, T_HASH)) {
4915 if (!OBJ_ID_EQL(paired_obj_id, pair_list))
4916 return Qfalse;
4917 }
4918 else {
4919 if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
4920 return Qfalse;
4921 }
4922 }
4923 return Qtrue;
4924}
4925
4926/*
4927 * Pushes obj (or the pair <obj, paired_obj>) in the recursion list.
4928 * For a single obj, it sets list[obj] to Qtrue.
4929 * For a pair, it sets list[obj] to paired_obj_id if possible,
4930 * otherwise list[obj] becomes a hash like:
4931 * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
4932 * Assumes the recursion list is valid.
4933 */
4934
4935static void
4936recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
4937{
4938 VALUE pair_list;
4939
4940 if (!paired_obj) {
4941 rb_hash_aset(list, obj, Qtrue);
4942 }
4943 else if ((pair_list = rb_hash_lookup2(list, obj, Qundef)) == Qundef) {
4944 rb_hash_aset(list, obj, paired_obj);
4945 }
4946 else {
4947 if (!RB_TYPE_P(pair_list, T_HASH)){
4948 VALUE other_paired_obj = pair_list;
4949 pair_list = rb_hash_new();
4950 rb_hash_aset(pair_list, other_paired_obj, Qtrue);
4951 rb_hash_aset(list, obj, pair_list);
4952 }
4953 rb_hash_aset(pair_list, paired_obj, Qtrue);
4954 }
4955}
4956
4957/*
4958 * Pops obj (or the pair <obj, paired_obj>) from the recursion list.
4959 * For a pair, if list[obj] is a hash, then paired_obj_id is
4960 * removed from the hash and no attempt is made to simplify
4961 * list[obj] from {only_one_paired_id => true} to only_one_paired_id
4962 * Assumes the recursion list is valid.
4963 */
4964
4965static int
4966recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
4967{
4968 if (paired_obj) {
4969 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
4970 if (pair_list == Qundef) {
4971 return 0;
4972 }
4973 if (RB_TYPE_P(pair_list, T_HASH)) {
4974 rb_hash_delete_entry(pair_list, paired_obj);
4975 if (!RHASH_EMPTY_P(pair_list)) {
4976 return 1; /* keep hash until it is empty */
4977 }
4978 }
4979 }
4980 rb_hash_delete_entry(list, obj);
4981 return 1;
4982}
4983
4984 struct exec_recursive_params {
4985 VALUE (*func) (VALUE, VALUE, int);
4986 VALUE list;
4987 VALUE obj;
4988 VALUE pairid;
4989 VALUE arg;
4990 };
4991
4992static VALUE
4993exec_recursive_i(RB_BLOCK_CALL_FUNC_ARGLIST(tag, data))
4994{
4995 struct exec_recursive_params *p = (void *)data;
4996 return (*p->func)(p->obj, p->arg, FALSE);
4997}
4998
4999/*
5000 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5001 * current method is called recursively on obj, or on the pair <obj, pairid>
5002 * If outer is 0, then the innermost func will be called with recursive set
5003 * to Qtrue, otherwise the outermost func will be called. In the latter case,
5004 * all inner calls to func are short-circuited by throw.
5005 * Implementation details: the value thrown is the recursive list which is
5006 * proper to the current method and unlikely to be caught anywhere else.
5007 * list[recursive_key] is used as a flag for the outermost call.
5008 */
5009
5010static VALUE
5011exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer)
5012{
5013 VALUE result = Qundef;
5014 const ID mid = rb_frame_last_func();
5015 const VALUE sym = mid ? ID2SYM(mid) : ID2SYM(idNULL);
5016 struct exec_recursive_params p;
5017 int outermost;
5018 p.list = recursive_list_access(sym);
5019 p.obj = obj;
5020 p.pairid = pairid;
5021 p.arg = arg;
5022 outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);
5023
5024 if (recursive_check(p.list, p.obj, pairid)) {
5025 if (outer && !outermost) {
5026 rb_throw_obj(p.list, p.list);
5027 }
5028 return (*func)(obj, arg, TRUE);
5029 }
5030 else {
5031 enum ruby_tag_type state;
5032
5033 p.func = func;
5034
5035 if (outermost) {
5036 recursive_push(p.list, ID2SYM(recursive_key), 0);
5037 recursive_push(p.list, p.obj, p.pairid);
5038 result = rb_catch_protect(p.list, exec_recursive_i, (VALUE)&p, &state);
5039 if (!recursive_pop(p.list, p.obj, p.pairid)) goto invalid;
5040 if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
5041 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5042 if (result == p.list) {
5043 result = (*func)(obj, arg, TRUE);
5044 }
5045 }
5046 else {
5047 volatile VALUE ret = Qundef;
5048 recursive_push(p.list, p.obj, p.pairid);
5049 EC_PUSH_TAG(GET_EC());
5050 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
5051 ret = (*func)(obj, arg, FALSE);
5052 }
5053 EC_POP_TAG();
5054 if (!recursive_pop(p.list, p.obj, p.pairid)) {
5055 invalid:
5056 rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list "
5057 "for %+"PRIsVALUE" in %+"PRIsVALUE,
5058 sym, rb_thread_current());
5059 }
5060 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5061 result = ret;
5062 }
5063 }
5064 *(volatile struct exec_recursive_params *)&p; /* volatile access: keep p valid across EC_EXEC_TAG's setjmp/longjmp */
5065 return result;
5066}
5067
5068/*
5069 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5070 * current method is called recursively on obj
5071 */
5072
5073VALUE
5074 rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5075 {
5076 return exec_recursive(func, obj, 0, arg, 0);
5077}
5078
5079/*
5080 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5081 * current method is called recursively on the ordered pair <obj, paired_obj>
5082 */
5083
5084VALUE
5085 rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5086 {
5087 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 0);
5088}
5089
5090/*
5091 * If recursion is detected on the current method and obj, the outermost
5092 * func will be called with (obj, arg, Qtrue). All inner calls to func will be
5093 * short-circuited using throw.
5094 */
5095
5096VALUE
5097 rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5098 {
5099 return exec_recursive(func, obj, 0, arg, 1);
5100}
5101
5102/*
5103 * If recursion is detected on the current method, obj and paired_obj,
5104 * the outermost func will be called with (obj, arg, Qtrue). All inner
5105 * calls to func will be short-circuited using throw.
5106 */
5107
5108VALUE
5109 rb_exec_recursive_paired_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5110 {
5111 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 1);
5112}
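/*
 * Hedged example (illustrative, not part of this file): a recursion-safe
 * walker in the style of Array#inspect, built on rb_exec_recursive. The
 * names my_inspect/my_inspect_i are hypothetical:
 *
 *     static VALUE
 *     my_inspect_i(VALUE obj, VALUE arg, int recur)
 *     {
 *         if (recur) return rb_str_new_cstr("[...]");  // re-entered on obj
 *         // ... walk obj's elements, calling back into my_inspect() ...
 *         return rb_str_new_cstr("...");
 *     }
 *
 *     static VALUE
 *     my_inspect(VALUE obj)
 *     {
 *         return rb_exec_recursive(my_inspect_i, obj, 0);
 *     }
 */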
5113
5114/*
5115 * call-seq:
5116 * thread.backtrace -> array
5117 *
5118 * Returns the current backtrace of the target thread.
5119 *
5120 */
5121
5122static VALUE
5123rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
5124{
5125 return rb_vm_thread_backtrace(argc, argv, thval);
5126}
5127
5128/* call-seq:
5129 * thread.backtrace_locations(*args) -> array or nil
5130 *
5131 * Returns the execution stack for the target thread---an array containing
5132 * backtrace location objects.
5133 *
5134 * See Thread::Backtrace::Location for more information.
5135 *
5136 * This method behaves similarly to Kernel#caller_locations except it applies
5137 * to a specific thread.
5138 */
5139static VALUE
5140rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
5141{
5142 return rb_vm_thread_backtrace_locations(argc, argv, thval);
5143 }
5144
5145/*
5146 * Document-class: ThreadError
5147 *
5148 * Raised when an invalid operation is attempted on a thread.
5149 *
5150 * For example, when no other thread has been started:
5151 *
5152 * Thread.stop
5153 *
5154 * This will raise the following exception:
5155 *
5156 * ThreadError: stopping only thread
5157 * note: use sleep to stop forever
5158 */
5159
5160void
5161 Init_Thread(void)
5162 {
5163#undef rb_intern
5164#define rb_intern(str) rb_intern_const(str)
5165
5166 VALUE cThGroup;
5167 rb_thread_t *th = GET_THREAD();
5168
5169 sym_never = ID2SYM(rb_intern("never"));
5170 sym_immediate = ID2SYM(rb_intern("immediate"));
5171 sym_on_blocking = ID2SYM(rb_intern("on_blocking"));
5172
5173 rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
5174 rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
5175 rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
5176 rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
5177 rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
5178 rb_define_singleton_method(rb_cThread, "stop", thread_stop, 0);
5179 rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
5180 rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
5181 rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
5182 rb_define_singleton_method(rb_cThread, "list", thread_list, 0);
5183 rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
5184 rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
5185 rb_define_singleton_method(rb_cThread, "report_on_exception", rb_thread_s_report_exc, 0);
5186 rb_define_singleton_method(rb_cThread, "report_on_exception=", rb_thread_s_report_exc_set, 1);
5187#if THREAD_DEBUG < 0
5188 rb_define_singleton_method(rb_cThread, "DEBUG", rb_thread_s_debug, 0);
5189 rb_define_singleton_method(rb_cThread, "DEBUG=", rb_thread_s_debug_set, 1);
5190#endif
5191 rb_define_singleton_method(rb_cThread, "handle_interrupt", rb_thread_s_handle_interrupt, 1);
5192 rb_define_singleton_method(rb_cThread, "pending_interrupt?", rb_thread_s_pending_interrupt_p, -1);
5193 rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);
5194
5195 rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
5196 rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
5197 rb_define_method(rb_cThread, "join", thread_join_m, -1);
5198 rb_define_method(rb_cThread, "value", thread_value, 0);
5199 rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
5200 rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
5201 rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
5202 rb_define_method(rb_cThread, "run", rb_thread_run, 0);
5203 rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
5204 rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
5205 rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
5206 rb_define_method(rb_cThread, "fetch", rb_thread_fetch, -1);
5207 rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
5208 rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
5209 rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
5210 rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
5211 rb_define_method(rb_cThread, "status", rb_thread_status, 0);
5212 rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
5213 rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
5214 rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
5215 rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
5216 rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
5217 rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
5218 rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
5219 rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
5220 rb_define_method(rb_cThread, "report_on_exception", rb_thread_report_exc, 0);
5221 rb_define_method(rb_cThread, "report_on_exception=", rb_thread_report_exc_set, 1);
5222 rb_define_method(rb_cThread, "safe_level", rb_thread_safe_level, 0);
5223 rb_define_method(rb_cThread, "group", rb_thread_group, 0);
5224 rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, -1);
5225 rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
5226
5227 rb_define_method(rb_cThread, "name", rb_thread_getname, 0);
5228 rb_define_method(rb_cThread, "name=", rb_thread_setname, 1);
5229 rb_define_method(rb_cThread, "to_s", rb_thread_to_s, 0);
5230 rb_define_alias(rb_cThread, "inspect", "to_s");
5231
5232 rb_vm_register_special_exception(ruby_error_stream_closed, rb_eIOError,
5233 "stream closed in another thread");
5234
5235 cThGroup = rb_define_class("ThreadGroup", rb_cObject);
5236 rb_define_alloc_func(cThGroup, thgroup_s_alloc);
5237 rb_define_method(cThGroup, "list", thgroup_list, 0);
5238 rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
5239 rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
5240 rb_define_method(cThGroup, "add", thgroup_add, 1);
5241
5242 {
5243 th->thgroup = th->vm->thgroup_default = rb_obj_alloc(cThGroup);
5244 rb_define_const(cThGroup, "Default", th->thgroup);
5245 }
5246
5247 recursive_key = rb_intern("__recursive_key__");
5248 rb_eThreadError = rb_define_class("ThreadError", rb_eStandardError);
5249
5250 /* init thread core */
5251 {
5252 /* main thread setting */
5253 {
5254 /* acquire global vm lock */
5255 gvl_init(th->vm);
5256 gvl_acquire(th->vm, th);
5257 rb_native_mutex_initialize(&th->vm->waitpid_lock);
5258 rb_native_mutex_initialize(&th->vm->workqueue_lock);
5259 rb_native_mutex_initialize(&th->interrupt_lock);
5260
5261 th->pending_interrupt_queue = rb_ary_tmp_new(0);
5262 th->pending_interrupt_queue_checked = 0;
5263 th->pending_interrupt_mask_stack = rb_ary_tmp_new(0);
5264 }
5265 }
5266
5267 rb_thread_create_timer_thread();
5268
5269 /* suppress warnings on cygwin, mingw and mswin.*/
5270 (void)native_mutex_trylock;
5271
5272 Init_thread_sync();
5273}
5274
5275int
5276 ruby_native_thread_p(void)
5277 {
5278 rb_thread_t *th = ruby_thread_from_native();
5279
5280 return th != 0;
5281}
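/*
 * Usage sketch (illustrative; do_ruby_work and arg are hypothetical): a C
 * callback that may fire on a thread the VM never registered can use this
 * predicate to decide whether re-entering Ruby is legal at all:
 *
 *     if (ruby_native_thread_p()) {
 *         rb_thread_call_with_gvl(do_ruby_work, arg);  // known Ruby thread
 *     }
 *     else {
 *         // foreign native thread: hand the work to a Ruby thread instead
 *     }
 */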
5282
5283static void
5284debug_deadlock_check(rb_vm_t *vm, VALUE msg)
5285{
5286 rb_thread_t *th = 0;
5287 VALUE sep = rb_str_new_cstr("\n ");
5288
5289 rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
5290 vm_living_thread_num(vm), vm->sleeper, (void *)GET_THREAD(), (void *)vm->main_thread);
5291 list_for_each(&vm->living_threads, th, vmlt_node) {
5292 rb_str_catf(msg, "* %+"PRIsVALUE"\n rb_thread_t:%p "
5293 "native:%"PRI_THREAD_ID" int:%u",
5294 th->self, (void *)th, thread_id_str(th), th->ec->interrupt_flag);
5295 if (th->locking_mutex) {
5296 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5297 rb_str_catf(msg, " mutex:%p cond:%"PRIuSIZE,
5298 (void *)mutex->th, rb_mutex_num_waiting(mutex));
5299 }
5300 {
5301 rb_thread_list_t *list = th->join_list;
5302 while (list) {
5303 rb_str_catf(msg, "\n depended by: tb_thread_id:%p", (void *)list->th);
5304 list = list->next;
5305 }
5306 }
5307 rb_str_catf(msg, "\n ");
5308 rb_str_concat(msg, rb_ary_join(rb_ec_backtrace_str_ary(th->ec, 0, 0), sep));
5309 rb_str_catf(msg, "\n");
5310 }
5311}
5312
5313static void
5314rb_check_deadlock(rb_vm_t *vm)
5315{
5316 int found = 0;
5317 rb_thread_t *th = 0;
5318
5319 if (vm_living_thread_num(vm) > vm->sleeper) return;
5320 if (vm_living_thread_num(vm) < vm->sleeper) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
5321 if (patrol_thread && patrol_thread != GET_THREAD()) return;
5322
5323 list_for_each(&vm->living_threads, th, vmlt_node) {
5324 if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED_ANY(th->ec)) {
5325 found = 1;
5326 }
5327 else if (th->locking_mutex) {
5328 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5329
5330 if (mutex->th == th || (!mutex->th && !list_empty(&mutex->waitq))) {
5331 found = 1;
5332 }
5333 }
5334 if (found)
5335 break;
5336 }
5337
5338 if (!found) {
5339 VALUE argv[2];
5340 argv[0] = rb_eFatal;
5341 argv[1] = rb_str_new2("No live threads left. Deadlock?");
5342 debug_deadlock_check(vm, argv[1]);
5343 vm->sleeper--;
5344 rb_threadptr_raise(vm->main_thread, 2, argv);
5345 }
5346}
5347
5348static void
5349update_line_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5350{
5351 const rb_control_frame_t *cfp = GET_EC()->cfp;
5352 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5353 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5354 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
5355 if (lines) {
5356 long line = rb_sourceline() - 1;
5357 long count;
5358 VALUE num;
5359 void rb_iseq_clear_event_flags(const rb_iseq_t *iseq, size_t pos, rb_event_flag_t reset);
5360 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
5361 rb_iseq_clear_event_flags(cfp->iseq, cfp->pc - cfp->iseq->body->iseq_encoded - 1, RUBY_EVENT_COVERAGE_LINE);
5362 rb_ary_push(lines, LONG2FIX(line + 1));
5363 return;
5364 }
5365 if (line >= RARRAY_LEN(lines)) { /* no longer tracked */
5366 return;
5367 }
5368 num = RARRAY_AREF(lines, line);
5369 if (!FIXNUM_P(num)) return;
5370 count = FIX2LONG(num) + 1;
5371 if (POSFIXABLE(count)) {
5372 RARRAY_ASET(lines, line, LONG2FIX(count));
5373 }
5374 }
5375 }
5376}
5377
5378static void
5379update_branch_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5380{
5381 const rb_control_frame_t *cfp = GET_EC()->cfp;
5382 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5383 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5384 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
5385 if (branches) {
5386 long pc = cfp->pc - cfp->iseq->body->iseq_encoded - 1;
5387 long idx = FIX2INT(RARRAY_AREF(ISEQ_PC2BRANCHINDEX(cfp->iseq), pc)), count;
5388 VALUE counters = RARRAY_AREF(branches, 1);
5389 VALUE num = RARRAY_AREF(counters, idx);
5390 count = FIX2LONG(num) + 1;
5391 if (POSFIXABLE(count)) {
5392 RARRAY_ASET(counters, idx, LONG2FIX(count));
5393 }
5394 }
5395 }
5396}
5397
5398const rb_method_entry_t *
5399 rb_resolve_me_location(const rb_method_entry_t *me, VALUE resolved_location[5])
5400 {
5401 VALUE path, beg_pos_lineno, beg_pos_column, end_pos_lineno, end_pos_column;
5402
5403 retry:
5404 switch (me->def->type) {
5405 case VM_METHOD_TYPE_ISEQ: {
5406 const rb_iseq_t *iseq = me->def->body.iseq.iseqptr;
5407 rb_iseq_location_t *loc = &iseq->body->location;
5408 path = rb_iseq_path(iseq);
5409 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5410 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5411 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5412 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5413 break;
5414 }
5415 case VM_METHOD_TYPE_BMETHOD: {
5416 const rb_iseq_t *iseq = rb_proc_get_iseq(me->def->body.bmethod.proc, 0);
5417 if (iseq) {
5418 rb_iseq_location_t *loc;
5419 rb_iseq_check(iseq);
5420 path = rb_iseq_path(iseq);
5421 loc = &iseq->body->location;
5422 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5423 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5424 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5425 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5426 break;
5427 }
5428 return NULL;
5429 }
5430 case VM_METHOD_TYPE_ALIAS:
5431 me = me->def->body.alias.original_me;
5432 goto retry;
5433 case VM_METHOD_TYPE_REFINED:
5434 me = me->def->body.refined.orig_me;
5435 if (!me) return NULL;
5436 goto retry;
5437 default:
5438 return NULL;
5439 }
5440
5441 /* found */
5442 if (RB_TYPE_P(path, T_ARRAY)) {
5443 path = rb_ary_entry(path, 1);
5444 if (!RB_TYPE_P(path, T_STRING)) return NULL; /* just for the case... */
5445 }
5446 if (resolved_location) {
5447 resolved_location[0] = path;
5448 resolved_location[1] = beg_pos_lineno;
5449 resolved_location[2] = beg_pos_column;
5450 resolved_location[3] = end_pos_lineno;
5451 resolved_location[4] = end_pos_column;
5452 }
5453 return me;
5454}
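/*
 * On success, the five resolved_location slots are filled in order with
 * [path, beg_pos_lineno, beg_pos_column, end_pos_lineno, end_pos_column];
 * e.g. (values illustrative) "foo.rb", 3, 0, 5, 3 for a method body
 * spanning foo.rb:3:0 through foo.rb:5:3.
 */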
5455
5456static void
5457update_method_coverage(VALUE me2counter, rb_trace_arg_t *trace_arg)
5458{
5459 const rb_control_frame_t *cfp = GET_EC()->cfp;
5460 const rb_callable_method_entry_t *cme = rb_vm_frame_method_entry(cfp);
5461 const rb_method_entry_t *me = (const rb_method_entry_t *)cme;
5462 VALUE rcount;
5463 long count;
5464
5465 me = rb_resolve_me_location(me, 0);
5466 if (!me) return;
5467
5468 rcount = rb_hash_aref(me2counter, (VALUE) me);
5469 count = FIXNUM_P(rcount) ? FIX2LONG(rcount) + 1 : 1;
5470 if (POSFIXABLE(count)) {
5471 rb_hash_aset(me2counter, (VALUE) me, LONG2FIX(count));
5472 }
5473}
5474
5475VALUE
5476 rb_get_coverages(void)
5477 {
5478 return GET_VM()->coverages;
5479}
5480
5481int
5482 rb_get_coverage_mode(void)
5483 {
5484 return GET_VM()->coverage_mode;
5485}
5486
5487void
5488rb_set_coverages(VALUE coverages, int mode, VALUE me2counter)
5489{
5490 GET_VM()->coverages = coverages;
5491 GET_VM()->coverage_mode = mode;
5492 rb_add_event_hook2((rb_event_hook_func_t) update_line_coverage, RUBY_EVENT_COVERAGE_LINE, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5493 if (mode & COVERAGE_TARGET_BRANCHES) {
5494 rb_add_event_hook2((rb_event_hook_func_t) update_branch_coverage, RUBY_EVENT_COVERAGE_BRANCH, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5495 }
5496 if (mode & COVERAGE_TARGET_METHODS) {
5497 rb_add_event_hook2((rb_event_hook_func_t) update_method_coverage, RUBY_EVENT_CALL, me2counter, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5498 }
5499}
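/*
 * Ruby-level sketch (illustrative) of what ends up calling
 * rb_set_coverages: the coverage extension translates its options into the
 * mode mask consumed above, after which the event hooks begin counting:
 *
 *     require "coverage"
 *     Coverage.start(lines: true, branches: true)
 *     load "foo.rb"
 *     Coverage.result  #=> {"foo.rb"=>{:lines=>[1, 2, nil], :branches=>{...}}}
 */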
5500
5501/* Make coverage arrays empty so old covered files are no longer tracked. */
5502void
5503 rb_reset_coverages(void)
5504 {
5505 rb_clear_coverages();
5506 rb_iseq_remove_coverage_all();
5507 GET_VM()->coverages = Qfalse;
5508 rb_remove_event_hook((rb_event_hook_func_t) update_line_coverage);
5509 if (GET_VM()->coverage_mode & COVERAGE_TARGET_BRANCHES) {
5510 rb_remove_event_hook((rb_event_hook_func_t) update_branch_coverage);
5511 }
5512 if (GET_VM()->coverage_mode & COVERAGE_TARGET_METHODS) {
5513 rb_remove_event_hook((rb_event_hook_func_t) update_method_coverage);
5514 }
5515}
5516
5517VALUE
5518 rb_default_coverage(int n)
5519 {
5520 VALUE coverage = rb_ary_tmp_new_fill(3);
5521 VALUE lines = Qfalse, branches = Qfalse;
5522 int mode = GET_VM()->coverage_mode;
5523
5524 if (mode & COVERAGE_TARGET_LINES) {
5525 lines = n > 0 ? rb_ary_tmp_new_fill(n) : rb_ary_tmp_new(0);
5526 }
5527 RARRAY_ASET(coverage, COVERAGE_INDEX_LINES, lines);
5528
5529 if (mode & COVERAGE_TARGET_BRANCHES) {
5530 branches = rb_ary_tmp_new_fill(2);
5531 /* internal data structures for branch coverage:
5532 *
5533 * [[base_type, base_first_lineno, base_first_column, base_last_lineno, base_last_column,
5534 * target_type_1, target_first_lineno_1, target_first_column_1, target_last_lineno_1, target_last_column_1, target_counter_index_1,
5535 * target_type_2, target_first_lineno_2, target_first_column_2, target_last_lineno_2, target_last_column_2, target_counter_index_2, ...],
5536 * ...]
5537 *
5538 * Example: [[:case, 1, 0, 4, 3,
5539 * :when, 2, 8, 2, 9, 0,
5540 * :when, 3, 8, 3, 9, 1, ...],
5541 * ...]
5542 */
5543 RARRAY_ASET(branches, 0, rb_ary_tmp_new(0));
5544 /* branch execution counters */
5545 RARRAY_ASET(branches, 1, rb_ary_tmp_new(0));
5546 }
5547 RARRAY_ASET(coverage, COVERAGE_INDEX_BRANCHES, branches);
5548
5549 return coverage;
5550}
5551
5552VALUE
5553 rb_uninterruptible(VALUE (*b_proc)(VALUE), VALUE data)
5554 {
5555 VALUE interrupt_mask = rb_ident_hash_new();
5556 rb_thread_t *cur_th = GET_THREAD();
5557
5558 rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
5559 OBJ_FREEZE_RAW(interrupt_mask);
5560 rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
5561
5562 return rb_ensure(b_proc, data, rb_ary_pop, cur_th->pending_interrupt_mask_stack);
5563}
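/*
 * Usage sketch (illustrative; critical_body is a hypothetical callback):
 * rb_uninterruptible runs b_proc with an Object => :never mask pushed on
 * the pending-interrupt mask stack, so Thread#raise / Thread#kill against
 * the current thread are deferred until b_proc returns:
 *
 *     static VALUE critical_body(VALUE arg) { /* ... */ return Qnil; }
 *
 *     rb_uninterruptible(critical_body, (VALUE)data);
 */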