#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

#include "gc.h"

#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif

static void native_mutex_lock(pthread_mutex_t *lock);
static void native_mutex_unlock(pthread_mutex_t *lock);
static int native_mutex_trylock(pthread_mutex_t *lock);
static void native_mutex_initialize(pthread_mutex_t *lock);
static void native_mutex_destroy(pthread_mutex_t *lock);

static void native_cond_signal(pthread_cond_t *cond);
static void native_cond_broadcast(pthread_cond_t *cond);
static void native_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex);
static void native_cond_initialize(pthread_cond_t *cond);
static void native_cond_destroy(pthread_cond_t *cond);

static void
native_mutex_lock(pthread_mutex_t *lock)
{
    int r;
    if ((r = pthread_mutex_lock(lock)) != 0) {
        rb_bug_errno("pthread_mutex_lock", r);
    }
}

static void
native_mutex_unlock(pthread_mutex_t *lock)
{
    int r;
    if ((r = pthread_mutex_unlock(lock)) != 0) {
        rb_bug_errno("pthread_mutex_unlock", r);
    }
}

static inline int
native_mutex_trylock(pthread_mutex_t *lock)
{
    int r;
    if ((r = pthread_mutex_trylock(lock)) != 0) {
        if (r == EBUSY) {
            return EBUSY;
        }
        else {
            rb_bug_errno("pthread_mutex_trylock", r);
        }
    }
    return 0;
}
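
/*
 * Illustrative usage sketch (not part of the original file): EBUSY is the
 * only error these wrappers report back to the caller; every other pthread
 * failure aborts the process via rb_bug_errno above.
 */
#if 0
static void
trylock_usage_example(pthread_mutex_t *lock)
{
    if (native_mutex_trylock(lock) == EBUSY) {
        /* somebody else holds the lock: take the contended path */
    }
    else {
        /* lock acquired: critical section */
        native_mutex_unlock(lock);
    }
}
#endif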

static void
native_mutex_initialize(pthread_mutex_t *lock)
{
    int r = pthread_mutex_init(lock, 0);
    if (r != 0) {
        rb_bug_errno("pthread_mutex_init", r);
    }
}

#define native_mutex_reinitialize_atfork(lock) (\
        native_mutex_unlock(lock), \
        native_mutex_initialize(lock), \
        native_mutex_lock(lock))
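
/*
 * Why reinitialize at fork: after fork(2) the child inherits the memory
 * image of every mutex, including ones locked by threads that do not exist
 * in the child.  For a lock that the forking thread held across fork(),
 * the child unlocks the stale image, reinitializes it, and re-acquires it,
 * ending up with a fresh mutex that it owns.  A minimal sketch of the
 * intended call site (`some_lock` is a made-up name):
 */
#if 0
    /* in the child, right after fork(), while logically holding some_lock */
    native_mutex_reinitialize_atfork(&some_lock);
    /* some_lock is now a fresh mutex, held by this (the only) thread */
#endif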

static void
native_mutex_destroy(pthread_mutex_t *lock)
{
    int r = pthread_mutex_destroy(lock);
    if (r != 0) {
        rb_bug_errno("pthread_mutex_destroy", r);
    }
}

static void
native_cond_initialize(pthread_cond_t *cond)
{
    int r = pthread_cond_init(cond, 0);
    if (r != 0) {
        rb_bug_errno("pthread_cond_init", r);
    }
}

static void
native_cond_destroy(pthread_cond_t *cond)
{
    int r = pthread_cond_destroy(cond);
    if (r != 0) {
        rb_bug_errno("pthread_cond_destroy", r);
    }
}

static void
native_cond_signal(pthread_cond_t *cond)
{
    int r = pthread_cond_signal(cond);
    if (r != 0) {
        rb_bug_errno("pthread_cond_signal", r);
    }
}

static void
native_cond_broadcast(pthread_cond_t *cond)
{
    int r = pthread_cond_broadcast(cond);
    if (r != 0) {
        rb_bug_errno("pthread_cond_broadcast", r);
    }
}

static void
native_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
    int r = pthread_cond_wait(cond, mutex);
    if (r != 0) {
        rb_bug_errno("pthread_cond_wait", r);
    }
}

static int
native_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex, struct timespec *ts)
{
    int r = pthread_cond_timedwait(cond, mutex, ts);
    /* EINTR is not a POSIX result of pthread_cond_timedwait, but some
     * implementations have been known to return it; tolerate it here. */
    if (r != 0 && r != ETIMEDOUT && r != EINTR) {
        rb_bug_errno("pthread_cond_timedwait", r);
    }
    return r;
}
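
/*
 * Usage sketch for native_cond_timedwait (illustrative only): the third
 * argument is an absolute deadline, so callers build it from the current
 * wall-clock time.  ETIMEDOUT is an ordinary result, not an error.
 */
#if 0
static int
wait_with_timeout_example(pthread_cond_t *cond, pthread_mutex_t *lock, long msec)
{
    struct timeval tv;
    struct timespec ts;

    gettimeofday(&tv, 0);
    ts.tv_sec = tv.tv_sec + msec / 1000;
    ts.tv_nsec = tv.tv_usec * 1000 + (msec % 1000) * 1000000;
    if (ts.tv_nsec >= 1000000000) {
        ts.tv_sec++;
        ts.tv_nsec -= 1000000000;
    }
    /* returns 0, ETIMEDOUT, or (rarely) EINTR */
    return native_cond_timedwait(cond, lock, &ts);
}
#endif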

#define native_cleanup_push pthread_cleanup_push
#define native_cleanup_pop pthread_cleanup_pop
#ifdef HAVE_SCHED_YIELD
#define native_thread_yield() (void)sched_yield()
#else
#define native_thread_yield() ((void)0)
#endif

#ifndef __CYGWIN__
static void add_signal_thread_list(rb_thread_t *th);
#endif
static void remove_signal_thread_list(rb_thread_t *th);

static rb_thread_lock_t signal_thread_list_lock;

static pthread_key_t ruby_native_thread_key;

static void
null_func(int i)
{
    /* do nothing: the SIGVTALRM handler exists only so that pthread_kill()
     * can interrupt blocking system calls */
}

static rb_thread_t *
ruby_thread_from_native(void)
{
    return pthread_getspecific(ruby_native_thread_key);
}

static int
ruby_thread_set_native(rb_thread_t *th)
{
    return pthread_setspecific(ruby_native_thread_key, th) == 0;
}

void
Init_native_thread(void)
{
    rb_thread_t *th = GET_THREAD();

    pthread_key_create(&ruby_native_thread_key, NULL);
    th->thread_id = pthread_self();
    native_cond_initialize(&th->native_thread_data.sleep_cond);
    ruby_thread_set_native(th);
    native_mutex_initialize(&signal_thread_list_lock);
    posix_signal(SIGVTALRM, null_func);
}

static void
native_thread_destroy(rb_thread_t *th)
{
    pthread_mutex_destroy(&th->interrupt_lock);
    pthread_cond_destroy(&th->native_thread_data.sleep_cond);
}

#define USE_THREAD_CACHE 0

#if STACK_GROW_DIRECTION
#define STACK_GROW_DIR_DETECTION
#define STACK_DIR_UPPER(a,b) STACK_UPPER(0, a, b)
#else
#define STACK_GROW_DIR_DETECTION VALUE stack_grow_dir_detection
#define STACK_DIR_UPPER(a,b) STACK_UPPER(&stack_grow_dir_detection, a, b)
#endif

#if defined HAVE_PTHREAD_GETATTR_NP || defined HAVE_PTHREAD_ATTR_GET_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GETTHRDS_NP
#define STACKADDR_AVAILABLE 1
#endif

#ifdef STACKADDR_AVAILABLE
/*
 * Fetch the stack base address and size of the calling thread, excluding
 * the guard area.  Returns 0 on success or an errno value on failure.
 */
static int
get_stack(void **addr, size_t *size)
{
#define CHECK_ERR(expr) \
    {int err = (expr); if (err) return err;}
#if defined HAVE_PTHREAD_GETATTR_NP || defined HAVE_PTHREAD_ATTR_GET_NP
    pthread_attr_t attr;
    size_t guard = 0;

# ifdef HAVE_PTHREAD_GETATTR_NP
    CHECK_ERR(pthread_getattr_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
# else
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
# endif
    if (pthread_attr_getguardsize(&attr, &guard) == 0) {
        STACK_GROW_DIR_DETECTION;
        STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + guard));
        *size -= guard;
    }
# else
    CHECK_ERR(pthread_attr_init(&attr));
    CHECK_ERR(pthread_attr_get_np(pthread_self(), &attr));
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
    /* subtract the guard area in this branch only; the pthread_getattr_np
     * branch has already compensated for it above */
    CHECK_ERR(pthread_attr_getguardsize(&attr, &guard));
    *size -= guard;
# endif
    pthread_attr_destroy(&attr);
#elif defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP
    pthread_t th = pthread_self();
    *addr = pthread_get_stackaddr_np(th);
    *size = pthread_get_stacksize_np(th);
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
    stack_t stk;
# if defined HAVE_THR_STKSEGMENT
    CHECK_ERR(thr_stksegment(&stk));
# else
    CHECK_ERR(pthread_stackseg_np(pthread_self(), &stk));
# endif
    *addr = stk.ss_sp;
    *size = stk.ss_size;
#elif defined HAVE_PTHREAD_GETTHRDS_NP
    pthread_t th = pthread_self();
    struct __pthrdsinfo thinfo;
    char reg[256];
    int regsiz = sizeof(reg);
    CHECK_ERR(pthread_getthrds_np(&th, PTHRDSINFO_QUERY_ALL,
                                  &thinfo, sizeof(thinfo),
                                  &reg, &regsiz));
    *addr = thinfo.__pi_stackaddr;
    *size = thinfo.__pi_stacksize;
#endif
    return 0;
#undef CHECK_ERR
}
#endif

static struct {
    rb_thread_id_t id;
    size_t stack_maxsize;
    VALUE *stack_start;
#ifdef __ia64
    VALUE *register_stack_start;
#endif
} native_main_thread;

#ifdef STACK_END_ADDRESS
extern void *STACK_END_ADDRESS;
#endif

#undef ruby_init_stack
void
ruby_init_stack(volatile VALUE *addr
#ifdef __ia64
    , void *bsp
#endif
    )
{
    native_main_thread.id = pthread_self();
#ifdef STACK_END_ADDRESS
    native_main_thread.stack_start = STACK_END_ADDRESS;
#else
    if (!native_main_thread.stack_start ||
        STACK_UPPER((VALUE *)(void *)&addr,
                    native_main_thread.stack_start > addr,
                    native_main_thread.stack_start < addr)) {
        native_main_thread.stack_start = (VALUE *)addr;
    }
#endif
#ifdef __ia64
    if (!native_main_thread.register_stack_start ||
        (VALUE*)bsp < native_main_thread.register_stack_start) {
        native_main_thread.register_stack_start = (VALUE*)bsp;
    }
#endif
    {
        size_t size = 0;
        size_t space = 0;
#if defined(HAVE_PTHREAD_ATTR_GET_NP)
        void* addr;
        get_stack(&addr, &size);
#elif defined(HAVE_GETRLIMIT)
        struct rlimit rlim;
        if (getrlimit(RLIMIT_STACK, &rlim) == 0) {
            size = (size_t)rlim.rlim_cur;
        }
#endif
        space = size > 5 * 1024 * 1024 ? 1024 * 1024 : size / 5;
        native_main_thread.stack_maxsize = size - space;
    }
}
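
/*
 * Worked example of the reservation above: with a typical 8 MB
 * RLIMIT_STACK, size/5 (1.6 MB) exceeds the 1 MB cap, so space = 1 MB and
 * stack_maxsize = 7 MB.  With a 2 MB limit, space = 2 MB / 5 (about
 * 410 KB) and stack_maxsize is roughly 1.6 MB, leaving headroom before
 * the OS guard page is hit.
 */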

#define CHECK_ERR(expr) \
    {int err = (expr); if (err) {rb_bug_errno(#expr, err);}}

static int
native_thread_init_stack(rb_thread_t *th)
{
    rb_thread_id_t curr = pthread_self();

    if (pthread_equal(curr, native_main_thread.id)) {
        th->machine_stack_start = native_main_thread.stack_start;
        th->machine_stack_maxsize = native_main_thread.stack_maxsize;
    }
    else {
#ifdef STACKADDR_AVAILABLE
        void *start;
        size_t size;

        if (get_stack(&start, &size) == 0) {
            th->machine_stack_start = start;
            th->machine_stack_maxsize = size;
        }
#else
        rb_raise(rb_eNotImpError, "ruby engine can initialize only in the main thread");
#endif
    }
#ifdef __ia64
    th->machine_register_stack_start = native_main_thread.register_stack_start;
    th->machine_stack_maxsize /= 2;
    th->machine_register_stack_maxsize = th->machine_stack_maxsize;
#endif
    return 0;
}

static void *
thread_start_func_1(void *th_ptr)
{
#if USE_THREAD_CACHE
  thread_start:
#endif
    {
        rb_thread_t *th = th_ptr;
        VALUE stack_start;

#ifndef __CYGWIN__
        native_thread_init_stack(th);
#endif
        /* run the Ruby-level thread body */
        thread_start_func_2(th, &stack_start, rb_ia64_bsp());
    }
#if USE_THREAD_CACHE
    if (1) {
        /* cache this native thread and wait for reuse */
        rb_thread_t *th;
        static rb_thread_t *register_cached_thread_and_wait(void);
        if ((th = register_cached_thread_and_wait()) != 0) {
            th_ptr = (void *)th;
            th->thread_id = pthread_self();
            goto thread_start;
        }
    }
#endif
    return 0;
}

void rb_thread_create_control_thread(void);

struct cached_thread_entry {
    volatile rb_thread_t **th_area;
    pthread_cond_t *cond;
    struct cached_thread_entry *next;
};
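
/*
 * Thread cache protocol (active only when USE_THREAD_CACHE is nonzero):
 * a native thread whose Ruby thread has finished pushes an entry onto
 * cached_thread_root and blocks on its private condition variable for up
 * to 60 seconds.  A later native_thread_create() can pop an entry, store
 * the new rb_thread_t through th_area, and signal the sleeper, which then
 * re-enters thread_start_func_1 instead of a fresh pthread being created.
 */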

#if USE_THREAD_CACHE
static pthread_mutex_t thread_cache_lock = PTHREAD_MUTEX_INITIALIZER;
struct cached_thread_entry *cached_thread_root;

static rb_thread_t *
register_cached_thread_and_wait(void)
{
    pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    volatile rb_thread_t *th_area = 0;
    struct cached_thread_entry *entry =
      (struct cached_thread_entry *)malloc(sizeof(struct cached_thread_entry));

    struct timeval tv;
    struct timespec ts;

    if (entry == 0) {
        return 0; /* cannot cache this thread; let it exit */
    }

    /* wait at most 60 seconds for somebody to reuse this thread */
    gettimeofday(&tv, 0);
    ts.tv_sec = tv.tv_sec + 60;
    ts.tv_nsec = tv.tv_usec * 1000;

    pthread_mutex_lock(&thread_cache_lock);
    {
        entry->th_area = &th_area;
        entry->cond = &cond;
        entry->next = cached_thread_root;
        cached_thread_root = entry;

        pthread_cond_timedwait(&cond, &thread_cache_lock, &ts);

        /* unlink our entry whether we were signaled or timed out */
        {
            struct cached_thread_entry *e = cached_thread_root;
            struct cached_thread_entry *prev = cached_thread_root;

            while (e) {
                if (e == entry) {
                    if (prev == cached_thread_root) {
                        cached_thread_root = e->next;
                    }
                    else {
                        prev->next = e->next;
                    }
                    break;
                }
                prev = e;
                e = e->next;
            }
        }

        free(entry);
        pthread_cond_destroy(&cond);
    }
    pthread_mutex_unlock(&thread_cache_lock);

    return (rb_thread_t *)th_area;
}
#endif

static int
use_cached_thread(rb_thread_t *th)
{
    int result = 0;
#if USE_THREAD_CACHE
    struct cached_thread_entry *entry;

    if (cached_thread_root) {
        pthread_mutex_lock(&thread_cache_lock);
        entry = cached_thread_root;
        {
            if (cached_thread_root) {
                cached_thread_root = entry->next;
                *entry->th_area = th;
                result = 1;
            }
        }
        if (result) {
            pthread_cond_signal(entry->cond);
        }
        pthread_mutex_unlock(&thread_cache_lock);
    }
#endif
    return result;
}

enum {
#ifdef __SYMBIAN32__
    RUBY_STACK_MIN_LIMIT = 64 * 1024,
#else
    RUBY_STACK_MIN_LIMIT = 512 * 1024,
#endif
    RUBY_STACK_SPACE_LIMIT = 1024 * 1024
};

#ifdef PTHREAD_STACK_MIN
#define RUBY_STACK_MIN ((RUBY_STACK_MIN_LIMIT < PTHREAD_STACK_MIN) ? \
                        PTHREAD_STACK_MIN * 2 : RUBY_STACK_MIN_LIMIT)
#else
#define RUBY_STACK_MIN (RUBY_STACK_MIN_LIMIT)
#endif
#define RUBY_STACK_SPACE (RUBY_STACK_MIN/5 > RUBY_STACK_SPACE_LIMIT ? \
                          RUBY_STACK_SPACE_LIMIT : RUBY_STACK_MIN/5)
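
/*
 * Worked example: on a typical Linux system PTHREAD_STACK_MIN (16 KB) is
 * below the 512 KB RUBY_STACK_MIN_LIMIT, so RUBY_STACK_MIN is 512 KB and
 * RUBY_STACK_SPACE is 512 KB / 5 = 102 KB (well under the 1 MB cap).  A
 * new thread therefore gets machine_stack_maxsize of about
 * 512 KB - 102 KB = 410 KB of usable stack in the code below.
 */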

static int
native_thread_create(rb_thread_t *th)
{
    int err = 0;

    if (use_cached_thread(th)) {
        thread_debug("create (use cached thread): %p\n", (void *)th);
    }
    else {
        pthread_attr_t attr;
        const size_t stack_size = RUBY_STACK_MIN;
        const size_t space = RUBY_STACK_SPACE;

        th->machine_stack_maxsize = stack_size - space;
#ifdef __ia64
        th->machine_stack_maxsize /= 2;
        th->machine_register_stack_maxsize = th->machine_stack_maxsize;
#endif

        CHECK_ERR(pthread_attr_init(&attr));

#ifdef PTHREAD_STACK_MIN
        thread_debug("create - stack size: %lu\n", (unsigned long)stack_size);
        CHECK_ERR(pthread_attr_setstacksize(&attr, stack_size));
#endif

#ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
        CHECK_ERR(pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
#endif
        CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));

        err = pthread_create(&th->thread_id, &attr, thread_start_func_1, th);
        thread_debug("create: %p (%d)\n", (void *)th, err);
        CHECK_ERR(pthread_attr_destroy(&attr));

        if (!err) {
            pthread_cond_init(&th->native_thread_data.sleep_cond, 0);
        }
    }
    return err;
}

static void
native_thread_join(pthread_t th)
{
    int err = pthread_join(th, 0);
    if (err) {
        rb_raise(rb_eThreadError, "native_thread_join() failed (%d)", err);
    }
}


#if USE_NATIVE_THREAD_PRIORITY

static void
native_thread_apply_priority(rb_thread_t *th)
{
#if defined(_POSIX_PRIORITY_SCHEDULING) && (_POSIX_PRIORITY_SCHEDULING > 0)
    struct sched_param sp;
    int policy;
    int priority = 0 - th->priority;
    int max, min;
    pthread_getschedparam(th->thread_id, &policy, &sp);
    max = sched_get_priority_max(policy);
    min = sched_get_priority_min(policy);

    if (min > priority) {
        priority = min;
    }
    else if (max < priority) {
        priority = max;
    }

    sp.sched_priority = priority;
    pthread_setschedparam(th->thread_id, policy, &sp);
#else
    /* priority scheduling unavailable: not touched */
#endif
}

#endif /* USE_NATIVE_THREAD_PRIORITY */

static void
ubf_pthread_cond_signal(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    thread_debug("ubf_pthread_cond_signal (%p)\n", (void *)th);
    pthread_cond_signal(&th->native_thread_data.sleep_cond);
}

#if !defined(__CYGWIN__) && !defined(__SYMBIAN32__)
static void
ubf_select_each(rb_thread_t *th)
{
    if (th) {
        thread_debug("ubf_select_each (%p)\n", (void *)th->thread_id);
        pthread_kill(th->thread_id, SIGVTALRM);
    }
}

static void
ubf_select(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    add_signal_thread_list(th);
    ubf_select_each(th);
}
#else
#define ubf_select 0
#endif

#define PER_NANO 1000000000

static void
native_sleep(rb_thread_t *th, struct timeval *tv)
{
    struct timespec ts;
    struct timeval tvn;

    if (tv) {
        gettimeofday(&tvn, NULL);
        ts.tv_sec = tvn.tv_sec + tv->tv_sec;
        ts.tv_nsec = (tvn.tv_usec + tv->tv_usec) * 1000;
        if (ts.tv_nsec >= PER_NANO) {
            ts.tv_sec += 1;
            ts.tv_nsec -= PER_NANO;
        }
    }

    thread_debug("native_sleep %ld\n", (long)(tv ? tv->tv_sec : -1));
    GVL_UNLOCK_BEGIN();
    {
        pthread_mutex_lock(&th->interrupt_lock);
        th->unblock.func = ubf_pthread_cond_signal;
        th->unblock.arg = th;

        if (RUBY_VM_INTERRUPTED(th)) {
            /* interrupted: return immediately */
            thread_debug("native_sleep: interrupted before sleep\n");
        }
        else {
            if (tv == 0 || ts.tv_sec < tvn.tv_sec /* overflow */) {
                int r;
                thread_debug("native_sleep: pthread_cond_wait start\n");
                r = pthread_cond_wait(&th->native_thread_data.sleep_cond,
                                      &th->interrupt_lock);
                if (r) rb_bug_errno("pthread_cond_wait", r);
                thread_debug("native_sleep: pthread_cond_wait end\n");
            }
            else {
                int r;
                thread_debug("native_sleep: pthread_cond_timedwait start (%ld, %ld)\n",
                             (long)ts.tv_sec, (long)ts.tv_nsec);
                r = pthread_cond_timedwait(&th->native_thread_data.sleep_cond,
                                           &th->interrupt_lock, &ts);
                if (r && r != ETIMEDOUT) rb_bug_errno("pthread_cond_timedwait", r);

                thread_debug("native_sleep: pthread_cond_timedwait end (%d)\n", r);
            }
        }
        th->unblock.func = 0;
        th->unblock.arg = 0;

        pthread_mutex_unlock(&th->interrupt_lock);
    }
    GVL_UNLOCK_END();

    thread_debug("native_sleep done\n");
}
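
/*
 * How the unblocking hook above is consumed (informal sketch, not the
 * interrupt code itself): while a thread sleeps in native_sleep,
 * th->unblock.func points at ubf_pthread_cond_signal, so an interrupting
 * thread can wake the sleeper roughly like this (locking of
 * interrupt_lock elided for brevity; `target` is a made-up name):
 */
#if 0
    if (target->unblock.func) {
        (*target->unblock.func)(target->unblock.arg); /* signals sleep_cond */
    }
#endif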

struct signal_thread_list {
    rb_thread_t *th;
    struct signal_thread_list *prev;
    struct signal_thread_list *next;
};

#ifndef __CYGWIN__
static struct signal_thread_list signal_thread_list_anchor = {
    0, 0, 0,
};
#endif

#define FGLOCK(lock, body) do { \
    native_mutex_lock(lock); \
    { \
        body; \
    } \
    native_mutex_unlock(lock); \
} while (0)

#if 0 /* debugging aid */
static void
print_signal_list(char *str)
{
    struct signal_thread_list *list =
      signal_thread_list_anchor.next;
    thread_debug("list (%s)> ", str);
    while (list) {
        thread_debug("%p (%p), ", list->th, list->th->thread_id);
        list = list->next;
    }
    thread_debug("\n");
}
#endif

#ifndef __CYGWIN__
static void
add_signal_thread_list(rb_thread_t *th)
{
    if (!th->native_thread_data.signal_thread_list) {
        FGLOCK(&signal_thread_list_lock, {
            struct signal_thread_list *list =
              malloc(sizeof(struct signal_thread_list));

            if (list == 0) {
                fprintf(stderr, "[FATAL] failed to allocate memory\n");
                exit(1);
            }

            list->th = th;

            list->prev = &signal_thread_list_anchor;
            list->next = signal_thread_list_anchor.next;
            if (list->next) {
                list->next->prev = list;
            }
            signal_thread_list_anchor.next = list;
            th->native_thread_data.signal_thread_list = list;
        });
    }
}
#endif

static void
remove_signal_thread_list(rb_thread_t *th)
{
    if (th->native_thread_data.signal_thread_list) {
        FGLOCK(&signal_thread_list_lock, {
            struct signal_thread_list *list =
              (struct signal_thread_list *)
              th->native_thread_data.signal_thread_list;

            list->prev->next = list->next;
            if (list->next) {
                list->next->prev = list->prev;
            }
            th->native_thread_data.signal_thread_list = 0;
            list->th = 0;
            free(list);
        });
    }
    else {
        /* not registered; nothing to do */
    }
}

static pthread_t timer_thread_id;
static pthread_cond_t timer_thread_cond = PTHREAD_COND_INITIALIZER;
static pthread_mutex_t timer_thread_lock = PTHREAD_MUTEX_INITIALIZER;

/* fill *ts with the absolute time `nsec` nanoseconds from now */
static struct timespec *
get_ts(struct timespec *ts, unsigned long nsec)
{
    struct timeval tv;
    gettimeofday(&tv, 0);
    ts->tv_sec = tv.tv_sec;
    ts->tv_nsec = tv.tv_usec * 1000 + nsec;
    if (ts->tv_nsec >= PER_NANO) {
        ts->tv_sec++;
        ts->tv_nsec -= PER_NANO;
    }
    return ts;
}

static void *
thread_timer(void *dummy)
{
    struct timespec ts;

    native_mutex_lock(&timer_thread_lock);
    native_cond_broadcast(&timer_thread_cond); /* tell the creator we are running */
#define WAIT_FOR_10MS() \
    native_cond_timedwait(&timer_thread_cond, &timer_thread_lock, get_ts(&ts, PER_NANO/100))
    while (system_working > 0) {
        int err = WAIT_FOR_10MS();
        if (err == ETIMEDOUT) {
            /* the normal 10ms tick; fall through to the work below */
        }
        else if (err == 0 || err == EINTR) {
            /* signaled: exit once no buffered signals remain */
            if (rb_signal_buff_size() == 0) break;
        }
        else rb_bug_errno("thread_timer/timedwait", err);

#if !defined(__CYGWIN__) && !defined(__SYMBIAN32__)
        if (signal_thread_list_anchor.next) {
            FGLOCK(&signal_thread_list_lock, {
                struct signal_thread_list *list;
                list = signal_thread_list_anchor.next;
                while (list) {
                    ubf_select_each(list->th);
                    list = list->next;
                }
            });
        }
#endif
        timer_thread_function(dummy);
    }
    native_mutex_unlock(&timer_thread_lock);
    return NULL;
}

static void
rb_thread_create_timer_thread(void)
{
    rb_enable_interrupt();

    if (!timer_thread_id) {
        pthread_attr_t attr;
        int err;

        pthread_attr_init(&attr);
#ifdef PTHREAD_STACK_MIN
        pthread_attr_setstacksize(&attr,
                                  PTHREAD_STACK_MIN + (THREAD_DEBUG ? BUFSIZ : 0));
#endif
        native_mutex_lock(&timer_thread_lock);
        err = pthread_create(&timer_thread_id, &attr, thread_timer, 0);
        if (err != 0) {
            native_mutex_unlock(&timer_thread_lock);
            fprintf(stderr, "[FATAL] Failed to create timer thread (errno: %d)\n", err);
            exit(EXIT_FAILURE);
        }
        /* wait until the timer thread has announced itself */
        native_cond_wait(&timer_thread_cond, &timer_thread_lock);
        native_mutex_unlock(&timer_thread_lock);
    }
    rb_disable_interrupt();
}

static int
native_stop_timer_thread(void)
{
    int stopped;
    native_mutex_lock(&timer_thread_lock);
    stopped = --system_working <= 0;
    if (stopped) {
        native_cond_signal(&timer_thread_cond);
    }
    native_mutex_unlock(&timer_thread_lock);
    if (stopped) {
        native_thread_join(timer_thread_id);
    }
    return stopped;
}

static void
native_reset_timer_thread(void)
{
    timer_thread_id = 0;
}

#ifdef HAVE_SIGALTSTACK
int
ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
{
    void *base;
    size_t size;
    const size_t water_mark = 1024 * 1024;
    STACK_GROW_DIR_DETECTION;

    if (th) {
        size = th->machine_stack_maxsize;
        base = (char *)th->machine_stack_start - STACK_DIR_UPPER(0, size);
    }
#ifdef STACKADDR_AVAILABLE
    else if (get_stack(&base, &size) == 0) {
        STACK_DIR_UPPER((void)(base = (char *)base + size), (void)0);
    }
#endif
    else {
        return 0;
    }
    size /= 5;
    if (size > water_mark) size = water_mark;
    if (STACK_DIR_UPPER(1, 0)) {
        if (size > ~(size_t)base+1) size = ~(size_t)base+1;
        if (addr > base && addr <= (void *)((char *)base + size)) return 1;
    }
    else {
        if (size > (size_t)base) size = (size_t)base;
        if (addr > (void *)((char *)base - size) && addr <= base) return 1;
    }
    return 0;
}
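
/*
 * Informal reading of the check above for a downward-growing stack: `base`
 * ends up at the low end of the stack, size/5 is clamped to the 1 MB water
 * mark, and a faulting address within that margin at or just below `base`
 * is classified as a stack overflow rather than a stray segfault.  For
 * example, with roughly 7 MB of usable stack, 7 MB / 5 = 1.4 MB is clamped
 * to 1 MB, so faults within 1 MB below the stack's low end count.
 */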

#endif /* HAVE_SIGALTSTACK */

#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */