Scheduler: made scheduled signals thread-specific
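
Instead of a single static signal_call[] array shared by every scheduler, each scheduler now keeps its signal slots in the runtime global variable "srtsignals", created with the thread-specific flag set, so signal registrations made in one scheduler thread no longer clobber another's. The wakeup task is added lazily from a zero-timeout task (silc_schedule_wakeup_init) rather than directly in silc_schedule_internal_init, the scheduler context is allocated from the schedule's stack with silc_scalloc, and queued signal callbacks are now delivered with signals unblocked around the call.

The lookup-or-create pattern the patch uses for the signal table is sketched below with a hypothetical per-thread counter. This is only an illustration of the silc_global_* calls visible in the diff, and it assumes the TRUE argument selects per-thread storage, as the commit title suggests.

  #include "silcruntime.h"

  /* Hypothetical example, not part of the patch: a per-thread counter kept
     with the same lookup-or-create pattern used for "srtsignals". */
  static SilcUInt32 *example_thread_counter(void)
  {
    SilcUInt32 *counter;

    /* Fetch this thread's copy of the variable; TRUE asks for the
       thread-specific variant. */
    counter = silc_global_get_var("example_counter", TRUE);
    if (!counter)
      /* Not created in this thread yet; allocate it (NULL = no initial
         value to copy in). */
      counter = silc_global_set_var("example_counter", sizeof(*counter),
                                    NULL, TRUE);

    return counter;
  }

  /* When the owner is done with it, the variable is dropped the same way
     silc_schedule_internal_uninit drops "srtsignals". */
  static void example_thread_counter_free(void)
  {
    silc_global_del_var("example_counter", TRUE);
  }
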
[runtime.git] / lib / silcutil / unix / silcunixschedule.c
index 9a4ae9a49a192c1f507055bed2d9d443773d7a6c..4fa48fbfc47528c7665efe1811536290de7725ea 100644
@@ -4,7 +4,7 @@
 
   Author: Pekka Riikonen <priikone@silcnet.org>
 
-  Copyright (C) 1998 - 2007 Pekka Riikonen
+  Copyright (C) 1998 - 2008 Pekka Riikonen
 
   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
@@ -16,9 +16,8 @@
   GNU General Public License for more details.
 
 */
-/* $Id$ */
 
-#include "silc.h"
+#include "silcruntime.h"
 
 #if defined(HAVE_EPOLL_WAIT)
 #include <sys/epoll.h>
@@ -55,7 +54,6 @@ typedef struct {
 } SilcUnixSignal;
 
 #define SIGNAL_COUNT 32
-SilcUnixSignal signal_call[SIGNAL_COUNT];
 
 #if defined(HAVE_EPOLL_WAIT)
 
@@ -121,11 +119,13 @@ int silc_poll(SilcSchedule schedule, void *context)
   struct pollfd *fds = internal->fds;
   SilcUInt32 fds_count = internal->fds_count;
   int fd, ret, i = 0, timeout = -1;
+  void *fdp;
 
   silc_hash_table_list(schedule->fd_queue, &htl);
-  while (silc_hash_table_get(&htl, (void **)&fd, (void **)&task)) {
+  while (silc_hash_table_get(&htl, &fdp, (void *)&task)) {
     if (!task->events)
       continue;
+    fd = SILC_PTR_TO_32(fdp);
 
     /* Allocate larger fd table if needed */
     if (i >= fds_count) {
@@ -172,7 +172,7 @@ int silc_poll(SilcSchedule schedule, void *context)
     if (!fds[i].revents)
       continue;
     if (!silc_hash_table_find(schedule->fd_queue, SILC_32_TO_PTR(fds[i].fd),
-                             NULL, (void **)&task))
+                             NULL, (void *)&task))
       continue;
     if (!task->header.valid || !task->events)
       continue;
@@ -198,14 +198,16 @@ int silc_select(SilcSchedule schedule, void *context)
   SilcTaskFd task;
   fd_set in, out;
   int fd, max_fd = 0, ret;
+  void *fdp;
 
   FD_ZERO(&in);
   FD_ZERO(&out);
 
   silc_hash_table_list(schedule->fd_queue, &htl);
-  while (silc_hash_table_get(&htl, (void **)&fd, (void **)&task)) {
+  while (silc_hash_table_get(&htl, &fdp, (void *)&task)) {
     if (!task->events)
       continue;
+    fd = SILC_PTR_TO_32(fdp);
 
 #ifdef FD_SETSIZE
     if (fd >= FD_SETSIZE)
@@ -233,9 +235,10 @@ int silc_select(SilcSchedule schedule, void *context)
     return ret;
 
   silc_hash_table_list(schedule->fd_queue, &htl);
-  while (silc_hash_table_get(&htl, (void **)&fd, (void **)&task)) {
+  while (silc_hash_table_get(&htl, &fdp, (void *)&task)) {
     if (!task->header.valid || !task->events)
       continue;
+    fd = SILC_PTR_TO_32(fdp);
 
 #ifdef FD_SETSIZE
     if (fd >= FD_SETSIZE)
@@ -271,7 +274,7 @@ SilcBool silc_schedule_internal_schedule_fd(SilcSchedule schedule,
 
   SILC_LOG_DEBUG(("Scheduling fd %lu, mask %x", task->fd, event_mask));
 
-  event.events = 0;
+  memset(&event, 0, sizeof(event));
   if (event_mask & SILC_TASK_READ)
     event.events |= (EPOLLIN | EPOLLPRI);
   if (event_mask & SILC_TASK_WRITE)
@@ -283,6 +286,7 @@ SilcBool silc_schedule_internal_schedule_fd(SilcSchedule schedule,
       SILC_LOG_DEBUG(("epoll_ctl (DEL): %s", strerror(errno)));
       return FALSE;
     }
+    task->scheduled = FALSE;
     return TRUE;
   }
 
@@ -316,23 +320,41 @@ SILC_TASK_CALLBACK(silc_schedule_wakeup_cb)
 
   SILC_LOG_DEBUG(("Wokeup"));
 
-  read(internal->wakeup_pipe[0], &c, 1);
+  (void)read(internal->wakeup_pipe[0], &c, 1);
 }
 
+SILC_TASK_CALLBACK(silc_schedule_wakeup_init)
+{
+  SilcUnixScheduler internal = schedule->internal;
+
+  internal->wakeup_task =
+    silc_schedule_task_add(schedule, internal->wakeup_pipe[0],
+                          silc_schedule_wakeup_cb, internal,
+                          0, 0, SILC_TASK_FD);
+  if (!internal->wakeup_task) {
+    SILC_LOG_WARNING(("Could not add a wakeup task, threads won't work"));
+    close(internal->wakeup_pipe[0]);
+    return;
+  }
+  silc_schedule_internal_schedule_fd(schedule, internal,
+                                    (SilcTaskFd)internal->wakeup_task,
+                                    SILC_TASK_READ);
+}
 #endif /* SILC_THREADS */
 
 /* Initializes the platform specific scheduler.  This for example initializes
    the wakeup mechanism of the scheduler.  In multi-threaded environment
-   the scheduler needs to be wakenup when tasks are added or removed from
+   the scheduler needs to be woken up when tasks are added or removed from
    the task queues.  Returns context to the platform specific scheduler. */
 
 void *silc_schedule_internal_init(SilcSchedule schedule,
                                  void *app_context)
 {
   SilcUnixScheduler internal;
+  SilcUnixSignal *signal_call;
   int i;
 
-  internal = silc_calloc(1, sizeof(*internal));
+  internal = silc_scalloc(schedule->stack, 1, sizeof(*internal));
   if (!internal)
     return NULL;
 
@@ -372,32 +394,26 @@ void *silc_schedule_internal_init(SilcSchedule schedule,
 #ifdef SILC_THREADS
   if (pipe(internal->wakeup_pipe)) {
     SILC_LOG_ERROR(("pipe() fails: %s", strerror(errno)));
-    silc_free(internal);
     return NULL;
   }
 
-  internal->wakeup_task =
-    silc_schedule_task_add(schedule, internal->wakeup_pipe[0],
-                          silc_schedule_wakeup_cb, internal,
-                          0, 0, SILC_TASK_FD);
-  if (!internal->wakeup_task) {
-    SILC_LOG_ERROR(("Could not add a wakeup task, threads won't work"));
-    close(internal->wakeup_pipe[0]);
-    close(internal->wakeup_pipe[1]);
-    silc_free(internal);
-    return NULL;
-  }
-#endif
-  silc_schedule_internal_schedule_fd(schedule, internal,
-                                    (SilcTaskFd)internal->wakeup_task,
-                                    SILC_TASK_READ);
+  silc_schedule_task_add_timeout(schedule, silc_schedule_wakeup_init,
+                                internal, 0, 0);
+#endif /* SILC_THREADS */
 
   internal->app_context = app_context;
 
-  for (i = 0; i < SIGNAL_COUNT; i++) {
-    signal_call[i].sig = 0;
-    signal_call[i].call = FALSE;
-    signal_call[i].schedule = schedule;
+  signal_call = silc_global_get_var("srtsignals", TRUE);
+  if (!signal_call)
+    signal_call = silc_global_set_var("srtsignals",
+                                     sizeof(*signal_call) * SIGNAL_COUNT,
+                                     NULL, TRUE);
+  if (signal_call) {
+    for (i = 0; i < SIGNAL_COUNT; i++) {
+      signal_call[i].sig = 0;
+      signal_call[i].call = FALSE;
+      signal_call[i].schedule = schedule;
+    }
   }
 
   return (void *)internal;
@@ -429,7 +445,7 @@ void silc_schedule_internal_uninit(SilcSchedule schedule, void *context)
   silc_free(internal->fds);
 #endif /* HAVE_POLL && HAVE_SETRLIMIT && RLIMIT_NOFILE */
 
-  silc_free(internal);
+  silc_global_del_var("srtsignals", TRUE);
 }
 
 /* Wakes up the scheduler */
@@ -439,12 +455,12 @@ void silc_schedule_internal_wakeup(SilcSchedule schedule, void *context)
 #ifdef SILC_THREADS
   SilcUnixScheduler internal = (SilcUnixScheduler)context;
 
-  if (!internal)
+  if (!internal || !internal->wakeup_task)
     return;
 
   SILC_LOG_DEBUG(("Wakeup"));
 
-  write(internal->wakeup_pipe[1], "!", 1);
+  (void)write(internal->wakeup_pipe[1], "!", 1);
 #endif
 }
 
@@ -453,6 +469,12 @@ void silc_schedule_internal_wakeup(SilcSchedule schedule, void *context)
 static void silc_schedule_internal_sighandler(int signal)
 {
   int i;
+  SilcUnixSignal *signal_call = silc_global_get_var("srtsignals", TRUE);
+
+  if (!signal_call)
+    return;
+
+  SILC_LOG_DEBUG(("Start"));
 
   for (i = 0; i < SIGNAL_COUNT; i++) {
     if (signal_call[i].sig == signal) {
@@ -472,9 +494,10 @@ void silc_schedule_internal_signal_register(SilcSchedule schedule,
                                             void *callback_context)
 {
   SilcUnixScheduler internal = (SilcUnixScheduler)context;
+  SilcUnixSignal *signal_call = silc_global_get_var("srtsignals", TRUE);
   int i;
 
-  if (!internal)
+  if (!internal || !signal_call)
     return;
 
   SILC_LOG_DEBUG(("Registering signal %d", sig));
@@ -486,6 +509,7 @@ void silc_schedule_internal_signal_register(SilcSchedule schedule,
       signal_call[i].sig = sig;
       signal_call[i].callback = callback;
       signal_call[i].context = callback_context;
+      signal_call[i].schedule = schedule;
       signal_call[i].call = FALSE;
       signal(sig, silc_schedule_internal_sighandler);
       break;
@@ -501,9 +525,10 @@ void silc_schedule_internal_signal_unregister(SilcSchedule schedule,
                                              SilcUInt32 sig)
 {
   SilcUnixScheduler internal = (SilcUnixScheduler)context;
+  SilcUnixSignal *signal_call = silc_global_get_var("srtsignals", TRUE);
   int i;
 
-  if (!internal)
+  if (!internal || !signal_call)
     return;
 
   SILC_LOG_DEBUG(("Unregistering signal %d", sig));
@@ -515,6 +540,7 @@ void silc_schedule_internal_signal_unregister(SilcSchedule schedule,
       signal_call[i].sig = 0;
       signal_call[i].callback = NULL;
       signal_call[i].context = NULL;
+      signal_call[i].schedule = NULL;
       signal_call[i].call = FALSE;
       signal(sig, SIG_DFL);
     }
@@ -529,11 +555,12 @@ void silc_schedule_internal_signal_unregister(SilcSchedule schedule,
 void silc_schedule_internal_signals_call(SilcSchedule schedule, void *context)
 {
   SilcUnixScheduler internal = (SilcUnixScheduler)context;
+  SilcUnixSignal *signal_call = silc_global_get_var("srtsignals", TRUE);
   int i;
 
   SILC_LOG_DEBUG(("Start"));
 
-  if (!internal)
+  if (!internal || !signal_call)
     return;
 
   silc_schedule_internal_signals_block(schedule, context);
@@ -543,11 +570,13 @@ void silc_schedule_internal_signals_call(SilcSchedule schedule, void *context)
         signal_call[i].callback) {
       SILC_LOG_DEBUG(("Calling signal %d callback",
                      signal_call[i].sig));
+      silc_schedule_internal_signals_unblock(schedule, context);
       signal_call[i].callback(schedule, internal->app_context,
                              SILC_TASK_INTERRUPT,
                              signal_call[i].sig,
                              signal_call[i].context);
       signal_call[i].call = FALSE;
+      silc_schedule_internal_signals_block(schedule, context);
     }
   }
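
The wakeup mechanism itself is unchanged: the scheduler sleeps in poll()/select() with the read end of wakeup_pipe in its fd set, and silc_schedule_internal_wakeup() writes a single "!" byte from another thread to interrupt the wait. The standalone POSIX sketch below (plain pthreads and poll, not SILC code) shows the same idea in isolation.

  #include <poll.h>
  #include <pthread.h>
  #include <stdio.h>
  #include <unistd.h>

  static int wakeup_pipe[2];

  /* Another thread wakes the "scheduler" by writing one byte, just as
     silc_schedule_internal_wakeup() writes "!" to wakeup_pipe[1]. */
  static void *waker(void *arg)
  {
    sleep(1);
    (void)write(wakeup_pipe[1], "!", 1);
    return NULL;
  }

  int main(void)
  {
    struct pollfd pfd;
    unsigned char c;
    pthread_t t;

    if (pipe(wakeup_pipe))
      return 1;
    pthread_create(&t, NULL, waker, NULL);

    /* The scheduler blocks on the read end; the write above interrupts
       the otherwise indefinite poll, which is the whole point of the
       wakeup task added in silc_schedule_wakeup_init(). */
    pfd.fd = wakeup_pipe[0];
    pfd.events = POLLIN;
    poll(&pfd, 1, -1);
    (void)read(wakeup_pipe[0], &c, 1);
    printf("woken up by '%c'\n", c);

    pthread_join(t, NULL);
    return 0;
  }
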