Diff of ext/libuv/src/unix/kqueue.c between libuv-2.0.8 and libuv-2.0.9.
--- ext/libuv/src/unix/kqueue.c (libuv-2.0.8, old)
+++ ext/libuv/src/unix/kqueue.c (libuv-2.0.9, new)
@@ -76,10 +76,11 @@
   uv__io_t* w;
   sigset_t* pset;
   sigset_t set;
   uint64_t base;
   uint64_t diff;
+  int have_signals;
   int filter;
   int fflags;
   int count;
   int nfds;
   int fd;
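Note: have_signals is declared here to support the change in the dispatch loop further down. When the loop's internal signal watcher fires, it is only recorded in this flag instead of being invoked in place, so its callback can run after all other I/O callbacks.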
@@ -101,11 +102,11 @@
     w = QUEUE_DATA(q, uv__io_t, watcher_queue);
     assert(w->pevents != 0);
     assert(w->fd >= 0);
     assert(w->fd < (int) loop->nwatchers);
 
-    if ((w->events & UV__POLLIN) == 0 && (w->pevents & UV__POLLIN) != 0) {
+    if ((w->events & POLLIN) == 0 && (w->pevents & POLLIN) != 0) {
       filter = EVFILT_READ;
       fflags = 0;
       op = EV_ADD;
 
       if (w->cb == uv__fs_event) {
@@ -122,11 +123,11 @@
           abort();
         nevents = 0;
       }
     }
 
-    if ((w->events & UV__POLLOUT) == 0 && (w->pevents & UV__POLLOUT) != 0) {
+    if ((w->events & POLLOUT) == 0 && (w->pevents & POLLOUT) != 0) {
       EV_SET(events + nevents, w->fd, EVFILT_WRITE, EV_ADD, 0, 0, 0);
 
       if (++nevents == ARRAY_SIZE(events)) {
         if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
           abort();
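The changes in these two hunks are a constant rename only: libuv's private UV__POLLIN/UV__POLLOUT bits are replaced by the standard POLLIN/POLLOUT from <poll.h>. The registration logic itself is unchanged: interest bits are translated into kevent filters, EV_SET entries are batched into the events[] array, and the batch is flushed with a single kevent() call. A minimal standalone sketch of that pattern follows; the helper name is hypothetical and not part of libuv:

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <poll.h>

/* Hypothetical helper, for illustration only: map poll(2)-style interest
 * bits onto kqueue filters and register them in one kevent() call. */
static int register_interest(int kq, int fd, short pevents) {
  struct kevent evs[2];
  int n = 0;

  if (pevents & POLLIN)
    EV_SET(&evs[n++], fd, EVFILT_READ, EV_ADD, 0, 0, 0);
  if (pevents & POLLOUT)
    EV_SET(&evs[n++], fd, EVFILT_WRITE, EV_ADD, 0, 0, 0);

  /* nchanges = n, no events fetched: registration only. */
  return kevent(kq, evs, n, NULL, 0, NULL);
}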
@@ -190,10 +191,11 @@
       /* Interrupted by a signal. Update timeout and poll again. */
       goto update_timeout;
     }
 
+    have_signals = 0;
     nevents = 0;
 
     assert(loop->watchers != NULL);
     loop->watchers[loop->nwatchers] = (void*) events;
     loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
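Stashing the events pointer and count in the two spare loop->watchers slots is what lets a callback that stops a watcher mid-dispatch neutralize that fd's still-pending kevents. The consumer is uv__platform_invalidate_fd() elsewhere in this same file; roughly as below, paraphrased from the libuv sources and shown here for context rather than verbatim:

void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
  struct kevent* events;
  uintptr_t i;
  uintptr_t nfds;

  assert(loop->watchers != NULL);
  assert(fd >= 0);

  events = (struct kevent*) loop->watchers[loop->nwatchers];
  nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
  if (events == NULL)
    return;

  /* Mark pending events for this fd invalid; the dispatch loop skips
   * entries whose ident is -1. */
  for (i = 0; i < nfds; i++)
    if ((int) events[i].ident == fd)
      events[i].ident = -1;
}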
@@ -217,22 +219,22 @@
         continue;
       }
 
       if (ev->filter == EVFILT_VNODE) {
-        assert(w->events == UV__POLLIN);
-        assert(w->pevents == UV__POLLIN);
+        assert(w->events == POLLIN);
+        assert(w->pevents == POLLIN);
         w->cb(loop, w, ev->fflags); /* XXX always uv__fs_event() */
         nevents++;
         continue;
       }
 
       revents = 0;
 
       if (ev->filter == EVFILT_READ) {
-        if (w->pevents & UV__POLLIN) {
-          revents |= UV__POLLIN;
+        if (w->pevents & POLLIN) {
+          revents |= POLLIN;
           w->rcount = ev->data;
         } else {
           /* TODO batch up */
           struct kevent events[1];
           EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
@@ -241,12 +243,12 @@
               abort();
         }
       }
 
       if (ev->filter == EVFILT_WRITE) {
-        if (w->pevents & UV__POLLOUT) {
-          revents |= UV__POLLOUT;
+        if (w->pevents & POLLOUT) {
+          revents |= POLLOUT;
           w->wcount = ev->data;
         } else {
           /* TODO batch up */
           struct kevent events[1];
           EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
@@ -255,24 +257,38 @@
               abort();
         }
       }
 
       if (ev->flags & EV_ERROR)
-        revents |= UV__POLLERR;
+        revents |= POLLERR;
 
       if ((ev->flags & EV_EOF) && (w->pevents & UV__POLLRDHUP))
         revents |= UV__POLLRDHUP;
 
       if (revents == 0)
         continue;
 
-      w->cb(loop, w, revents);
+      /* Run signal watchers last. This also affects child process watchers
+       * because those are implemented in terms of signal watchers.
+       */
+      if (w == &loop->signal_io_watcher)
+        have_signals = 1;
+      else
+        w->cb(loop, w, revents);
+
       nevents++;
     }
 
+    if (have_signals != 0)
+      loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
+
     loop->watchers[loop->nwatchers] = NULL;
     loop->watchers[loop->nwatchers + 1] = NULL;
 
+    if (have_signals != 0)
+      return;  /* Event loop should cycle now so don't poll again. */
+
     if (nevents != 0) {
       if (nfds == ARRAY_SIZE(events) && --count != 0) {
         /* Poll for more events but don't block this time. */
         timeout = 0;
         continue;
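The behavioral change in this hunk: when the loop's internal signal watcher is among the fired events, its callback is deferred until every ordinary I/O callback has run, and uv__io_poll then returns so uv_run() starts a fresh loop iteration instead of polling again with signal state pending. Child process watchers are covered as well, because process exit notification rides on SIGCHLD through the same signal machinery. The signal watcher itself is the read end of a self-pipe; a minimal sketch of that underlying pattern follows, with hypothetical names that are not libuv internals:

#include <signal.h>
#include <unistd.h>

static int sigpipe_fds[2]; /* [0] = read end watched by the event loop */

/* Async-signal-safe handler: just push the signal number into the pipe.
 * The event loop later sees POLLIN on sigpipe_fds[0] and dispatches the
 * real callback outside signal context. */
static void on_signal(int signum) {
  unsigned char byte = (unsigned char) signum;
  (void) write(sigpipe_fds[1], &byte, 1); /* errors ignored in a sketch */
}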
@@ -407,10 +423,10 @@
   return uv__fsevents_init(handle);
 
 fallback:
 #endif /* defined(__APPLE__) */
 
-  uv__io_start(handle->loop, &handle->event_watcher, UV__POLLIN);
+  uv__io_start(handle->loop, &handle->event_watcher, POLLIN);
 
   return 0;
 }
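For reference, this event_watcher / EVFILT_VNODE path is what backs the public uv_fs_event API. A minimal usage sketch against that public API; the watched path "." is just an example:

#include <stdio.h>
#include <uv.h>

static void on_change(uv_fs_event_t* handle,
                      const char* filename,
                      int events,
                      int status) {
  if (status < 0) {
    fprintf(stderr, "fs event error: %s\n", uv_strerror(status));
    return;
  }
  printf("change on %s (events=%d)\n",
         filename != NULL ? filename : "(unknown)", events);
}

int main(void) {
  uv_fs_event_t watcher;
  uv_loop_t* loop = uv_default_loop();

  uv_fs_event_init(loop, &watcher);
  uv_fs_event_start(&watcher, on_change, ".", 0);

  return uv_run(loop, UV_RUN_DEFAULT);
}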