ext/libuv/src/unix/kqueue.c in libuv-0.11.1 vs ext/libuv/src/unix/kqueue.c in libuv-0.11.2

- line removed (present only in libuv-0.11.1, the old version)
+ line added (present only in libuv-0.11.2, the new version)

@@ -159,15 +159,22 @@
       goto update_timeout;
     }
 
     nevents = 0;
 
+    assert(loop->watchers != NULL);
+    loop->watchers[loop->nwatchers] = (void*) events;
+    loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
     for (i = 0; i < nfds; i++) {
       ev = events + i;
       fd = ev->ident;
       w = loop->watchers[fd];
 
+      /* Skip invalidated events, see uv__platform_invalidate_fd */
+      if (fd == -1)
+        continue;
+
       if (w == NULL) {
         /* File descriptor that we've stopped watching, disarm it. */
         /* TODO batch up */
         struct kevent events[1];
 
@@ -188,11 +195,11 @@
       }
 
       revents = 0;
 
       if (ev->filter == EVFILT_READ) {
-        if (w->events & UV__POLLIN) {
+        if (w->pevents & UV__POLLIN) {
           revents |= UV__POLLIN;
           w->rcount = ev->data;
         } else {
           /* TODO batch up */
           struct kevent events[1];
@@ -202,11 +209,11 @@
             abort();
         }
       }
 
       if (ev->filter == EVFILT_WRITE) {
-        if (w->events & UV__POLLOUT) {
+        if (w->pevents & UV__POLLOUT) {
           revents |= UV__POLLOUT;
           w->wcount = ev->data;
         } else {
           /* TODO batch up */
           struct kevent events[1];
@@ -224,10 +231,12 @@
         continue;
 
       w->cb(loop, w, revents);
       nevents++;
     }
+    loop->watchers[loop->nwatchers] = NULL;
+    loop->watchers[loop->nwatchers + 1] = NULL;
 
     if (nevents != 0) {
       if (nfds == ARRAY_SIZE(events) && --count != 0) {
         /* Poll for more events but don't block this time. */
         timeout = 0;
@@ -249,9 +258,28 @@
     if (diff >= (uint64_t) timeout)
       return;
     timeout -= diff;
   }
+}
+
+
+void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
+  struct kevent* events;
+  uintptr_t i;
+  uintptr_t nfds;
+
+  assert(loop->watchers != NULL);
+
+  events = (struct kevent*) loop->watchers[loop->nwatchers];
+  nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
+  if (events == NULL)
+    return;
+
+  /* Invalidate events with same file descriptor */
+  for (i = 0; i < nfds; i++)
+    if ((int) events[i].ident == fd)
+      events[i].ident = -1;
 }
 
 
 static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags) {
   uv_fs_event_t* handle;