event-loop: Track timer event sources in userspace
libwayland now uses only one file descriptor to keep track of all
the timer event sources associated with an event loop. An array-based
binary heap is used to determine which event source has the earliest
deadline.
(Previously, each timer event source had its own timerfd, making it easy
for a process using many timer event sources to run out of file
descriptors.)
Signed-off-by: Manuel Stoeckl <code@mstoeckl.com>
Manuel Stoeckl authored 4 years ago
Héctor Orón Martínez committed 4 years ago
22 | 22 | * SOFTWARE. |
23 | 23 | */ |
24 | 24 | |
25 | #include <assert.h> | |
25 | 26 | #include <stddef.h> |
26 | 27 | #include <stdio.h> |
27 | 28 | #include <errno.h> |
44 | 45 | |
45 | 46 | /** \cond INTERNAL */ |
46 | 47 | |
47 | struct wl_event_loop { | |
48 | int epoll_fd; | |
49 | struct wl_list check_list; | |
50 | struct wl_list idle_list; | |
51 | struct wl_list destroy_list; | |
52 | ||
53 | struct wl_signal destroy_signal; | |
54 | }; | |
55 | ||
56 | struct wl_event_source_interface { | |
57 | int (*dispatch)(struct wl_event_source *source, | |
58 | struct epoll_event *ep); | |
59 | }; | |
48 | #define TIMER_REMOVED -2 | |
49 | ||
50 | struct wl_event_loop; | |
51 | struct wl_event_source_interface; | |
52 | struct wl_event_source_timer; | |
60 | 53 | |
61 | 54 | struct wl_event_source { |
62 | 55 | struct wl_event_source_interface *interface; |
65 | 58 | void *data; |
66 | 59 | int fd; |
67 | 60 | }; |
61 | ||
/* Min-heap of armed timer sources, ordered by earliest deadline.  One
 * instance is embedded in each wl_event_loop; `base` is the internal
 * event source whose fd is the loop's single shared timerfd (created
 * lazily; -1 until first use). */
struct wl_timer_heap {
	struct wl_event_source base;
	/* pointers to the user-visible event sources */
	struct wl_event_source_timer **data;
	/* space: allocated slots in `data`; active: number of currently
	 * armed timers (heap entries); count: timer sources that have
	 * reserved a slot (armed or not) */
	int space, active, count;
};
68 | ||
struct wl_event_loop {
	int epoll_fd;
	struct wl_list check_list;
	struct wl_list idle_list;
	struct wl_list destroy_list;

	struct wl_signal destroy_signal;

	/* all timer sources in this loop share one timerfd via this heap */
	struct wl_timer_heap timers;
};
79 | ||
/* Per-source dispatch vtable; dispatch() receives the epoll event that
 * fired for the source. */
struct wl_event_source_interface {
	int (*dispatch)(struct wl_event_source *source,
			struct epoll_event *ep);
};
84 | ||
68 | 85 | |
69 | 86 | struct wl_event_source_fd { |
70 | 87 | struct wl_event_source base; |
/* User-visible timer event source.  Armed timers live in the loop's
 * wl_timer_heap instead of owning a per-source timerfd. */
struct wl_event_source_timer {
	struct wl_event_source base;
	wl_event_loop_timer_func_t func;
	/* link in the singly-linked list of due timers built during
	 * wl_timer_heap_dispatch */
	struct wl_event_source_timer *next_due;
	/* absolute CLOCK_MONOTONIC deadline; zeroed while disarmed */
	struct timespec deadline;
	/* index of this timer in the heap's data array, or -1 if disarmed */
	int heap_idx;
};
238 | ||
/* Dispatch callback for the timer heap's internal event source.  The real
 * work happens in wl_timer_heap_dispatch, so this is intentionally a no-op. */
static int
noop_dispatch(struct wl_event_source *source, struct epoll_event *ep)
{
	(void)source;
	(void)ep;
	return 0;
}
244 | ||
/* Interface for the heap's internal epoll entry; dispatch is a no-op
 * because due timer callbacks are invoked separately (see
 * wl_timer_heap_dispatch). */
struct wl_event_source_interface timer_heap_source_interface = {
	noop_dispatch,
};
248 | ||
/* Lexicographic timespec comparison: true iff `ta` is strictly earlier
 * than `tb`. */
static bool
time_lt(struct timespec ta, struct timespec tb)
{
	if (ta.tv_sec == tb.tv_sec)
		return ta.tv_nsec < tb.tv_nsec;
	return ta.tv_sec < tb.tv_sec;
}
257 | ||
258 | static int | |
259 | set_timer(int timerfd, struct timespec deadline) { | |
260 | struct itimerspec its; | |
261 | ||
262 | its.it_interval.tv_sec = 0; | |
263 | its.it_interval.tv_nsec = 0; | |
264 | its.it_value = deadline; | |
265 | return timerfd_settime(timerfd, TFD_TIMER_ABSTIME, &its, NULL); | |
266 | } | |
267 | ||
268 | static int | |
269 | clear_timer(int timerfd) | |
270 | { | |
271 | struct itimerspec its; | |
272 | ||
273 | its.it_interval.tv_sec = 0; | |
274 | its.it_interval.tv_nsec = 0; | |
275 | its.it_value.tv_sec = 0; | |
276 | its.it_value.tv_nsec = 0; | |
277 | return timerfd_settime(timerfd, 0, &its, NULL); | |
278 | } | |
279 | ||
280 | static void | |
281 | wl_timer_heap_init(struct wl_timer_heap *timers, struct wl_event_loop *loop) | |
282 | { | |
283 | timers->base.fd = -1; | |
284 | timers->base.data = NULL; | |
285 | wl_list_init(&timers->base.link); | |
286 | timers->base.interface = &timer_heap_source_interface; | |
287 | timers->base.loop = loop; | |
288 | ||
289 | loop->timers.data = NULL; | |
290 | loop->timers.active = 0; | |
291 | loop->timers.space = 0; | |
292 | loop->timers.count = 0; | |
293 | } | |
294 | ||
295 | static void | |
296 | wl_timer_heap_release(struct wl_timer_heap *timers) | |
297 | { | |
298 | if (timers->base.fd != -1) { | |
299 | close(timers->base.fd); | |
300 | } | |
301 | free(timers->data); | |
302 | } | |
303 | ||
304 | static int | |
305 | wl_timer_heap_ensure_timerfd(struct wl_timer_heap *timers) | |
306 | { | |
307 | struct epoll_event ep; | |
308 | int timer_fd; | |
309 | ||
310 | if (timers->base.fd != -1) | |
311 | return 0; | |
312 | ||
313 | memset(&ep, 0, sizeof ep); | |
314 | ep.events = EPOLLIN; | |
315 | ep.data.ptr = timers; | |
316 | ||
317 | timer_fd = timerfd_create(CLOCK_MONOTONIC, | |
318 | TFD_CLOEXEC | TFD_NONBLOCK); | |
319 | if (timer_fd < 0) | |
320 | return -1; | |
321 | ||
322 | if (epoll_ctl(timers->base.loop->epoll_fd, | |
323 | EPOLL_CTL_ADD, timer_fd, &ep) < 0) { | |
324 | close(timer_fd); | |
325 | return -1; | |
326 | } | |
327 | ||
328 | timers->base.fd = timer_fd; | |
329 | return 0; | |
330 | } | |
331 | ||
332 | static int | |
333 | wl_timer_heap_reserve(struct wl_timer_heap *timers) | |
334 | { | |
335 | struct wl_event_source_timer **n; | |
336 | int new_space; | |
337 | ||
338 | if (timers->count + 1 > timers->space) { | |
339 | new_space = timers->space >= 8 ? timers->space * 2 : 8; | |
340 | n = realloc(timers->data, (size_t)new_space * sizeof(*n)); | |
341 | if (!n) { | |
342 | wl_log("Allocation failure when expanding timer list"); | |
343 | return -1; | |
344 | } | |
345 | timers->data = n; | |
346 | timers->space = new_space; | |
347 | } | |
348 | ||
349 | timers->count++; | |
350 | return 0; | |
351 | } | |
352 | ||
353 | static void | |
354 | wl_timer_heap_unreserve(struct wl_timer_heap *timers) | |
355 | { | |
356 | struct wl_event_source_timer **n; | |
357 | ||
358 | timers->count--; | |
359 | ||
360 | if (timers->space >= 16 && timers->space >= 4 * timers->count) { | |
361 | n = realloc(timers->data, (size_t)timers->space / 2 * sizeof(*n)); | |
362 | if (!n) { | |
363 | wl_log("Reallocation failure when shrinking timer list"); | |
364 | return; | |
365 | } | |
366 | timers->data = n; | |
367 | timers->space = timers->space / 2; | |
368 | } | |
369 | } | |
370 | ||
371 | static int | |
372 | heap_set(struct wl_event_source_timer **data, | |
373 | struct wl_event_source_timer *a, | |
374 | int idx) | |
375 | { | |
376 | int tmp; | |
377 | ||
378 | tmp = a->heap_idx; | |
379 | a->heap_idx = idx; | |
380 | data[a->heap_idx] = a; | |
381 | ||
382 | return tmp; | |
383 | } | |
384 | ||
/* Restore the min-heap property by moving `source` (currently at
 * source->heap_idx) downward: while the smaller of its children has an
 * earlier deadline, pull that child up one level; finally drop `source`
 * into the vacated slot.  `num_active` is the number of live entries in
 * `data`. */
static void
heap_sift_down(struct wl_event_source_timer **data,
	       int num_active,
	       struct wl_event_source_timer *source)
{
	struct wl_event_source_timer *child, *other_child;
	int cursor_idx;
	struct timespec key;

	cursor_idx = source->heap_idx;
	key = source->deadline;
	while (1) {
		/* left child of node i is at slot 2*i + 1 */
		int lchild_idx = cursor_idx * 2 + 1;

		if (lchild_idx >= num_active) {
			break;
		}

		/* pick whichever child has the earlier deadline */
		child = data[lchild_idx];
		if (lchild_idx + 1 < num_active) {
			other_child = data[lchild_idx + 1];
			if (time_lt(other_child->deadline, child->deadline))
				child = other_child;
		}

		/* child earlier than source: move child up, cursor descends */
		if (time_lt(child->deadline, key))
			cursor_idx = heap_set(data, child, cursor_idx);
		else
			break;
	}

	/* place source in the hole left by the last swap */
	heap_set(data, source, cursor_idx);
}
418 | ||
419 | static void | |
420 | heap_sift_up(struct wl_event_source_timer **data, | |
421 | struct wl_event_source_timer *source) | |
422 | { | |
423 | int cursor_idx; | |
424 | struct timespec key; | |
425 | ||
426 | cursor_idx = source->heap_idx; | |
427 | key = source->deadline; | |
428 | while (cursor_idx > 0) { | |
429 | struct wl_event_source_timer *parent = | |
430 | data[(cursor_idx - 1) / 2]; | |
431 | ||
432 | if (time_lt(key, parent->deadline)) | |
433 | cursor_idx = heap_set(data, parent, cursor_idx); | |
434 | else | |
435 | break; | |
436 | } | |
437 | heap_set(data, source, cursor_idx); | |
438 | } | |
439 | ||
/* requires timer be armed */
static void
wl_timer_heap_disarm(struct wl_timer_heap *timers,
		     struct wl_event_source_timer *source)
{
	struct wl_event_source_timer *last_end_evt;
	int old_source_idx;

	assert(source->heap_idx >= 0);

	/* mark the source disarmed and clear its now-stale deadline */
	old_source_idx = source->heap_idx;
	source->heap_idx = -1;
	source->deadline.tv_sec = 0;
	source->deadline.tv_nsec = 0;

	/* detach the heap's last element, shrinking the heap by one */
	last_end_evt = timers->data[timers->active - 1];
	timers->data[timers->active - 1] = NULL;
	timers->active--;

	/* if the removed source WAS the last element, nothing to re-place */
	if (old_source_idx == timers->active)
		return;

	/* otherwise the last element fills the vacated slot */
	timers->data[old_source_idx] = last_end_evt;
	last_end_evt->heap_idx = old_source_idx;

	/* Move the displaced (active) element to its proper place.
	 * Only one of sift-down and sift-up will have any effect */
	heap_sift_down(timers->data, timers->active, last_end_evt);
	heap_sift_up(timers->data, last_end_evt);
}
470 | ||
471 | /* requires timer be disarmed */ | |
472 | static void | |
473 | wl_timer_heap_arm(struct wl_timer_heap *timers, | |
474 | struct wl_event_source_timer *source, | |
475 | struct timespec deadline) | |
476 | { | |
477 | assert(source->heap_idx == -1); | |
478 | ||
479 | source->deadline = deadline; | |
480 | timers->data[timers->active] = source; | |
481 | source->heap_idx = timers->active; | |
482 | timers->active++; | |
483 | heap_sift_up(timers->data, source); | |
484 | } | |
485 | ||
486 | ||
/* Run the callbacks of every timer whose deadline is at or before now.
 *
 * Expired timers are first popped off the heap into a singly-linked list
 * (in deadline order, via next_due), then the timerfd is re-armed for the
 * next pending deadline (or cleared if none remain), and only then are
 * the user callbacks invoked.  Deferring the callbacks means a callback
 * that re-arms or removes timers cannot corrupt the heap traversal;
 * timers removed mid-pass are recognized by fd == TIMER_REMOVED and
 * skipped.
 *
 * Returns 0 on success, -1 if updating the timerfd failed. */
static int
wl_timer_heap_dispatch(struct wl_timer_heap *timers)
{
	struct timespec now;
	struct wl_event_source_timer *root;
	struct wl_event_source_timer *list_cursor = NULL, *list_tail = NULL;

	clock_gettime(CLOCK_MONOTONIC, &now);

	/* pop every expired timer (heap root with deadline <= now) and
	 * append it to the due list */
	while (timers->active > 0) {
		root = timers->data[0];
		if (time_lt(now, root->deadline))
			break;

		wl_timer_heap_disarm(timers, root);

		if (list_cursor == NULL)
			list_cursor = root;
		else
			list_tail->next_due = root;
		list_tail = root;
	}
	if (list_tail)
		list_tail->next_due = NULL;

	/* re-arm the timerfd for the earliest remaining deadline, or
	 * clear it if no timers are left armed */
	if (timers->active > 0) {
		if (set_timer(timers->base.fd, timers->data[0]->deadline) < 0)
			return -1;
	} else {
		if (clear_timer(timers->base.fd) < 0)
			return -1;
	}

	/* Execute precisely the functions for events before `now`, in order.
	 * Because wl_event_loop_dispatch ignores return codes, do the same
	 * here as well */
	for (; list_cursor; list_cursor = list_cursor->next_due) {
		if (list_cursor->base.fd != TIMER_REMOVED)
			list_cursor->func(list_cursor->base.data);
	}

	return 0;
}
220 | 530 | |
/* Dispatch entry for a user-visible timer source: recover the containing
 * wl_event_source_timer and invoke its callback.  (Timer sources no
 * longer own a timerfd; the shared timerfd is read and the due timers
 * are run via wl_timer_heap_dispatch.) */
static int
wl_event_source_timer_dispatch(struct wl_event_source *source,
			       struct epoll_event *ep)
{
	struct wl_event_source_timer *timer;

	timer = wl_container_of(source, timer, base);
	return timer->func(timer->base.data);
}

struct wl_event_source_interface timer_source_interface = {
	wl_event_source_timer_dispatch,
};
544 | ||
545 | /** \endcond */ | |
241 | 546 | |
242 | 547 | /** Create a timer event source |
243 | 548 | * |
259 | 564 | { |
260 | 565 | struct wl_event_source_timer *source; |
261 | 566 | |
567 | if (wl_timer_heap_ensure_timerfd(&loop->timers) < 0) | |
568 | return NULL; | |
569 | ||
262 | 570 | source = malloc(sizeof *source); |
263 | 571 | if (source == NULL) |
264 | 572 | return NULL; |
265 | 573 | |
266 | 574 | source->base.interface = &timer_source_interface; |
267 | source->base.fd = timerfd_create(CLOCK_MONOTONIC, | |
268 | TFD_CLOEXEC | TFD_NONBLOCK); | |
575 | source->base.fd = -1; | |
269 | 576 | source->func = func; |
270 | ||
271 | return add_source(loop, &source->base, WL_EVENT_READABLE, data); | |
577 | source->base.loop = loop; | |
578 | source->base.data = data; | |
579 | wl_list_init(&source->base.link); | |
580 | source->next_due = NULL; | |
581 | source->deadline.tv_sec = 0; | |
582 | source->deadline.tv_nsec = 0; | |
583 | source->heap_idx = -1; | |
584 | ||
585 | if (wl_timer_heap_reserve(&loop->timers) < 0) { | |
586 | free(source); | |
587 | return NULL; | |
588 | } | |
589 | ||
590 | return &source->base; | |
272 | 591 | } |
273 | 592 | |
274 | 593 | /** Arm or disarm a timer |
WL_EXPORT int
wl_event_source_timer_update(struct wl_event_source *source, int ms_delay)
{
	struct wl_event_source_timer *tsource =
		wl_container_of(source, tsource, base);
	struct wl_timer_heap *timers = &tsource->base.loop->timers;

	if (ms_delay > 0) {
		struct timespec deadline;

		/* convert the relative delay into an absolute
		 * CLOCK_MONOTONIC deadline, normalizing tv_nsec */
		clock_gettime(CLOCK_MONOTONIC, &deadline);

		deadline.tv_nsec += (ms_delay % 1000) * 1000000L;
		deadline.tv_sec += ms_delay / 1000;
		if (deadline.tv_nsec >= 1000000000L) {
			deadline.tv_nsec -= 1000000000L;
			deadline.tv_sec += 1;
		}

		if (tsource->heap_idx == -1) {
			/* currently disarmed: insert into the heap */
			wl_timer_heap_arm(timers, tsource, deadline);
		} else if (time_lt(deadline, tsource->deadline)) {
			/* deadline moved earlier: timer may rise */
			tsource->deadline = deadline;
			heap_sift_up(timers->data, tsource);
		} else {
			/* deadline moved later: timer may descend */
			tsource->deadline = deadline;
			heap_sift_down(timers->data, timers->active, tsource);
		}

		if (tsource->heap_idx == 0) {
			/* Only update the timerfd if the new deadline is
			 * the earliest */
			if (set_timer(timers->base.fd, deadline) < 0)
				return -1;
		}
	} else {
		/* ms_delay <= 0 disarms the timer; a no-op if it was
		 * already disarmed */
		if (tsource->heap_idx == -1)
			return 0;
		wl_timer_heap_disarm(timers, tsource);

		if (timers->active == 0) {
			/* Only update the timerfd if this was the last
			 * active timer */
			if (clear_timer(timers->base.fd) < 0)
				return -1;
		}
	}

	return 0;
}
482 | 837 | source->fd = -1; |
483 | 838 | } |
484 | 839 | |
840 | if (source->interface == &timer_source_interface && | |
841 | source->fd != TIMER_REMOVED) { | |
842 | /* Disarm the timer (and the loop's timerfd, if necessary), | |
843 | * before removing its space in the loop timer heap */ | |
844 | wl_event_source_timer_update(source, 0); | |
845 | wl_timer_heap_unreserve(&loop->timers); | |
846 | /* Set the fd field to to indicate that the timer should NOT | |
847 | * be dispatched in `wl_event_loop_dispatch` */ | |
848 | source->fd = TIMER_REMOVED; | |
849 | } | |
850 | ||
485 | 851 | wl_list_remove(&source->link); |
486 | 852 | wl_list_insert(&loop->destroy_list, &source->link); |
487 | 853 | |
533 | 899 | |
534 | 900 | wl_signal_init(&loop->destroy_signal); |
535 | 901 | |
902 | wl_timer_heap_init(&loop->timers, loop); | |
903 | ||
536 | 904 | return loop; |
537 | 905 | } |
538 | 906 | |
555 | 923 | wl_signal_emit(&loop->destroy_signal, loop); |
556 | 924 | |
557 | 925 | wl_event_loop_process_destroy_list(loop); |
926 | wl_timer_heap_release(&loop->timers); | |
558 | 927 | close(loop->epoll_fd); |
559 | 928 | free(loop); |
560 | 929 | } |
605 | 974 | * |
606 | 975 | * \param loop The event loop whose sources to wait for. |
607 | 976 | * \param timeout The polling timeout in milliseconds. |
608 | * \return 0 for success, -1 for polling error. | |
977 | * \return 0 for success, -1 for polling (or timer update) error. | |
609 | 978 | * |
610 | 979 | * All the associated event sources are polled. This function blocks until |
611 | 980 | * any event source delivers an event (idle sources excluded), or the timeout |
627 | 996 | struct epoll_event ep[32]; |
628 | 997 | struct wl_event_source *source; |
629 | 998 | int i, count; |
999 | bool has_timers = false; | |
630 | 1000 | |
631 | 1001 | wl_event_loop_dispatch_idle(loop); |
632 | 1002 | |
633 | 1003 | count = epoll_wait(loop->epoll_fd, ep, ARRAY_LENGTH(ep), timeout); |
634 | 1004 | if (count < 0) |
635 | 1005 | return -1; |
1006 | ||
1007 | for (i = 0; i < count; i++) { | |
1008 | source = ep[i].data.ptr; | |
1009 | if (source == &loop->timers.base) | |
1010 | has_timers = true; | |
1011 | } | |
1012 | ||
1013 | if (has_timers) { | |
1014 | /* Dispatch timer sources before non-timer sources, so that | |
1015 | * the non-timer sources can not cancel (by calling | |
1016 | * `wl_event_source_timer_update`) the dispatching of the timers | |
1017 | * (Note that timer sources also can't cancel pending non-timer | |
1018 | * sources, since epoll_wait has already been called) */ | |
1019 | if (wl_timer_heap_dispatch(&loop->timers) < 0) | |
1020 | return -1; | |
1021 | } | |
636 | 1022 | |
637 | 1023 | for (i = 0; i < count; i++) { |
638 | 1024 | source = ep[i].data.ptr; |