2019-04-08 12:53:32 -04:00
|
|
|
/*
|
|
|
|
|
* FD polling functions for SunOS event ports.
|
|
|
|
|
*
|
|
|
|
|
* Copyright 2018 Joyent, Inc.
|
|
|
|
|
*
|
|
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
|
* modify it under the terms of the GNU General Public License
|
|
|
|
|
* as published by the Free Software Foundation; either version
|
|
|
|
|
* 2 of the License, or (at your option) any later version.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
#include <unistd.h>
|
|
|
|
|
#include <sys/time.h>
|
|
|
|
|
#include <sys/types.h>
|
|
|
|
|
|
|
|
|
|
#include <poll.h>
|
|
|
|
|
#include <port.h>
|
|
|
|
|
#include <errno.h>
|
|
|
|
|
#include <syslog.h>
|
|
|
|
|
|
2020-06-09 03:07:15 -04:00
|
|
|
#include <haproxy/activity.h>
|
2020-05-27 06:58:42 -04:00
|
|
|
#include <haproxy/api.h>
|
2021-10-08 03:33:24 -04:00
|
|
|
#include <haproxy/clock.h>
|
2020-06-09 03:07:15 -04:00
|
|
|
#include <haproxy/fd.h>
|
|
|
|
|
#include <haproxy/global.h>
|
2020-06-04 11:37:26 -04:00
|
|
|
#include <haproxy/signal.h>
|
2021-09-30 11:53:22 -04:00
|
|
|
#include <haproxy/task.h>
|
2020-06-02 12:15:32 -04:00
|
|
|
#include <haproxy/ticks.h>
|
2019-04-08 12:53:32 -04:00
|
|
|
|
|
|
|
|
/*
 * Private data:
 */

/* One event port per thread. Slot [tid] is created by _do_init() for the
 * first thread and by init_evports_per_thread() for the others; -1 when
 * not open (see _do_register() which initializes all slots to -1).
 */
static int evports_fd[MAX_THREADS]; // per-thread evports_fd

/* Per-thread buffer receiving events from port_getn(); allocated in
 * init_evports_per_thread(), released via ha_free().
 */
static THREAD_LOCAL port_event_t *evports_evlist = NULL;

/* Number of entries allocated in evports_evlist (global.tune.maxpollevents). */
static THREAD_LOCAL int evports_evlist_max = 0;
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Convert the "state" member of "fdtab" into an event ports event mask.
|
|
|
|
|
*/
|
|
|
|
|
static inline int evports_state_to_events(int state)
|
|
|
|
|
{
|
|
|
|
|
int events = 0;
|
|
|
|
|
|
2019-09-04 03:52:57 -04:00
|
|
|
if (state & FD_EV_ACTIVE_W)
|
2019-04-08 12:53:32 -04:00
|
|
|
events |= POLLOUT;
|
2019-09-04 03:52:57 -04:00
|
|
|
if (state & FD_EV_ACTIVE_R)
|
2019-04-08 12:53:32 -04:00
|
|
|
events |= POLLIN;
|
|
|
|
|
|
|
|
|
|
return (events);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
 * Associate or dissociate this file descriptor with the event port, using the
 * specified event mask. A zero mask means the fd must not be watched anymore.
 */
static inline void evports_resync_fd(int fd, int events)
{
	int port = evports_fd[tid];

	if (events)
		port_associate(port, PORT_SOURCE_FD, fd, events, NULL);
	else
		port_dissociate(port, PORT_SOURCE_FD, fd);
}
|
|
|
|
|
|
|
|
|
|
/* Applies the pending state of fd <fd> to the event port: computes the new
 * event mask from fdtab[fd].state, updates this thread's bits in
 * polled_mask[fd], and (dis)associates the fd with the port accordingly.
 * Called from _do_poll() for each fd found in an update list.
 */
static void _update_fd(int fd)
{
	int en;
	int events;
	ulong pr, ps;

	/* snapshot the desired state and the current per-thread polled bits */
	en = fdtab[fd].state;
	pr = _HA_ATOMIC_LOAD(&polled_mask[fd].poll_recv);
	ps = _HA_ATOMIC_LOAD(&polled_mask[fd].poll_send);

	/* fd is not for this thread anymore, or has no active R/W event */
	if (!(fdtab[fd].thread_mask & ti->ltid_bit) || !(en & FD_EV_ACTIVE_RW)) {
		if (!((pr | ps) & ti->ltid_bit)) {
			/* fd was not watched, it's still not */
			return;
		}
		/* fd totally removed from poll list */
		events = 0;
		if (pr & ti->ltid_bit)
			_HA_ATOMIC_AND(&polled_mask[fd].poll_recv, ~ti->ltid_bit);
		if (ps & ti->ltid_bit)
			_HA_ATOMIC_AND(&polled_mask[fd].poll_send, ~ti->ltid_bit);
	}
	else {
		/* OK fd has to be monitored, it was either added or changed */
		events = evports_state_to_events(en);
		/* reflect the R side in poll_recv: set our bit when newly
		 * active, clear it when no longer active */
		if (en & FD_EV_ACTIVE_R) {
			if (!(pr & ti->ltid_bit))
				_HA_ATOMIC_OR(&polled_mask[fd].poll_recv, ti->ltid_bit);
		} else {
			if (pr & ti->ltid_bit)
				_HA_ATOMIC_AND(&polled_mask[fd].poll_recv, ~ti->ltid_bit);
		}
		/* same for the W side in poll_send */
		if (en & FD_EV_ACTIVE_W) {
			if (!(ps & ti->ltid_bit))
				_HA_ATOMIC_OR(&polled_mask[fd].poll_send, ti->ltid_bit);
		} else {
			if (ps & ti->ltid_bit)
				_HA_ATOMIC_AND(&polled_mask[fd].poll_send, ~ti->ltid_bit);
		}
	}
	/* push the resulting mask (possibly 0 = dissociate) to the port */
	evports_resync_fd(fd, events);
}
|
|
|
|
|
|
|
|
|
|
/*
 * Event Ports poller. This routine interacts with the file descriptor
 * management data structures and routines; see the large block comment in
 * "src/fd.c" for more information.
 *
 * <exp> is the next task expiration date (a tick), <wake> non-zero forces a
 * zero wait time so the loop returns quickly.
 */
static void _do_poll(struct poller *p, int exp, int wake)
{
	int i;
	int wait_time;
	struct timespec timeout_ts;
	unsigned int nevlist;
	int fd, old_fd;
	int status;

	/*
	 * Scan the list of file descriptors with an updated status:
	 */
	for (i = 0; i < fd_nbupdt; i++) {
		fd = fd_updt[i];

		if (!fd_grab_tgid(fd, tgid)) {
			/* was reassigned */
			activity[tid].poll_drop_fd++;
			continue;
		}

		/* this thread's update for this fd is now consumed */
		_HA_ATOMIC_AND(&fdtab[fd].update_mask, ~ti->ltid_bit);

		if (fdtab[fd].owner)
			_update_fd(fd);
		else
			activity[tid].poll_drop_fd++;

		fd_drop_tgid(fd);
	}
	fd_nbupdt = 0;

	/* Scan the shared update list. Negative "next" values are encoded
	 * markers: -2 means "restart from old_fd", values <= -3 encode the
	 * real next fd as -fd-4, and -1 terminates the list.
	 */
	for (old_fd = fd = update_list[tgid - 1].first; fd != -1; fd = fdtab[fd].update.next) {
		if (fd == -2) {
			fd = old_fd;
			continue;
		}
		else if (fd <= -3)
			fd = -fd -4;
		if (fd == -1)
			break;

		if (!fd_grab_tgid(fd, tgid)) {
			/* was reassigned */
			activity[tid].poll_drop_fd++;
			continue;
		}

		/* not our update to process */
		if (!(fdtab[fd].update_mask & ti->ltid_bit)) {
			fd_drop_tgid(fd);
			continue;
		}

		done_update_polling(fd);

		if (fdtab[fd].owner)
			_update_fd(fd);
		else
			activity[tid].poll_drop_fd++;

		fd_drop_tgid(fd);
	}

	/* mark this thread as idle/harmless while it may sleep in port_getn() */
	thread_idle_now();
	thread_harmless_now();

	/*
	 * Determine how long to wait for events to materialise on the port.
	 */
	wait_time = wake ? 0 : compute_poll_timeout(exp);
	clock_entering_poll();

	do {
		int timeout = (global.tune.options & GTUNE_BUSY_POLLING) ? 0 : wait_time;
		int interrupted = 0;
		nevlist = 1; /* desired number of events to be retrieved */
		timeout_ts.tv_sec = (timeout / 1000);
		timeout_ts.tv_nsec = (timeout % 1000) * 1000000;

		status = port_getn(evports_fd[tid],
				   evports_evlist,
				   evports_evlist_max,
				   &nevlist, /* updated to the number of events retrieved */
				   &timeout_ts);
		if (status != 0) {
			int e = errno;
			switch (e) {
			case ETIME:
				/*
				 * Though the manual page has not historically made it
				 * clear, port_getn() can return -1 with an errno of
				 * ETIME and still have returned some number of events.
				 */
				/* nevlist >= 0 */
				break;
			default:
				nevlist = 0;
				interrupted = 1;
				break;
			}
		}
		clock_update_date(timeout, nevlist);

		/* exit the busy-polling loop once something happened, the
		 * timeout struck, a signal/wake is pending, or a task expired */
		if (nevlist || interrupted)
			break;
		if (timeout || !wait_time)
			break;
		if (signal_queue_len || wake)
			break;
		if (tick_isset(exp) && tick_is_expired(exp, now_ms))
			break;
	} while(1);

	fd_leaving_poll(wait_time, nevlist);

	if (nevlist > 0)
		activity[tid].poll_io++;

	for (i = 0; i < nevlist; i++) {
		unsigned int n = 0;
		int events, rebind_events;
		int ret;

		fd = evports_evlist[i].portev_object;
		events = evports_evlist[i].portev_events;

#ifdef DEBUG_FD
		_HA_ATOMIC_INC(&fdtab[fd].event_count);
#endif
		/*
		 * By virtue of receiving an event for this file descriptor, it
		 * is no longer associated with the port in question. Store
		 * the previous event mask so that we may reassociate after
		 * processing is complete.
		 */
		rebind_events = evports_state_to_events(fdtab[fd].state);
		/* rebind_events != 0 */

		/*
		 * Set bits based on the events we received from the port:
		 */
		n = ((events & POLLIN) ? FD_EV_READY_R : 0) |
		    ((events & POLLOUT) ? FD_EV_READY_W : 0) |
		    ((events & POLLHUP) ? FD_EV_SHUT_RW : 0) |
		    ((events & POLLERR) ? FD_EV_ERR_RW : 0);

		/*
		 * Call connection processing callbacks. Note that it's
		 * possible for this processing to alter the required event
		 * port association; i.e., the "state" member of the "fdtab"
		 * entry. If it changes, the fd will be placed on the updated
		 * list for processing the next time we are called.
		 */
		ret = fd_update_events(fd, n);

		/* polling will be on this instance if the FD was migrated */
		if (ret == FD_UPDT_MIGRATED)
			continue;

		/*
		 * This file descriptor was closed during the processing of
		 * polled events. No need to reassociate.
		 */
		if (ret == FD_UPDT_CLOSED)
			continue;

		/*
		 * Reassociate with the port, using the same event mask as
		 * before. This call will not result in a dissociation as we
		 * asserted that _some_ events needed to be rebound above.
		 *
		 * Reassociating with the same mask allows us to mimic the
		 * level-triggered behaviour of poll(2). In the event that we
		 * are interested in the same events on the next turn of the
		 * loop, this represents no extra work.
		 *
		 * If this additional port_associate(3C) call becomes a
		 * performance problem, we would need to verify that we can
		 * correctly interact with the file descriptor cache and update
		 * list (see "src/fd.c") to avoid reassociating here, or to use
		 * a different events mask.
		 */
		evports_resync_fd(fd, rebind_events);
	}
}
|
|
|
|
|
|
|
|
|
|
/* Per-thread initialization: allocate the event list and, for every thread
 * but the first, a dedicated event port. Returns 1 on success, 0 on failure.
 */
static int init_evports_per_thread()
{
	evports_evlist_max = global.tune.maxpollevents;
	evports_evlist = calloc(evports_evlist_max, sizeof(*evports_evlist));
	if (!evports_evlist)
		return 0;

	if (MAX_THREADS > 1 && tid) {
		evports_fd[tid] = port_create();
		if (evports_fd[tid] == -1) {
			ha_free(&evports_evlist);
			evports_evlist_max = 0;
			return 0;
		}
	}

	/* we may have to unregister some events initially registered on the
	 * original fd when it was alone, and/or to register events on the new
	 * fd for this thread. Let's just mark them as updated, the poller will
	 * do the rest.
	 */
	fd_reregister_all(tgid, ti->ltid_bit);

	return 1;
}
|
|
|
|
|
|
|
|
|
|
static void deinit_evports_per_thread()
|
|
|
|
|
{
|
|
|
|
|
if (MAX_THREADS > 1 && tid)
|
|
|
|
|
close(evports_fd[tid]);
|
|
|
|
|
|
2021-02-20 04:46:51 -05:00
|
|
|
ha_free(&evports_evlist);
|
2019-04-08 12:53:32 -04:00
|
|
|
evports_evlist_max = 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
 * Initialisation of the event ports poller.
 * Returns 0 in case of failure, non-zero in case of success.
 */
static int _do_init(struct poller *p)
{
	p->private = NULL;

	evports_fd[tid] = port_create();
	if (evports_fd[tid] == -1) {
		/* mark the poller as unusable */
		p->pref = 0;
		return 0;
	}

	hap_register_per_thread_init(init_evports_per_thread);
	hap_register_per_thread_deinit(deinit_evports_per_thread);

	return 1;
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Termination of the event ports poller.
|
|
|
|
|
* All resources are released and the poller is marked as inoperative.
|
|
|
|
|
*/
|
2020-02-25 01:38:05 -05:00
|
|
|
static void _do_term(struct poller *p)
|
2019-04-08 12:53:32 -04:00
|
|
|
{
|
|
|
|
|
if (evports_fd[tid] != -1) {
|
|
|
|
|
close(evports_fd[tid]);
|
|
|
|
|
evports_fd[tid] = -1;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
p->private = NULL;
|
|
|
|
|
p->pref = 0;
|
|
|
|
|
|
2021-02-20 04:46:51 -05:00
|
|
|
ha_free(&evports_evlist);
|
2019-04-08 12:53:32 -04:00
|
|
|
evports_evlist_max = 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
 * Run-time check to make sure we can allocate the resources needed for
 * the poller to function correctly.
 * Returns 1 on success, otherwise 0.
 */
static int _do_test(struct poller *p)
{
	int fd = port_create();

	if (fd == -1)
		return 0;

	close(fd);
	return 1;
}
|
|
|
|
|
|
|
|
|
|
/*
 * Close and recreate the event port after fork(). Returns 1 on success,
 * otherwise 0. If this function fails, "_do_term()" must be called to
 * clean up the poller.
 */
static int _do_fork(struct poller *p)
{
	if (evports_fd[tid] != -1)
		close(evports_fd[tid]);

	evports_fd[tid] = port_create();
	return (evports_fd[tid] != -1);
}
|
|
|
|
|
|
|
|
|
|
/*
|
2022-04-25 13:00:55 -04:00
|
|
|
* Registers the poller.
|
2019-04-08 12:53:32 -04:00
|
|
|
*/
|
|
|
|
|
static void _do_register(void)
|
|
|
|
|
{
|
|
|
|
|
struct poller *p;
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
if (nbpollers >= MAX_POLLERS)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < MAX_THREADS; i++)
|
|
|
|
|
evports_fd[i] = -1;
|
|
|
|
|
|
|
|
|
|
p = &pollers[nbpollers++];
|
|
|
|
|
|
|
|
|
|
p->name = "evports";
|
|
|
|
|
p->pref = 300;
|
2019-11-28 12:17:33 -05:00
|
|
|
p->flags = HAP_POLL_F_ERRHUP;
|
2019-04-08 12:53:32 -04:00
|
|
|
p->private = NULL;
|
|
|
|
|
|
|
|
|
|
p->clo = NULL;
|
|
|
|
|
p->test = _do_test;
|
|
|
|
|
p->init = _do_init;
|
|
|
|
|
p->term = _do_term;
|
|
|
|
|
p->poll = _do_poll;
|
|
|
|
|
p->fork = _do_fork;
|
|
|
|
|
}
|
2022-04-25 13:00:55 -04:00
|
|
|
|
|
|
|
|
/* register this poller during the STG_REGISTER init stage at startup */
INITCALL0(STG_REGISTER, _do_register);
|