2006-06-25 20:48:02 -04:00
|
|
|
/*
|
2010-02-26 05:12:27 -05:00
|
|
|
* include/types/global.h
|
|
|
|
|
* Global variables.
|
|
|
|
|
*
|
2012-11-11 11:42:00 -05:00
|
|
|
* Copyright (C) 2000-2012 Willy Tarreau - w@1wt.eu
|
2010-02-26 05:12:27 -05:00
|
|
|
*
|
|
|
|
|
* This library is free software; you can redistribute it and/or
|
|
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
|
* License as published by the Free Software Foundation, version 2.1
|
|
|
|
|
* exclusively.
|
|
|
|
|
*
|
|
|
|
|
* This library is distributed in the hope that it will be useful,
|
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
|
* Lesser General Public License for more details.
|
|
|
|
|
*
|
|
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
|
|
|
|
* License along with this library; if not, write to the Free Software
|
|
|
|
|
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
|
|
|
|
*/
|
2006-06-25 20:48:02 -04:00
|
|
|
|
|
|
|
|
#ifndef _TYPES_GLOBAL_H
|
|
|
|
|
#define _TYPES_GLOBAL_H
|
|
|
|
|
|
|
|
|
|
#include <netinet/in.h>
|
|
|
|
|
|
2006-06-29 12:54:54 -04:00
|
|
|
#include <common/config.h>
|
2018-11-26 04:19:54 -05:00
|
|
|
#include <common/initcall.h>
|
2017-04-21 10:47:03 -04:00
|
|
|
#include <common/hathreads.h>
|
2018-11-26 04:19:54 -05:00
|
|
|
#include <common/standard.h>
|
2017-04-21 10:47:03 -04:00
|
|
|
|
2012-09-12 16:58:11 -04:00
|
|
|
#include <types/listener.h>
|
2009-08-16 11:41:45 -04:00
|
|
|
#include <types/proxy.h>
|
2006-06-25 20:48:02 -04:00
|
|
|
#include <types/task.h>
|
2016-11-09 05:36:17 -05:00
|
|
|
#include <types/vars.h>
|
2006-06-25 20:48:02 -04:00
|
|
|
|
2012-10-02 12:42:10 -04:00
|
|
|
#ifndef UNIX_MAX_PATH
|
|
|
|
|
#define UNIX_MAX_PATH 108
|
|
|
|
|
#endif
|
|
|
|
|
|
2006-06-25 20:48:02 -04:00
|
|
|
/* modes of operation (global.mode) */
|
2007-10-18 07:53:22 -04:00
|
|
|
#define MODE_DEBUG 0x01
|
|
|
|
|
#define MODE_DAEMON 0x02
|
|
|
|
|
#define MODE_QUIET 0x04
|
|
|
|
|
#define MODE_CHECK 0x08
|
|
|
|
|
#define MODE_VERBOSE 0x10
|
|
|
|
|
#define MODE_STARTING 0x20
|
|
|
|
|
#define MODE_FOREGROUND 0x40
|
2017-06-01 11:38:50 -04:00
|
|
|
#define MODE_MWORKER 0x80 /* Master Worker */
|
2018-11-21 09:48:31 -05:00
|
|
|
#define MODE_MWORKER_WAIT 0x100 /* Master Worker wait mode */
|
2006-06-25 20:48:02 -04:00
|
|
|
|
2006-11-12 17:57:19 -05:00
|
|
|
/* list of last checks to perform, depending on config options */
|
|
|
|
|
#define LSTCHK_CAP_BIND 0x00000001 /* check that we can bind to any port */
|
2015-08-20 13:35:14 -04:00
|
|
|
#define LSTCHK_NETADM 0x00000002 /* check that we have CAP_NET_ADMIN */
|
2006-06-25 20:48:02 -04:00
|
|
|
|
2009-01-25 09:42:27 -05:00
|
|
|
/* Global tuning options */
|
|
|
|
|
/* available polling mechanisms */
|
|
|
|
|
#define GTUNE_USE_SELECT (1<<0)
|
|
|
|
|
#define GTUNE_USE_POLL (1<<1)
|
|
|
|
|
#define GTUNE_USE_EPOLL (1<<2)
|
|
|
|
|
#define GTUNE_USE_KQUEUE (1<<3)
|
2009-01-25 10:03:28 -05:00
|
|
|
/* platform-specific options */
|
2012-11-11 11:42:00 -05:00
|
|
|
#define GTUNE_USE_SPLICE (1<<4)
|
2014-04-14 09:56:58 -04:00
|
|
|
#define GTUNE_USE_GAI (1<<5)
|
2016-09-12 17:42:20 -04:00
|
|
|
#define GTUNE_USE_REUSEPORT (1<<6)
|
2016-11-07 15:03:16 -05:00
|
|
|
#define GTUNE_RESOLVE_DONTFAIL (1<<7)
|
2009-01-25 09:42:27 -05:00
|
|
|
|
2017-04-05 19:05:05 -04:00
|
|
|
#define GTUNE_SOCKET_TRANSFER (1<<8)
|
2017-11-24 16:02:34 -05:00
|
|
|
#define GTUNE_NOEXIT_ONFAILURE (1<<9)
|
2017-11-20 09:58:35 -05:00
|
|
|
#define GTUNE_USE_SYSTEMD (1<<10)
|
2017-04-05 19:05:05 -04:00
|
|
|
|
MINOR: polling: add an option to support busy polling
In some situations, especially when dealing with low latency on processors
supporting a variable frequency or when running inside virtual machines,
each time the process waits for an I/O using the poller, the processor
goes back to sleep or is offered to another VM for a long time, and it
causes excessively high latencies.
A solution to this provided by this patch is to enable busy polling using
a global option. When busy polling is enabled, the pollers never sleep and
loop over themselves waiting for an I/O event to happen or for a timeout
to occur. On multi-processor machines it can significantly overheat the
processor but it usually results in much lower latencies.
A typical test consisting in injecting traffic over a single connection at
a time over the loopback shows a bump from 4640 to 8540 connections per
second on forwarded connections, indicating a latency reduction of 98
microseconds for each connection, and a bump from 12500 to 21250 for
locally terminated connections (redirects), indicating a reduction of
33 microseconds.
It is only usable with epoll and kqueue because select() and poll()'s
API is not convenient for such usages, and the level of performance they
are used in doesn't benefit from this anyway.
The option, which obviously remains disabled by default, can be turned
on using "busy-polling" in the global section, and turned off later
using "no busy-polling". Its status is reported in "show info" to help
troubleshooting suspicious CPU spikes.
2018-11-22 12:07:59 -05:00
|
|
|
#define GTUNE_BUSY_POLLING (1<<11)
|
2019-02-27 06:02:18 -05:00
|
|
|
#define GTUNE_LISTENER_MQ (1<<12)
|
2019-04-15 13:38:50 -04:00
|
|
|
#define GTUNE_SET_DUMPABLE (1<<13)
|
2019-04-08 12:53:32 -04:00
|
|
|
#define GTUNE_USE_EVPORTS (1<<14)
|
2019-10-27 15:08:11 -04:00
|
|
|
#define GTUNE_STRICT_LIMITS (1<<15)
|
MEDIUM: init: prevent process and thread creation at runtime
Some concerns are regularly raised about the risk to inherit some Lua
files which make use of a fork (e.g. via os.execute()) as well as
whether or not some of bugs we fix might or not be exploitable to run
some code. Given that haproxy is event-driven, any foreground activity
completely stops processing and is easy to detect, but background
activity is a different story. A Lua script could very well discretely
fork a sub-process connecting to a remote location and taking commands,
and some injected code could also try to hide its activity by creating
a process or a thread without blocking the rest of the processing. While
such activities should be extremely limited when run in an empty chroot
without any permission, it would be better to get a higher assurance
they cannot happen.
This patch introduces something very simple: it limits the number of
processes and threads to zero in the workers after the last thread was
created. By doing so, it effectively instructs the system to fail on
any fork() or clone() syscall. Thus any undesired activity has to happen
in the foreground and is way easier to detect.
This will obviously break external checks (whose concept is already
totally insecure), and for this reason a new option
"insecure-fork-wanted" was added to disable this protection, and it
is suggested in the fork() error report from the checks. It is
obviously recommended not to use it and to reconsider the reasons
leading to it being enabled in the first place.
If for any reason we fail to disable forks, we still start because it
could be imaginable that some operating systems refuse to set this
limit to zero, but in this case we emit a warning, that may or may not
be reported since we're after the fork point. Ideally over the long
term it should be conditioned by strict-limits and cause a hard fail.
2019-12-03 01:07:36 -05:00
|
|
|
#define GTUNE_INSECURE_FORK (1<<16)
|
2019-12-06 10:31:45 -05:00
|
|
|
#define GTUNE_INSECURE_SETUID (1<<17)
|
2019-04-08 12:53:32 -04:00
|
|
|
|
2014-01-29 06:24:34 -05:00
|
|
|
/* SSL server verify mode (stored in global.ssl_server_verify) */
enum {
	SSL_SERVER_VERIFY_NONE = 0,     /* no verification of the server certificate */
	SSL_SERVER_VERIFY_REQUIRED = 1, /* server certificate verification is required */
};
|
|
|
|
|
|
2006-06-25 20:48:02 -04:00
|
|
|
/* FIXME : this will have to be redefined correctly */
/* Process-wide run-time settings, mostly filled from the "global" section of
 * the configuration file. A single instance ("global", declared below) exists
 * per process.
 * NOTE(review): comments on previously undocumented fields are inferred from
 * field names and the surrounding defines — confirm against the config parser.
 */
struct global {
	int uid;                   /* uid to switch to after startup; 0 = unchanged (assumed) */
	int gid;                   /* gid to switch to after startup; 0 = unchanged (assumed) */
	int external_check;        /* non-zero if external checks are enabled (assumed) */
	int nbproc;                /* configured number of processes */
	int nbthread;              /* configured number of threads per process */
	unsigned int hard_stop_after; /* maximum time allowed to perform a soft-stop */
	int maxconn, hardmaxconn;  /* configured and hard limits on concurrent connections (assumed) */
	int maxsslconn;            /* max concurrent SSL connections (assumed) */
	int ssl_session_max_cost;   /* how many bytes an SSL session may cost */
	int ssl_handshake_max_cost; /* how many bytes an SSL handshake may use */
	int ssl_used_frontend;      /* non-zero if SSL is used in a frontend */
	int ssl_used_backend;       /* non-zero if SSL is used in a backend */
	int ssl_used_async_engines; /* number of used async engines */
	unsigned int ssl_server_verify; /* default verify mode on servers side, one of SSL_SERVER_VERIFY_* */
	struct freq_ctr conn_per_sec;        /* frequency counter: connections per second */
	struct freq_ctr sess_per_sec;        /* frequency counter: sessions per second */
	struct freq_ctr ssl_per_sec;         /* frequency counter: SSL connections per second */
	struct freq_ctr ssl_fe_keys_per_sec; /* frequency counter: frontend SSL keys per second */
	struct freq_ctr ssl_be_keys_per_sec; /* frequency counter: backend SSL keys per second */
	struct freq_ctr comp_bps_in;  /* bytes per second, before http compression */
	struct freq_ctr comp_bps_out; /* bytes per second, after http compression */
	struct freq_ctr out_32bps;    /* #of 32-byte blocks emitted per second */
	unsigned long long out_bytes; /* total #of bytes emitted */
	int cps_lim, cps_max;         /* conn/s: configured limit and observed max (assumed) */
	int sps_lim, sps_max;         /* sess/s: configured limit and observed max (assumed) */
	int ssl_lim, ssl_max;         /* SSL conn/s: configured limit and observed max (assumed) */
	int ssl_fe_keys_max, ssl_be_keys_max; /* observed max SSL key rates, frontend/backend (assumed) */
	unsigned int shctx_lookups, shctx_misses; /* shared context (SSL cache) lookup/miss counters (assumed) */
	int comp_rate_lim;      /* HTTP compression rate limit */
	int maxpipes;           /* max # of pipes */
	int maxsock;            /* max # of sockets */
	int rlimit_nofile;      /* default ulimit-n value : 0=unset */
	int rlimit_memmax_all;  /* default all-process memory limit in megs ; 0=unset */
	int rlimit_memmax;      /* default per-process memory limit in megs ; 0=unset */
	long maxzlibmem;        /* max RAM for zlib in bytes */
	int mode;               /* bit field of MODE_* operation flags (see above) */
	unsigned int req_count; /* request counter (HTTP or TCP session) for logs and unique_id */
	int last_checks;        /* bit field of LSTCHK_* last checks to perform (see above) */
	int spread_checks;      /* health-check spreading factor — TODO confirm units (%) */
	int max_spread_checks;  /* upper bound on check spreading — TODO confirm units */
	int max_syslog_len;     /* maximum length of a syslog message (assumed) */
	char *chroot;           /* directory to chroot into at startup, NULL = none (assumed) */
	char *pidfile;          /* path of the pid file, NULL = none (assumed) */
	char *node, *desc;      /* node name & description */
	struct buffer log_tag;  /* name for syslog */
	struct list logsrvs;    /* list of configured log servers (assumed) */
	char *log_send_hostname;   /* set hostname in syslog header */
	char *server_state_base;   /* path to a directory where server state files can be found */
	char *server_state_file;   /* path to the file where server states are loaded from */
	/* performance tuning knobs, typically set via "tune.*" directives (assumed) */
	struct {
		int maxpollevents; /* max number of poll events at once */
		int maxaccept;     /* max number of consecutive accept() */
		int options;       /* various tuning options, bit field of GTUNE_* */
		int runqueue_depth;/* max number of tasks to run at once */
		int recv_enough;   /* how many input bytes at once are "enough" */
		int bufsize;       /* buffer size in bytes, defaults to BUFSIZE */
		int maxrewrite;    /* buffer max rewrite size in bytes, defaults to MAXREWRITE */
		int reserved_bufs; /* how many buffers can only be allocated for response */
		int buf_limit;     /* if not null, how many total buffers may only be allocated */
		int client_sndbuf; /* set client sndbuf to this value if not null */
		int client_rcvbuf; /* set client rcvbuf to this value if not null */
		int server_sndbuf; /* set server sndbuf to this value if not null */
		int server_rcvbuf; /* set server rcvbuf to this value if not null */
		int chksize;       /* check buffer size in bytes, defaults to BUFSIZE */
		int pipesize;      /* pipe size in bytes, system defaults if zero */
		int max_http_hdr;  /* max number of HTTP headers, use MAX_HTTP_HDR if zero */
		int requri_len;    /* max len of request URI, use REQURI_LEN if zero */
		int cookie_len;    /* max length of cookie captures */
		int pattern_cache; /* max number of entries in the pattern cache. */
		int sslcachesize;  /* SSL cache size in session, defaults to 20000 */
		int comp_maxlevel; /* max HTTP compression level */
		int pool_low_ratio;  /* max ratio of FDs used before we stop using new idle connections */
		int pool_high_ratio; /* max ratio of FDs used before we start killing idle connections when creating new connections */
		int pool_low_count;  /* max number of opened fd before we stop using new idle connections */
		int pool_high_count; /* max number of opened fd before we start killing idle connections when creating new connections */
		unsigned short idle_timer; /* how long before an empty buffer is considered idle (ms) */
	} tune;
	/* settings applied to UNIX ("bind") sockets */
	struct {
		char *prefix;           /* path prefix of unix bind socket */
		struct {                /* UNIX socket permissions */
			uid_t uid;      /* -1 to leave unchanged */
			gid_t gid;      /* -1 to leave unchanged */
			mode_t mode;    /* 0 to leave unchanged */
		} ux;
	} unix_bind;
	struct proxy *stats_fe;     /* the frontend holding the stats settings */
	struct vars   vars;         /* list of variables for the process scope. */
#ifdef USE_CPU_AFFINITY
	struct {
		unsigned long proc[MAX_PROCS];     /* list of CPU masks for the 32/64 first processes */
		unsigned long proc_t1[MAX_PROCS];  /* list of CPU masks for the 1st thread of each process */
		unsigned long thread[MAX_THREADS]; /* list of CPU masks for the 32/64 first threads of the 1st process */
	} cpu_map;
#endif
};
|
|
|
|
|
|
2019-04-12 10:09:21 -04:00
|
|
|
/* options for mworker_proc */
|
|
|
|
|
|
|
|
|
|
#define PROC_O_TYPE_MASTER 0x00000001
|
|
|
|
|
#define PROC_O_TYPE_WORKER 0x00000002
|
|
|
|
|
#define PROC_O_TYPE_PROG 0x00000004
|
|
|
|
|
/* 0x00000008 unused */
|
|
|
|
|
#define PROC_O_LEAVING 0x00000010 /* this process should be leaving */
|
2019-04-12 10:09:22 -04:00
|
|
|
/* 0x00000020 to 0x00000080 unused */
|
|
|
|
|
#define PROC_O_START_RELOAD 0x00000100 /* Start the process even if the master was re-executed */
|
2019-04-12 10:09:21 -04:00
|
|
|
|
2018-10-26 08:47:31 -04:00
|
|
|
/*
 * Structure used to describe the processes in master worker mode
 * NOTE(review): comments on previously undocumented fields are inferred from
 * field names — confirm against the master-worker code.
 */
struct mworker_proc {
	int pid;            /* OS pid of this process (assumed) */
	int options;        /* bit field of PROC_O_* flags (see above) */
	char *id;           /* identifier of this entry — presumably the config name; confirm */
	char **command;     /* command line, NULL-terminated argv — presumably for external programs; confirm */
	char *path;         /* path of the executable — assumed, confirm */
	char *version;      /* version string of this process — assumed, confirm */
	int ipc_fd[2];      /* 0 is master side, 1 is worker side */
	int relative_pid;   /* process id starting at 1 (see extern relative_pid below) — confirm for non-workers */
	int reloads;        /* number of reloads this entry went through — assumed */
	int timestamp;      /* start timestamp — units/epoch not visible here, confirm */
	struct server *srv; /* the server entry in the master proxy */
	struct list list;   /* linking into proc_list (list of processes in mworker mode) */
	int uid;            /* uid this process runs as — assumed */
	int gid;            /* gid this process runs as — assumed */
};
|
|
|
|
|
|
2006-06-25 20:48:02 -04:00
|
|
|
extern struct global global;
|
|
|
|
|
extern int pid; /* current process id */
|
2007-11-04 17:35:08 -05:00
|
|
|
extern int relative_pid; /* process id starting at 1 */
|
2017-11-10 13:08:14 -05:00
|
|
|
extern unsigned long pid_bit; /* bit corresponding to the process id */
|
2019-02-02 11:11:28 -05:00
|
|
|
extern unsigned long all_proc_mask; /* mask of all processes */
|
2006-06-25 20:48:02 -04:00
|
|
|
extern int actconn; /* # of active sessions */
|
2010-08-31 09:39:26 -04:00
|
|
|
extern int listeners;
|
2017-09-15 02:18:11 -04:00
|
|
|
extern int jobs; /* # of active jobs (listeners, sessions, open devices) */
|
2018-11-16 10:57:20 -05:00
|
|
|
extern int unstoppable_jobs; /* # of active jobs that can't be stopped during a soft stop */
|
2018-11-05 10:31:22 -05:00
|
|
|
extern int active_peers; /* # of active peers (connection attempts and successes) */
|
2018-11-05 11:12:27 -05:00
|
|
|
extern int connected_peers; /* # of really connected peers */
|
2018-07-13 05:56:34 -04:00
|
|
|
extern THREAD_LOCAL struct buffer trash;
|
2010-08-25 06:58:59 -04:00
|
|
|
extern int nb_oldpids; /* contains the number of old pids found */
|
2006-06-25 20:48:02 -04:00
|
|
|
extern const int zero;
|
|
|
|
|
extern const int one;
|
2007-10-11 14:48:58 -04:00
|
|
|
extern const struct linger nolinger;
|
2006-06-25 20:48:02 -04:00
|
|
|
extern int stopping; /* non zero means stopping in progress */
|
2019-06-02 05:11:29 -04:00
|
|
|
extern int killed; /* >0 means a hard-stop is triggered, >1 means hard-stop immediately */
|
2009-08-16 04:08:02 -04:00
|
|
|
extern char hostname[MAX_HOSTNAME_LEN];
|
2010-09-23 12:30:22 -04:00
|
|
|
extern char localpeer[MAX_HOSTNAME_LEN];
|
2014-04-28 16:27:06 -04:00
|
|
|
extern unsigned int warned; /* bitfield of a few warnings to emit just once */
|
2018-07-26 11:55:11 -04:00
|
|
|
extern volatile unsigned long sleeping_thread_mask;
|
2018-10-26 08:47:34 -04:00
|
|
|
extern struct list proc_list; /* list of process in mworker mode */
|
2018-11-19 12:46:18 -05:00
|
|
|
extern struct mworker_proc *proc_self; /* process structure of current process */
|
2018-11-27 06:02:38 -05:00
|
|
|
extern int master; /* 1 if in master, 0 otherwise */
|
2019-03-01 04:09:28 -05:00
|
|
|
extern unsigned int rlim_fd_cur_at_boot;
|
|
|
|
|
extern unsigned int rlim_fd_max_at_boot;
|
2019-04-01 05:29:56 -04:00
|
|
|
extern int atexit_flag;
|
2019-05-03 04:16:39 -04:00
|
|
|
|
2014-04-28 16:27:06 -04:00
|
|
|
/* bit values to go with "warned" above */
|
2019-05-14 14:57:57 -04:00
|
|
|
/* unassigned : 0x00000001 (previously: WARN_BLOCK_DEPRECATED) */
|
2015-05-26 06:18:29 -04:00
|
|
|
/* unassigned : 0x00000002 */
|
2019-05-14 14:57:58 -04:00
|
|
|
/* unassigned : 0x00000004 (previously: WARN_REDISPATCH_DEPRECATED) */
|
2019-05-14 14:57:59 -04:00
|
|
|
/* unassigned : 0x00000008 (previously: WARN_CLITO_DEPRECATED) */
|
|
|
|
|
/* unassigned : 0x00000010 (previously: WARN_SRVTO_DEPRECATED) */
|
|
|
|
|
/* unassigned : 0x00000020 (previously: WARN_CONTO_DEPRECATED) */
|
2019-05-14 14:58:00 -04:00
|
|
|
#define WARN_FORCECLOSE_DEPRECATED 0x00000040
|
2014-04-28 16:27:06 -04:00
|
|
|
|
2019-05-22 14:34:35 -04:00
|
|
|
|
2014-04-28 16:27:06 -04:00
|
|
|
/* to be used with warned and WARN_* */
|
|
|
|
|
static inline int already_warned(unsigned int warning)
|
|
|
|
|
{
|
|
|
|
|
if (warned & warning)
|
|
|
|
|
return 1;
|
|
|
|
|
warned |= warning;
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
2006-06-25 20:48:02 -04:00
|
|
|
|
2019-02-02 11:22:19 -05:00
|
|
|
/* returns a mask if set, otherwise all_proc_mask */
|
|
|
|
|
static inline unsigned long proc_mask(unsigned long mask)
|
|
|
|
|
{
|
|
|
|
|
return mask ? mask : all_proc_mask;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* returns a mask if set, otherwise all_threads_mask */
|
|
|
|
|
static inline unsigned long thread_mask(unsigned long mask)
|
|
|
|
|
{
|
|
|
|
|
return mask ? mask : all_threads_mask;
|
|
|
|
|
}
|
|
|
|
|
|
2019-04-01 05:29:56 -04:00
|
|
|
int tell_old_pids(int sig);
|
|
|
|
|
int delete_oldpid(int pid);
|
|
|
|
|
|
2017-03-23 17:44:13 -04:00
|
|
|
void deinit(void);
|
2016-12-21 12:43:10 -05:00
|
|
|
void hap_register_build_opts(const char *str, int must_free);
|
2016-12-21 13:57:00 -05:00
|
|
|
void hap_register_post_check(int (*fct)());
|
2019-08-12 03:51:07 -04:00
|
|
|
void hap_register_post_proxy_check(int (*fct)(struct proxy *));
|
|
|
|
|
void hap_register_post_server_check(int (*fct)(struct server *));
|
2016-12-21 14:46:26 -05:00
|
|
|
void hap_register_post_deinit(void (*fct)());
|
2019-07-31 02:44:12 -04:00
|
|
|
void hap_register_proxy_deinit(void (*fct)(struct proxy *));
|
|
|
|
|
void hap_register_server_deinit(void (*fct)(struct server *));
|
2016-12-21 12:43:10 -05:00
|
|
|
|
2019-05-22 08:42:12 -04:00
|
|
|
void hap_register_per_thread_alloc(int (*fct)());
|
2017-07-25 10:52:58 -04:00
|
|
|
void hap_register_per_thread_init(int (*fct)());
|
|
|
|
|
void hap_register_per_thread_deinit(void (*fct)());
|
2019-05-22 08:42:12 -04:00
|
|
|
void hap_register_per_thread_free(int (*fct)());
|
2017-07-25 10:52:58 -04:00
|
|
|
|
2018-11-20 11:36:51 -05:00
|
|
|
void mworker_accept_wrapper(int fd);
|
2018-12-14 15:11:31 -05:00
|
|
|
void mworker_reload();
|
2018-11-20 11:36:51 -05:00
|
|
|
|
2018-11-26 04:19:54 -05:00
|
|
|
/* simplified way to declare static build options in a file */
|
|
|
|
|
#define REGISTER_BUILD_OPTS(str) \
|
|
|
|
|
INITCALL2(STG_REGISTER, hap_register_build_opts, (str), 0)
|
|
|
|
|
|
2018-11-26 05:21:50 -05:00
|
|
|
/* simplified way to declare a post-check callback in a file */
|
|
|
|
|
#define REGISTER_POST_CHECK(fct) \
|
|
|
|
|
INITCALL1(STG_REGISTER, hap_register_post_check, (fct))
|
|
|
|
|
|
2019-08-12 03:51:07 -04:00
|
|
|
/* simplified way to declare a post-proxy-check callback in a file */
|
|
|
|
|
#define REGISTER_POST_PROXY_CHECK(fct) \
|
|
|
|
|
INITCALL1(STG_REGISTER, hap_register_post_proxy_check, (fct))
|
|
|
|
|
|
|
|
|
|
/* simplified way to declare a post-server-check callback in a file */
|
|
|
|
|
#define REGISTER_POST_SERVER_CHECK(fct) \
|
|
|
|
|
INITCALL1(STG_REGISTER, hap_register_post_server_check, (fct))
|
|
|
|
|
|
2018-11-26 05:21:50 -05:00
|
|
|
/* simplified way to declare a post-deinit callback in a file */
|
|
|
|
|
#define REGISTER_POST_DEINIT(fct) \
|
|
|
|
|
INITCALL1(STG_REGISTER, hap_register_post_deinit, (fct))
|
|
|
|
|
|
2019-07-31 02:44:12 -04:00
|
|
|
/* simplified way to declare a proxy-deinit callback in a file */
|
|
|
|
|
#define REGISTER_PROXY_DEINIT(fct) \
|
|
|
|
|
INITCALL1(STG_REGISTER, hap_register_proxy_deinit, (fct))
|
|
|
|
|
|
|
|
|
|
/* simplified way to declare a server-deinit callback in a file */
|
|
|
|
|
#define REGISTER_SERVER_DEINIT(fct) \
|
|
|
|
|
INITCALL1(STG_REGISTER, hap_register_server_deinit, (fct))
|
|
|
|
|
|
2019-05-22 08:42:12 -04:00
|
|
|
/* simplified way to declare a per-thread allocation callback in a file */
|
|
|
|
|
#define REGISTER_PER_THREAD_ALLOC(fct) \
|
|
|
|
|
INITCALL1(STG_REGISTER, hap_register_per_thread_alloc, (fct))
|
|
|
|
|
|
2018-11-26 05:21:50 -05:00
|
|
|
/* simplified way to declare a per-thread init callback in a file */
|
|
|
|
|
#define REGISTER_PER_THREAD_INIT(fct) \
|
|
|
|
|
INITCALL1(STG_REGISTER, hap_register_per_thread_init, (fct))
|
|
|
|
|
|
|
|
|
|
/* simplified way to declare a per-thread deinit callback in a file */
|
|
|
|
|
#define REGISTER_PER_THREAD_DEINIT(fct) \
|
|
|
|
|
INITCALL1(STG_REGISTER, hap_register_per_thread_deinit, (fct))
|
|
|
|
|
|
2019-05-22 08:42:12 -04:00
|
|
|
/* simplified way to declare a per-thread free callback in a file */
|
|
|
|
|
#define REGISTER_PER_THREAD_FREE(fct) \
|
|
|
|
|
INITCALL1(STG_REGISTER, hap_register_per_thread_free, (fct))
|
|
|
|
|
|
2006-06-25 20:48:02 -04:00
|
|
|
#endif /* _TYPES_GLOBAL_H */
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Local variables:
|
|
|
|
|
* c-indent-level: 8
|
|
|
|
|
* c-basic-offset: 8
|
|
|
|
|
* End:
|
|
|
|
|
*/
|