2019-04-01 05:29:53 -04:00
/*
* Master Worker
*
* Copyright HAProxy Technologies 2019 - William Lallemand < wlallemand @ haproxy . com >
*
* This program is free software ; you can redistribute it and / or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation ; either version
* 2 of the License , or ( at your option ) any later version .
*
*/
2021-01-20 20:31:46 -05:00
# define _GNU_SOURCE
2019-04-01 05:29:55 -04:00
# include <errno.h>
MEDIUM: mworker/cli: keep the connection of the FD that ask for a reload
When using the "reload" command over the master CLI, all connections to
the master CLI were cut, this was unfortunate because it could have been
used to implement a synchronous reload command.
This patch implements an architecture to keep the connection alive after
the reload.
The master CLI is now equipped with a listener which uses a socketpair,
the 2 FDs of this socketpair are stored in the mworker_proc of the
master, which the master keeps via the environment variable.
ipc_fd[1] is used as a listener for the master CLI. During the "reload"
command, the CLI will send the FD of the current session over ipc_fd[0],
then the reload is achieved, so the master won't handle the recv of the
FD. Once reloaded, ipc_fd[1] receives the FD of the session, so the
connection is preserved. Of course it is a new context, so everything
like the "prompt mode" are lost.
Only the FD which performs the reload is kept.
2022-09-22 11:26:23 -04:00
# include <fcntl.h>
2019-04-01 05:29:55 -04:00
# include <signal.h>
2019-04-01 05:29:53 -04:00
# include <stdlib.h>
# include <string.h>
2019-04-01 05:29:56 -04:00
# include <sys/wait.h>
MEDIUM: mworker/cli: keep the connection of the FD that ask for a reload
When using the "reload" command over the master CLI, all connections to
the master CLI were cut, this was unfortunate because it could have been
used to implement a synchronous reload command.
This patch implements an architecture to keep the connection alive after
the reload.
The master CLI is now equipped with a listener which uses a socketpair,
the 2 FDs of this socketpair are stored in the mworker_proc of the
master, which the master keeps via the environment variable.
ipc_fd[1] is used as a listener for the master CLI. During the "reload"
command, the CLI will send the FD of the current session over ipc_fd[0],
then the reload is achieved, so the master won't handle the recv of the
FD. Once reloaded, ipc_fd[1] receives the FD of the session, so the
connection is preserved. Of course it is a new context, so everything
like the "prompt mode" are lost.
Only the FD which performs the reload is kept.
2022-09-22 11:26:23 -04:00
# include <unistd.h>
2019-04-01 05:29:53 -04:00
2020-05-27 06:58:42 -04:00
# include <haproxy/api.h>
2020-06-04 18:00:29 -04:00
# include <haproxy/cfgparse.h>
2020-06-04 14:19:54 -04:00
# include <haproxy/cli.h>
2020-06-05 11:27:29 -04:00
# include <haproxy/errors.h>
2020-06-04 17:46:14 -04:00
# include <haproxy/fd.h>
# include <haproxy/global.h>
2020-05-27 12:01:47 -04:00
# include <haproxy/list.h>
2022-09-24 09:44:42 -04:00
# include <haproxy/log.h>
2020-06-04 08:58:24 -04:00
# include <haproxy/listener.h>
2020-06-04 08:07:37 -04:00
# include <haproxy/mworker.h>
2020-06-04 12:38:21 -04:00
# include <haproxy/peers.h>
MEDIUM: mworker/cli: keep the connection of the FD that ask for a reload
When using the "reload" command over the master CLI, all connections to
the master CLI were cut, this was unfortunate because it could have been
used to implement a synchronous reload command.
This patch implements an architecture to keep the connection alive after
the reload.
The master CLI is now equipped with a listener which uses a socketpair,
the 2 FDs of this socketpair are stored in the mworker_proc of the
master, which the master keeps via the environment variable.
ipc_fd[1] is used as a listener for the master CLI. During the "reload"
command, the CLI will send the FD of the current session over ipc_fd[0],
then the reload is achieved, so the master won't handle the recv of the
FD. Once reloaded, ipc_fd[1] receives the FD of the session, so the
connection is preserved. Of course it is a new context, so everything
like the "prompt mode" are lost.
Only the FD which performs the reload is kept.
2022-09-22 11:26:23 -04:00
# include <haproxy/proto_sockpair.h>
2021-05-08 14:21:31 -04:00
# include <haproxy/proxy.h>
2022-10-13 11:49:54 -04:00
# include <haproxy/ring.h>
2022-05-27 03:25:10 -04:00
# include <haproxy/sc_strm.h>
2020-06-04 11:37:26 -04:00
# include <haproxy/signal.h>
2022-05-27 03:47:12 -04:00
# include <haproxy/stconn.h>
2020-06-04 17:46:14 -04:00
# include <haproxy/stream.h>
2021-05-08 07:58:19 -04:00
# include <haproxy/tools.h>
2020-05-27 09:59:00 -04:00
# include <haproxy/version.h>
2019-04-01 05:29:53 -04:00
2024-04-03 09:13:00 -04:00
# if defined(USE_SYSTEMD)
# include <haproxy/systemd.h>
# endif
2019-04-01 05:29:53 -04:00
2019-04-01 05:29:56 -04:00
static int exitcode = - 1 ;
2019-05-07 11:49:33 -04:00
static int max_reloads = - 1 ; /* number max of reloads a worker can have until they are killed */
2021-05-08 06:30:50 -04:00
struct mworker_proc * proc_self = NULL ; /* process structure of current process */
2024-06-26 10:21:50 -04:00
struct list mworker_cli_conf = LIST_HEAD_INIT ( mworker_cli_conf ) ; /* master CLI configuration (-S flag) */
2019-04-01 05:29:56 -04:00
/* ----- children processes handling ----- */
/*
* Send signal to every known children .
*/
static void mworker_kill ( int sig )
{
2019-04-01 05:29:59 -04:00
struct mworker_proc * child ;
2019-04-01 05:29:56 -04:00
2019-04-01 05:29:59 -04:00
list_for_each_entry ( child , & proc_list , list ) {
/* careful there, we must be sure that the pid > 0, we don't want to emit a kill -1 */
2019-04-16 11:42:42 -04:00
if ( ( child - > options & ( PROC_O_TYPE_WORKER | PROC_O_TYPE_PROG ) ) & & ( child - > pid > 0 ) )
2019-04-01 05:29:59 -04:00
kill ( child - > pid , sig ) ;
2019-04-01 05:29:56 -04:00
}
}
2019-05-07 11:49:33 -04:00
void mworker_kill_max_reloads ( int sig )
{
struct mworker_proc * child ;
list_for_each_entry ( child , & proc_list , list ) {
if ( max_reloads ! = - 1 & & ( child - > options & PROC_O_TYPE_WORKER ) & &
( child - > pid > 0 ) & & ( child - > reloads > max_reloads ) )
kill ( child - > pid , sig ) ;
}
}
2019-04-01 05:29:56 -04:00
/* return 1 if a pid is a current child otherwise 0 */
2019-04-01 05:29:59 -04:00
int mworker_current_child ( int pid )
2019-04-01 05:29:56 -04:00
{
2019-04-01 05:29:59 -04:00
struct mworker_proc * child ;
2019-04-01 05:29:56 -04:00
2019-04-01 05:29:59 -04:00
list_for_each_entry ( child , & proc_list , list ) {
2019-04-12 10:09:23 -04:00
if ( ( child - > options & ( PROC_O_TYPE_WORKER | PROC_O_TYPE_PROG ) ) & & ( ! ( child - > options & PROC_O_LEAVING ) ) & & ( child - > pid = = pid ) )
2019-04-01 05:29:56 -04:00
return 1 ;
}
return 0 ;
}
2019-04-01 05:29:53 -04:00
2019-04-01 05:29:59 -04:00
/*
 * Count the children known to the master: both new and old workers as well
 * as external programs.
 */
int mworker_child_nb()
{
	struct mworker_proc *proc;
	int count = 0;

	list_for_each_entry(proc, &proc_list, list) {
		if (proc->options & (PROC_O_TYPE_WORKER | PROC_O_TYPE_PROG))
			count++;
	}
	return count;
}
2019-04-01 05:29:53 -04:00
/*
* serialize the proc list and put it in the environment
*/
void mworker_proc_list_to_env ( )
{
char * msg = NULL ;
struct mworker_proc * child ;
2022-07-27 05:57:12 -04:00
int minreloads = INT_MAX ; /* minimum number of reloads to chose which processes are "current" ones */
2019-04-01 05:29:53 -04:00
list_for_each_entry ( child , & proc_list , list ) {
2019-04-12 10:09:23 -04:00
char type = ' ? ' ;
if ( child - > options & PROC_O_TYPE_MASTER )
type = ' m ' ;
else if ( child - > options & PROC_O_TYPE_PROG )
type = ' e ' ;
else if ( child - > options & = PROC_O_TYPE_WORKER )
type = ' w ' ;
2022-07-27 05:57:12 -04:00
if ( child - > reloads < minreloads )
minreloads = child - > reloads ;
2019-04-01 05:29:53 -04:00
if ( child - > pid > - 1 )
MEDIUM: mworker/cli: keep the connection of the FD that ask for a reload
When using the "reload" command over the master CLI, all connections to
the master CLI were cut, this was unfortunate because it could have been
used to implement a synchronous reload command.
This patch implements an architecture to keep the connection alive after
the reload.
The master CLI is now equipped with a listener which uses a socketpair,
the 2 FDs of this socketpair are stored in the mworker_proc of the
master, which the master keeps via the environment variable.
ipc_fd[1] is used as a listener for the master CLI. During the "reload"
command, the CLI will send the FD of the current session over ipc_fd[0],
then the reload is achieved, so the master won't handle the recv of the
FD. Once reloaded, ipc_fd[1] receives the FD of the session, so the
connection is preserved. Of course it is a new context, so everything
like the "prompt mode" are lost.
Only the FD which performs the reload is kept.
2022-09-22 11:26:23 -04:00
memprintf ( & msg , " %s|type=%c;fd=%d;cfd=%d;pid=%d;reloads=%d;failedreloads=%d;timestamp=%d;id=%s;version=%s " , msg ? msg : " " , type , child - > ipc_fd [ 0 ] , child - > ipc_fd [ 1 ] , child - > pid , child - > reloads , child - > failedreloads , child - > timestamp , child - > id ? child - > id : " " , child - > version ) ;
2019-04-01 05:29:53 -04:00
}
if ( msg )
setenv ( " HAPROXY_PROCESSES " , msg , 1 ) ;
2022-07-27 05:57:12 -04:00
list_for_each_entry ( child , & proc_list , list ) {
if ( child - > reloads > minreloads & & ! ( child - > options & PROC_O_TYPE_MASTER ) ) {
child - > options | = PROC_O_LEAVING ;
}
}
2019-04-01 05:29:53 -04:00
}
2022-01-28 15:11:41 -05:00
struct mworker_proc * mworker_proc_new ( )
{
struct mworker_proc * child ;
child = calloc ( 1 , sizeof ( * child ) ) ;
if ( ! child )
return NULL ;
child - > failedreloads = 0 ;
child - > reloads = 0 ;
child - > pid = - 1 ;
child - > ipc_fd [ 0 ] = - 1 ;
child - > ipc_fd [ 1 ] = - 1 ;
child - > timestamp = - 1 ;
return child ;
}
2019-04-01 05:29:53 -04:00
/*
* unserialize the proc list from the environment
2023-02-21 06:44:56 -05:00
* Return < 0 upon error .
2019-04-01 05:29:53 -04:00
*/
2021-05-19 04:45:12 -04:00
int mworker_env_to_proc_list ( )
2019-04-01 05:29:53 -04:00
{
2023-02-21 06:44:56 -05:00
char * env , * msg , * omsg = NULL , * token = NULL , * s1 ;
2021-11-10 05:26:14 -05:00
struct mworker_proc * child ;
int minreloads = INT_MAX ; /* minimum number of reloads to chose which processes are "current" ones */
2023-02-21 06:44:56 -05:00
int err = 0 ;
2019-04-01 05:29:53 -04:00
2023-02-21 06:44:56 -05:00
env = getenv ( " HAPROXY_PROCESSES " ) ;
if ( ! env )
2023-02-21 07:17:24 -05:00
goto no_env ;
2019-04-01 05:29:53 -04:00
2023-02-21 06:44:56 -05:00
omsg = msg = strdup ( env ) ;
if ( ! msg ) {
ha_alert ( " Out of memory while trying to allocate a worker process structure. " ) ;
err = - 1 ;
goto out ;
}
2019-04-01 05:29:53 -04:00
while ( ( token = strtok_r ( msg , " | " , & s1 ) ) ) {
char * subtoken = NULL ;
char * s2 ;
msg = NULL ;
2022-01-28 15:11:41 -05:00
child = mworker_proc_new ( ) ;
2021-05-19 04:45:12 -04:00
if ( ! child ) {
2023-02-21 06:44:56 -05:00
ha_alert ( " out of memory while trying to allocate a worker process structure. " ) ;
err = - 1 ;
goto out ;
2021-05-19 04:45:12 -04:00
}
2019-04-01 05:29:53 -04:00
while ( ( subtoken = strtok_r ( token , " ; " , & s2 ) ) ) {
token = NULL ;
if ( strncmp ( subtoken , " type= " , 5 ) = = 0 ) {
2019-04-12 10:09:23 -04:00
char type ;
type = * ( subtoken + 5 ) ;
if ( type = = ' m ' ) { /* we are in the master, assign it */
2019-04-01 05:29:53 -04:00
proc_self = child ;
2019-04-12 10:09:23 -04:00
child - > options | = PROC_O_TYPE_MASTER ;
} else if ( type = = ' e ' ) {
child - > options | = PROC_O_TYPE_PROG ;
} else if ( type = = ' w ' ) {
child - > options | = PROC_O_TYPE_WORKER ;
}
2019-04-01 05:29:53 -04:00
} else if ( strncmp ( subtoken , " fd= " , 3 ) = = 0 ) {
child - > ipc_fd [ 0 ] = atoi ( subtoken + 3 ) ;
2023-06-19 11:12:58 -04:00
if ( child - > ipc_fd [ 0 ] > - 1 )
global . maxsock + + ;
MEDIUM: mworker/cli: keep the connection of the FD that ask for a reload
When using the "reload" command over the master CLI, all connections to
the master CLI were cut, this was unfortunate because it could have been
used to implement a synchronous reload command.
This patch implements an architecture to keep the connection alive after
the reload.
The master CLI is now equipped with a listener which uses a socketpair,
the 2 FDs of this socketpair are stored in the mworker_proc of the
master, which the master keeps via the environment variable.
ipc_fd[1] is used as a listener for the master CLI. During the "reload"
command, the CLI will send the FD of the current session over ipc_fd[0],
then the reload is achieved, so the master won't handle the recv of the
FD. Once reloaded, ipc_fd[1] receives the FD of the session, so the
connection is preserved. Of course it is a new context, so everything
like the "prompt mode" are lost.
Only the FD which performs the reload is kept.
2022-09-22 11:26:23 -04:00
} else if ( strncmp ( subtoken , " cfd= " , 4 ) = = 0 ) {
child - > ipc_fd [ 1 ] = atoi ( subtoken + 4 ) ;
2023-06-19 11:12:58 -04:00
if ( child - > ipc_fd [ 1 ] > - 1 )
global . maxsock + + ;
2019-04-01 05:29:53 -04:00
} else if ( strncmp ( subtoken , " pid= " , 4 ) = = 0 ) {
child - > pid = atoi ( subtoken + 4 ) ;
} else if ( strncmp ( subtoken , " reloads= " , 8 ) = = 0 ) {
2021-11-09 12:43:59 -05:00
/* we only increment the number of asked reload */
child - > reloads = atoi ( subtoken + 8 ) ;
2021-11-10 05:26:14 -05:00
if ( child - > reloads < minreloads )
minreloads = child - > reloads ;
2021-11-10 04:49:06 -05:00
} else if ( strncmp ( subtoken , " failedreloads= " , 14 ) = = 0 ) {
child - > failedreloads = atoi ( subtoken + 14 ) ;
2019-04-01 05:29:53 -04:00
} else if ( strncmp ( subtoken , " timestamp= " , 10 ) = = 0 ) {
child - > timestamp = atoi ( subtoken + 10 ) ;
2019-04-01 05:30:02 -04:00
} else if ( strncmp ( subtoken , " id= " , 3 ) = = 0 ) {
child - > id = strdup ( subtoken + 3 ) ;
2019-06-12 13:11:33 -04:00
} else if ( strncmp ( subtoken , " version= " , 8 ) = = 0 ) {
child - > version = strdup ( subtoken + 8 ) ;
2019-04-01 05:29:53 -04:00
}
}
2019-04-01 05:30:02 -04:00
if ( child - > pid ) {
2021-04-21 01:32:39 -04:00
LIST_APPEND ( & proc_list , & child - > list ) ;
2019-04-01 05:30:02 -04:00
} else {
2019-05-16 14:23:22 -04:00
mworker_free_child ( child ) ;
2019-04-01 05:30:02 -04:00
}
2019-04-01 05:29:53 -04:00
}
2021-11-10 05:26:14 -05:00
/* set the leaving processes once we know which number of reloads are the current processes */
list_for_each_entry ( child , & proc_list , list ) {
if ( child - > reloads > minreloads )
child - > options | = PROC_O_LEAVING ;
}
2019-04-01 05:29:53 -04:00
unsetenv ( " HAPROXY_PROCESSES " ) ;
2021-05-19 04:45:12 -04:00
2023-02-21 07:17:24 -05:00
no_env :
if ( ! proc_self ) {
proc_self = mworker_proc_new ( ) ;
if ( ! proc_self ) {
ha_alert ( " Cannot allocate process structures. \n " ) ;
err = - 1 ;
goto out ;
}
proc_self - > options | = PROC_O_TYPE_MASTER ;
proc_self - > pid = pid ;
proc_self - > timestamp = 0 ; /* we don't know the startime anymore */
LIST_APPEND ( & proc_list , & proc_self - > list ) ;
ha_warning ( " The master internals are corrupted or it was started with a too old version (< 1.9). Please restart the master process. \n " ) ;
}
2023-02-21 06:44:56 -05:00
out :
free ( omsg ) ;
return err ;
2019-04-01 05:29:53 -04:00
}
2019-04-01 05:29:54 -04:00
/* Signal blocking and unblocking */
void mworker_block_signals()
{
	static const int blocked[] = {
		SIGUSR1, SIGUSR2, SIGTTIN, SIGTTOU, SIGHUP, SIGCHLD
	};
	sigset_t set;
	size_t i;

	sigemptyset(&set);
	for (i = 0; i < sizeof(blocked) / sizeof(blocked[0]); i++)
		sigaddset(&set, blocked[i]);
	ha_sigmask(SIG_SETMASK, &set, NULL);
}
/* Restore the default signal mask (thin wrapper over haproxy_unblock_signals()). */
void mworker_unblock_signals()
{
	haproxy_unblock_signals();
}
2019-04-01 05:29:55 -04:00
2019-04-01 05:29:56 -04:00
/* ----- mworker signal handlers ----- */
2019-12-11 08:24:07 -05:00
/* broadcast the configured signal to the workers */
void mworker_broadcast_signal ( struct sig_handler * sh )
{
mworker_kill ( sh - > arg ) ;
}
2019-04-01 05:29:56 -04:00
/*
 * SIGHUP handler: re-execute haproxy with -sf followed by the current
 * children PIDs, and possibly old children PIDs if they didn't leave yet.
 */
void mworker_catch_sighup(struct sig_handler *sh)
{
	mworker_reload(0);
}
void mworker_catch_sigterm ( struct sig_handler * sh )
{
int sig = sh - > arg ;
# if defined(USE_SYSTEMD)
if ( global . tune . options & GTUNE_USE_SYSTEMD ) {
sd_notify ( 0 , " STOPPING=1 " ) ;
}
# endif
ha_warning ( " Exiting Master process... \n " ) ;
mworker_kill ( sig ) ;
}
/*
* Wait for every children to exit
*/
void mworker_catch_sigchld ( struct sig_handler * sh )
{
int exitpid = - 1 ;
int status = 0 ;
int childfound ;
restart_wait :
childfound = 0 ;
exitpid = waitpid ( - 1 , & status , WNOHANG ) ;
if ( exitpid > 0 ) {
2019-05-16 14:23:22 -04:00
struct mworker_proc * child , * it ;
2019-04-01 05:29:56 -04:00
if ( WIFEXITED ( status ) )
status = WEXITSTATUS ( status ) ;
else if ( WIFSIGNALED ( status ) )
status = 128 + WTERMSIG ( status ) ;
else if ( WIFSTOPPED ( status ) )
status = 128 + WSTOPSIG ( status ) ;
else
status = 255 ;
2019-04-01 05:29:59 -04:00
/* delete the child from the process list */
2019-04-01 05:29:56 -04:00
list_for_each_entry_safe ( child , it , & proc_list , list ) {
if ( child - > pid ! = exitpid )
continue ;
2021-04-21 01:32:39 -04:00
LIST_DELETE ( & child - > list ) ;
2019-04-01 05:29:56 -04:00
close ( child - > ipc_fd [ 0 ] ) ;
childfound = 1 ;
break ;
}
2019-04-01 05:29:59 -04:00
if ( ! childfound ) {
/* We didn't find the PID in the list, that shouldn't happen but we can emit a warning */
2019-04-01 05:30:02 -04:00
ha_warning ( " Process %d exited with code %d (%s) \n " , exitpid , status , ( status > = 128 ) ? strsignal ( status - 128 ) : " Exit " ) ;
2019-04-01 05:29:56 -04:00
} else {
2019-04-01 05:30:02 -04:00
/* check if exited child is a current child */
2019-04-12 10:09:21 -04:00
if ( ! ( child - > options & PROC_O_LEAVING ) ) {
2020-05-06 11:27:03 -04:00
if ( child - > options & PROC_O_TYPE_WORKER ) {
if ( status < 128 )
2021-11-09 09:25:31 -05:00
ha_warning ( " Current worker (%d) exited with code %d (%s) \n " , exitpid , status , " Exit " ) ;
2020-05-06 11:27:03 -04:00
else
2021-11-09 09:25:31 -05:00
ha_alert ( " Current worker (%d) exited with code %d (%s) \n " , exitpid , status , strsignal ( status - 128 ) ) ;
2020-05-06 11:27:03 -04:00
}
2019-04-12 10:09:23 -04:00
else if ( child - > options & PROC_O_TYPE_PROG )
2019-04-01 05:30:02 -04:00
ha_alert ( " Current program '%s' (%d) exited with code %d (%s) \n " , child - > id , exitpid , status , ( status > = 128 ) ? strsignal ( status - 128 ) : " Exit " ) ;
2019-04-01 05:29:59 -04:00
2023-09-05 09:25:38 -04:00
if ( status ! = 0 & & status ! = 130 & & status ! = 143 ) {
if ( child - > options & PROC_O_TYPE_WORKER ) {
ha_warning ( " A worker process unexpectedly died and this can only be explained by a bug in haproxy or its dependencies. \n Please check that you are running an up to date and maintained version of haproxy and open a bug report. \n " ) ;
display_version ( ) ;
}
if ( ! ( global . tune . options & GTUNE_NOEXIT_ONFAILURE ) ) {
ha_alert ( " exit-on-failure: killing every processes with SIGTERM \n " ) ;
mworker_kill ( SIGTERM ) ;
}
2019-04-01 05:29:56 -04:00
}
2019-04-16 11:42:44 -04:00
/* 0 & SIGTERM (143) are normal, but we should report SIGINT (130) and other signals */
if ( exitcode < 0 & & status ! = 0 & & status ! = 143 )
exitcode = status ;
2019-04-01 05:29:56 -04:00
} else {
2019-04-12 10:09:23 -04:00
if ( child - > options & PROC_O_TYPE_WORKER ) {
2021-11-09 09:25:31 -05:00
ha_warning ( " Former worker (%d) exited with code %d (%s) \n " , exitpid , status , ( status > = 128 ) ? strsignal ( status - 128 ) : " Exit " ) ;
2019-04-01 05:29:59 -04:00
delete_oldpid ( exitpid ) ;
2019-04-12 10:09:23 -04:00
} else if ( child - > options & PROC_O_TYPE_PROG ) {
2019-04-01 05:30:02 -04:00
ha_warning ( " Former program '%s' (%d) exited with code %d (%s) \n " , child - > id , exitpid , status , ( status > = 128 ) ? strsignal ( status - 128 ) : " Exit " ) ;
2019-04-01 05:29:59 -04:00
}
2019-04-01 05:29:56 -04:00
}
2019-05-16 14:23:22 -04:00
mworker_free_child ( child ) ;
child = NULL ;
2019-04-01 05:29:56 -04:00
}
/* do it again to check if it was the last worker */
goto restart_wait ;
}
/* Better rely on the system than on a list of process to check if it was the last one */
else if ( exitpid = = - 1 & & errno = = ECHILD ) {
2019-04-16 11:42:43 -04:00
ha_warning ( " All workers exited. Exiting... (%d) \n " , ( exitcode > 0 ) ? exitcode : EXIT_SUCCESS ) ;
2019-04-01 05:29:56 -04:00
atexit_flag = 0 ;
if ( exitcode > 0 )
2019-04-16 11:42:43 -04:00
exit ( exitcode ) ; /* parent must leave using the status code that provoked the exit */
exit ( EXIT_SUCCESS ) ;
2019-04-01 05:29:56 -04:00
}
}
2019-04-01 05:29:55 -04:00
/* ----- IPC FD (sockpair) related ----- */
/* This wrapper is called from the workers. It is registered instead of the
* normal listener_accept ( ) so the worker can exit ( ) when it detects that the
* master closed the IPC FD . If it ' s not a close , we just call the regular
2020-10-15 15:29:49 -04:00
* listener_accept ( ) function .
*/
2019-04-01 05:29:55 -04:00
void mworker_accept_wrapper ( int fd )
{
char c ;
int ret ;
while ( 1 ) {
ret = recv ( fd , & c , 1 , MSG_PEEK ) ;
if ( ret = = - 1 ) {
if ( errno = = EINTR )
continue ;
2022-04-25 14:32:15 -04:00
if ( errno = = EAGAIN | | errno = = EWOULDBLOCK ) {
2019-04-01 05:29:55 -04:00
fd_cant_recv ( fd ) ;
return ;
}
break ;
} else if ( ret > 0 ) {
2020-10-15 15:29:49 -04:00
struct listener * l = fdtab [ fd ] . owner ;
if ( l )
listener_accept ( l ) ;
2019-04-01 05:29:55 -04:00
return ;
} else if ( ret = = 0 ) {
/* At this step the master is down before
* this worker perform a ' normal ' exit .
* So we want to exit with an error but
* other threads could currently process
* some stuff so we can ' t perform a clean
* deinit ( ) .
*/
exit ( EXIT_FAILURE ) ;
}
}
return ;
}
/*
2019-05-20 05:12:15 -04:00
* This function registers the accept wrapper for the sockpair of the master
* worker . It ' s only handled by worker thread # 0. Other threads and master do
* nothing here . It always returns 1 ( success ) .
2019-04-01 05:29:55 -04:00
*/
2022-07-05 03:04:03 -04:00
static int mworker_sockpair_register_per_thread ( )
2019-04-01 05:29:55 -04:00
{
2019-05-20 05:12:15 -04:00
if ( ! ( global . mode & MODE_MWORKER ) | | master )
return 1 ;
if ( tid ! = 0 )
return 1 ;
2019-04-01 05:29:55 -04:00
2023-02-21 07:41:24 -05:00
if ( proc_self - > ipc_fd [ 1 ] < 0 ) /* proc_self was incomplete and we can't find the socketpair */
return 1 ;
2022-04-26 04:24:14 -04:00
fd_set_nonblock ( proc_self - > ipc_fd [ 1 ] ) ;
2022-07-05 03:04:03 -04:00
/* register the wrapper to handle read 0 when the master exits */
2022-07-04 18:55:09 -04:00
fdtab [ proc_self - > ipc_fd [ 1 ] ] . iocb = mworker_accept_wrapper ;
2019-04-01 05:29:55 -04:00
fd_want_recv ( proc_self - > ipc_fd [ 1 ] ) ;
2019-05-20 05:12:15 -04:00
return 1 ;
2019-04-01 05:29:55 -04:00
}
2019-04-01 05:29:57 -04:00
2022-07-05 03:04:03 -04:00
REGISTER_PER_THREAD_INIT ( mworker_sockpair_register_per_thread ) ;
2019-05-20 05:12:15 -04:00
2019-04-01 05:29:57 -04:00
/* ----- proxies ----- */
/*
* Upon a reload , the master worker needs to close all listeners FDs but the mworker_pipe
* fd , and the FD provided by fd @
*/
void mworker_cleanlisteners ( )
{
struct listener * l , * l_next ;
struct proxy * curproxy ;
struct peers * curpeers ;
2022-11-23 13:56:35 -05:00
/* peers proxies cleanup */
2019-04-01 05:29:57 -04:00
for ( curpeers = cfg_peers ; curpeers ; curpeers = curpeers - > next ) {
if ( ! curpeers - > peers_fe )
continue ;
stop_proxy ( curpeers - > peers_fe ) ;
/* disable this peer section so that it kills itself */
2022-12-07 09:21:24 -05:00
if ( curpeers - > sighandler )
signal_unregister_handler ( curpeers - > sighandler ) ;
2023-04-22 11:47:34 -04:00
task_destroy ( curpeers - > sync_task ) ;
2019-04-01 05:29:57 -04:00
curpeers - > sync_task = NULL ;
curpeers - > peers_fe = NULL ;
}
2022-11-23 13:56:35 -05:00
/* main proxies cleanup */
2019-04-01 05:29:57 -04:00
for ( curproxy = proxies_list ; curproxy ; curproxy = curproxy - > next ) {
int listen_in_master = 0 ;
list_for_each_entry_safe ( l , l_next , & curproxy - > conf . listeners , by_fe ) {
/* remove the listener, but not those we need in the master... */
2020-10-09 10:11:46 -04:00
if ( ! ( l - > rx . flags & RX_F_MWORKER ) ) {
2020-10-09 09:55:23 -04:00
unbind_listener ( l ) ;
2019-04-01 05:29:57 -04:00
delete_listener ( l ) ;
} else {
listen_in_master = 1 ;
}
}
/* if the proxy shouldn't be in the master, we stop it */
if ( ! listen_in_master )
2021-10-06 08:24:19 -04:00
curproxy - > flags | = PR_FL_DISABLED ;
2019-04-01 05:29:57 -04:00
}
}
2019-04-01 05:30:01 -04:00
2022-01-28 15:17:30 -05:00
/* Upon a configuration loading error some mworker_proc and FDs/server were
* assigned but the worker was never forked , we must close the FDs and
* remove the server
*/
void mworker_cleanup_proc ( )
{
struct mworker_proc * child , * it ;
list_for_each_entry_safe ( child , it , & proc_list , list ) {
if ( child - > pid = = - 1 ) {
2023-06-21 03:44:18 -04:00
/* Close the socketpairs. */
2022-01-28 15:17:30 -05:00
if ( child - > ipc_fd [ 0 ] > - 1 )
close ( child - > ipc_fd [ 0 ] ) ;
2023-06-21 03:44:18 -04:00
if ( child - > ipc_fd [ 1 ] > - 1 )
close ( child - > ipc_fd [ 1 ] ) ;
2022-01-28 15:17:30 -05:00
if ( child - > srv ) {
/* only exists if we created a master CLI listener */
srv_drop ( child - > srv ) ;
}
LIST_DELETE ( & child - > list ) ;
mworker_free_child ( child ) ;
}
}
}
2019-04-01 05:30:01 -04:00
/* Displays workers and processes */
static int cli_io_handler_show_proc ( struct appctx * appctx )
{
struct mworker_proc * child ;
int old = 0 ;
2023-02-17 10:23:52 -05:00
int up = date . tv_sec - proc_self - > timestamp ;
2019-06-12 12:21:17 -04:00
char * uptime = NULL ;
2021-11-10 04:49:06 -05:00
char * reloadtxt = NULL ;
2019-04-01 05:30:01 -04:00
2023-02-17 10:23:52 -05:00
if ( up < 0 ) /* must never be negative because of clock drift */
up = 0 ;
2019-04-01 05:30:01 -04:00
chunk_reset ( & trash ) ;
2021-11-10 04:49:06 -05:00
memprintf ( & reloadtxt , " %d [failed: %d] " , proc_self - > reloads , proc_self - > failedreloads ) ;
2021-11-09 09:25:31 -05:00
chunk_printf ( & trash , " #%-14s %-15s %-15s %-15s %-15s \n " , " <PID> " , " <type> " , " <reloads> " , " <uptime> " , " <version> " ) ;
2019-06-12 12:21:17 -04:00
memprintf ( & uptime , " %dd%02dh%02dm%02ds " , up / 86400 , ( up % 86400 ) / 3600 , ( up % 3600 ) / 60 , ( up % 60 ) ) ;
2021-11-10 04:49:06 -05:00
chunk_appendf ( & trash , " %-15u %-15s %-15s %-15s %-15s \n " , ( unsigned int ) getpid ( ) , " master " , reloadtxt , uptime , haproxy_version ) ;
ha_free ( & reloadtxt ) ;
2021-02-20 04:46:51 -05:00
ha_free ( & uptime ) ;
2019-04-01 05:30:01 -04:00
/* displays current processes */
chunk_appendf ( & trash , " # workers \n " ) ;
list_for_each_entry ( child , & proc_list , list ) {
2023-02-17 10:23:52 -05:00
up = date . tv_sec - child - > timestamp ;
if ( up < 0 ) /* must never be negative because of clock drift */
up = 0 ;
2019-04-01 05:30:01 -04:00
2019-04-12 10:09:23 -04:00
if ( ! ( child - > options & PROC_O_TYPE_WORKER ) )
2019-04-01 05:30:01 -04:00
continue ;
2019-04-12 10:09:21 -04:00
if ( child - > options & PROC_O_LEAVING ) {
2019-04-01 05:30:01 -04:00
old + + ;
continue ;
}
2019-06-12 12:21:17 -04:00
memprintf ( & uptime , " %dd%02dh%02dm%02ds " , up / 86400 , ( up % 86400 ) / 3600 , ( up % 3600 ) / 60 , ( up % 60 ) ) ;
2021-11-09 09:25:31 -05:00
chunk_appendf ( & trash , " %-15u %-15s %-15d %-15s %-15s \n " , child - > pid , " worker " , child - > reloads , uptime , child - > version ) ;
2021-02-20 04:46:51 -05:00
ha_free ( & uptime ) ;
2019-04-01 05:30:01 -04:00
}
/* displays old processes */
if ( old ) {
char * msg = NULL ;
chunk_appendf ( & trash , " # old workers \n " ) ;
list_for_each_entry ( child , & proc_list , list ) {
2023-02-17 10:23:52 -05:00
up = date . tv_sec - child - > timestamp ;
if ( up < = 0 ) /* must never be negative because of clock drift */
up = 0 ;
2019-04-01 05:30:01 -04:00
2019-04-12 10:09:23 -04:00
if ( ! ( child - > options & PROC_O_TYPE_WORKER ) )
2019-04-01 05:30:01 -04:00
continue ;
2019-04-12 10:09:21 -04:00
if ( child - > options & PROC_O_LEAVING ) {
2019-06-12 12:21:17 -04:00
memprintf ( & uptime , " %dd%02dh%02dm%02ds " , up / 86400 , ( up % 86400 ) / 3600 , ( up % 3600 ) / 60 , ( up % 60 ) ) ;
2021-11-09 09:25:31 -05:00
chunk_appendf ( & trash , " %-15u %-15s %-15d %-15s %-15s \n " , child - > pid , " worker " , child - > reloads , uptime , child - > version ) ;
2021-02-20 04:46:51 -05:00
ha_free ( & uptime ) ;
2019-04-01 05:30:01 -04:00
}
}
free ( msg ) ;
}
2019-04-01 05:30:03 -04:00
/* displays external process */
chunk_appendf ( & trash , " # programs \n " ) ;
old = 0 ;
list_for_each_entry ( child , & proc_list , list ) {
2023-02-17 10:23:52 -05:00
up = date . tv_sec - child - > timestamp ;
if ( up < 0 ) /* must never be negative because of clock drift */
up = 0 ;
2019-04-01 05:30:03 -04:00
2019-04-12 10:09:23 -04:00
if ( ! ( child - > options & PROC_O_TYPE_PROG ) )
2019-04-01 05:30:03 -04:00
continue ;
2019-04-12 10:09:21 -04:00
if ( child - > options & PROC_O_LEAVING ) {
2019-04-01 05:30:03 -04:00
old + + ;
continue ;
}
2019-06-12 12:21:17 -04:00
memprintf ( & uptime , " %dd%02dh%02dm%02ds " , up / 86400 , ( up % 86400 ) / 3600 , ( up % 3600 ) / 60 , ( up % 60 ) ) ;
2021-11-09 09:25:31 -05:00
chunk_appendf ( & trash , " %-15u %-15s %-15d %-15s %-15s \n " , child - > pid , child - > id , child - > reloads , uptime , " - " ) ;
2021-02-20 04:46:51 -05:00
ha_free ( & uptime ) ;
2019-04-01 05:30:03 -04:00
}
if ( old ) {
chunk_appendf ( & trash , " # old programs \n " ) ;
list_for_each_entry ( child , & proc_list , list ) {
2023-02-17 10:23:52 -05:00
up = date . tv_sec - child - > timestamp ;
if ( up < 0 ) /* must never be negative because of clock drift */
up = 0 ;
2019-04-01 05:30:03 -04:00
2019-04-12 10:09:23 -04:00
if ( ! ( child - > options & PROC_O_TYPE_PROG ) )
2019-04-01 05:30:03 -04:00
continue ;
2019-04-12 10:09:21 -04:00
if ( child - > options & PROC_O_LEAVING ) {
2019-06-12 12:21:17 -04:00
memprintf ( & uptime , " %dd%02dh%02dm%02ds " , up / 86400 , ( up % 86400 ) / 3600 , ( up % 3600 ) / 60 , ( up % 60 ) ) ;
2021-11-09 09:25:31 -05:00
chunk_appendf ( & trash , " %-15u %-15s %-15d %-15s %-15s \n " , child - > pid , child - > id , child - > reloads , uptime , " - " ) ;
2021-02-20 04:46:51 -05:00
ha_free ( & uptime ) ;
2019-04-01 05:30:03 -04:00
}
}
}
2022-05-18 09:07:19 -04:00
if ( applet_putchk ( appctx , & trash ) = = - 1 )
2019-04-01 05:30:01 -04:00
return 0 ;
/* dump complete */
return 1 ;
}
/* reload the master process */
static int cli_parse_reload ( char * * args , char * payload , struct appctx * appctx , void * private )
{
MEDIUM: mworker/cli: keep the connection of the FD that ask for a reload
When using the "reload" command over the master CLI, all connections to
the master CLI were cut, this was unfortunate because it could have been
used to implement a synchronous reload command.
This patch implements an architecture to keep the connection alive after
the reload.
The master CLI is now equipped with a listener which uses a socketpair,
the 2 FDs of this socketpair are stored in the mworker_proc of the
master, which the master keeps via the environment variable.
ipc_fd[1] is used as a listener for the master CLI. During the "reload"
command, the CLI will send the FD of the current session over ipc_fd[0],
then the reload is achieved, so the master won't handle the recv of the
FD. Once reloaded, ipc_fd[1] receives the FD of the session, so the
connection is preserved. Of course it is a new context, so everything
like the "prompt mode" are lost.
Only the FD which performs the reload is kept.
2022-09-22 11:26:23 -04:00
struct stconn * scb = NULL ;
struct stream * strm = NULL ;
struct connection * conn = NULL ;
int fd = - 1 ;
2023-11-24 15:20:32 -05:00
int hardreload = 0 ;
MEDIUM: mworker/cli: keep the connection of the FD that ask for a reload
When using the "reload" command over the master CLI, all connections to
the master CLI were cut, this was unfortunate because it could have been
used to implement a synchronous reload command.
This patch implements an architecture to keep the connection alive after
the reload.
The master CLI is now equipped with a listener which uses a socketpair,
the 2 FDs of this socketpair are stored in the mworker_proc of the
master, which the master keeps via the environment variable.
ipc_fd[1] is used as a listener for the master CLI. During the "reload"
command, the CLI will send the FD of the current session over ipc_fd[0],
then the reload is achieved, so the master won't handle the recv of the
FD. Once reloaded, ipc_fd[1] receives the FD of the session, so the
connection is preserved. Of course it is a new context, so everything
like the "prompt mode" are lost.
Only the FD which performs the reload is kept.
2022-09-22 11:26:23 -04:00
2019-04-01 05:30:01 -04:00
if ( ! cli_has_level ( appctx , ACCESS_LVL_OPER ) )
return 1 ;
2023-11-24 15:20:32 -05:00
/* hard reload requested */
if ( * args [ 0 ] = = ' h ' )
hardreload = 1 ;
MEDIUM: mworker/cli: keep the connection of the FD that ask for a reload
When using the "reload" command over the master CLI, all connections to
the master CLI were cut, this was unfortunate because it could have been
used to implement a synchronous reload command.
This patch implements an architecture to keep the connection alive after
the reload.
The master CLI is now equipped with a listener which uses a socketpair,
the 2 FDs of this socketpair are stored in the mworker_proc of the
master, which the master keeps via the environment variable.
ipc_fd[1] is used as a listener for the master CLI. During the "reload"
command, the CLI will send the FD of the current session over ipc_fd[0],
then the reload is achieved, so the master won't handle the recv of the
FD. Once reloaded, ipc_fd[1] receives the FD of the session, so the
connection is preserved. Of course it is a new context, so everything
like the "prompt mode" are lost.
Only the FD which performs the reload is kept.
2022-09-22 11:26:23 -04:00
/* This ask for a synchronous reload, which means we will keep this FD
instead of closing it . */
scb = appctx_sc ( appctx ) ;
if ( scb )
strm = sc_strm ( scb ) ;
if ( strm & & strm - > scf )
conn = sc_conn ( strm - > scf ) ;
if ( conn )
fd = conn_fd ( conn ) ;
/* Send the FD of the current session to the "cli_reload" FD, which won't be polled */
if ( fd ! = - 1 & & send_fd_uxst ( proc_self - > ipc_fd [ 0 ] , fd ) = = 0 ) {
2022-09-23 04:21:32 -04:00
fd_delete ( fd ) ; /* avoid the leak of the FD after sending it via the socketpair */
MEDIUM: mworker/cli: keep the connection of the FD that ask for a reload
When using the "reload" command over the master CLI, all connections to
the master CLI were cut, this was unfortunate because it could have been
used to implement a synchronous reload command.
This patch implements an architecture to keep the connection alive after
the reload.
The master CLI is now equipped with a listener which uses a socketpair,
the 2 FDs of this socketpair are stored in the mworker_proc of the
master, which the master keeps via the environment variable.
ipc_fd[1] is used as a listener for the master CLI. During the "reload"
command, the CLI will send the FD of the current session over ipc_fd[0],
then the reload is achieved, so the master won't handle the recv of the
FD. Once reloaded, ipc_fd[1] receives the FD of the session, so the
connection is preserved. Of course it is a new context, so everything
like the "prompt mode" are lost.
Only the FD which performs the reload is kept.
2022-09-22 11:26:23 -04:00
}
2023-11-24 15:20:32 -05:00
mworker_reload ( hardreload ) ;
2019-04-01 05:30:01 -04:00
return 1 ;
}
2022-10-13 11:49:54 -04:00
/* Displays if the current reload failed or succeed.
 * If the startup-logs is available, dump it.
 *
 * I/O handler for the internal "_loadstatus" master CLI command.
 * Returns 1 when the dump is complete, 0 when it must be called again
 * (output buffer full, or output handed over to the ring dumper).
 */
static int cli_io_handler_show_loadstatus(struct appctx *appctx)
{
	char *env;

	/* operator level (or above) is required to query the load status */
	if (!cli_has_level(appctx, ACCESS_LVL_OPER))
		return 1;

	/* HAPROXY_LOAD_SUCCESS is maintained by the master across reloads;
	 * when absent there is nothing to report. */
	env = getenv("HAPROXY_LOAD_SUCCESS");
	if (!env)
		return 1;

	if (strcmp(env, "0") == 0) {
		chunk_printf(&trash, "Success=0\n");
	} else if (strcmp(env, "1") == 0) {
		chunk_printf(&trash, "Success=1\n");
	}
#ifdef USE_SHM_OPEN
	/* separate the status line from the startup-logs dump below */
	if (startup_logs && ring_data(startup_logs) > 1)
		chunk_appendf(&trash, "--\n");

	if (applet_putchk(appctx, &trash) == -1)
		return 0; /* output buffer full, retry later */

	if (startup_logs) {
		/* hand the applet over to the ring dumper for the rest of the
		 * output; this handler must not run again on this applet */
		appctx->io_handler = NULL;
		ring_attach_cli(startup_logs, appctx, 0);
		return 0;
	}
#else
	if (applet_putchk(appctx, &trash) == -1)
		return 0; /* output buffer full, retry later */
#endif
	return 1;
}
2019-04-01 05:30:01 -04:00
2019-05-07 11:49:33 -04:00
static int mworker_parse_global_max_reloads ( char * * args , int section_type , struct proxy * curpx ,
2021-03-09 03:53:46 -05:00
const struct proxy * defpx , const char * file , int linenum , char * * err )
2019-05-07 11:49:33 -04:00
{
int err_code = 0 ;
if ( alertif_too_many_args ( 1 , file , linenum , args , & err_code ) )
goto out ;
if ( * ( args [ 1 ] ) = = 0 ) {
memprintf ( err , " %sparsing [%s:%d] : '%s' expects an integer argument. \n " , * err , file , linenum , args [ 0 ] ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
max_reloads = atol ( args [ 1 ] ) ;
if ( max_reloads < 0 ) {
memprintf ( err , " %sparsing [%s:%d] '%s' : invalid value %d, must be >= 0 " , * err , file , linenum , args [ 0 ] , max_reloads ) ;
err_code | = ERR_ALERT | ERR_FATAL ;
goto out ;
}
out :
return err_code ;
}
2019-05-16 14:23:22 -04:00
void mworker_free_child ( struct mworker_proc * child )
{
2022-01-27 09:33:40 -05:00
int i ;
2019-05-16 14:23:22 -04:00
if ( child = = NULL )
return ;
2022-01-27 09:33:40 -05:00
for ( i = 0 ; child - > command & & child - > command [ i ] ; i + + )
ha_free ( & child - > command [ i ] ) ;
2019-06-12 12:21:17 -04:00
2022-01-27 09:33:40 -05:00
ha_free ( & child - > command ) ;
ha_free ( & child - > id ) ;
ha_free ( & child - > version ) ;
2019-05-16 14:23:22 -04:00
free ( child ) ;
}
2019-05-07 11:49:33 -04:00
2024-06-26 10:21:50 -04:00
/* Creates and binds dedicated master CLI 'reload' sockpair and listeners.
 * Exits the process (EXIT_FAILURE) on any error: this runs at startup and
 * a master without its CLI cannot be managed.
 */
void mworker_create_master_cli(void)
{
	struct wordlist *it, *c;

	/* get the info of the children in the env */
	if (mworker_env_to_proc_list() < 0) {
		exit(EXIT_FAILURE);
	}

	if (!LIST_ISEMPTY(&mworker_cli_conf)) {
		char *path = NULL;

		if (mworker_cli_proxy_create() < 0) {
			ha_alert("Can't create the master's CLI.\n");
			exit(EXIT_FAILURE);
		}

		/* one listener per configured master CLI address; each entry
		 * is consumed (unlinked and freed) once its listener exists */
		list_for_each_entry_safe(c, it, &mworker_cli_conf, list) {

			if (mworker_cli_proxy_new_listener(c->s) == NULL) {
				ha_alert("Can't create the master's CLI.\n");
				exit(EXIT_FAILURE);
			}

			LIST_DELETE(&c->list);
			free(c->s);
			free(c);
		}

		/* Creates the mcli_reload listener, which is the listener used
		 * to retrieve the master CLI session which asked for the reload.
		 *
		 * ipc_fd[1] will be used as a listener, and ipc_fd[0]
		 * will be used to send the FD of the session.
		 *
		 * Both FDs will be kept in the master. The sockets are
		 * created only if they weren't inherited.
		 */
		if ((proc_self->ipc_fd[1] == -1) &&
		    socketpair(AF_UNIX, SOCK_STREAM, 0, proc_self->ipc_fd) < 0) {
			ha_alert("Can't create the mcli_reload socketpair.\n");
			exit(EXIT_FAILURE);
		}

		/* Create the mcli_reload listener from the proc_self struct */
		memprintf(&path, "sockpair@%d", proc_self->ipc_fd[1]);
		mcli_reload_bind_conf = mworker_cli_proxy_new_listener(path);
		if (mcli_reload_bind_conf == NULL) {
			ha_alert("Can't create the mcli_reload listener.\n");
			exit(EXIT_FAILURE);
		}
		ha_free(&path);
	}
}
2019-05-07 11:49:33 -04:00
/* configuration keywords registered for the "global" section */
static struct cfg_kw_list mworker_kws = {{ }, {
	{ CFG_GLOBAL, "mworker-max-reloads", mworker_parse_global_max_reloads },
	{ 0, NULL, NULL },
}};

INITCALL1(STG_REGISTER, cfg_register_keywords, &mworker_kws);
2019-04-01 05:30:01 -04:00
/* register cli keywords (all restricted to the master CLI) */
static struct cli_kw_list cli_kws = {{ },{
	{ { "@<relative pid>", NULL }, "@<relative pid> : send a command to the <relative pid> process", NULL, cli_io_handler_show_proc, NULL, NULL, ACCESS_MASTER_ONLY },
	{ { "@!<pid>", NULL }, "@!<pid> : send a command to the <pid> process", cli_parse_default, NULL, NULL, NULL, ACCESS_MASTER_ONLY },
	{ { "@master", NULL }, "@master : send a command to the master process", cli_parse_default, NULL, NULL, NULL, ACCESS_MASTER_ONLY },
	{ { "show", "proc", NULL }, "show proc : show processes status", cli_parse_default, cli_io_handler_show_proc, NULL, NULL, ACCESS_MASTER_ONLY },
	{ { "reload", NULL }, "reload : achieve a soft-reload (-sf) of haproxy", cli_parse_reload, NULL, NULL, NULL, ACCESS_MASTER_ONLY },
	{ { "hard-reload", NULL }, "hard-reload : achieve a hard-reload (-st) of haproxy", cli_parse_reload, NULL, NULL, NULL, ACCESS_MASTER_ONLY },
	/* internal command used by the new master to fetch the reload status */
	{ { "_loadstatus", NULL }, NULL, cli_parse_default, cli_io_handler_show_loadstatus, NULL, NULL, ACCESS_MASTER_ONLY },
	{{},}
}};

INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);