haproxy/include/haproxy/thread.h

468 lines
17 KiB
C
Raw Normal View History

/*
* include/haproxy/thread.h
* definitions, macros and inline functions used by threads.
*
* Copyright (C) 2017 Christopher Faulet - cfaulet@haproxy.com
* Copyright (C) 2020 Willy Tarreau - w@1wt.eu
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation, version 2.1
* exclusively.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef _HAPROXY_THREAD_H
#define _HAPROXY_THREAD_H
#include <haproxy/api.h>
#include <haproxy/thread-t.h>
#include <haproxy/tinfo.h>
/* Note: this file mainly contains 5 sections:
* - a small common part, which also corresponds to the common API
* - one used solely when USE_THREAD is *not* set
* - one used solely when USE_THREAD is set
* - one used solely when USE_THREAD is set WITHOUT debugging
* - one used solely when USE_THREAD is set WITH debugging
*
*/
/* Generic exports */
int parse_nbthread(const char *arg, char **err);
void ha_tkill(unsigned int thr, int sig);
void ha_tkillall(int sig);
void ha_thread_relax(void);
extern int thread_cpus_enabled_at_boot;
#ifndef USE_THREAD
/********************** THREADS DISABLED ************************/
/* Only way found to replace variables with constants that are optimized away
* at build time.
*/
/* single-threaded build: thread 0 always exists (bit 0 set) and no thread is
 * ever tracked as harmless, so the mask stays empty */
enum { all_threads_mask = 1UL };
enum { threads_harmless_mask = 0 };
MEDIUM: threads: add a stronger thread_isolate_full() call The current principle of running under isolation was made to access sensitive data while being certain that no other thread was using them in parallel, without necessarily having to place locks everywhere. The main use case are "show sess" and "show fd" which run over long chains of pointers. The thread_isolate() call relies on the "harmless" bit that indicates for a given thread that it's not currently doing such sensitive things, which is advertised using thread_harmless_now() and which ends usings thread_harmless_end(), which also waits for possibly concurrent threads to complete their work if they took this opportunity for starting something tricky. As some system calls were notoriously slow (e.g. mmap()), a bunch of thread_harmless_now() / thread_harmless_end() were placed around them to let waiting threads do their work while such other threads were not able to modify memory contents. But this is not sufficient for performing memory modifications. One such example is the server deletion code. By modifying memory, it not only requires that other threads are not playing with it, but are not either in the process of touching it. The fact that a pool_alloc() or pool_free() on some structure may call thread_harmless_now() and let another thread start to release the same object's memory is not acceptable. This patch introduces the concept of "idle threads". Threads entering the polling loop are idle, as well as those that are waiting for all others to become idle via the new function thread_isolate_full(). Once thread_isolate_full() is granted, the thread is not idle anymore, and it is released using thread_release() just like regular isolation. Its users have to keep in mind that across this call nothing is granted as another thread might have performed shared memory modifications. But such users are extremely rare and are actually expecting this from their peers as well. 
Note that in case of backport, this patch depends on previous patch: MINOR: threads: make thread_release() not wait for other ones to complete
2021-08-04 05:44:17 -04:00
enum { threads_idle_mask = 0 };
MEDIUM: threads: add thread_sync_release() to synchronize steps This function provides an alternate way to leave a critical section run under thread_isolate(). Currently, a thread may remain in thread_release() without having the time to notice that the rdv mask was released and taken again by another thread entering thread_isolate() (often the same that just released it). This is because threads wait in harmless mode in the loop, which is compatible with the conditions to enter thread_isolate(). It's not possible to make them wait with the harmless bit off or we cannot know when the job is finished for the next thread to start in thread_isolate(), and if we don't clear the rdv bit when going there, we create another race on the start point of thread_isolate(). This new synchronous variant of thread_release() makes use of an extra mask to indicate the threads that want to be synchronously released. In this case, they will be marked harmless before releasing their sync bit, and will wait for others to release their bit as well, guaranteeing that thread_isolate() cannot be started by any of them before they all left thread_sync_release(). This allows to construct synchronized blocks like this : thread_isolate() /* optionally do something alone here */ thread_sync_release() /* do something together here */ thread_isolate() /* optionally do something alone here */ thread_sync_release() And so on. This is particularly useful during initialization where several steps have to be respected and no thread must start a step before the previous one is completed by other threads. This one must not be placed after any call to thread_release() or it would risk to block an earlier call to thread_isolate() which the current thread managed to leave without waiting for others to complete, and end up here with the thread's harmless bit cleared, blocking others. This might be improved in the future.
2019-06-09 06:20:02 -04:00
/* constant thread identity: the single thread is #0 with thread bit 1, and no
 * rendez-vous/sync request can ever be pending */
enum { threads_sync_mask = 0 };
enum { threads_want_rdv_mask = 0 };
enum { tid_bit = 1UL };
enum { tid = 0 };
/* With threads disabled, all locking primitives compile down to nothing. The
 * "try" variants always report success (0) so callers behave as if the lock
 * was granted immediately.
 */
#define HA_SPIN_INIT(l) do { /* do nothing */ } while(0)
#define HA_SPIN_DESTROY(l) do { /* do nothing */ } while(0)
#define HA_SPIN_LOCK(lbl, l) do { /* do nothing */ } while(0)
#define HA_SPIN_TRYLOCK(lbl, l) ({ 0; })
#define HA_SPIN_UNLOCK(lbl, l) do { /* do nothing */ } while(0)
#define HA_RWLOCK_INIT(l) do { /* do nothing */ } while(0)
#define HA_RWLOCK_DESTROY(l) do { /* do nothing */ } while(0)
#define HA_RWLOCK_WRLOCK(lbl, l) do { /* do nothing */ } while(0)
#define HA_RWLOCK_TRYWRLOCK(lbl, l) ({ 0; })
#define HA_RWLOCK_WRUNLOCK(lbl, l) do { /* do nothing */ } while(0)
#define HA_RWLOCK_RDLOCK(lbl, l) do { /* do nothing */ } while(0)
#define HA_RWLOCK_TRYRDLOCK(lbl, l) ({ 0; })
#define HA_RWLOCK_RDUNLOCK(lbl, l) do { /* do nothing */ } while(0)
#define HA_RWLOCK_SKLOCK(lbl,l) do { /* do nothing */ } while(0)
#define HA_RWLOCK_SKTOWR(lbl,l) do { /* do nothing */ } while(0)
#define HA_RWLOCK_WRTOSK(lbl,l) do { /* do nothing */ } while(0)
#define HA_RWLOCK_SKTORD(lbl,l) do { /* do nothing */ } while(0)
#define HA_RWLOCK_WRTORD(lbl,l) do { /* do nothing */ } while(0)
#define HA_RWLOCK_SKUNLOCK(lbl,l) do { /* do nothing */ } while(0)
#define HA_RWLOCK_TRYSKLOCK(lbl,l) ({ 0; })
#define HA_RWLOCK_TRYRDTOSK(lbl,l) ({ 0; })
/* without threads, signal masking simply applies to the whole process */
#define ha_sigmask(how, set, oldset) sigprocmask(how, set, oldset)
/* sets the thread-local info/context pointers for the given thread ID. Note
 * that the parameter shadows the file-scope 'tid' enum constant above;
 * presumably callers always pass 0 in single-threaded builds — the pointers
 * simply index the per-thread arrays with whatever value is given.
 */
static inline void ha_set_tid(unsigned int tid)
{
	ti = &ha_thread_info[tid];
	th_ctx = &ha_thread_ctx[tid];
}
MEDIUM: threads: add a stronger thread_isolate_full() call The current principle of running under isolation was made to access sensitive data while being certain that no other thread was using them in parallel, without necessarily having to place locks everywhere. The main use case are "show sess" and "show fd" which run over long chains of pointers. The thread_isolate() call relies on the "harmless" bit that indicates for a given thread that it's not currently doing such sensitive things, which is advertised using thread_harmless_now() and which ends usings thread_harmless_end(), which also waits for possibly concurrent threads to complete their work if they took this opportunity for starting something tricky. As some system calls were notoriously slow (e.g. mmap()), a bunch of thread_harmless_now() / thread_harmless_end() were placed around them to let waiting threads do their work while such other threads were not able to modify memory contents. But this is not sufficient for performing memory modifications. One such example is the server deletion code. By modifying memory, it not only requires that other threads are not playing with it, but are not either in the process of touching it. The fact that a pool_alloc() or pool_free() on some structure may call thread_harmless_now() and let another thread start to release the same object's memory is not acceptable. This patch introduces the concept of "idle threads". Threads entering the polling loop are idle, as well as those that are waiting for all others to become idle via the new function thread_isolate_full(). Once thread_isolate_full() is granted, the thread is not idle anymore, and it is released using thread_release() just like regular isolation. Its users have to keep in mind that across this call nothing is granted as another thread might have performed shared memory modifications. But such users are extremely rare and are actually expecting this from their peers as well. 
Note that in case of backport, this patch depends on previous patch: MINOR: threads: make thread_release() not wait for other ones to complete
2021-08-04 05:44:17 -04:00
/* All idle/harmless/isolation operations are no-ops when threads are
 * disabled: a single thread cannot conflict with itself. Declared with
 * (void) to provide proper prototypes (fixes -Wstrict-prototypes).
 */
static inline void thread_idle_now(void)
{
}

static inline void thread_idle_end(void)
{
}

static inline void thread_harmless_now(void)
{
}

static inline void thread_harmless_end(void)
{
}

static inline void thread_isolate(void)
{
}
MEDIUM: threads: add a stronger thread_isolate_full() call The current principle of running under isolation was made to access sensitive data while being certain that no other thread was using them in parallel, without necessarily having to place locks everywhere. The main use case are "show sess" and "show fd" which run over long chains of pointers. The thread_isolate() call relies on the "harmless" bit that indicates for a given thread that it's not currently doing such sensitive things, which is advertised using thread_harmless_now() and which ends usings thread_harmless_end(), which also waits for possibly concurrent threads to complete their work if they took this opportunity for starting something tricky. As some system calls were notoriously slow (e.g. mmap()), a bunch of thread_harmless_now() / thread_harmless_end() were placed around them to let waiting threads do their work while such other threads were not able to modify memory contents. But this is not sufficient for performing memory modifications. One such example is the server deletion code. By modifying memory, it not only requires that other threads are not playing with it, but are not either in the process of touching it. The fact that a pool_alloc() or pool_free() on some structure may call thread_harmless_now() and let another thread start to release the same object's memory is not acceptable. This patch introduces the concept of "idle threads". Threads entering the polling loop are idle, as well as those that are waiting for all others to become idle via the new function thread_isolate_full(). Once thread_isolate_full() is granted, the thread is not idle anymore, and it is released using thread_release() just like regular isolation. Its users have to keep in mind that across this call nothing is granted as another thread might have performed shared memory modifications. But such users are extremely rare and are actually expecting this from their peers as well. 
Note that in case of backport, this patch depends on previous patch: MINOR: threads: make thread_release() not wait for other ones to complete
2021-08-04 05:44:17 -04:00
/* no-ops without threads: there is no other thread to isolate from nor to
 * release. Declared with (void) to provide proper prototypes. */
static inline void thread_isolate_full(void)
{
}

static inline void thread_release(void)
{
}
MEDIUM: threads: add thread_sync_release() to synchronize steps This function provides an alternate way to leave a critical section run under thread_isolate(). Currently, a thread may remain in thread_release() without having the time to notice that the rdv mask was released and taken again by another thread entering thread_isolate() (often the same that just released it). This is because threads wait in harmless mode in the loop, which is compatible with the conditions to enter thread_isolate(). It's not possible to make them wait with the harmless bit off or we cannot know when the job is finished for the next thread to start in thread_isolate(), and if we don't clear the rdv bit when going there, we create another race on the start point of thread_isolate(). This new synchronous variant of thread_release() makes use of an extra mask to indicate the threads that want to be synchronously released. In this case, they will be marked harmless before releasing their sync bit, and will wait for others to release their bit as well, guaranteeing that thread_isolate() cannot be started by any of them before they all left thread_sync_release(). This allows to construct synchronized blocks like this : thread_isolate() /* optionally do something alone here */ thread_sync_release() /* do something together here */ thread_isolate() /* optionally do something alone here */ thread_sync_release() And so on. This is particularly useful during initialization where several steps have to be respected and no thread must start a step before the previous one is completed by other threads. This one must not be placed after any call to thread_release() or it would risk to block an earlier call to thread_isolate() which the current thread managed to leave without waiting for others to complete, and end up here with the thread's harmless bit cleared, blocking others. This might be improved in the future.
2019-06-09 06:20:02 -04:00
/* nothing to synchronize with in single-threaded builds */
static inline void thread_sync_release(void)
{
}

/* a single thread is always considered isolated */
static inline unsigned long thread_isolated(void)
{
	return 1;
}

/* no extra threads are ever started when threads are disabled */
static inline void setup_extra_threads(void *(*handler)(void *))
{
}

static inline void wait_for_threads_completion(void)
{
}

static inline void set_thread_cpu_affinity(void)
{
}

/* no pthread identifier is available without thread support */
static inline unsigned long long ha_get_pthread_id(unsigned int thr)
{
	return 0;
}
#else /* !USE_THREAD */
/********************** THREADS ENABLED ************************/
#include <import/plock.h>
CLEANUP: tree-wide: fix prototypes for functions taking no arguments. "f(void)" is the correct and preferred form for a function taking no argument, while some places use the older "f()". These were reported by clang's -Wmissing-prototypes, for example: src/cpuset.c:111:5: warning: no previous prototype for function 'ha_cpuset_size' [-Wmissing-prototypes] int ha_cpuset_size() include/haproxy/cpuset.h:42:5: note: this declaration is not a prototype; add 'void' to make it a prototype for a zero-parameter function int ha_cpuset_size(); ^ void This aggregate patch fixes this for the following functions: ha_backtrace_to_stderr(), ha_cpuset_size(), ha_panic(), ha_random64(), ha_thread_dump_all_to_trash(), get_exec_path(), check_config_validity(), mworker_child_nb(), mworker_cli_proxy_(create|stop)(), mworker_cleantasks(), mworker_cleanlisteners(), mworker_ext_launch_all(), mworker_reload(), mworker_(env|proc_list)_to_(proc_list|env)(), mworker_(un|)block_signals(), proxy_adjust_all_maxconn(), proxy_destroy_all_defaults(), get_tainted(), pool_total_(allocated|used)(), thread_isolate(_full|)(), thread(_sync|)_release(), thread_harmless_till_end(), thread_cpu_mask_forced(), dequeue_all_listeners(), next_timer_expiry(), wake_expired_tasks(), process_runnable_tasks(), init_acl(), init_buffer(), (de|)init_log_buffers(), (de|)init_pollers(), fork_poller(), pool_destroy_all(), pool_evict_from_local_caches(), pool_total_failures(), dump_pools_to_trash(), cfg_run_diagnostics(), tv_init_(process|thread)_date(), __signal_process_queue(), deinit_signals(), haproxy_unblock_signals()
2021-09-12 06:49:33 -04:00
/* thread management entry points (implemented in src/thread.c); all
 * zero-parameter functions use (void) so these are real prototypes */
void thread_harmless_till_end(void);
void thread_isolate(void);
void thread_isolate_full(void);
void thread_release(void);
void thread_sync_release(void);
void ha_spin_init(HA_SPINLOCK_T *l);
void ha_rwlock_init(HA_RWLOCK_T *l);
void setup_extra_threads(void *(*handler)(void *));
void wait_for_threads_completion(void);
void set_thread_cpu_affinity(void);
unsigned long long ha_get_pthread_id(unsigned int thr);
extern volatile unsigned long all_threads_mask;
extern volatile unsigned long threads_harmless_mask;
MEDIUM: threads: add a stronger thread_isolate_full() call The current principle of running under isolation was made to access sensitive data while being certain that no other thread was using them in parallel, without necessarily having to place locks everywhere. The main use case are "show sess" and "show fd" which run over long chains of pointers. The thread_isolate() call relies on the "harmless" bit that indicates for a given thread that it's not currently doing such sensitive things, which is advertised using thread_harmless_now() and which ends usings thread_harmless_end(), which also waits for possibly concurrent threads to complete their work if they took this opportunity for starting something tricky. As some system calls were notoriously slow (e.g. mmap()), a bunch of thread_harmless_now() / thread_harmless_end() were placed around them to let waiting threads do their work while such other threads were not able to modify memory contents. But this is not sufficient for performing memory modifications. One such example is the server deletion code. By modifying memory, it not only requires that other threads are not playing with it, but are not either in the process of touching it. The fact that a pool_alloc() or pool_free() on some structure may call thread_harmless_now() and let another thread start to release the same object's memory is not acceptable. This patch introduces the concept of "idle threads". Threads entering the polling loop are idle, as well as those that are waiting for all others to become idle via the new function thread_isolate_full(). Once thread_isolate_full() is granted, the thread is not idle anymore, and it is released using thread_release() just like regular isolation. Its users have to keep in mind that across this call nothing is granted as another thread might have performed shared memory modifications. But such users are extremely rare and are actually expecting this from their peers as well. 
Note that in case of backport, this patch depends on previous patch: MINOR: threads: make thread_release() not wait for other ones to complete
2021-08-04 05:44:17 -04:00
extern volatile unsigned long threads_idle_mask;
MEDIUM: threads: add thread_sync_release() to synchronize steps This function provides an alternate way to leave a critical section run under thread_isolate(). Currently, a thread may remain in thread_release() without having the time to notice that the rdv mask was released and taken again by another thread entering thread_isolate() (often the same that just released it). This is because threads wait in harmless mode in the loop, which is compatible with the conditions to enter thread_isolate(). It's not possible to make them wait with the harmless bit off or we cannot know when the job is finished for the next thread to start in thread_isolate(), and if we don't clear the rdv bit when going there, we create another race on the start point of thread_isolate(). This new synchronous variant of thread_release() makes use of an extra mask to indicate the threads that want to be synchronously released. In this case, they will be marked harmless before releasing their sync bit, and will wait for others to release their bit as well, guaranteeing that thread_isolate() cannot be started by any of them before they all left thread_sync_release(). This allows to construct synchronized blocks like this : thread_isolate() /* optionally do something alone here */ thread_sync_release() /* do something together here */ thread_isolate() /* optionally do something alone here */ thread_sync_release() And so on. This is particularly useful during initialization where several steps have to be respected and no thread must start a step before the previous one is completed by other threads. This one must not be placed after any call to thread_release() or it would risk to block an earlier call to thread_isolate() which the current thread managed to leave without waiting for others to complete, and end up here with the thread's harmless bit cleared, blocking others. This might be improved in the future.
2019-06-09 06:20:02 -04:00
extern volatile unsigned long threads_sync_mask;
extern volatile unsigned long threads_want_rdv_mask;
extern THREAD_LOCAL unsigned long tid_bit; /* The bit corresponding to the thread id */
extern THREAD_LOCAL unsigned int tid; /* The thread id */
MEDIUM: threads: add thread_sync_release() to synchronize steps This function provides an alternate way to leave a critical section run under thread_isolate(). Currently, a thread may remain in thread_release() without having the time to notice that the rdv mask was released and taken again by another thread entering thread_isolate() (often the same that just released it). This is because threads wait in harmless mode in the loop, which is compatible with the conditions to enter thread_isolate(). It's not possible to make them wait with the harmless bit off or we cannot know when the job is finished for the next thread to start in thread_isolate(), and if we don't clear the rdv bit when going there, we create another race on the start point of thread_isolate(). This new synchronous variant of thread_release() makes use of an extra mask to indicate the threads that want to be synchronously released. In this case, they will be marked harmless before releasing their sync bit, and will wait for others to release their bit as well, guaranteeing that thread_isolate() cannot be started by any of them before they all left thread_sync_release(). This allows to construct synchronized blocks like this : thread_isolate() /* optionally do something alone here */ thread_sync_release() /* do something together here */ thread_isolate() /* optionally do something alone here */ thread_sync_release() And so on. This is particularly useful during initialization where several steps have to be respected and no thread must start a step before the previous one is completed by other threads. This one must not be placed after any call to thread_release() or it would risk to block an earlier call to thread_isolate() which the current thread managed to leave without waiting for others to complete, and end up here with the thread's harmless bit cleared, blocking others. This might be improved in the future.
2019-06-09 06:20:02 -04:00
/* explanation for threads_want_rdv_mask, threads_harmless_mask, and
* threads_sync_mask :
* - threads_want_rdv_mask is a bit field indicating all threads that have
* requested a rendez-vous of other threads using thread_isolate().
* - threads_harmless_mask is a bit field indicating all threads that are
* currently harmless in that they promise not to access a shared resource.
MEDIUM: threads: add thread_sync_release() to synchronize steps This function provides an alternate way to leave a critical section run under thread_isolate(). Currently, a thread may remain in thread_release() without having the time to notice that the rdv mask was released and taken again by another thread entering thread_isolate() (often the same that just released it). This is because threads wait in harmless mode in the loop, which is compatible with the conditions to enter thread_isolate(). It's not possible to make them wait with the harmless bit off or we cannot know when the job is finished for the next thread to start in thread_isolate(), and if we don't clear the rdv bit when going there, we create another race on the start point of thread_isolate(). This new synchronous variant of thread_release() makes use of an extra mask to indicate the threads that want to be synchronously released. In this case, they will be marked harmless before releasing their sync bit, and will wait for others to release their bit as well, guaranteeing that thread_isolate() cannot be started by any of them before they all left thread_sync_release(). This allows to construct synchronized blocks like this : thread_isolate() /* optionally do something alone here */ thread_sync_release() /* do something together here */ thread_isolate() /* optionally do something alone here */ thread_sync_release() And so on. This is particularly useful during initialization where several steps have to be respected and no thread must start a step before the previous one is completed by other threads. This one must not be placed after any call to thread_release() or it would risk to block an earlier call to thread_isolate() which the current thread managed to leave without waiting for others to complete, and end up here with the thread's harmless bit cleared, blocking others. This might be improved in the future.
2019-06-09 06:20:02 -04:00
* - threads_sync_mask is a bit field indicating that a thread waiting for
* others to finish wants to leave synchronized with others and as such
* promises to do so as well using thread_sync_release().
*
* For a given thread, its bits in want_rdv and harmless can be translated like
* this :
*
* ----------+----------+----------------------------------------------------
* want_rdv | harmless | description
* ----------+----------+----------------------------------------------------
* 0 | 0 | thread not interested in RDV, possibly harmful
* 0 | 1 | thread not interested in RDV but harmless
* 1 | 1 | thread interested in RDV and waiting for its turn
* 1 | 0 | thread currently working isolated from others
* ----------+----------+----------------------------------------------------
MEDIUM: threads: add thread_sync_release() to synchronize steps This function provides an alternate way to leave a critical section run under thread_isolate(). Currently, a thread may remain in thread_release() without having the time to notice that the rdv mask was released and taken again by another thread entering thread_isolate() (often the same that just released it). This is because threads wait in harmless mode in the loop, which is compatible with the conditions to enter thread_isolate(). It's not possible to make them wait with the harmless bit off or we cannot know when the job is finished for the next thread to start in thread_isolate(), and if we don't clear the rdv bit when going there, we create another race on the start point of thread_isolate(). This new synchronous variant of thread_release() makes use of an extra mask to indicate the threads that want to be synchronously released. In this case, they will be marked harmless before releasing their sync bit, and will wait for others to release their bit as well, guaranteeing that thread_isolate() cannot be started by any of them before they all left thread_sync_release(). This allows to construct synchronized blocks like this : thread_isolate() /* optionally do something alone here */ thread_sync_release() /* do something together here */ thread_isolate() /* optionally do something alone here */ thread_sync_release() And so on. This is particularly useful during initialization where several steps have to be respected and no thread must start a step before the previous one is completed by other threads. This one must not be placed after any call to thread_release() or it would risk to block an earlier call to thread_isolate() which the current thread managed to leave without waiting for others to complete, and end up here with the thread's harmless bit cleared, blocking others. This might be improved in the future.
2019-06-09 06:20:02 -04:00
*
* thread_sync_mask only delays the leaving of threads_sync_release() to make
* sure that each thread's harmless bit is cleared before leaving the function.
*/
#define ha_sigmask(how, set, oldset) pthread_sigmask(how, set, oldset)
/* sets the thread ID and the TID bit for the current thread, and points the
 * thread-local info/context pointers at this thread's entries */
static inline void ha_set_tid(unsigned int data)
{
	tid     = data;
	tid_bit = 1UL << data;
	ti      = &ha_thread_info[data];
	th_ctx  = &ha_thread_ctx[data];
}
MEDIUM: threads: add a stronger thread_isolate_full() call The current principle of running under isolation was made to access sensitive data while being certain that no other thread was using them in parallel, without necessarily having to place locks everywhere. The main use case are "show sess" and "show fd" which run over long chains of pointers. The thread_isolate() call relies on the "harmless" bit that indicates for a given thread that it's not currently doing such sensitive things, which is advertised using thread_harmless_now() and which ends usings thread_harmless_end(), which also waits for possibly concurrent threads to complete their work if they took this opportunity for starting something tricky. As some system calls were notoriously slow (e.g. mmap()), a bunch of thread_harmless_now() / thread_harmless_end() were placed around them to let waiting threads do their work while such other threads were not able to modify memory contents. But this is not sufficient for performing memory modifications. One such example is the server deletion code. By modifying memory, it not only requires that other threads are not playing with it, but are not either in the process of touching it. The fact that a pool_alloc() or pool_free() on some structure may call thread_harmless_now() and let another thread start to release the same object's memory is not acceptable. This patch introduces the concept of "idle threads". Threads entering the polling loop are idle, as well as those that are waiting for all others to become idle via the new function thread_isolate_full(). Once thread_isolate_full() is granted, the thread is not idle anymore, and it is released using thread_release() just like regular isolation. Its users have to keep in mind that across this call nothing is granted as another thread might have performed shared memory modifications. But such users are extremely rare and are actually expecting this from their peers as well. 
Note that in case of backport, this patch depends on previous patch: MINOR: threads: make thread_release() not wait for other ones to complete
2021-08-04 05:44:17 -04:00
/* Marks the thread as idle, which means that not only it's not doing anything
 * dangerous, but in addition it has not started anything sensitive either.
 * This essentially means that the thread currently is in the poller, thus
 * outside of any execution block. Needs to be terminated using
 * thread_idle_end(). This is needed to release a concurrent call to
 * thread_isolate_full().
 */
static inline void thread_idle_now(void)
{
	HA_ATOMIC_OR(&threads_idle_mask, tid_bit);
}
/* Ends the idle period started by thread_idle_now(), i.e. the thread is
 * about to restart engaging in sensitive operations. This must not be done on
 * a thread marked harmless, as it could cause a deadlock between another
 * thread waiting for idle again and thread_harmless_end() in this thread.
 *
 * The right sequence is thus:
 *    thread_idle_now();
 *    thread_harmless_now();
 *    poll();
 *    thread_harmless_end();
 *    thread_idle_end();
 */
static inline void thread_idle_end(void)
{
	HA_ATOMIC_AND(&threads_idle_mask, ~tid_bit);
}
/* Marks the thread as harmless. Note: this must be true, i.e. the thread must
 * not be touching any unprotected shared resource during this period. Usually
 * this is called before poll(), but it may also be placed around very slow
 * calls (eg: some crypto operations). Needs to be terminated using
 * thread_harmless_end().
 */
static inline void thread_harmless_now(void)
{
	HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
}
/* Ends the harmless period started by thread_harmless_now(). Usually this is
 * placed after the poll() call. If it is discovered that a job was running and
 * is relying on the thread still being harmless, the thread waits for the
 * other one to finish.
 */
static inline void thread_harmless_end(void)
{
	while (1) {
		HA_ATOMIC_AND(&threads_harmless_mask, ~tid_bit);
		if (likely((threads_want_rdv_mask & all_threads_mask & ~tid_bit) == 0))
			break;
		/* another thread requested a rendez-vous: serve it while
		 * remaining harmless, then try to leave again */
		thread_harmless_till_end();
	}
}
/* Returns non-zero if the current thread is isolated, i.e. it has its
 * want_rdv bit set while its harmless bit is cleared.
 */
static inline unsigned long thread_isolated(void)
{
	return threads_want_rdv_mask & ~threads_harmless_mask & tid_bit;
}
/* Returns 1 if the cpu set is currently restricted for the process else 0.
* Currently only implemented for the Linux platform.
*/
CLEANUP: tree-wide: fix prototypes for functions taking no arguments. "f(void)" is the correct and preferred form for a function taking no argument, while some places use the older "f()". These were reported by clang's -Wmissing-prototypes, for example: src/cpuset.c:111:5: warning: no previous prototype for function 'ha_cpuset_size' [-Wmissing-prototypes] int ha_cpuset_size() include/haproxy/cpuset.h:42:5: note: this declaration is not a prototype; add 'void' to make it a prototype for a zero-parameter function int ha_cpuset_size(); ^ void This aggregate patch fixes this for the following functions: ha_backtrace_to_stderr(), ha_cpuset_size(), ha_panic(), ha_random64(), ha_thread_dump_all_to_trash(), get_exec_path(), check_config_validity(), mworker_child_nb(), mworker_cli_proxy_(create|stop)(), mworker_cleantasks(), mworker_cleanlisteners(), mworker_ext_launch_all(), mworker_reload(), mworker_(env|proc_list)_to_(proc_list|env)(), mworker_(un|)block_signals(), proxy_adjust_all_maxconn(), proxy_destroy_all_defaults(), get_tainted(), pool_total_(allocated|used)(), thread_isolate(_full|)(), thread(_sync|)_release(), thread_harmless_till_end(), thread_cpu_mask_forced(), dequeue_all_listeners(), next_timer_expiry(), wake_expired_tasks(), process_runnable_tasks(), init_acl(), init_buffer(), (de|)init_log_buffers(), (de|)init_pollers(), fork_poller(), pool_destroy_all(), pool_evict_from_local_caches(), pool_total_failures(), dump_pools_to_trash(), cfg_run_diagnostics(), tv_init_(process|thread)_date(), __signal_process_queue(), deinit_signals(), haproxy_unblock_signals()
2021-09-12 06:49:33 -04:00
int thread_cpu_mask_forced(void);
#if !defined(DEBUG_THREAD) && !defined(DEBUG_FULL)
/* Thread debugging is DISABLED, these are the regular locking functions */
/* Direct mappings onto the low-level pl_* primitives (presumably from the
 * plock library -- defined elsewhere, TODO confirm). The "lbl" argument is
 * unused here; it only matters for the instrumented variants below. Note
 * that the HA_*_TRY* macros invert the pl_try_* return value.
 */
#define HA_SPIN_INIT(l)            ({ (*l) = 0; })
#define HA_SPIN_DESTROY(l)         ({ (*l) = 0; })
#define HA_SPIN_LOCK(lbl, l)       pl_take_s(l)
#define HA_SPIN_TRYLOCK(lbl, l)    (!pl_try_s(l))
#define HA_SPIN_UNLOCK(lbl, l)     pl_drop_s(l)
#define HA_RWLOCK_INIT(l)          ({ (*l) = 0; })
#define HA_RWLOCK_DESTROY(l)       ({ (*l) = 0; })
#define HA_RWLOCK_WRLOCK(lbl,l)    pl_take_w(l)
#define HA_RWLOCK_TRYWRLOCK(lbl,l) (!pl_try_w(l))
#define HA_RWLOCK_WRUNLOCK(lbl,l)  pl_drop_w(l)
#define HA_RWLOCK_RDLOCK(lbl,l)    pl_take_r(l)
#define HA_RWLOCK_TRYRDLOCK(lbl,l) (!pl_try_r(l))
#define HA_RWLOCK_RDUNLOCK(lbl,l)  pl_drop_r(l)
/* rwlock upgrades via seek locks */
#define HA_RWLOCK_SKLOCK(lbl,l)    pl_take_s(l)      /* N --> S */
#define HA_RWLOCK_SKTOWR(lbl,l)    pl_stow(l)        /* S --> W */
#define HA_RWLOCK_WRTOSK(lbl,l)    pl_wtos(l)        /* W --> S */
#define HA_RWLOCK_SKTORD(lbl,l)    pl_stor(l)        /* S --> R */
#define HA_RWLOCK_WRTORD(lbl,l)    pl_wtor(l)        /* W --> R */
#define HA_RWLOCK_SKUNLOCK(lbl,l)  pl_drop_s(l)      /* S --> N */
#define HA_RWLOCK_TRYSKLOCK(lbl,l) (!pl_try_s(l))    /* N -?> S */
#define HA_RWLOCK_TRYRDTOSK(lbl,l) (!pl_try_rtos(l)) /* R -?> S */
#else /* !defined(DEBUG_THREAD) && !defined(DEBUG_FULL) */
/* Thread debugging is ENABLED, these are the instrumented functions */
/* The __SPIN_* / __RWLOCK_* macros below are the raw lock operations; the
 * HA_* macros wrap them with the __spin_*() / __ha_rwlock_*() instrumented
 * functions declared further down, which record the caller's function, file
 * and line for lock profiling/debugging.
 */
#define __SPIN_INIT(l)           ({ (*l) = 0; })
#define __SPIN_DESTROY(l)        ({ (*l) = 0; })
#define __SPIN_LOCK(l)           pl_take_s(l)
#define __SPIN_TRYLOCK(l)        (!pl_try_s(l))
#define __SPIN_UNLOCK(l)         pl_drop_s(l)
#define __RWLOCK_INIT(l)         ({ (*l) = 0; })
#define __RWLOCK_DESTROY(l)      ({ (*l) = 0; })
#define __RWLOCK_WRLOCK(l)       pl_take_w(l)
#define __RWLOCK_TRYWRLOCK(l)    (!pl_try_w(l))
#define __RWLOCK_WRUNLOCK(l)     pl_drop_w(l)
#define __RWLOCK_RDLOCK(l)       pl_take_r(l)
#define __RWLOCK_TRYRDLOCK(l)    (!pl_try_r(l))
#define __RWLOCK_RDUNLOCK(l)     pl_drop_r(l)
/* rwlock upgrades via seek locks */
#define __RWLOCK_SKLOCK(l)       pl_take_s(l)      /* N --> S */
#define __RWLOCK_SKTOWR(l)       pl_stow(l)        /* S --> W */
#define __RWLOCK_WRTOSK(l)       pl_wtos(l)        /* W --> S */
#define __RWLOCK_SKTORD(l)       pl_stor(l)        /* S --> R */
#define __RWLOCK_WRTORD(l)       pl_wtor(l)        /* W --> R */
#define __RWLOCK_SKUNLOCK(l)     pl_drop_s(l)      /* S --> N */
#define __RWLOCK_TRYSKLOCK(l)    (!pl_try_s(l))    /* N -?> S */
#define __RWLOCK_TRYRDTOSK(l)    (!pl_try_rtos(l)) /* R -?> S */
#define HA_SPIN_INIT(l)            __spin_init(l)
#define HA_SPIN_DESTROY(l)         __spin_destroy(l)
#define HA_SPIN_LOCK(lbl, l)       __spin_lock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_SPIN_TRYLOCK(lbl, l)    __spin_trylock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_SPIN_UNLOCK(lbl, l)     __spin_unlock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_INIT(l)          __ha_rwlock_init((l))
#define HA_RWLOCK_DESTROY(l)       __ha_rwlock_destroy((l))
#define HA_RWLOCK_WRLOCK(lbl,l)    __ha_rwlock_wrlock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_TRYWRLOCK(lbl,l) __ha_rwlock_trywrlock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_WRUNLOCK(lbl,l)  __ha_rwlock_wrunlock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_RDLOCK(lbl,l)    __ha_rwlock_rdlock(lbl, l)
#define HA_RWLOCK_TRYRDLOCK(lbl,l) __ha_rwlock_tryrdlock(lbl, l)
#define HA_RWLOCK_RDUNLOCK(lbl,l)  __ha_rwlock_rdunlock(lbl, l)
#define HA_RWLOCK_SKLOCK(lbl,l)    __ha_rwlock_sklock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_SKTOWR(lbl,l)    __ha_rwlock_sktowr(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_WRTOSK(lbl,l)    __ha_rwlock_wrtosk(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_SKTORD(lbl,l)    __ha_rwlock_sktord(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_WRTORD(lbl,l)    __ha_rwlock_wrtord(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_SKUNLOCK(lbl,l)  __ha_rwlock_skunlock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_TRYSKLOCK(lbl,l) __ha_rwlock_trysklock(lbl, l, __func__, __FILE__, __LINE__)
#define HA_RWLOCK_TRYRDTOSK(lbl,l) __ha_rwlock_tryrdtosk(lbl, l, __func__, __FILE__, __LINE__)
/* WARNING!!! if you update this enum, please also keep lock_label() up to date
 * below.
 */
/* Labels identifying each lock class for the debug/profiling statistics.
 * One entry per protected subsystem; LOCK_LABELS must remain last as it
 * serves as the count of labels.
 */
enum lock_label {
	TASK_RQ_LOCK,
	TASK_WQ_LOCK,
	LISTENER_LOCK,
	PROXY_LOCK,
	SERVER_LOCK,
	LBPRM_LOCK,
	SIGNALS_LOCK,
	STK_TABLE_LOCK,
	STK_SESS_LOCK,
	APPLETS_LOCK,
	PEER_LOCK,
	SHCTX_LOCK,
	SSL_LOCK,
	SSL_GEN_CERTS_LOCK,
	PATREF_LOCK,
	PATEXP_LOCK,
	VARS_LOCK,
	COMP_POOL_LOCK,
	LUA_LOCK,
	NOTIF_LOCK,
	SPOE_APPLET_LOCK,
	DNS_LOCK,
	PID_LIST_LOCK,
	EMAIL_ALERTS_LOCK,
	PIPES_LOCK,
	TLSKEYS_REF_LOCK,
	AUTH_LOCK,
	LOGSRV_LOCK,
	DICT_LOCK,
	PROTO_LOCK,
	QUEUE_LOCK,
	CKCH_LOCK,
	SNI_LOCK,
	SSL_SERVER_LOCK,
	SFT_LOCK, /* sink forward target */
	IDLE_CONNS_LOCK,
	QUIC_LOCK,
	OTHER_LOCK,
	/* WT: make sure never to use these ones outside of development,
	 * we need them for lock profiling!
	 */
	DEBUG1_LOCK,
	DEBUG2_LOCK,
	DEBUG3_LOCK,
	DEBUG4_LOCK,
	DEBUG5_LOCK,
	LOCK_LABELS /* not a real label: number of entries above */
};
/* Following functions are used to collect some stats about locks. We wrap
 * pthread functions to know how much time we wait in a lock. Each takes the
 * lock label plus the caller's function/file/line so contention can be
 * attributed to its call site.
 */
void show_lock_stats(void);
void __ha_rwlock_init(struct ha_rwlock *l);
void __ha_rwlock_destroy(struct ha_rwlock *l);
void __ha_rwlock_wrlock(enum lock_label lbl, struct ha_rwlock *l,
                        const char *func, const char *file, int line);
int __ha_rwlock_trywrlock(enum lock_label lbl, struct ha_rwlock *l,
                          const char *func, const char *file, int line);
void __ha_rwlock_wrunlock(enum lock_label lbl,struct ha_rwlock *l,
                          const char *func, const char *file, int line);
void __ha_rwlock_rdlock(enum lock_label lbl,struct ha_rwlock *l);
int __ha_rwlock_tryrdlock(enum lock_label lbl,struct ha_rwlock *l);
void __ha_rwlock_rdunlock(enum lock_label lbl,struct ha_rwlock *l);
void __ha_rwlock_wrtord(enum lock_label lbl, struct ha_rwlock *l,
                        const char *func, const char *file, int line);
void __ha_rwlock_wrtosk(enum lock_label lbl, struct ha_rwlock *l,
                        const char *func, const char *file, int line);
void __ha_rwlock_sklock(enum lock_label lbl, struct ha_rwlock *l,
                        const char *func, const char *file, int line);
void __ha_rwlock_sktowr(enum lock_label lbl, struct ha_rwlock *l,
                        const char *func, const char *file, int line);
void __ha_rwlock_sktord(enum lock_label lbl, struct ha_rwlock *l,
                        const char *func, const char *file, int line);
void __ha_rwlock_skunlock(enum lock_label lbl,struct ha_rwlock *l,
                          const char *func, const char *file, int line);
int __ha_rwlock_trysklock(enum lock_label lbl, struct ha_rwlock *l,
                          const char *func, const char *file, int line);
int __ha_rwlock_tryrdtosk(enum lock_label lbl, struct ha_rwlock *l,
                          const char *func, const char *file, int line);
void __spin_init(struct ha_spinlock *l);
void __spin_destroy(struct ha_spinlock *l);
void __spin_lock(enum lock_label lbl, struct ha_spinlock *l,
                 const char *func, const char *file, int line);
int __spin_trylock(enum lock_label lbl, struct ha_spinlock *l,
                   const char *func, const char *file, int line);
void __spin_unlock(enum lock_label lbl, struct ha_spinlock *l,
                   const char *func, const char *file, int line);
#endif /* DEBUG_THREAD */
#endif /* USE_THREAD */
/* Returns <mask> when it is non-zero, otherwise falls back to
 * all_threads_mask.
 */
static inline unsigned long thread_mask(unsigned long mask)
{
	if (mask)
		return mask;
	return all_threads_mask;
}
#endif /* _HAPROXY_THREAD_H */