2017-10-12 10:09:09 -04:00
|
|
|
/*
|
2020-05-28 09:29:19 -04:00
|
|
|
* include/haproxy/thread.h
|
|
|
|
|
* definitions, macros and inline functions used by threads.
|
2017-10-12 10:09:09 -04:00
|
|
|
*
|
2020-05-28 09:29:19 -04:00
|
|
|
* Copyright (C) 2017 Christopher Faulet - cfaulet@haproxy.com
|
|
|
|
|
* Copyright (C) 2020 Willy Tarreau - w@1wt.eu
|
2017-10-12 10:09:09 -04:00
|
|
|
*
|
|
|
|
|
* This library is free software; you can redistribute it and/or
|
|
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
|
* License as published by the Free Software Foundation, version 2.1
|
|
|
|
|
* exclusively.
|
|
|
|
|
*
|
|
|
|
|
* This library is distributed in the hope that it will be useful,
|
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
|
* Lesser General Public License for more details.
|
|
|
|
|
*
|
|
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
|
|
|
|
* License along with this library; if not, write to the Free Software
|
|
|
|
|
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
|
|
|
|
*/
|
|
|
|
|
|
2020-05-28 09:29:19 -04:00
|
|
|
#ifndef _HAPROXY_THREAD_H
|
|
|
|
|
#define _HAPROXY_THREAD_H
|
2017-10-12 10:09:09 -04:00
|
|
|
|
2019-05-22 02:43:34 -04:00
|
|
|
#include <signal.h>
|
2019-05-17 10:33:13 -04:00
|
|
|
#include <unistd.h>
|
|
|
|
|
#ifdef _POSIX_PRIORITY_SCHEDULING
|
|
|
|
|
#include <sched.h>
|
|
|
|
|
#endif
|
2020-05-28 09:29:19 -04:00
|
|
|
|
2020-05-27 06:58:42 -04:00
|
|
|
#include <haproxy/api.h>
|
2020-05-28 09:29:19 -04:00
|
|
|
#include <haproxy/thread-t.h>
|
2020-06-29 03:57:23 -04:00
|
|
|
#include <haproxy/tinfo.h>
|
2019-05-17 10:33:13 -04:00
|
|
|
|
2017-10-12 10:09:09 -04:00
|
|
|
|
2020-05-28 09:29:19 -04:00
|
|
|
/* Note: this file mainly contains 5 sections:
|
|
|
|
|
* - a small common part, which also corresponds to the common API
|
|
|
|
|
* - one used solely when USE_THREAD is *not* set
|
|
|
|
|
* - one used solely when USE_THREAD is set
|
|
|
|
|
* - one used solely when USE_THREAD is set WITHOUT debugging
|
|
|
|
|
* - one used solely when USE_THREAD is set WITH debugging
|
|
|
|
|
*
|
2018-07-30 04:34:35 -04:00
|
|
|
*/
|
|
|
|
|
|
2020-05-28 09:29:19 -04:00
|
|
|
|
|
|
|
|
/* Generic exports */

/* parses a thread-count argument <arg>, reporting errors via <err> */
int parse_nbthread(const char *arg, char **err);

/* returns the default number of threads; declared with (void) so the
 * compiler verifies that callers pass no argument (an empty parameter
 * list would declare no prototype at all).
 */
int thread_get_default_count(void);

/* presumably the number of CPUs usable when the process started — confirm
 * against the definition site.
 */
extern int thread_cpus_enabled_at_boot;
|
2019-05-22 01:06:44 -04:00
|
|
|
|
|
|
|
|
|
2017-10-12 10:09:09 -04:00
|
|
|
#ifndef USE_THREAD
|
|
|
|
|
|
2020-05-28 09:29:19 -04:00
|
|
|
/********************** THREADS DISABLED ************************/
|
2018-08-01 13:12:20 -04:00
|
|
|
|
|
|
|
|
/* Only way found to replace variables with constants that are optimized away
|
|
|
|
|
* at build time.
|
|
|
|
|
*/
|
|
|
|
|
enum { all_threads_mask = 1UL };
|
2019-05-22 01:48:18 -04:00
|
|
|
enum { threads_harmless_mask = 0 };
|
2019-06-09 06:20:02 -04:00
|
|
|
enum { threads_sync_mask = 0 };
|
2020-05-28 09:29:19 -04:00
|
|
|
enum { threads_want_rdv_mask = 0 };
|
2018-08-01 13:12:20 -04:00
|
|
|
enum { tid_bit = 1UL };
|
|
|
|
|
enum { tid = 0 };
|
2018-01-20 12:19:22 -05:00
|
|
|
|
2020-05-28 09:29:19 -04:00
|
|
|
#define HA_SPIN_INIT(l) do { /* do nothing */ } while(0)
|
|
|
|
|
#define HA_SPIN_DESTROY(l) do { /* do nothing */ } while(0)
|
|
|
|
|
#define HA_SPIN_LOCK(lbl, l) do { /* do nothing */ } while(0)
|
|
|
|
|
#define HA_SPIN_TRYLOCK(lbl, l) ({ 0; })
|
|
|
|
|
#define HA_SPIN_UNLOCK(lbl, l) do { /* do nothing */ } while(0)
|
2019-05-20 12:57:53 -04:00
|
|
|
|
2020-05-28 09:29:19 -04:00
|
|
|
#define HA_RWLOCK_INIT(l) do { /* do nothing */ } while(0)
|
|
|
|
|
#define HA_RWLOCK_DESTROY(l) do { /* do nothing */ } while(0)
|
|
|
|
|
#define HA_RWLOCK_WRLOCK(lbl, l) do { /* do nothing */ } while(0)
|
2017-11-07 04:42:54 -05:00
|
|
|
#define HA_RWLOCK_TRYWRLOCK(lbl, l) ({ 0; })
|
2020-05-28 09:29:19 -04:00
|
|
|
#define HA_RWLOCK_WRUNLOCK(lbl, l) do { /* do nothing */ } while(0)
|
|
|
|
|
#define HA_RWLOCK_RDLOCK(lbl, l) do { /* do nothing */ } while(0)
|
2017-11-07 04:42:54 -05:00
|
|
|
#define HA_RWLOCK_TRYRDLOCK(lbl, l) ({ 0; })
|
2020-05-28 09:29:19 -04:00
|
|
|
#define HA_RWLOCK_RDUNLOCK(lbl, l) do { /* do nothing */ } while(0)
|
2017-10-12 10:09:09 -04:00
|
|
|
|
2018-06-07 05:23:40 -04:00
|
|
|
#define ha_sigmask(how, set, oldset) sigprocmask(how, set, oldset)
|
|
|
|
|
|
2018-08-01 13:12:20 -04:00
|
|
|
/* sets the current thread's id; without USE_THREAD the only meaningful id
 * is 0. Only the thread-local info pointer is updated here.
 * NOTE(review): the parameter name shadows the "tid" enum constant above.
 */
static inline void ha_set_tid(unsigned int tid)
{
	ti = &ha_thread_info[tid];
}
|
2018-06-07 05:23:40 -04:00
|
|
|
|
2020-05-01 06:26:03 -04:00
|
|
|
/* Returns the POSIX pthread ID of thread <thr> for post-mortem analysis.
 * Without USE_THREAD there is no pthread to report, so zero is returned.
 */
static inline unsigned long long ha_get_pthread_id(unsigned int thr)
{
	return 0ULL;
}
|
|
|
|
|
|
2019-05-17 10:33:13 -04:00
|
|
|
/* Briefly yields the CPU during a busy wait. Without threads this can still
 * help other processes; it is a no-op unless POSIX priority scheduling is
 * available.
 */
static inline void ha_thread_relax(void)
{
#if _POSIX_PRIORITY_SCHEDULING
	sched_yield();
#endif
}
|
|
|
|
|
|
2019-05-22 02:43:34 -04:00
|
|
|
/* send signal <sig> to thread <thr>; with a single thread the target can
 * only be ourselves, so the signal is simply raised in the current thread
 * and <thr> is ignored.
 */
static inline void ha_tkill(unsigned int thr, int sig)
{
	raise(sig);
}
|
|
|
|
|
|
|
|
|
|
/* send signal <sig> to all threads; with a single thread this degenerates
 * to raising the signal in the current thread.
 */
static inline void ha_tkillall(int sig)
{
	raise(sig);
}
|
|
|
|
|
|
2018-08-02 04:16:17 -04:00
|
|
|
/* with threads disabled there is never any shared resource to protect, so
 * this is a no-op kept for API compatibility with the threaded version.
 */
static inline void thread_harmless_now()
{
}
|
|
|
|
|
|
|
|
|
|
/* no-op counterpart of thread_harmless_now() when threads are disabled */
static inline void thread_harmless_end()
{
}
|
|
|
|
|
|
|
|
|
|
/* with a single thread we are always isolated; nothing to do */
static inline void thread_isolate()
{
}
|
|
|
|
|
|
|
|
|
|
/* no-op counterpart of thread_isolate() when threads are disabled */
static inline void thread_release()
{
}
|
|
|
|
|
|
2019-06-09 06:20:02 -04:00
|
|
|
/* nothing to synchronize with when threads are disabled */
static inline void thread_sync_release()
{
}
|
|
|
|
|
|
2018-08-02 04:16:17 -04:00
|
|
|
/* the single thread is by definition always isolated, hence the constant 1 */
static inline unsigned long thread_isolated()
{
	return 1;
}
|
|
|
|
|
|
2020-05-28 09:29:19 -04:00
|
|
|
#else /* !USE_THREAD */
|
|
|
|
|
|
|
|
|
|
/********************** THREADS ENABLED ************************/
|
2017-10-12 10:09:09 -04:00
|
|
|
|
|
|
|
|
#include <stdio.h>
|
|
|
|
|
#include <stdlib.h>
|
|
|
|
|
#include <string.h>
|
|
|
|
|
#include <import/plock.h>
|
|
|
|
|
|
2018-08-02 04:16:17 -04:00
|
|
|
void thread_harmless_till_end();
|
|
|
|
|
void thread_isolate();
|
|
|
|
|
void thread_release();
|
2019-06-09 06:20:02 -04:00
|
|
|
void thread_sync_release();
|
2019-05-22 02:43:34 -04:00
|
|
|
void ha_tkill(unsigned int thr, int sig);
|
|
|
|
|
void ha_tkillall(int sig);
|
2020-05-28 09:29:19 -04:00
|
|
|
void ha_spin_init(HA_SPINLOCK_T *l);
|
|
|
|
|
void ha_rwlock_init(HA_RWLOCK_T *l);
|
2017-10-19 05:59:15 -04:00
|
|
|
|
2018-07-20 03:31:53 -04:00
|
|
|
extern volatile unsigned long all_threads_mask;
|
2018-08-02 04:16:17 -04:00
|
|
|
extern volatile unsigned long threads_harmless_mask;
|
2019-06-09 06:20:02 -04:00
|
|
|
extern volatile unsigned long threads_sync_mask;
|
2020-05-28 09:29:19 -04:00
|
|
|
extern volatile unsigned long threads_want_rdv_mask;
|
|
|
|
|
extern THREAD_LOCAL unsigned long tid_bit; /* The bit corresponding to the thread id */
|
|
|
|
|
extern THREAD_LOCAL unsigned int tid; /* The thread id */
|
2018-08-02 04:16:17 -04:00
|
|
|
|
2019-06-09 06:20:02 -04:00
|
|
|
/* explanation for threads_want_rdv_mask, threads_harmless_mask, and
|
|
|
|
|
* threads_sync_mask :
|
2018-08-02 04:16:17 -04:00
|
|
|
* - threads_want_rdv_mask is a bit field indicating all threads that have
|
|
|
|
|
* requested a rendez-vous of other threads using thread_isolate().
|
|
|
|
|
* - threads_harmless_mask is a bit field indicating all threads that are
|
|
|
|
|
* currently harmless in that they promise not to access a shared resource.
|
2019-06-09 06:20:02 -04:00
|
|
|
* - threads_sync_mask is a bit field indicating that a thread waiting for
|
|
|
|
|
* others to finish wants to leave synchronized with others and as such
|
|
|
|
|
* promises to do so as well using thread_sync_release().
|
2018-08-02 04:16:17 -04:00
|
|
|
*
|
|
|
|
|
* For a given thread, its bits in want_rdv and harmless can be translated like
|
|
|
|
|
* this :
|
|
|
|
|
*
|
|
|
|
|
* ----------+----------+----------------------------------------------------
|
|
|
|
|
* want_rdv | harmless | description
|
|
|
|
|
* ----------+----------+----------------------------------------------------
|
|
|
|
|
* 0 | 0 | thread not interested in RDV, possibly harmful
|
|
|
|
|
* 0 | 1 | thread not interested in RDV but harmless
|
|
|
|
|
* 1 | 1 | thread interested in RDV and waiting for its turn
|
|
|
|
|
* 1 | 0 | thread currently working isolated from others
|
|
|
|
|
* ----------+----------+----------------------------------------------------
|
2019-06-09 06:20:02 -04:00
|
|
|
*
|
|
|
|
|
* thread_sync_mask only delays the leaving of threads_sync_release() to make
|
|
|
|
|
* sure that each thread's harmless bit is cleared before leaving the function.
|
2018-08-02 04:16:17 -04:00
|
|
|
*/
|
2018-04-25 10:58:25 -04:00
|
|
|
|
2018-06-07 05:23:40 -04:00
|
|
|
#define ha_sigmask(how, set, oldset) pthread_sigmask(how, set, oldset)
|
|
|
|
|
|
2018-08-01 13:12:20 -04:00
|
|
|
/* sets the thread ID and the TID bit for the current thread, and updates the
 * thread-local pointer to this thread's info structure. Must be called once
 * per thread before any of the masks above are used.
 */
static inline void ha_set_tid(unsigned int data)
{
	tid = data;
	tid_bit = (1UL << tid);
	ti = &ha_thread_info[tid];
}
|
|
|
|
|
|
MINOR: threads: export the POSIX thread ID in panic dumps
It is very difficult to map a panic dump against a gdb thread dump
because the thread numbers do not match. However gdb provides the
pthread ID but this one is supposed to be opaque and not to be cast
to a scalar.
This patch provides a fnuction, ha_get_pthread_id() which retrieves
the pthread ID of the indicated thread and casts it to an unsigned
long long so as to lose the least possible amount of information from
it. This is done cleanly using a union to maintain alignment so as
long as these IDs are stored on 1..8 bytes they will be properly
reported. This ID is now presented in the panic dumps so it now
becomes possible to map these threads. When threads are disabled,
zero is returned. For example, this is a panic dump:
Thread 1 is about to kill the process.
*>Thread 1 : id=0x7fe92b825180 act=0 glob=0 wq=1 rq=0 tl=0 tlsz=0 rqsz=0
stuck=1 prof=0 harmless=0 wantrdv=0
cpu_ns: poll=5119122 now=2009446995 diff=2004327873
curr_task=0xc99bf0 (task) calls=4 last=0
fct=0x592440(task_run_applet) ctx=0xca9c50(<CLI>)
strm=0xc996a0 src=unix fe=GLOBAL be=GLOBAL dst=<CLI>
rqf=848202 rqa=0 rpf=80048202 rpa=0 sif=EST,200008 sib=EST,204018
af=(nil),0 csf=0xc9ba40,8200
ab=0xca9c50,4 csb=(nil),0
cof=0xbf0e50,1300:PASS(0xc9cee0)/RAW((nil))/unix_stream(20)
cob=(nil),0:NONE((nil))/NONE((nil))/NONE(0)
call trace(20):
| 0x59e4cf [48 83 c4 10 5b 5d 41 5c]: wdt_handler+0xff/0x10c
| 0x7fe92c170690 [48 c7 c0 0f 00 00 00 0f]: libpthread:+0x13690
| 0x7ffce29519d9 [48 c1 e2 20 48 09 d0 48]: linux-vdso:+0x9d9
| 0x7ffce2951d54 [eb d9 f3 90 e9 1c ff ff]: linux-vdso:__vdso_gettimeofday+0x104/0x133
| 0x57b484 [48 89 e6 48 8d 7c 24 10]: main+0x157114
| 0x50ee6a [85 c0 75 76 48 8b 55 38]: main+0xeaafa
| 0x50f69c [48 63 54 24 20 85 c0 0f]: main+0xeb32c
| 0x59252c [48 c7 c6 d8 ff ff ff 44]: task_run_applet+0xec/0x88c
Thread 2 : id=0x7fe92b6e6700 act=0 glob=0 wq=0 rq=0 tl=0 tlsz=0 rqsz=0
stuck=0 prof=0 harmless=1 wantrdv=0
cpu_ns: poll=786738 now=1086955 diff=300217
curr_task=0
Thread 3 : id=0x7fe92aee5700 act=0 glob=0 wq=0 rq=0 tl=0 tlsz=0 rqsz=0
stuck=0 prof=0 harmless=1 wantrdv=0
cpu_ns: poll=828056 now=1129738 diff=301682
curr_task=0
Thread 4 : id=0x7fe92a6e4700 act=0 glob=0 wq=0 rq=0 tl=0 tlsz=0 rqsz=0
stuck=0 prof=0 harmless=1 wantrdv=0
cpu_ns: poll=818900 now=1153551 diff=334651
curr_task=0
And this is the gdb output:
(gdb) info thr
Id Target Id Frame
* 1 Thread 0x7fe92b825180 (LWP 15234) 0x00007fe92ba81d6b in raise () from /lib64/libc.so.6
2 Thread 0x7fe92b6e6700 (LWP 15235) 0x00007fe92bb56a56 in epoll_wait () from /lib64/libc.so.6
3 Thread 0x7fe92a6e4700 (LWP 15237) 0x00007fe92bb56a56 in epoll_wait () from /lib64/libc.so.6
4 Thread 0x7fe92aee5700 (LWP 15236) 0x00007fe92bb56a56 in epoll_wait () from /lib64/libc.so.6
We can clearly see that while threads 1 and 2 are the same, gdb's
threads 3 and 4 respectively are haproxy's threads 4 and 3.
This may be backported to 2.0 as it removes some confusion in github issues.
2020-05-01 05:28:49 -04:00
|
|
|
/* Retrieves the opaque pthread_t of thread <thr> cast to an unsigned long long
|
|
|
|
|
* since POSIX took great care of not specifying its representation, making it
|
|
|
|
|
* hard to export for post-mortem analysis. For this reason we copy it into a
|
|
|
|
|
* union and will use the smallest scalar type at least as large as its size,
|
|
|
|
|
* which will keep endianness and alignment for all regular sizes. As a last
|
|
|
|
|
* resort we end up with a long long ligned to the first bytes in memory, which
|
|
|
|
|
* will be endian-dependent if pthread_t is larger than a long long (not seen
|
|
|
|
|
* yet).
|
|
|
|
|
*/
|
|
|
|
|
static inline unsigned long long ha_get_pthread_id(unsigned int thr)
|
|
|
|
|
{
|
|
|
|
|
union {
|
|
|
|
|
pthread_t t;
|
|
|
|
|
unsigned long long ll;
|
|
|
|
|
unsigned int i;
|
|
|
|
|
unsigned short s;
|
|
|
|
|
unsigned char c;
|
|
|
|
|
} u;
|
|
|
|
|
|
|
|
|
|
memset(&u, 0, sizeof(u));
|
|
|
|
|
u.t = ha_thread_info[thr].pthread;
|
|
|
|
|
|
|
|
|
|
if (sizeof(u.t) <= sizeof(u.c))
|
|
|
|
|
return u.c;
|
|
|
|
|
else if (sizeof(u.t) <= sizeof(u.s))
|
|
|
|
|
return u.s;
|
|
|
|
|
else if (sizeof(u.t) <= sizeof(u.i))
|
|
|
|
|
return u.i;
|
|
|
|
|
return u.ll;
|
|
|
|
|
}
|
|
|
|
|
|
2019-05-17 10:33:13 -04:00
|
|
|
/* Briefly yields the CPU while spinning on a lock or waiting for another
 * thread. Uses sched_yield() when POSIX priority scheduling is available,
 * otherwise falls back to a CPU relax instruction from plock.
 */
static inline void ha_thread_relax(void)
{
#if _POSIX_PRIORITY_SCHEDULING
	sched_yield();
#else
	pl_cpu_relax();
#endif
}
|
|
|
|
|
|
2018-08-02 04:16:17 -04:00
|
|
|
/* Marks the thread as harmless. Note: this must be true, i.e. the thread must
 * not be touching any unprotected shared resource during this period. Usually
 * this is called before poll(), but it may also be placed around very slow
 * calls (eg: some crypto operations). Needs to be terminated using
 * thread_harmless_end().
 */
static inline void thread_harmless_now()
{
	/* atomically advertise ourselves in the global harmless mask */
	HA_ATOMIC_OR(&threads_harmless_mask, tid_bit);
}
|
|
|
|
|
|
|
|
|
|
/* Ends the harmless period started by thread_harmless_now(). Usually this is
 * placed after the poll() call. If it is discovered that a job was running and
 * is relying on the thread still being harmless, the thread waits for the
 * other one to finish.
 */
static inline void thread_harmless_end()
{
	while (1) {
		/* first leave the harmless state... */
		HA_ATOMIC_AND(&threads_harmless_mask, ~tid_bit);
		if (likely((threads_want_rdv_mask & all_threads_mask) == 0))
			break;
		/* ...but a rendez-vous is in progress: become harmless again
		 * and wait for it to complete before retrying to leave.
		 */
		thread_harmless_till_end();
	}
}
|
|
|
|
|
|
|
|
|
|
/* an isolated thread has harmless cleared and want_rdv set; returns this
 * thread's bit when isolated, zero otherwise (see the state table above).
 */
static inline unsigned long thread_isolated()
{
	return threads_want_rdv_mask & ~threads_harmless_mask & tid_bit;
}
|
|
|
|
|
|
2018-06-07 05:23:40 -04:00
|
|
|
|
2020-05-28 09:29:19 -04:00
|
|
|
#if !defined(DEBUG_THREAD) && !defined(DEBUG_FULL)
|
|
|
|
|
|
|
|
|
|
/* Thread debugging is DISABLED, these are the regular locking functions */
|
|
|
|
|
|
|
|
|
|
#define HA_SPIN_INIT(l) ({ (*l) = 0; })
|
|
|
|
|
#define HA_SPIN_DESTROY(l) ({ (*l) = 0; })
|
|
|
|
|
#define HA_SPIN_LOCK(lbl, l) pl_take_s(l)
|
2020-06-12 05:42:25 -04:00
|
|
|
#define HA_SPIN_TRYLOCK(lbl, l) (!pl_try_s(l))
|
2020-05-28 09:29:19 -04:00
|
|
|
#define HA_SPIN_UNLOCK(lbl, l) pl_drop_s(l)
|
|
|
|
|
|
|
|
|
|
#define HA_RWLOCK_INIT(l) ({ (*l) = 0; })
|
|
|
|
|
#define HA_RWLOCK_DESTROY(l) ({ (*l) = 0; })
|
|
|
|
|
#define HA_RWLOCK_WRLOCK(lbl,l) pl_take_w(l)
|
2020-06-12 05:42:25 -04:00
|
|
|
#define HA_RWLOCK_TRYWRLOCK(lbl,l) (!pl_try_w(l))
|
2020-05-28 09:29:19 -04:00
|
|
|
#define HA_RWLOCK_WRUNLOCK(lbl,l) pl_drop_w(l)
|
|
|
|
|
#define HA_RWLOCK_RDLOCK(lbl,l) pl_take_r(l)
|
2020-06-12 05:42:25 -04:00
|
|
|
#define HA_RWLOCK_TRYRDLOCK(lbl,l) (!pl_try_r(l))
|
2020-05-28 09:29:19 -04:00
|
|
|
#define HA_RWLOCK_RDUNLOCK(lbl,l) pl_drop_r(l)
|
|
|
|
|
|
|
|
|
|
#else /* !defined(DEBUG_THREAD) && !defined(DEBUG_FULL) */
|
|
|
|
|
|
|
|
|
|
/* Thread debugging is ENABLED, these are the instrumented functions */
|
|
|
|
|
|
|
|
|
|
#define __SPIN_INIT(l) ({ (*l) = 0; })
|
|
|
|
|
#define __SPIN_DESTROY(l) ({ (*l) = 0; })
|
|
|
|
|
#define __SPIN_LOCK(l) pl_take_s(l)
|
2020-06-12 05:42:25 -04:00
|
|
|
#define __SPIN_TRYLOCK(l) (!pl_try_s(l))
|
2020-05-28 09:29:19 -04:00
|
|
|
#define __SPIN_UNLOCK(l) pl_drop_s(l)
|
|
|
|
|
|
|
|
|
|
#define __RWLOCK_INIT(l) ({ (*l) = 0; })
|
|
|
|
|
#define __RWLOCK_DESTROY(l) ({ (*l) = 0; })
|
|
|
|
|
#define __RWLOCK_WRLOCK(l) pl_take_w(l)
|
2020-06-12 05:42:25 -04:00
|
|
|
#define __RWLOCK_TRYWRLOCK(l) (!pl_try_w(l))
|
2020-05-28 09:29:19 -04:00
|
|
|
#define __RWLOCK_WRUNLOCK(l) pl_drop_w(l)
|
|
|
|
|
#define __RWLOCK_RDLOCK(l) pl_take_r(l)
|
2020-06-12 05:42:25 -04:00
|
|
|
#define __RWLOCK_TRYRDLOCK(l) (!pl_try_r(l))
|
2020-05-28 09:29:19 -04:00
|
|
|
#define __RWLOCK_RDUNLOCK(l) pl_drop_r(l)
|
|
|
|
|
|
|
|
|
|
#define HA_SPIN_INIT(l) __spin_init(l)
|
|
|
|
|
#define HA_SPIN_DESTROY(l) __spin_destroy(l)
|
|
|
|
|
|
|
|
|
|
#define HA_SPIN_LOCK(lbl, l) __spin_lock(lbl, l, __func__, __FILE__, __LINE__)
|
|
|
|
|
#define HA_SPIN_TRYLOCK(lbl, l) __spin_trylock(lbl, l, __func__, __FILE__, __LINE__)
|
|
|
|
|
#define HA_SPIN_UNLOCK(lbl, l) __spin_unlock(lbl, l, __func__, __FILE__, __LINE__)
|
|
|
|
|
|
|
|
|
|
#define HA_RWLOCK_INIT(l) __ha_rwlock_init((l))
|
|
|
|
|
#define HA_RWLOCK_DESTROY(l) __ha_rwlock_destroy((l))
|
|
|
|
|
#define HA_RWLOCK_WRLOCK(lbl,l) __ha_rwlock_wrlock(lbl, l, __func__, __FILE__, __LINE__)
|
|
|
|
|
#define HA_RWLOCK_TRYWRLOCK(lbl,l) __ha_rwlock_trywrlock(lbl, l, __func__, __FILE__, __LINE__)
|
|
|
|
|
#define HA_RWLOCK_WRUNLOCK(lbl,l) __ha_rwlock_wrunlock(lbl, l, __func__, __FILE__, __LINE__)
|
|
|
|
|
#define HA_RWLOCK_RDLOCK(lbl,l) __ha_rwlock_rdlock(lbl, l)
|
|
|
|
|
#define HA_RWLOCK_TRYRDLOCK(lbl,l) __ha_rwlock_tryrdlock(lbl, l)
|
|
|
|
|
#define HA_RWLOCK_RDUNLOCK(lbl,l) __ha_rwlock_rdunlock(lbl, l)
|
2017-10-12 10:09:09 -04:00
|
|
|
|
2020-05-28 09:29:19 -04:00
|
|
|
/* WARNING!!! if you update this enum, please also keep lock_label() up to date
 * below.
 *
 * Each label names the subsystem a lock belongs to and is used to index
 * lock_stats[] when lock debugging is enabled. LOCK_LABELS is not a real
 * label, it only counts the entries.
 */
enum lock_label {
	TASK_RQ_LOCK,
	TASK_WQ_LOCK,
	POOL_LOCK,
	LISTENER_LOCK,
	PROXY_LOCK,
	SERVER_LOCK,
	LBPRM_LOCK,
	SIGNALS_LOCK,
	STK_TABLE_LOCK,
	STK_SESS_LOCK,
	APPLETS_LOCK,
	PEER_LOCK,
	STRMS_LOCK,
	SSL_LOCK,
	SSL_GEN_CERTS_LOCK,
	PATREF_LOCK,
	PATEXP_LOCK,
	VARS_LOCK,
	COMP_POOL_LOCK,
	LUA_LOCK,
	NOTIF_LOCK,
	SPOE_APPLET_LOCK,
	DNS_LOCK,
	PID_LIST_LOCK,
	EMAIL_ALERTS_LOCK,
	PIPES_LOCK,
	TLSKEYS_REF_LOCK,
	AUTH_LOCK,
	LOGSRV_LOCK,
	DICT_LOCK,
	PROTO_LOCK,
	CKCH_LOCK,
	SNI_LOCK,
	SFT_LOCK, /* sink forward target */
	OTHER_LOCK,
	LOCK_LABELS
};
|
|
|
|
|
|
|
|
|
|
extern struct lock_stat lock_stats[LOCK_LABELS];
|
|
|
|
|
|
2018-01-30 05:04:29 -05:00
|
|
|
/* returns a human-readable name for lock label <label>, used by
 * show_lock_stats(). Must be kept strictly in sync with enum lock_label
 * above; any unhandled value indicates an internal bug and aborts.
 */
static inline const char *lock_label(enum lock_label label)
{
	switch (label) {
	case TASK_RQ_LOCK:         return "TASK_RQ";
	case TASK_WQ_LOCK:         return "TASK_WQ";
	case POOL_LOCK:            return "POOL";
	case LISTENER_LOCK:        return "LISTENER";
	case PROXY_LOCK:           return "PROXY";
	case SERVER_LOCK:          return "SERVER";
	case LBPRM_LOCK:           return "LBPRM";
	case SIGNALS_LOCK:         return "SIGNALS";
	case STK_TABLE_LOCK:       return "STK_TABLE";
	case STK_SESS_LOCK:        return "STK_SESS";
	case APPLETS_LOCK:         return "APPLETS";
	case PEER_LOCK:            return "PEER";
	case STRMS_LOCK:           return "STRMS";
	case SSL_LOCK:             return "SSL";
	case SSL_GEN_CERTS_LOCK:   return "SSL_GEN_CERTS";
	case PATREF_LOCK:          return "PATREF";
	case PATEXP_LOCK:          return "PATEXP";
	case VARS_LOCK:            return "VARS";
	case COMP_POOL_LOCK:       return "COMP_POOL";
	case LUA_LOCK:             return "LUA";
	case NOTIF_LOCK:           return "NOTIF";
	case SPOE_APPLET_LOCK:     return "SPOE_APPLET";
	case DNS_LOCK:             return "DNS";
	case PID_LIST_LOCK:        return "PID_LIST";
	case EMAIL_ALERTS_LOCK:    return "EMAIL_ALERTS";
	case PIPES_LOCK:           return "PIPES";
	case TLSKEYS_REF_LOCK:     return "TLSKEYS_REF";
	case AUTH_LOCK:            return "AUTH";
	case LOGSRV_LOCK:          return "LOGSRV";
	case DICT_LOCK:            return "DICT";
	case PROTO_LOCK:           return "PROTO";
	case CKCH_LOCK:            return "CKCH";
	case SNI_LOCK:             return "SNI";
	case SFT_LOCK:             return "SFT";
	case OTHER_LOCK:           return "OTHER";
	case LOCK_LABELS:          break; /* keep compiler happy */
	};
	/* only way to come here is consecutive to an internal bug */
	abort();
}
|
|
|
|
|
|
2017-10-12 10:09:09 -04:00
|
|
|
/* dumps on stderr, for every lock label, the number of read/write lock and
 * unlock operations, the lock/unlock imbalance, and the total and per-lock
 * average wait times collected in lock_stats[]. Only meaningful when lock
 * debugging is enabled.
 */
static inline void show_lock_stats()
{
	int lbl;

	for (lbl = 0; lbl < LOCK_LABELS; lbl++) {
		fprintf(stderr,
			"Stats about Lock %s: \n"
			"\t # write lock  : %lu\n"
			"\t # write unlock: %lu (%ld)\n"
			"\t # wait time for write     : %.3f msec\n"
			"\t # wait time for write/lock: %.3f nsec\n"
			"\t # read lock   : %lu\n"
			"\t # read unlock : %lu (%ld)\n"
			"\t # wait time for read      : %.3f msec\n"
			"\t # wait time for read/lock : %.3f nsec\n",
			lock_label(lbl),
			lock_stats[lbl].num_write_locked,
			lock_stats[lbl].num_write_unlocked,
			lock_stats[lbl].num_write_unlocked - lock_stats[lbl].num_write_locked,
			(double)lock_stats[lbl].nsec_wait_for_write / 1000000.0,
			lock_stats[lbl].num_write_locked ? ((double)lock_stats[lbl].nsec_wait_for_write / (double)lock_stats[lbl].num_write_locked) : 0,
			lock_stats[lbl].num_read_locked,
			lock_stats[lbl].num_read_unlocked,
			lock_stats[lbl].num_read_unlocked - lock_stats[lbl].num_read_locked,
			(double)lock_stats[lbl].nsec_wait_for_read / 1000000.0,
			lock_stats[lbl].num_read_locked ? ((double)lock_stats[lbl].nsec_wait_for_read / (double)lock_stats[lbl].num_read_locked) : 0);
	}
}
|
|
|
|
|
|
|
|
|
|
/* Following functions are used to collect some stats about locks. We wrap
|
|
|
|
|
* pthread functions to known how much time we wait in a lock. */
|
|
|
|
|
|
2020-05-28 09:29:19 -04:00
|
|
|
/* Returns the current value of the monotonic clock in nanoseconds, used to
 * measure time spent waiting for locks. Declared "static inline" (and not
 * plain "static") because it lives in a header: a non-inline static function
 * would be duplicated in, and trigger unused-function warnings from, every
 * translation unit that includes this file without calling it.
 */
static inline uint64_t nsec_now(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ((uint64_t) ts.tv_sec * 1000000000ULL +
		(uint64_t) ts.tv_nsec);
}
|
|
|
|
|
|
|
|
|
|
static inline void __ha_rwlock_init(struct ha_rwlock *l)
|
|
|
|
|
{
|
|
|
|
|
memset(l, 0, sizeof(struct ha_rwlock));
|
|
|
|
|
__RWLOCK_INIT(&l->lock);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static inline void __ha_rwlock_destroy(struct ha_rwlock *l)
|
|
|
|
|
{
|
|
|
|
|
__RWLOCK_DESTROY(&l->lock);
|
|
|
|
|
memset(l, 0, sizeof(struct ha_rwlock));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* takes rwlock <l> for writing under label <lbl>, recording the caller's
 * location (<func>, <file>, <line>) and accounting the time spent waiting.
 * Aborts if the calling thread already holds the lock in either mode, since
 * that would self-deadlock.
 */
static inline void __ha_rwlock_wrlock(enum lock_label lbl, struct ha_rwlock *l,
                                      const char *func, const char *file, int line)
{
	uint64_t start_time;

	if (unlikely(l->info.cur_writer & tid_bit)) {
		/* the thread is already owning the lock for write */
		abort();
	}

	if (unlikely(l->info.cur_readers & tid_bit)) {
		/* the thread is already owning the lock for read */
		abort();
	}

	/* advertise ourselves as a waiting writer before blocking */
	HA_ATOMIC_OR(&l->info.wait_writers, tid_bit);

	start_time = nsec_now();
	__RWLOCK_WRLOCK(&l->lock);
	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));

	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_locked, 1);

	/* lock is held: record the new owner and acquisition point */
	l->info.cur_writer = tid_bit;
	l->info.last_location.function = func;
	l->info.last_location.file = file;
	l->info.last_location.line = line;

	HA_ATOMIC_AND(&l->info.wait_writers, ~tid_bit);
}
|
|
|
|
|
|
|
|
|
|
/* tries to take rwlock <l> for writing under label <lbl>; returns 0 on
 * success, non-zero if the lock could not be obtained. On success the owner
 * and acquisition point are recorded as in __ha_rwlock_wrlock(). Aborts if
 * the calling thread already holds the lock in either mode.
 */
static inline int __ha_rwlock_trywrlock(enum lock_label lbl, struct ha_rwlock *l,
                                        const char *func, const char *file, int line)
{
	uint64_t start_time;
	int r;

	if (unlikely(l->info.cur_writer & tid_bit)) {
		/* the thread is already owning the lock for write */
		abort();
	}

	if (unlikely(l->info.cur_readers & tid_bit)) {
		/* the thread is already owning the lock for read */
		abort();
	}

	/* We set waiting writer because trywrlock could wait for readers to quit */
	HA_ATOMIC_OR(&l->info.wait_writers, tid_bit);

	start_time = nsec_now();
	r = __RWLOCK_TRYWRLOCK(&l->lock);
	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));
	if (unlikely(r)) {
		/* failed to get the lock: withdraw from the waiters and report */
		HA_ATOMIC_AND(&l->info.wait_writers, ~tid_bit);
		return r;
	}
	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_locked, 1);

	/* lock is held: record the new owner and acquisition point */
	l->info.cur_writer = tid_bit;
	l->info.last_location.function = func;
	l->info.last_location.file = file;
	l->info.last_location.line = line;

	HA_ATOMIC_AND(&l->info.wait_writers, ~tid_bit);

	return 0;
}
|
|
|
|
|
|
|
|
|
|
/* releases the write lock on <l> under label <lbl>, recording the release
 * point. Aborts if the calling thread does not own the write lock.
 */
static inline void __ha_rwlock_wrunlock(enum lock_label lbl,struct ha_rwlock *l,
                                        const char *func, const char *file, int line)
{
	if (unlikely(!(l->info.cur_writer & tid_bit))) {
		/* the thread is not owning the lock for write */
		abort();
	}

	/* clear ownership and record where the lock was last released */
	l->info.cur_writer = 0;
	l->info.last_location.function = func;
	l->info.last_location.file = file;
	l->info.last_location.line = line;

	__RWLOCK_WRUNLOCK(&l->lock);

	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_unlocked, 1);
}
|
|
|
|
|
|
|
|
|
|
/* takes rwlock <l> for reading under label <lbl>, accounting the time spent
 * waiting. Aborts if the calling thread already holds the lock in either
 * mode (read locks are not recursive here).
 */
static inline void __ha_rwlock_rdlock(enum lock_label lbl,struct ha_rwlock *l)
{
	uint64_t start_time;

	if (unlikely(l->info.cur_writer & tid_bit)) {
		/* the thread is already owning the lock for write */
		abort();
	}

	if (unlikely(l->info.cur_readers & tid_bit)) {
		/* the thread is already owning the lock for read */
		abort();
	}

	/* advertise ourselves as a waiting reader before blocking */
	HA_ATOMIC_OR(&l->info.wait_readers, tid_bit);

	start_time = nsec_now();
	__RWLOCK_RDLOCK(&l->lock);
	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (nsec_now() - start_time));
	HA_ATOMIC_ADD(&lock_stats[lbl].num_read_locked, 1);

	/* lock is held: join the current readers, then leave the waiters */
	HA_ATOMIC_OR(&l->info.cur_readers, tid_bit);

	HA_ATOMIC_AND(&l->info.wait_readers, ~tid_bit);
}
|
|
|
|
|
|
|
|
|
|
/* Try to take a read lock on <l> (label <lbl>) without waiting. Returns 0 on
 * success, otherwise the non-zero value returned by the underlying trylock.
 * Aborts if the calling thread already owns the lock for reading or writing.
 */
static inline int __ha_rwlock_tryrdlock(enum lock_label lbl,struct ha_rwlock *l)
{
	int r;

	if (unlikely(l->info.cur_writer & tid_bit)) {
		/* the thread is already owning the lock for write */
		abort();
	}

	if (unlikely(l->info.cur_readers & tid_bit)) {
		/* the thread is already owning the lock for read */
		abort();
	}

	/* try read should never wait, so neither the wait_readers bit nor
	 * the wait time needs to be accounted for here.
	 */
	r = __RWLOCK_TRYRDLOCK(&l->lock);
	if (unlikely(r))
		return r;
	HA_ATOMIC_ADD(&lock_stats[lbl].num_read_locked, 1);

	/* lock is held, record this thread as a current reader */
	HA_ATOMIC_OR(&l->info.cur_readers, tid_bit);

	return 0;
}
|
|
|
|
|
|
|
|
|
|
/* Release a read lock on <l> (label <lbl>), updating the read-unlock count
 * in lock_stats[]. Aborts if the calling thread does not currently own the
 * lock for reading, which catches unbalanced unlocks at the faulty site.
 */
static inline void __ha_rwlock_rdunlock(enum lock_label lbl,struct ha_rwlock *l)
{
	if (unlikely(!(l->info.cur_readers & tid_bit))) {
		/* the thread is not owning the lock for read */
		abort();
	}

	/* drop the reader bit before actually releasing the lock so the
	 * debug info never shows a reader that no longer holds it.
	 */
	HA_ATOMIC_AND(&l->info.cur_readers, ~tid_bit);

	__RWLOCK_RDUNLOCK(&l->lock);

	HA_ATOMIC_ADD(&lock_stats[lbl].num_read_unlocked, 1);
}
|
|
|
|
|
|
|
|
|
|
static inline void __spin_init(struct ha_spinlock *l)
|
|
|
|
|
{
|
|
|
|
|
memset(l, 0, sizeof(struct ha_spinlock));
|
|
|
|
|
__SPIN_INIT(&l->lock);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static inline void __spin_destroy(struct ha_spinlock *l)
|
|
|
|
|
{
|
|
|
|
|
__SPIN_DESTROY(&l->lock);
|
|
|
|
|
memset(l, 0, sizeof(struct ha_spinlock));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Take spinlock <l> (label <lbl>), accounting the time spent waiting and the
 * number of locks taken in lock_stats[], and recording <func>/<file>/<line>
 * as the acquisition point. Aborts if the calling thread already owns the
 * lock (recursive locking would self-deadlock).
 */
static inline void __spin_lock(enum lock_label lbl, struct ha_spinlock *l,
			      const char *func, const char *file, int line)
{
	uint64_t start_time;

	if (unlikely(l->info.owner & tid_bit)) {
		/* the thread is already owning the lock */
		abort();
	}

	/* advertise this thread among the waiters before spinning; the bit
	 * is cleared below once the lock is held.
	 */
	HA_ATOMIC_OR(&l->info.waiters, tid_bit);

	/* measure the time effectively spent waiting for the lock */
	start_time = nsec_now();
	__SPIN_LOCK(&l->lock);
	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));

	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_locked, 1);

	/* lock is held: record the owner and the acquisition location */
	l->info.owner                  = tid_bit;
	l->info.last_location.function = func;
	l->info.last_location.file     = file;
	l->info.last_location.line     = line;

	HA_ATOMIC_AND(&l->info.waiters, ~tid_bit);
}
|
|
|
|
|
|
|
|
|
|
/* Try to take spinlock <l> (label <lbl>) without waiting, recording
 * <func>/<file>/<line> as the acquisition point on success. Returns 0 on
 * success, otherwise the non-zero value returned by the underlying trylock.
 * Aborts if the calling thread already owns the lock.
 */
static inline int __spin_trylock(enum lock_label lbl, struct ha_spinlock *l,
				 const char *func, const char *file, int line)
{
	int r;

	if (unlikely(l->info.owner & tid_bit)) {
		/* the thread is already owning the lock */
		abort();
	}

	/* trylock should never wait, so neither the waiters bit nor the
	 * wait time needs to be accounted for here.
	 */
	r = __SPIN_TRYLOCK(&l->lock);
	if (unlikely(r))
		return r;
	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_locked, 1);

	/* lock is held: record the owner and the acquisition location */
	l->info.owner                  = tid_bit;
	l->info.last_location.function = func;
	l->info.last_location.file     = file;
	l->info.last_location.line     = line;

	return 0;
}
|
|
|
|
|
|
|
|
|
|
/* Release spinlock <l> (label <lbl>), recording <func>/<file>/<line> as the
 * release point and updating the unlock count in lock_stats[]. Aborts if the
 * calling thread does not currently own the lock, which catches unbalanced
 * unlocks at the exact faulty call site.
 */
static inline void __spin_unlock(enum lock_label lbl, struct ha_spinlock *l,
				 const char *func, const char *file, int line)
{
	if (unlikely(!(l->info.owner & tid_bit))) {
		/* the thread is not owning the lock */
		abort();
	}

	/* clear the owner and record the release location while still
	 * holding the lock, so the debug info is never seen stale.
	 */
	l->info.owner                  = 0;
	l->info.last_location.function = func;
	l->info.last_location.file     = file;
	l->info.last_location.line     = line;

	__SPIN_UNLOCK(&l->lock);

	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_unlocked, 1);
}
|
|
|
|
|
|
|
|
|
|
#endif /* DEBUG_THREAD */
|
|
|
|
|
|
|
|
|
|
#endif /* USE_THREAD */
|
|
|
|
|
|
2020-05-28 09:29:19 -04:00
|
|
|
#endif /* _HAPROXY_THREAD_H */
|