libacfutils
A general-purpose library of utility functions designed to make it easier to develop add-ons for the X-Plane flight simulator.
Loading...
Searching...
No Matches
thread.h
Go to the documentation of this file.
1/*
2 * CDDL HEADER START
3 *
4 * This file and its contents are supplied under the terms of the
5 * Common Development and Distribution License ("CDDL"), version 1.0.
6 * You may only use this file in accordance with the terms of version
7 * 1.0 of the CDDL.
8 *
9 * A full copy of the text of the CDDL should have accompanied this
10 * source. A copy of the CDDL is also available via the Internet at
11 * http://www.illumos.org/license/CDDL.
12 *
13 * CDDL HEADER END
14*/
15/*
16 * Copyright 2021 Saso Kiselkov. All rights reserved.
17 */
18
19#ifndef _ACF_UTILS_THREAD_H_
20#define _ACF_UTILS_THREAD_H_
21
22#include <errno.h>
23#include <stddef.h>
24#include <stdlib.h>
25#include <string.h>
26
27#if APL || LIN
28#include <pthread.h>
29#include <stdint.h>
30#include <time.h>
31#else /* !APL && !LIN */
32#include <windows.h>
33#endif /* !APL && !LIN */
34
35#if __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__) && \
36 (!defined(__GNUC__) || __GNUC__ >= 7 || defined(__clang__))
37#define _USE_STDATOMICS
38#endif
39
40#ifdef _USE_STDATOMICS
41#include <stdatomic.h>
42#elif APL
43#include <libkern/OSAtomic.h>
44#endif
45
46#if LIN
47#include <sys/syscall.h>
48#include <unistd.h>
49#endif /* LIN */
50
51#include "assert.h"
52#include "helpers.h"
53#include "list.h"
54#include "time.h"
55#include "safe_alloc.h"
56
57#ifdef __cplusplus
58extern "C" {
59#endif
60
171typedef struct {
172 void (*proc)(void *);
173 void *arg;
174 const char *filename;
175 int linenum;
176 list_node_t node;
178
179/*
180 * !!!! CAUTION !!!!
181 * MacOS's kernel API contains a function named `thread_create' that'll
182 * clash with the naming below. To work around this, we declare the
183 * function name to be a macro to our own scoped version. The user must
184 * include /usr/include/mach/task.h before us, otherwise we could be
185 * rewriting the function name in the header too. The flip side is that
186 * the user will get calls to libacfutils' thread_create instead.
187 */
188#if !APL || !defined(_task_user_)
189#define thread_create(thrp, start_proc, arg) \
190 lacf_thread_create((thrp), (start_proc), (arg), \
191 log_basename(__FILE__), __LINE__)
192#endif
193
280#ifdef _USE_STDATOMICS
281#define atomic32_t _Atomic int32_t
282#define atomic_inc_32(x) atomic_fetch_add((x), 1)
283#define atomic_dec_32(x) atomic_fetch_add((x), -1)
284#define atomic_set_32(x, y) atomic_store((x), (y))
285#define atomic_add_32(x, y) atomic_fetch_add((x), (y))
286#define atomic64_t _Atomic int64_t
287#define atomic_inc_64(x) atomic_fetch_add((x), 1)
288#define atomic_dec_64(x) atomic_fetch_add((x), -1)
289#define atomic_set_64(x, y) atomic_store((x), (y))
290#define atomic_add_64(x, y) atomic_fetch_add((x), (y))
291#elif IBM
292#define atomic32_t volatile LONG
293#define atomic_inc_32(x) InterlockedIncrement((x))
294#define atomic_dec_32(x) InterlockedDecrement((x))
295#define atomic_add_32(x, y) InterlockedAdd((x), (y))
296#define atomic_set_32(x, y) InterlockedExchange((x), (y))
297#define atomic64_t volatile LONG64
298#define atomic_inc_64(x) InterlockedIncrement64((x))
299#define atomic_dec_64(x) InterlockedDecrement64((x))
300#define atomic_add_64(x, y) InterlockedAdd64((x), (y))
301#define atomic_set_64(x, y) InterlockedExchange64((x), (y))
302#elif APL
303#define atomic32_t volatile int32_t
304#define atomic_inc_32(x) OSAtomicAdd32(1, (x))
305#define atomic_dec_32(x) OSAtomicAdd32(-1, (x))
306#define atomic_add_32(x, y) OSAtomicAdd32((y), (x))
307#define atomic_set_32(x, y) (x) = (y) /* No such op on OSX */
308#define atomic64_t volatile int64_t
309#define atomic_inc_64(x) OSAtomicAdd64(1, (x))
310#define atomic_dec_64(x) OSAtomicAdd64(-1, (x))
311#define atomic_add_64(x, y) OSAtomicAdd64((y), (x))
312#define atomic_set_64(x, y) (x) = (y) /* No such op on OSX */
313#else /* LIN */
314#define atomic32_t volatile int32_t
315#define atomic_inc_32(x) __sync_add_and_fetch((x), 1)
316#define atomic_dec_32(x) __sync_add_and_fetch((x), -1)
317#define atomic_add_32(x, y) __sync_add_and_fetch((x), (y))
318#define atomic_set_32(x, y) __atomic_store_n((x), (y), __ATOMIC_RELAXED)
319#define atomic64_t volatile int64_t
320#define atomic_inc_64(x) __sync_add_and_fetch((x), 1)
321#define atomic_dec_64(x) __sync_add_and_fetch((x), -1)
322#define atomic_add_64(x, y) __sync_add_and_fetch((x), (y))
323#define atomic_set_64(x, y) __atomic_store_n((x), (y), __ATOMIC_RELAXED)
324#endif /* LIN */
325
326#if APL || LIN
327
328#if APL
329#define thread_t pthread_t
330#define thread_id_t pthread_t
331#define mutex_t pthread_mutex_t
332#define condvar_t pthread_cond_t
333#else /* !APL */
334typedef pthread_t thread_t;
335typedef pthread_t thread_id_t;
336typedef pthread_mutex_t mutex_t;
337typedef pthread_cond_t condvar_t;
338#endif /* !APL */
339#define curthread_id pthread_self()
340#define curthread pthread_self()
341
342static inline bool_t
343thread_equal(thread_id_t t1, thread_id_t t2)
344{
345 return (pthread_equal(t1, t2));
346}
347
348static inline void
350{
351 pthread_mutexattr_t attr;
352 pthread_mutexattr_init(&attr);
353 pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
354 pthread_mutex_init(mtx, &attr);
355}
356
357static inline void
359{
360 pthread_mutex_destroy(mtx);
361}
362
363static inline void
365{
366 pthread_mutex_lock(mtx);
367}
368
369static inline void
371{
372 pthread_mutex_unlock(mtx);
373}
374
375#if LIN
376#define VERIFY_MUTEX_HELD(mtx) \
377 VERIFY((mtx)->__data.__owner == syscall(SYS_gettid))
378#define VERIFY_MUTEX_NOT_HELD(mtx) \
379 VERIFY((mtx)->__data.__owner != syscall(SYS_gettid))
380#else /* APL */
381#define VERIFY_MUTEX_HELD(mtx) (void)1
382#define VERIFY_MUTEX_NOT_HELD(mtx) (void)1
383#endif /* APL */
384
385#else /* !APL && !LIN */
386
393typedef HANDLE thread_t;
401typedef DWORD thread_id_t;
413typedef struct {
414 bool_t inited;
415 CRITICAL_SECTION cs;
416} mutex_t;
465typedef CONDITION_VARIABLE condvar_t;
466
467#define curthread_id GetCurrentThreadId()
468#define curthread GetCurrentThread()
469
470/*
471 * Compares thread IDs in a platform-independent way.
472 * @return Returns true if thread ID `tid1` is equal to `tid2`. These
473 * thread IDs are as returned by curthread_id().
474 * @see curthread_id
475 */
476static inline bool_t
477thread_equal(thread_id_t tid1, thread_id_t tid2)
478{
479 return (tid1 == tid2);
480}
481
487static inline void
489{
490 mtx->inited = B_TRUE;
491 InitializeCriticalSection(&mtx->cs);
492}
493
498static inline void
500{
501 if (mtx->inited) {
502 DeleteCriticalSection(&mtx->cs);
503 mtx->inited = B_FALSE;
504 }
505}
506
529static inline void
531{
532 ASSERT(mtx->inited);
533 EnterCriticalSection(&mtx->cs);
534}
535
555static inline void
557{
558 ASSERT(mtx->inited);
559 LeaveCriticalSection(&mtx->cs);
560}
561
562#define VERIFY_MUTEX_HELD(mtx) (void)1
563#define VERIFY_MUTEX_NOT_HELD(mtx) (void)1
564
565#endif /* !APL && !LIN */
566
567/*
568 * This is the thread cleanup tracking machinery.
569 */
570API_EXPORT extern bool_t lacf_thread_list_inited;
571API_EXPORT extern mutex_t lacf_thread_list_lock;
572API_EXPORT extern list_t lacf_thread_list;
573
577static inline void
579{
580 if (!lacf_thread_list_inited) {
581 mutex_init(&lacf_thread_list_lock);
582 list_create(&lacf_thread_list, sizeof (lacf_thread_info_t),
583 offsetof(lacf_thread_info_t, node));
584 lacf_thread_list_inited = B_TRUE;
585 }
586}
587
591static inline void
593{
594 if (lacf_thread_list_inited) {
595 mutex_enter(&lacf_thread_list_lock);
596 if (list_count(&lacf_thread_list) == 0) {
597 list_destroy(&lacf_thread_list);
598 mutex_exit(&lacf_thread_list_lock);
599 mutex_destroy(&lacf_thread_list_lock);
600 lacf_thread_list_inited = B_FALSE;
601 } else {
602 mutex_exit(&lacf_thread_list_lock);
603 }
604 }
605}
606
610static inline void
612{
613 ASSERT(ti != NULL);
614 ASSERT(lacf_thread_list_inited);
615 mutex_enter(&lacf_thread_list_lock);
616 list_insert_tail(&lacf_thread_list, ti);
617 mutex_exit(&lacf_thread_list_lock);
618}
619
623static inline void
625{
626 ASSERT(ti != NULL);
627 ASSERT(lacf_thread_list_inited);
628 mutex_enter(&lacf_thread_list_lock);
629 list_remove(&lacf_thread_list, ti);
630 mutex_exit(&lacf_thread_list_lock);
631}
632
641static inline void
643{
644 if (!lacf_thread_list_inited)
645 return;
646
647 mutex_enter(&lacf_thread_list_lock);
648 for (lacf_thread_info_t *ti =
649 (lacf_thread_info_t *)list_head(&lacf_thread_list); ti != NULL;
650 ti = (lacf_thread_info_t *)list_next(&lacf_thread_list, ti)) {
651 logMsg("Leaked thread, created here %s:%d", ti->filename,
652 ti->linenum);
653 }
654 VERIFY0(list_count(&lacf_thread_list));
655 mutex_exit(&lacf_thread_list_lock);
656
658}
659
660#if APL || LIN
661
662void *lacf_thread_start_routine(void *arg);
663
664WARN_UNUSED_RES_ATTR static inline bool_t
665lacf_thread_create(thread_t *thrp, void (*proc)(void *), void *arg,
666 const char *filename, int linenum)
667{
669 (lacf_thread_info_t *)safe_calloc(1, sizeof (*ti));
670 ti->proc = proc;
671 ti->arg = arg;
672 ti->filename = filename;
673 ti->linenum = linenum;
676 if (pthread_create(thrp, NULL, lacf_thread_start_routine, ti) == 0) {
677 /* Start success */
678 return (B_TRUE);
679 }
680 /* Start failure - remove from list and try to destroy the list */
683 free(ti);
684 return (B_FALSE);
685}
686
687static inline void
689{
690 pthread_join(*thrp, NULL);
692}
693
694#if LIN
695
/*
 * Assigns a debugger-visible name to the calling thread
 * (Linux: pthread_setname_np takes the target thread explicitly).
 */
static inline void
thread_set_name(const char *name)
{
	pthread_setname_np(pthread_self(), name);
}
701
702#else /* APL */
703
/*
 * Assigns a debugger-visible name to the calling thread
 * (macOS: pthread_setname_np only ever names the current thread).
 */
static inline void
thread_set_name(const char *name)
{
	pthread_setname_np(name);
}
709
710#endif /* APL */
711
712static inline void
713cv_wait(condvar_t *cv, mutex_t *mtx)
714{
715 pthread_cond_wait(cv, mtx);
716}
717
718static inline int
719cv_timedwait(condvar_t *cv, mutex_t *mtx, uint64_t limit)
720{
721 struct timespec ts = { .tv_sec = (time_t)(limit / 1000000),
722 .tv_nsec = (long)((limit % 1000000) * 1000) };
723 return (pthread_cond_timedwait(cv, mtx, &ts));
724}
725
726static inline void
728{
729 pthread_cond_init(cv, NULL);
730}
731
732static inline void
734{
735 pthread_cond_destroy(cv);
736}
737
738static inline void
740{
741 pthread_cond_signal(cv);
742}
743
744static inline void
746{
747 pthread_cond_broadcast(cv);
748}
749
750#if !APL
751#define THREAD_PRIO_IDLE sched_get_priority_min()
752#define THREAD_PRIO_VERY_LOW (THREAD_PRIO_NORM - 2)
753#define THREAD_PRIO_LOW (THREAD_PRIO_NORM - 1)
754#define THREAD_PRIO_NORM 0 /* Default priority on Linux */
755#define THREAD_PRIO_HIGH (THREAD_PRIO_NORM + 1)
756#define THREAD_PRIO_VERY_HIGH (THREAD_PRIO_NORM + 2)
757#define THREAD_PRIO_RT sched_get_priority_max()
758static inline void
759thread_set_prio(thread_t thr, int prio)
760{
761 struct sched_param param = {0};
762 param.sched_priority = (prio);
763 pthread_setschedparam(thr, SCHED_OTHER, &param);
764}
765
766#else /* APL */
767/*
768 * BIG CAVEAT: Apparently idle thread prioritization is causing massive
769 * thread scheduling stability issues on MacOS Monterey with its Rosetta
770 * x86 emulation. Threads either don't get scheduled, or they run in
771 * "slow mo", gradually speeding up and generally just behave entirely
772 * erratically.
773 */
774#define THREAD_PRIO_IDLE 0
775#define THREAD_PRIO_VERY_LOW 0
776#define THREAD_PRIO_LOW 0
777#define THREAD_PRIO_NORM 0
778#define THREAD_PRIO_HIGH 0
779#define THREAD_PRIO_VERY_HIGH 0
780#define THREAD_PRIO_RT 0
781static inline void
782thread_set_prio(thread_t thr, int prio)
783{
784 LACF_UNUSED(thr);
785 LACF_UNUSED(prio);
786}
787
788#endif /* APL */
789
790#else /* !APL && !LIN */
791
795DWORD lacf_thread_start_routine(void *arg);
796
802WARN_UNUSED_RES_ATTR static inline bool_t
803lacf_thread_create(thread_t *thrp, void (*proc)(void *), void *arg,
804 const char *filename, int linenum)
805{
807 (lacf_thread_info_t *)safe_calloc(1, sizeof (*ti));
808 ti->proc = proc;
809 ti->arg = arg;
810 ti->filename = filename;
811 ti->linenum = linenum;
814 if ((*(thrp) = CreateThread(NULL, 0, lacf_thread_start_routine, ti,
815 0, NULL)) != NULL) {
816 /* Start success */
817 return (B_TRUE);
818 }
819 /* Start failure - remove from list and try to destroy the list */
822 free(ti);
823 return (B_FALSE);
824}
825
835static inline void
837{
838 VERIFY3S(WaitForSingleObject(*thrp, INFINITE), ==, WAIT_OBJECT_0);
840}
841
/*
 * Thread naming is not implemented on the Win32 backend; the name is
 * accepted and discarded so portable callers need no #ifdefs.
 */
static inline void
thread_set_name(const char *name)
{
	LACF_UNUSED(name);
}
856
867static inline void
869{
870 VERIFY(SleepConditionVariableCS(cv, &mtx->cs, INFINITE));
871}
872
897static inline int
898cv_timedwait(condvar_t *cv, mutex_t *mtx, uint64_t limit)
899{
900 uint64_t now = microclock();
901 if (now < limit) {
902 /*
903 * The only way to guarantee that when we return due to a
904 * timeout the full microsecond-accurate quantum has elapsed
905 * is to round-up to the nearest millisecond.
906 */
907 if (SleepConditionVariableCS(cv, &mtx->cs,
908 ceil((limit - now) / 1000.0)) != 0) {
909 return (0);
910 }
911 if (GetLastError() == ERROR_TIMEOUT)
912 return (ETIMEDOUT);
913 return (-1);
914 }
915 return (ETIMEDOUT);
916}
917
925static inline void
927{
928 InitializeConditionVariable(cv);
929}
930
935static inline void
937{
938 LACF_UNUSED(cv);
939}
940
947static inline void
949{
950 WakeConditionVariable(cv);
951}
952
959static inline void
961{
962 WakeAllConditionVariable(cv);
963}
964
970#define THREAD_PRIO_IDLE THREAD_PRIORITY_IDLE
975#define THREAD_PRIO_VERY_LOW THREAD_PRIORITY_LOWEST
980#define THREAD_PRIO_LOW THREAD_PRIORITY_BELOW_NORMAL
986#define THREAD_PRIO_NORM THREAD_PRIORITY_NORMAL
991#define THREAD_PRIO_HIGH THREAD_PRIORITY_ABOVE_NORMAL
996#define THREAD_PRIO_VERY_HIGH THREAD_PRIORITY_HIGHEST
1004#define THREAD_PRIO_RT THREAD_PRIORITY_TIME_CRITICAL
1016static inline void
1018{
1019 SetThreadPriority(thr, prio);
1020}
1021
1022#endif /* !APL && !LIN */
1023
1024API_EXPORT void lacf_mask_sigpipe(void);
1025
1041typedef struct {
1042 mutex_t lock;
1043 condvar_t cv;
1044 bool_t write_locked;
1045 thread_id_t writer;
1046 unsigned refcount;
1047 list_t waiters;
1048} rwmutex_t;
1049
1050typedef struct {
1051 bool_t write;
1052 list_node_t node;
1054
1060UNUSED_ATTR static void
1062{
1063 memset(rw, 0, sizeof (*rw));
1064 mutex_init(&rw->lock);
1065 cv_init(&rw->cv);
1066 list_create(&rw->waiters, sizeof (rwlock_waiter_t),
1067 offsetof(rwlock_waiter_t, node));
1068}
1069
1074UNUSED_ATTR static void
1076{
1077 ASSERT3U(rw->refcount, ==, 0);
1078 list_destroy(&rw->waiters);
1079 cv_destroy(&rw->cv);
1080 mutex_destroy(&rw->lock);
1081}
1082
1090static inline bool_t
1092{
1093 return (rw->write_locked && rw->writer == curthread_id);
1094}
1095
1099static inline bool_t
1101{
1102 for (const rwlock_waiter_t *wt = (rwlock_waiter_t *)list_head(
1103 &rw->waiters);;
1104 wt = (rwlock_waiter_t *)list_next(&rw->waiters, wt)) {
1105 /* Our wt_self MUST be somewhere in rw->waiters! */
1106 VERIFY(wt != NULL);
1107 if (wt == wt_self)
1108 return (B_TRUE);
1109 if (wt->write)
1110 return (B_FALSE);
1111 }
1112}
1113
1142UNUSED_ATTR static void
1143rwmutex_enter(rwmutex_t *rw, bool_t write)
1144{
1145 rwlock_waiter_t wt = {0};
1146
1147 wt.write = write;
1148 /*
1149 * No recursion allowed! We can't check for recursive read attempts,
1150 * only write (since readers don't retain any ownership information),
1151 * so it's best to avoid recursion altogether.
1152 */
1153 ASSERT_MSG(!rwmutex_held_write(rw), "%s", "Attempted to recursively "
1154 "acquire an rwmutex_t. This is NOT supported!");
1155
1156 mutex_enter(&rw->lock);
1157 /*
1158 * Enter the queue of threads waiting to acquire the mutex.
1159 */
1160 list_insert_tail(&rw->waiters, &wt);
1161
1162 if (write) {
1163 /*
1164 * Wait until everybody else is out of the mutex
1165 * and we're next to enter.
1166 */
1167 while (rw->refcount != 0 ||
1168 list_head(&rw->waiters) != (void *)&wt) {
1169 cv_wait(&rw->cv, &rw->lock);
1170 }
1171 /*
1172 * We're clear to proceed, mark the mutex as
1173 * write-locked by us.
1174 */
1175 rw->writer = curthread_id;
1176 rw->write_locked = B_TRUE;
1177 } else {
1178 /*
1179 * If the mutex is currently held by a writer, or
1180 * there's another writer ahead of us, wait.
1181 */
1182 while (rw->write_locked ||
1183 !rwmutex_can_enter_impl(rw, &wt)) {
1184 cv_wait(&rw->cv, &rw->lock);
1185 }
1186 }
1187 /*
1188 * Exit the wait queue. We've now acquired the mutex.
1189 */
1190 list_remove(&rw->waiters, &wt);
1191 rw->refcount++;
1192
1193 mutex_exit(&rw->lock);
1194}
1195
1199UNUSED_ATTR static void
1201{
1202 mutex_enter(&rw->lock);
1203 ASSERT(rw->refcount != 0);
1204 rw->refcount--;
1205 if (rw->refcount == 0 && rw->write_locked) {
1206 ASSERT3U(rw->writer, ==, curthread_id);
1207 rw->write_locked = B_FALSE;
1208 }
1209 if (list_head(&rw->waiters) != NULL)
1210 cv_broadcast(&rw->cv);
1211 mutex_exit(&rw->lock);
1212}
1213
1217static inline void
1219{
1220 rwmutex_exit(rw);
1221 rwmutex_enter(rw, B_TRUE);
1222}
1223
1224#ifdef DEBUG
1225#define ASSERT_MUTEX_HELD(mtx) VERIFY_MUTEX_HELD(mtx)
1226#define ASSERT_MUTEX_NOT_HELD(mtx) VERIFY_MUTEX_NOT_HELD(mtx)
1227#else /* !DEBUG */
1228#define ASSERT_MUTEX_HELD(mtx)
1229#define ASSERT_MUTEX_NOT_HELD(mtx)
1230#endif /* !DEBUG */
1231
1232#ifdef __cplusplus
1233}
1234#endif
1235
1236#endif /* _ACF_UTILS_THREAD_H_ */
#define ASSERT_MSG(x, fmt,...)
Definition assert.h:214
#define VERIFY(x)
Definition assert.h:78
#define VERIFY0(x)
Definition assert.h:154
#define ASSERT3U(x, op, y)
Definition assert.h:210
#define ASSERT(x)
Definition assert.h:208
#define VERIFY3S(x, op, y)
Definition assert.h:125
#define UNUSED_ATTR
Definition core.h:95
#define WARN_UNUSED_RES_ATTR
Definition core.h:96
void list_destroy(list_t *)
Definition list.c:136
void * list_head(const list_t *)
Definition list.c:292
void list_create(list_t *, size_t, size_t)
Definition list.c:113
void * list_next(const list_t *, const void *)
Definition list.c:344
size_t list_count(const list_t *)
Definition list.c:543
void list_remove(list_t *, void *)
Definition list.c:226
void list_insert_tail(list_t *, void *)
Definition list.c:213
#define logMsg(...)
Definition log.h:112
static void * safe_calloc(size_t nmemb, size_t size)
Definition safe_alloc.h:71
CONDITION_VARIABLE condvar_t
Definition thread.h:465
static void rwmutex_enter(rwmutex_t *rw, bool_t write)
Definition thread.h:1143
static void rwmutex_init(rwmutex_t *rw)
Definition thread.h:1061
static bool_t lacf_thread_create(thread_t *thrp, void(*proc)(void *), void *arg, const char *filename, int linenum)
Definition thread.h:803
static void _lacf_thread_list_fini(void)
Definition thread.h:592
DWORD lacf_thread_start_routine(void *arg)
Definition thread.c:40
static void rwmutex_destroy(rwmutex_t *rw)
Definition thread.h:1075
static void cv_destroy(condvar_t *cv)
Definition thread.h:936
void lacf_mask_sigpipe(void)
Definition worker.c:35
static void thread_set_name(const char *name)
Definition thread.h:852
static void thread_set_prio(thread_t thr, int prio)
Definition thread.h:1017
static void mutex_destroy(mutex_t *mtx)
Definition thread.h:499
static void thread_join(thread_t *thrp)
Definition thread.h:836
static void mutex_enter(mutex_t *mtx)
Definition thread.h:530
static void cv_init(condvar_t *cv)
Definition thread.h:926
#define curthread_id
Definition thread.h:467
static void mutex_exit(mutex_t *mtx)
Definition thread.h:556
static bool_t rwmutex_can_enter_impl(const rwmutex_t *rw, const rwlock_waiter_t *wt_self)
Definition thread.h:1100
DWORD thread_id_t
Definition thread.h:401
HANDLE thread_t
Definition thread.h:393
static bool_t rwmutex_held_write(rwmutex_t *rw)
Definition thread.h:1091
static void cv_wait(condvar_t *cv, mutex_t *mtx)
Definition thread.h:868
static void _lacf_thread_list_init(void)
Definition thread.h:578
static void cv_signal(condvar_t *cv)
Definition thread.h:948
static void _lacf_thread_list_add(lacf_thread_info_t *ti)
Definition thread.h:611
static void rwmutex_exit(rwmutex_t *rw)
Definition thread.h:1200
static int cv_timedwait(condvar_t *cv, mutex_t *mtx, uint64_t limit)
Definition thread.h:898
static void mutex_init(mutex_t *mtx)
Definition thread.h:488
static void cv_broadcast(condvar_t *cv)
Definition thread.h:960
static void lacf_threads_fini(void)
Definition thread.h:642
static void _lacf_thread_list_remove(lacf_thread_info_t *ti)
Definition thread.h:624
static void rwmutex_upgrade(rwmutex_t *rw)
Definition thread.h:1218