Many hyperlinks are disabled.
Use anonymous login
to enable hyperlinks.
Overview
| Comment: | Use C11 atomic functions. (WIP this is still UNIX-only) |
|---|---|
| Timelines: | family | ancestors | descendants | both | 893f8cc5-tip509-nested-mutex-atomic |
| Files: | files | file ages | folders |
| SHA3-256: | 10bcfc0e125e9575e09a06819af2897f |
| User & Date: | jan.nijtmans 2025-09-18 09:15:02.157 |
References
|
2025-09-18
| ||
| 09:18 | • Ticket [893f8cc5db] Nested mutexes following TIP 509 status still Open with 3 other changes artifact: b0a2160db7 user: jan.nijtmans | |
Context
|
2025-09-18
| ||
| 12:18 | Add Christian's spinlock patch. So we can do performance testing to see which is better, or simply k... check-in: 5efb090ce2 user: jan.nijtmans tags: 893f8cc5-tip509-nested-mutex-atomic | |
| 09:15 | Use C11 atomic functions. (WIP this is still UNIX-only) check-in: 10bcfc0e12 user: jan.nijtmans tags: 893f8cc5-tip509-nested-mutex-atomic | |
|
2025-09-17
| ||
| 11:06 | Rebase branch to 9.0 Closed-Leaf check-in: f4daf288c6 user: jan.nijtmans tags: 893f8cc5-tip509-nested-mutex | |
Changes
Changes to unix/tclUnixThrd.c.
| ︙ | ︙ | |||
8 9 10 11 12 13 14 15 16 17 18 19 20 21 | * Copyright © 2008 George Peter Staplin * * See the file "license.terms" for information on usage and redistribution of * this file, and for a DISCLAIMER OF ALL WARRANTIES. */ #include "tclInt.h" #if TCL_THREADS /* * TIP #509. Ensures that Tcl's mutexes are reentrant. * *---------------------------------------------------------------------- | > | 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 | * Copyright © 2008 George Peter Staplin * * See the file "license.terms" for information on usage and redistribution of * this file, and for a DISCLAIMER OF ALL WARRANTIES. */ #include "tclInt.h" #include <stdatomic.h> #if TCL_THREADS /* * TIP #509. Ensures that Tcl's mutexes are reentrant. * *---------------------------------------------------------------------- |
| ︙ | ︙ | |||
64 65 66 67 68 69 70 |
/*
* No correct native support for reentrant mutexes. Emulate them with regular mutexes
* and threadlocal counters.
*/
typedef struct PMutex {
pthread_mutex_t mutex;
| | | | 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 |
/*
* No correct native support for reentrant mutexes. Emulate them with regular mutexes
* and threadlocal counters.
*/
/*
 * Reentrant-mutex emulation (TIP #509): a plain pthread mutex plus an
 * owner id and a recursion depth, so the same thread may lock repeatedly.
 *
 * NOTE(review): `thread` and `counter` are accessed with GCC `__atomic_*`
 * builtins elsewhere in this file; `volatile` alone does not provide
 * atomicity — the builtins do. Storing integer values into a pthread_t
 * assumes pthread_t is an arithmetic type, which POSIX does not
 * guarantee — TODO confirm portability on all supported platforms.
 */
typedef struct PMutex {
pthread_mutex_t mutex;
/* Id of the thread currently holding `mutex`; 0 when unowned. */
volatile pthread_t thread;
/* Recursion depth: number of nested PMutexLock calls by the owner. */
volatile int counter;
} PMutex;
static void
PMutexInit(
PMutex *pmutexPtr)
{
pthread_mutex_init(&pmutexPtr->mutex, NULL);
|
| ︙ | ︙ | |||
88 89 90 91 92 93 94 |
pthread_mutex_destroy(&pmutexPtr->mutex);
}
static void
PMutexLock(
PMutex *pmutexPtr)
{
| | > | > > > | | | < | | < | < | | | < | < | | | 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 |
pthread_mutex_destroy(&pmutexPtr->mutex);
}
/*
 * PMutexLock --
 *
 *	Acquire pmutexPtr reentrantly: the first acquisition by a thread
 *	takes the underlying pthread mutex; nested acquisitions by the
 *	owning thread only bump the recursion counter.
 *
 *	The relaxed-ordered loads of `counter` and `thread` are only hints:
 *	a stale read sends us down the pthread_mutex_lock path, which is
 *	safe (it blocks until the real owner releases). The owning thread
 *	always reads its own latest writes, so the nested-lock fast path
 *	cannot misfire for the owner.
 *
 * NOTE(review): these are GCC `__atomic_*` builtins, not C11
 *	`<stdatomic.h>` functions, despite the check-in comment — confirm
 *	whether the added #include <stdatomic.h> is actually needed.
 */
static void
PMutexLock(
PMutex *pmutexPtr)
{
pthread_t mythread = pthread_self();
/* Fast check: mutex appears free -> take it and record ownership. */
if (__atomic_load_n(&pmutexPtr->counter, __ATOMIC_RELAXED) == 0) {
pthread_mutex_lock(&pmutexPtr->mutex);
__atomic_store_n(&pmutexPtr->thread, mythread, __ATOMIC_RELAXED);
/* Held by some other thread -> block, then claim ownership fresh. */
} else if (__atomic_load_n (&pmutexPtr->thread, __ATOMIC_RELAXED) != mythread) {
pthread_mutex_lock(&pmutexPtr->mutex);
__atomic_store_n(&pmutexPtr->thread, mythread, __ATOMIC_RELAXED);
/* Counter may be stale from the stale load above; start depth at 0. */
__atomic_store_n(&pmutexPtr->counter, 0, __ATOMIC_RELAXED);
}
/* Owner (first-time or nested): one more level of recursion. */
__atomic_fetch_add(&pmutexPtr->counter, 1, __ATOMIC_RELAXED);
}
/*
 * PMutexUnlock --
 *
 *	Release one level of a reentrant lock. Only when the recursion
 *	counter drops from 1 to 0 is ownership cleared and the underlying
 *	pthread mutex actually unlocked; nested unlocks just decrement.
 *
 * NOTE(review): storing 0 into `thread` assumes pthread_t is an
 *	arithmetic type — not guaranteed by POSIX; verify on target
 *	platforms.
 */
static void
PMutexUnlock(
PMutex *pmutexPtr)
{
/* fetch_add returns the pre-decrement value: 1 means outermost unlock. */
if (__atomic_fetch_add(&pmutexPtr->counter, -1, __ATOMIC_RELAXED) == 1) {
/* Clear owner before releasing so late readers retake the slow path. */
__atomic_store_n(&pmutexPtr->thread, 0, __ATOMIC_RELAXED);
pthread_mutex_unlock(&pmutexPtr->mutex);
}
}
/*
 * PCondWait --
 *
 *	Wait on a condition variable while holding a reentrant PMutex.
 *	pthread_cond_wait releases the raw mutex internally, so the
 *	recursion state must be parked first: the counter is swapped out
 *	to a local and ownership cleared, then restored once the wait
 *	returns (at which point the raw mutex is held again).
 *
 * NOTE(review): presumably the caller holds the lock exactly
 *	`counter` times and no other thread mutates it while we wait —
 *	confirm against the callers of this helper.
 */
static void
PCondWait(
pthread_cond_t *pcondPtr,
PMutex *pmutexPtr)
{
/* Park the recursion depth; mark the mutex as unowned while waiting. */
int counter =__atomic_exchange_n(&pmutexPtr->counter, 0, __ATOMIC_RELAXED);
__atomic_store_n(&pmutexPtr->thread, 0, __ATOMIC_RELAXED);
pthread_cond_wait(pcondPtr, &pmutexPtr->mutex);
/* Raw mutex re-acquired by cond_wait: restore ownership and depth. */
__atomic_store_n(&pmutexPtr->thread, pthread_self(), __ATOMIC_RELAXED);
__atomic_store_n(&pmutexPtr->counter, counter, __ATOMIC_RELAXED);
}
/*
 * PCondTimedWait --
 *
 *	Timed variant of PCondWait: wait on pcondPtr until signalled or
 *	until the absolute time *ptime, parking and restoring the
 *	reentrant-lock state around the raw pthread_cond_timedwait call.
 *
 * NOTE(review): the pthread_cond_timedwait return value (e.g.
 *	ETIMEDOUT) is discarded here — presumably callers re-check their
 *	predicate and handle timeouts themselves; confirm at call sites.
 */
static void
PCondTimedWait(
pthread_cond_t *pcondPtr,
PMutex *pmutexPtr,
struct timespec *ptime)
{
/* Park the recursion depth; mark the mutex as unowned while waiting. */
int counter =__atomic_exchange_n(&pmutexPtr->counter, 0, __ATOMIC_RELAXED);
__atomic_store_n(&pmutexPtr->thread, 0, __ATOMIC_RELAXED);
pthread_cond_timedwait(pcondPtr, &pmutexPtr->mutex, ptime);
/* Raw mutex re-acquired on return (signal OR timeout): restore state. */
__atomic_store_n(&pmutexPtr->thread, pthread_self(), __ATOMIC_RELAXED);
__atomic_store_n(&pmutexPtr->counter, counter, __ATOMIC_RELAXED);
}
/*
* globalLock is used to serialize creation of mutexes, condition variables,
* and thread local storage. This is the only place that can count on the
* ability to statically initialize the mutex.
*/
|
| ︙ | ︙ |