It seems that the `ctx->threadLimit` variable is updated in `POOL_create_advanced()` without being protected by `ctx->queueMutex`. This can lead to a data race. Will this be a problem?
|
static void* POOL_thread(void* opaque) { |
|
POOL_ctx* const ctx = (POOL_ctx*)opaque; |
|
if (!ctx) { return NULL; } |
|
for (;;) { |
|
/* Lock the mutex and wait for a non-empty queue or until shutdown */ |
|
ZSTD_pthread_mutex_lock(&ctx->queueMutex); |
|
|
|
while ( ctx->queueEmpty |
|
|| (ctx->numThreadsBusy >= ctx->threadLimit) ) { |
|
if (ctx->shutdown) { |
|
/* even if !queueEmpty, (possible if numThreadsBusy >= threadLimit), |
|
* a few threads will be shutdown while !queueEmpty, |
|
* but enough threads will remain active to finish the queue */ |
|
ZSTD_pthread_mutex_unlock(&ctx->queueMutex); |
|
return opaque; |
|
} |
|
ZSTD_pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex); |
|
} |
|
/* Pop a job off the queue */ |
|
{ POOL_job const job = ctx->queue[ctx->queueHead]; |
|
ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize; |
|
ctx->numThreadsBusy++; |
|
ctx->queueEmpty = (ctx->queueHead == ctx->queueTail); |
|
/* Unlock the mutex, signal a pusher, and run the job */ |
|
ctx->threads = (ZSTD_pthread_t*)ZSTD_customCalloc(numThreads * sizeof(ZSTD_pthread_t), customMem); |
|
ctx->threadCapacity = 0; |
|
ctx->customMem = customMem; |
|
/* Check for errors */ |
|
if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; } |
|
/* Initialize the threads */ |
|
{ size_t i; |
|
for (i = 0; i < numThreads; ++i) { |
|
if (ZSTD_pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) { |
|
ctx->threadCapacity = i; |
|
POOL_free(ctx); |
|
return NULL; |
|
} } |
|
ctx->threadCapacity = numThreads; |
|
ctx->threadLimit = numThreads; |
|
} |
|
return ctx; |
It seems that the `ctx->threadLimit` variable is updated in `POOL_create_advanced()` without being protected by `ctx->queueMutex`. This can lead to a data race. Will this be a problem? (zstd/lib/common/pool.c)
Lines 67 to 90 in bfd8ad8
zstd/lib/common/pool.c
Lines 143 to 159 in bfd8ad8