ref: 135286f4796fe5dc26cf37b73a2c3df4cfa85792
parent: e705519d406941886431300ca432d33980cb554c
author: Henrik Gramner <[email protected]>
date: Tue Dec 8 17:34:43 EST 2020
Fix use of references to buffers after calling dav1d_close()

9057d286 had the side effect of causing references to buffers allocated
using memory pools to no longer be valid after closing the decoder.
Restore this functionality by making buffer pools reference counted.
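As a rough illustration of the scheme (not the actual dav1d code): each pool
starts with one reference held by the context and gains one per outstanding
buffer; closing the decoder drops the context's reference and marks the pool
ended, and returning the last buffer afterwards destroys the pool. A minimal
single-threaded sketch, with locking and buffer reuse omitted and all names
hypothetical:

    /* Simplified sketch of the reference-counting scheme; no mutex, no
     * freelist reuse, and none of these names are the real dav1d API. */
    #include <stdlib.h>

    typedef struct Pool {
        int ref_cnt; /* context reference + one per outstanding buffer */
        int end;     /* set once the owning context has been closed */
    } Pool;

    typedef struct Buf {
        Pool *pool;
        void *data;
    } Buf;

    static int pool_init(Pool **const ppool) {
        Pool *const pool = malloc(sizeof(*pool));
        if (!pool) { *ppool = NULL; return -1; }
        pool->ref_cnt = 1; /* reference held by the owning context */
        pool->end = 0;
        *ppool = pool;
        return 0;
    }

    static Buf *pool_pop(Pool *const pool, const size_t size) {
        Buf *const buf = malloc(sizeof(*buf));
        if (!buf) return NULL;
        if (!(buf->data = malloc(size))) { free(buf); return NULL; }
        buf->pool = pool;
        pool->ref_cnt++; /* each live buffer keeps the pool alive */
        return buf;
    }

    static void pool_push(Buf *const buf) {
        Pool *const pool = buf->pool;
        const int ref_cnt = --pool->ref_cnt;
        free(buf->data);
        free(buf);
        /* Once the pool has ended, the last buffer returned destroys it. */
        if (pool->end && !ref_cnt) free(pool);
    }

    static void pool_end(Pool *const pool) {
        pool->end = 1;
        if (!--pool->ref_cnt) free(pool); /* no buffers outstanding */
    }

    int main(void) {
        Pool *pool;
        if (pool_init(&pool)) return 1;
        Buf *const buf = pool_pop(pool, 64);
        if (!buf) { pool_end(pool); return 1; }
        pool_end(pool);  /* decoder closed while buf is still referenced */
        pool_push(buf);  /* releasing the last buffer frees the pool */
        return 0;
    }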
--- a/src/cdf.c
+++ b/src/cdf.c
@@ -4096,7 +4096,7 @@
int dav1d_cdf_thread_alloc(Dav1dContext *const c, CdfThreadContext *const cdf,
struct thread_data *const t)
{
- cdf->ref = dav1d_ref_create_using_pool(&c->cdf_pool,
+ cdf->ref = dav1d_ref_create_using_pool(c->cdf_pool,
sizeof(CdfContext) + sizeof(atomic_uint));
if (!cdf->ref) return DAV1D_ERR(ENOMEM);
cdf->data.cdf = cdf->ref->data;
--- a/src/decode.c
+++ b/src/decode.c
@@ -3462,7 +3462,7 @@
// ref_mvs
if ((f->frame_hdr->frame_type & 1) || f->frame_hdr->allow_intrabc) {
- f->mvs_ref = dav1d_ref_create_using_pool(&c->refmvs_pool,
+ f->mvs_ref = dav1d_ref_create_using_pool(c->refmvs_pool,
sizeof(*f->mvs) * f->sb128h * 16 * (f->b4_stride >> 1));
if (!f->mvs_ref) {
res = DAV1D_ERR(ENOMEM);
@@ -3526,7 +3526,7 @@
// We're updating an existing map, but need somewhere to
// put the new values. Allocate them here (the data
// actually gets set elsewhere)
- f->cur_segmap_ref = dav1d_ref_create_using_pool(&c->segmap_pool,
+ f->cur_segmap_ref = dav1d_ref_create_using_pool(c->segmap_pool,
sizeof(*f->cur_segmap) * f->b4_stride * 32 * f->sb128h);
if (!f->cur_segmap_ref) {
dav1d_ref_dec(&f->prev_segmap_ref);
@@ -3543,7 +3543,7 @@
} else {
// We need to make a new map. Allocate one here and zero it out.
const size_t segmap_size = sizeof(*f->cur_segmap) * f->b4_stride * 32 * f->sb128h;
- f->cur_segmap_ref = dav1d_ref_create_using_pool(&c->segmap_pool, segmap_size);
+ f->cur_segmap_ref = dav1d_ref_create_using_pool(c->segmap_pool, segmap_size);
if (!f->cur_segmap_ref) {
res = DAV1D_ERR(ENOMEM);
goto error;
--- a/src/internal.h
+++ b/src/internal.h
@@ -82,10 +82,10 @@
int n_tile_data_alloc;
int n_tile_data;
int n_tiles;
- Dav1dMemPool seq_hdr_pool;
+ Dav1dMemPool *seq_hdr_pool;
Dav1dRef *seq_hdr_ref;
Dav1dSequenceHeader *seq_hdr;
- Dav1dMemPool frame_hdr_pool;
+ Dav1dMemPool *frame_hdr_pool;
Dav1dRef *frame_hdr_ref;
Dav1dFrameHeader *frame_hdr;
@@ -109,8 +109,8 @@
} frame_thread;
// reference/entropy state
- Dav1dMemPool segmap_pool;
- Dav1dMemPool refmvs_pool;
+ Dav1dMemPool *segmap_pool;
+ Dav1dMemPool *refmvs_pool;
struct {
Dav1dThreadPicture p;
Dav1dRef *segmap;
@@ -117,7 +117,7 @@
Dav1dRef *refmvs;
unsigned refpoc[7];
} refs[8];
- Dav1dMemPool cdf_pool;
+ Dav1dMemPool *cdf_pool;
CdfThreadContext cdf[8];
Dav1dDSPContext dsp[3 /* 8, 10, 12 bits/component */];
@@ -141,8 +141,7 @@
Dav1dLogger logger;
- Dav1dMemPool picture_pool;
- int mem_pools_inited;
+ Dav1dMemPool *picture_pool;
};
struct Dav1dFrameContext {
--- a/src/lib.c
+++ b/src/lib.c
@@ -76,35 +76,6 @@
s->frame_size_limit = 0;
}
-static COLD int init_mem_pools(Dav1dContext *const c) {
- if (!pthread_mutex_init(&c->seq_hdr_pool.lock, NULL)) {
- if (!pthread_mutex_init(&c->frame_hdr_pool.lock, NULL)) {
- if (!pthread_mutex_init(&c->segmap_pool.lock, NULL)) {
- if (!pthread_mutex_init(&c->refmvs_pool.lock, NULL)) {
- if (!pthread_mutex_init(&c->cdf_pool.lock, NULL)) {
- if (c->allocator.alloc_picture_callback == dav1d_default_picture_alloc) {
- if (!pthread_mutex_init(&c->picture_pool.lock, NULL)) {
- c->allocator.cookie = &c->picture_pool;
- c->mem_pools_inited = 2;
- return 0;
- }
- } else {
- c->mem_pools_inited = 1;
- return 0;
- }
- pthread_mutex_destroy(&c->cdf_pool.lock);
- }
- pthread_mutex_destroy(&c->refmvs_pool.lock);
- }
- pthread_mutex_destroy(&c->segmap_pool.lock);
- }
- pthread_mutex_destroy(&c->frame_hdr_pool.lock);
- }
- pthread_mutex_destroy(&c->seq_hdr_pool.lock);
- }
- return -1;
-}
-
static void close_internal(Dav1dContext **const c_out, int flush);
NO_SANITIZE("cfi-icall") // CFI is broken with dlsym()
@@ -157,7 +128,18 @@
c->all_layers = s->all_layers;
c->frame_size_limit = s->frame_size_limit;
- if (init_mem_pools(c)) goto error;
+ if (dav1d_mem_pool_init(&c->seq_hdr_pool) ||
+ dav1d_mem_pool_init(&c->frame_hdr_pool) ||
+ dav1d_mem_pool_init(&c->segmap_pool) ||
+ dav1d_mem_pool_init(&c->refmvs_pool) ||
+ dav1d_mem_pool_init(&c->cdf_pool))
+ {
+ goto error;
+ }
+ if (c->allocator.alloc_picture_callback == dav1d_default_picture_alloc) {
+ if (dav1d_mem_pool_init(&c->picture_pool)) goto error;
+ c->allocator.cookie = c->picture_pool;
+ }
/* On 32-bit systems extremely large frame sizes can cause overflows in
* dav1d_decode_frame() malloc size calculations. Prevent that from occuring
@@ -602,15 +584,12 @@
dav1d_ref_dec(&c->content_light_ref);
dav1d_ref_dec(&c->itut_t35_ref);
- if (c->mem_pools_inited) {
- dav1d_mem_pool_destroy(&c->seq_hdr_pool);
- dav1d_mem_pool_destroy(&c->frame_hdr_pool);
- dav1d_mem_pool_destroy(&c->segmap_pool);
- dav1d_mem_pool_destroy(&c->refmvs_pool);
- dav1d_mem_pool_destroy(&c->cdf_pool);
- if (c->mem_pools_inited == 2)
- dav1d_mem_pool_destroy(&c->picture_pool);
- }
+ dav1d_mem_pool_end(c->seq_hdr_pool);
+ dav1d_mem_pool_end(c->frame_hdr_pool);
+ dav1d_mem_pool_end(c->segmap_pool);
+ dav1d_mem_pool_end(c->refmvs_pool);
+ dav1d_mem_pool_end(c->cdf_pool);
+ dav1d_mem_pool_end(c->picture_pool);
dav1d_freep_aligned(c_out);
}
--- a/src/mem.c
+++ b/src/mem.c
@@ -29,19 +29,33 @@
#include <stdint.h>
-#include "src/mem.h"
-#include "src/thread.h"
+#include "src/internal.h"
+static COLD void mem_pool_destroy(Dav1dMemPool *const pool) {
+ pthread_mutex_destroy(&pool->lock);
+ free(pool);
+}
+
void dav1d_mem_pool_push(Dav1dMemPool *const pool, Dav1dMemPoolBuffer *const buf) {
pthread_mutex_lock(&pool->lock);
- buf->next = pool->buf;
- pool->buf = buf;
- pthread_mutex_unlock(&pool->lock);
+ const int ref_cnt = --pool->ref_cnt;
+ if (!pool->end) {
+ buf->next = pool->buf;
+ pool->buf = buf;
+ pthread_mutex_unlock(&pool->lock);
+ assert(ref_cnt > 0);
+ } else {
+ pthread_mutex_unlock(&pool->lock);
+ dav1d_free_aligned(buf->data);
+ if (!ref_cnt) mem_pool_destroy(pool);
+ }
}
Dav1dMemPoolBuffer *dav1d_mem_pool_pop(Dav1dMemPool *const pool, const size_t size) {
+ assert(!(size & (sizeof(void*) - 1)));
pthread_mutex_lock(&pool->lock);
Dav1dMemPoolBuffer *buf = pool->buf;
+ pool->ref_cnt++;
uint8_t *data;
if (buf) {
pool->buf = buf->next;
@@ -48,6 +62,7 @@
pthread_mutex_unlock(&pool->lock);
data = buf->data;
if ((uintptr_t)buf - (uintptr_t)data != size) {
+ /* Reallocate if the size has changed */
dav1d_free_aligned(data);
goto alloc;
}
@@ -55,7 +70,13 @@
pthread_mutex_unlock(&pool->lock);
alloc:
data = dav1d_alloc_aligned(size + sizeof(Dav1dMemPoolBuffer), 64);
- if (!data) return NULL;
+ if (!data) {
+ pthread_mutex_lock(&pool->lock);
+ const int ref_cnt = --pool->ref_cnt;
+ pthread_mutex_unlock(&pool->lock);
+ if (!ref_cnt) mem_pool_destroy(pool);
+ return NULL;
+ }
buf = (Dav1dMemPoolBuffer*)(data + size);
buf->data = data;
}
@@ -63,12 +84,36 @@
return buf;
}
-COLD void dav1d_mem_pool_destroy(Dav1dMemPool *const pool) {
- pthread_mutex_destroy(&pool->lock);
- Dav1dMemPoolBuffer *buf = pool->buf;
- while (buf) {
- void *const data = buf->data;
- buf = buf->next;
- dav1d_free_aligned(data);
+COLD int dav1d_mem_pool_init(Dav1dMemPool **const ppool) {
+ Dav1dMemPool *const pool = malloc(sizeof(Dav1dMemPool));
+ if (pool) {
+ if (!pthread_mutex_init(&pool->lock, NULL)) {
+ pool->buf = NULL;
+ pool->ref_cnt = 1;
+ pool->end = 0;
+ *ppool = pool;
+ return 0;
+ }
+ free(pool);
+ }
+ *ppool = NULL;
+ return DAV1D_ERR(ENOMEM);
+}
+
+COLD void dav1d_mem_pool_end(Dav1dMemPool *const pool) {
+ if (pool) {
+ pthread_mutex_lock(&pool->lock);
+ Dav1dMemPoolBuffer *buf = pool->buf;
+ const int ref_cnt = --pool->ref_cnt;
+ pool->buf = NULL;
+ pool->end = 1;
+ pthread_mutex_unlock(&pool->lock);
+
+ while (buf) {
+ void *const data = buf->data;
+ buf = buf->next;
+ dav1d_free_aligned(data);
+ }
+ if (!ref_cnt) mem_pool_destroy(pool);
}
}
--- a/src/mem.h
+++ b/src/mem.h
@@ -46,11 +46,14 @@
typedef struct Dav1dMemPool {
pthread_mutex_t lock;
Dav1dMemPoolBuffer *buf;
+ int ref_cnt;
+ int end;
} Dav1dMemPool;
void dav1d_mem_pool_push(Dav1dMemPool *pool, Dav1dMemPoolBuffer *buf);
Dav1dMemPoolBuffer *dav1d_mem_pool_pop(Dav1dMemPool *pool, size_t size);
-void dav1d_mem_pool_destroy(Dav1dMemPool *pool);
+int dav1d_mem_pool_init(Dav1dMemPool **pool);
+void dav1d_mem_pool_end(Dav1dMemPool *pool);
/*
* Allocate align-byte aligned memory. The return value can be released
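For reference, a usage sketch of the prototypes declared above; the function
name is hypothetical, error codes are simplified, and the
dav1d_ref_create_using_pool() wrapper the decoder actually goes through is
omitted:

    #include "src/mem.h"

    static int pool_example(void) {
        Dav1dMemPool *pool;
        if (dav1d_mem_pool_init(&pool)) return -1; /* ENOMEM */

        /* Each pop takes a pool reference; size must be pointer aligned. */
        Dav1dMemPoolBuffer *const buf = dav1d_mem_pool_pop(pool, 4096);
        if (!buf) { dav1d_mem_pool_end(pool); return -1; }

        /* ... use buf->data ... */

        /* The owning context may end the pool while buffers are still out. */
        dav1d_mem_pool_end(pool);

        /* Returning the last outstanding buffer then destroys the pool. */
        dav1d_mem_pool_push(pool, buf);
        return 0;
    }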
--- a/src/obu.c
+++ b/src/obu.c
@@ -1234,7 +1234,7 @@
switch (type) {
case DAV1D_OBU_SEQ_HDR: {
- Dav1dRef *ref = dav1d_ref_create_using_pool(&c->seq_hdr_pool,
+ Dav1dRef *ref = dav1d_ref_create_using_pool(c->seq_hdr_pool,
sizeof(Dav1dSequenceHeader));
if (!ref) return DAV1D_ERR(ENOMEM);
Dav1dSequenceHeader *seq_hdr = ref->data;
@@ -1281,7 +1281,7 @@
if (global) break;
if (!c->seq_hdr) goto error;
if (!c->frame_hdr_ref) {
- c->frame_hdr_ref = dav1d_ref_create_using_pool(&c->frame_hdr_pool,
+ c->frame_hdr_ref = dav1d_ref_create_using_pool(c->frame_hdr_pool,
sizeof(Dav1dFrameHeader));
if (!c->frame_hdr_ref) return DAV1D_ERR(ENOMEM);
}
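Sketch of the user-visible behaviour this restores: buffers handed out by the
decoder remain usable after dav1d_close(). Input plumbing and error handling
are omitted and the function name is hypothetical; it assumes p was filled by
a successful dav1d_get_picture() using the default picture allocator:

    #include <stdint.h>
    #include <dav1d/dav1d.h>

    static uint8_t first_luma_byte_after_close(Dav1dContext *c, Dav1dPicture *p) {
        dav1d_close(&c);                 /* pools are now reference counted, */
        const uint8_t px =               /* so the picture data stays valid  */
            ((const uint8_t *)p->data[0])[0];
        dav1d_picture_unref(p);          /* last ref releases the pooled buffer */
        return px;
    }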