author    Robin Watts <robin.watts@artifex.com>  2013-04-22 15:14:38 +0100
committer Robin Watts <robin.watts@artifex.com>  2013-04-22 15:22:29 +0100
commit    2ea11eace69fe5a2d88f374a253a3deb05804f2d (patch)
tree      e6e1cfff1abdf62f3da53aa053897f7a26c535d3 /fitz
parent    784fc400b3415f5120e3eef59a43e519c6a06fe0 (diff)
download  mupdf-2ea11eace69fe5a2d88f374a253a3deb05804f2d.tar.xz
Fix various multi-threading problems with the store.
When resizing the hash table, we have a special case to cope with someone else resizing the table before we get a chance to. In this rare situation we were unlocking (regardless of whether we should have been) and failing to relock. Fixed here.

When storing an item, I recently changed the code to put the new item into the hash before ensuring that we had enough space. The motivation was to avoid evicting items to make room, only to find that we didn't need the room because there was a duplicate entry anyway. In so doing, this opened up a potential race condition: another thread could 'find' the item in the hash before it had been filled out. To solve this, we move the filling out of the item's fields earlier in the function.

The same block of code causes another problem: as soon as a new item is put into the hash, it can be found elsewhere, but any attempt to manipulate its linked list will fail, because it is not in the list yet. We therefore create all new items with their prev/next pointers pointing to themselves, enabling us to spot this special case and avoid corrupting the linked list.
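To illustrate that last point: with prev/next initialised to point at the item itself, "in the hash but not yet in the list" becomes a cheap test that needs no extra flag. A minimal standalone sketch of the idea (hypothetical names, not the mupdf API):

#include <stdio.h>
#include <stddef.h>

/* Hypothetical names: a bare doubly-linked list, not the mupdf API. */
typedef struct node
{
	struct node *prev, *next;
} node;

typedef struct
{
	node *head, *tail;
} list;

/* A freshly created node points at itself: "published but not yet
 * listed". Real list membership always gives next != self (NULL at
 * the tail, another node otherwise), so the test cannot misfire. */
static void node_init(node *n)
{
	n->prev = n->next = n;
}

static int node_is_listed(const node *n)
{
	return n->next != n;
}

static void list_push_head(list *l, node *n)
{
	n->next = l->head;
	n->prev = NULL;
	if (n->next)
		n->next->prev = n;
	else
		l->tail = n;
	l->head = n;
}

int main(void)
{
	list l = { NULL, NULL };
	node a;
	node_init(&a);
	printf("listed before insert: %d\n", node_is_listed(&a)); /* 0 */
	list_push_head(&l, &a);
	printf("listed after insert: %d\n", node_is_listed(&a)); /* 1 */
	return 0;
}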
Diffstat (limited to 'fitz')
-rw-r--r--  fitz/base_hash.c |  7
-rw-r--r--  fitz/res_store.c | 83
2 files changed, 57 insertions(+), 33 deletions(-)
diff --git a/fitz/base_hash.c b/fitz/base_hash.c
index 48bab138..037988cb 100644
--- a/fitz/base_hash.c
+++ b/fitz/base_hash.c
@@ -141,6 +141,8 @@ do_hash_insert(fz_context *ctx, fz_hash_table *table, void *key, void *val, unsi
 	}
 }
+/* Entered with the lock taken, held throughout and at exit, UNLESS the lock
+ * is the alloc lock in which case it may be momentarily dropped. */
 static void
 fz_resize_hash(fz_context *ctx, fz_hash_table *table, int newsize)
 {
@@ -166,8 +168,11 @@ fz_resize_hash(fz_context *ctx, fz_hash_table *table, int newsize)
 		if (table->size >= newsize)
 		{
 			/* Someone else fixed it before we could lock! */
-			fz_unlock(ctx, table->lock);
+			if (table->lock == FZ_LOCK_ALLOC)
+				fz_unlock(ctx, table->lock);
 			fz_free(ctx, newents);
+			if (table->lock == FZ_LOCK_ALLOC)
+				fz_lock(ctx, table->lock);
 			return;
 		}
 	}
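The conditional here matters because fz_free may itself need the allocator lock, so the lock must be dropped around the free precisely when the table's lock is that same alloc lock, and for no other lock. A minimal pthreads sketch of the pattern (hypothetical names, not the mupdf locking API):

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical single allocator lock, standing in for FZ_LOCK_ALLOC. */
static pthread_mutex_t alloc_lock = PTHREAD_MUTEX_INITIALIZER;

/* The allocator wrapper serializes on alloc_lock... */
static void locked_free(void *p)
{
	pthread_mutex_lock(&alloc_lock);
	free(p);
	pthread_mutex_unlock(&alloc_lock);
}

/* ...so a caller must drop the lock it holds around the free, but
 * only when that lock IS the allocator lock; dropping an unrelated
 * lock here would break the caller's own invariants. */
static void free_while_holding(pthread_mutex_t *held, void *p)
{
	if (held == &alloc_lock)
		pthread_mutex_unlock(held);
	locked_free(p);
	if (held == &alloc_lock)
		pthread_mutex_lock(held);
}

int main(void)
{
	void *p = malloc(16);
	pthread_mutex_lock(&alloc_lock);
	free_while_holding(&alloc_lock, p); /* would deadlock without the drop */
	pthread_mutex_unlock(&alloc_lock);
	return 0;
}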
diff --git a/fitz/res_store.c b/fitz/res_store.c
index 42d90a0c..93a7c6e3 100644
--- a/fitz/res_store.c
+++ b/fitz/res_store.c
@@ -231,6 +231,18 @@ fz_store_item(fz_context *ctx, void *key, void *val_, unsigned int itemsize, fz_
 	type->keep_key(ctx, key);
 	fz_lock(ctx, FZ_LOCK_ALLOC);
+
+	/* Fill out the item. To start with, we always set item->next == item
+	 * and item->prev == item. This is so that we can spot items that have
+	 * been put into the hash table without having made it into the linked
+	 * list yet. */
+	item->key = key;
+	item->val = val;
+	item->size = itemsize;
+	item->next = item;
+	item->prev = item;
+	item->type = type;
+
 	/* If we can index it fast, put it into the hash table. This serves
 	 * to check whether we have one there already. */
 	if (use_hash)
@@ -289,12 +301,6 @@ fz_store_item(fz_context *ctx, void *key, void *val_, unsigned int itemsize, fz_
 	}
 	store->size += itemsize;
-	item->key = key;
-	item->val = val;
-	item->size = itemsize;
-	item->next = NULL;
-	item->type = type;
-
 	/* Regardless of whether it's indexed, it goes into the linked list */
 	item->next = store->head;
 	if (item->next)
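Taken together, these two hunks move the initialisation of the item to before its insertion into the hash. The ordering matters because the alloc lock can be dropped and retaken during the insert (for instance while the hash resizes, as in the base_hash.c hunk above), so only a fully filled-out item is safe to publish. A minimal sketch of that publish-last discipline (hypothetical names, with a one-slot stand-in for the shared hash):

#include <stddef.h>

/* Hypothetical names: a cut-down store item. */
typedef struct item
{
	void *key, *val;
	unsigned int size;
	struct item *prev, *next;
} item;

/* Stand-in for the shared hash table. In the real store the insert
 * can drop and retake the alloc lock (e.g. to resize), so the item
 * is findable by other threads as soon as this pointer is set. */
static item *published;

static void store_item(item *it, void *key, void *val, unsigned int size)
{
	/* Fill out every field first... */
	it->key = key;
	it->val = val;
	it->size = size;
	it->prev = it->next = it; /* sentinel: not in the LRU list yet */
	/* ...and only then make the item reachable. */
	published = it;
}

int main(void)
{
	static int v = 42;
	item it;
	store_item(&it, "key", &v, sizeof v);
	return published == &it ? 0 : 1;
}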
@@ -345,24 +351,31 @@ fz_find_item(fz_context *ctx, fz_store_free_fn *free, void *key, fz_store_type *
 	}
 	if (item)
 	{
-		/* LRU: Move the block to the front */
-		/* Unlink from present position */
-		if (item->next)
-			item->next->prev = item->prev;
-		else
-			store->tail = item->prev;
-		if (item->prev)
-			item->prev->next = item->next;
-		else
-			store->head = item->next;
-		/* Insert at head */
-		item->next = store->head;
-		if (item->next)
-			item->next->prev = item;
-		else
-			store->tail = item;
-		item->prev = NULL;
-		store->head = item;
+		/* Momentarily things can be in the hash table (and hence can
+		 * be found) without being in the list. Don't attempt to LRU
+		 * these. We indicate such items by setting
+		 * item->next == item. */
+		if (item->next != item)
+		{
+			/* LRU: Move the block to the front */
+			/* Unlink from present position */
+			if (item->next)
+				item->next->prev = item->prev;
+			else
+				store->tail = item->prev;
+			if (item->prev)
+				item->prev->next = item->next;
+			else
+				store->head = item->next;
+			/* Insert at head */
+			item->next = store->head;
+			if (item->next)
+				item->next->prev = item;
+			else
+				store->tail = item;
+			item->prev = NULL;
+			store->head = item;
+		}
 		/* And bump the refcount before returning */
 		if (item->val->refs > 0)
 			item->val->refs++;
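Extracted from the diff context, the guarded move-to-front reads as follows; a sketch over a bare doubly-linked list, where self-pointing prev/next marks an item that is findable in the hash but not yet listed (hypothetical names, not the mupdf API):

#include <stddef.h>

typedef struct node
{
	struct node *prev, *next;
} node;

typedef struct
{
	node *head, *tail;
} list;

/* Move n to the head of l, mirroring the fz_find_item hunk: a node
 * whose next points at itself is in the hash but not yet in the
 * list, and must be left alone. Caller holds the store lock. */
static void lru_bump(list *l, node *n)
{
	if (n->next == n)
		return;
	/* Unlink from present position */
	if (n->next)
		n->next->prev = n->prev;
	else
		l->tail = n->prev;
	if (n->prev)
		n->prev->next = n->next;
	else
		l->head = n->next;
	/* Insert at head */
	n->next = l->head;
	if (n->next)
		n->next->prev = n;
	else
		l->tail = n;
	n->prev = NULL;
	l->head = n;
}

int main(void)
{
	list l = { NULL, NULL };
	node a = { NULL, NULL };
	node b;
	l.head = l.tail = &a; /* a is the sole listed node */
	b.prev = b.next = &b; /* b is sentinel-initialised, unlisted */
	lru_bump(&l, &b); /* no-op: b is not in the list */
	lru_bump(&l, &a); /* a stays at head; list stays consistent */
	return (l.head == &a && l.tail == &a) ? 0 : 1;
}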
@@ -406,14 +419,20 @@ fz_remove_item(fz_context *ctx, fz_store_free_fn *free, void *key, fz_store_type
 	}
 	if (item)
 	{
-		if (item->next)
-			item->next->prev = item->prev;
-		else
-			store->tail = item->prev;
-		if (item->prev)
-			item->prev->next = item->next;
-		else
-			store->head = item->next;
+		/* Momentarily things can be in the hash table without being
+		 * in the list. Don't attempt to unlink these. We indicate
+		 * such items by setting item->next == item. */
+		if (item->next != item)
+		{
+			if (item->next)
+				item->next->prev = item->prev;
+			else
+				store->tail = item->prev;
+			if (item->prev)
+				item->prev->next = item->next;
+			else
+				store->head = item->next;
+		}
 		drop = (item->val->refs > 0 && --item->val->refs == 0);
 		fz_unlock(ctx, FZ_LOCK_ALLOC);
 		if (drop)
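One detail at the tail of this hunk is worth spelling out: the refcount decrement and the drop decision happen under FZ_LOCK_ALLOC, but the actual destruction runs only after fz_unlock, since freeing may itself need that lock. A minimal pthreads sketch of the decide-locked, destroy-unlocked pattern (hypothetical names, not the mupdf API):

#include <pthread.h>

/* Hypothetical refcounted object and store lock. */
typedef struct
{
	int refs;
} object;

static pthread_mutex_t store_lock = PTHREAD_MUTEX_INITIALIZER;

static void destroy(object *o)
{
	/* May free memory and hence take other locks; must never be
	 * called with store_lock held. */
	(void)o;
}

static void release(object *o)
{
	int drop;
	pthread_mutex_lock(&store_lock);
	/* Decide under the lock... */
	drop = (o->refs > 0 && --o->refs == 0);
	pthread_mutex_unlock(&store_lock);
	/* ...destroy outside it. */
	if (drop)
		destroy(o);
}

int main(void)
{
	object o = { 1 };
	release(&o); /* refs hits 0; destroy runs with no locks held */
	return o.refs;
}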