From e9c534aee18fa86a61decb9f7c17b3d28ead94dc Mon Sep 17 00:00:00 2001
From: Robin Watts
Date: Mon, 13 Feb 2012 19:16:54 +0000
Subject: Remove STORE lock in favour of smarter use of ALLOC lock.

This simplifies other locking issues (notably freetype).
---
 fitz/res_store.c | 152 ++++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 99 insertions(+), 53 deletions(-)

diff --git a/fitz/res_store.c b/fitz/res_store.c
index c29a74bc..379c1dc5 100644
--- a/fitz/res_store.c
+++ b/fitz/res_store.c
@@ -112,6 +112,9 @@ evict(fz_context *ctx, fz_item *item)
 		item->prev->next = item->next;
 	else
 		store->head = item->next;
+	/* Drop a reference to the value (freeing if required) */
+	drop = (item->val->refs > 0 && --item->val->refs == 0);
+	fz_unlock(ctx, FZ_LOCK_ALLOC);
 	/* Remove from the hash table */
 	if (fz_is_indirect(item->key))
 	{
@@ -121,15 +124,12 @@ evict(fz_context *ctx, fz_item *item)
 		refkey.gen = fz_to_gen(item->key);
 		fz_hash_remove(ctx, store->hash, &refkey);
 	}
-	/* Drop a reference to the value (freeing if required) */
-	fz_lock(ctx, FZ_LOCK_ALLOC);
-	drop = (item->val->refs > 0 && --item->val->refs == 0);
-	fz_unlock(ctx, FZ_LOCK_ALLOC);
 	if (drop)
 		item->val->free(ctx, item->val);
 	/* Always drops the key and free the item */
 	fz_drop_obj(item->key);
 	fz_free(ctx, item);
+	fz_lock(ctx, FZ_LOCK_ALLOC);
 }
 
 static int
@@ -139,7 +139,8 @@ ensure_space(fz_context *ctx, unsigned int tofree)
 	unsigned int count;
 	fz_store *store = ctx->store;
 
-	fz_lock(ctx, FZ_LOCK_ALLOC);
+	fz_assert_lock_held(ctx, FZ_LOCK_ALLOC);
+
 	/* First check that we *can* free tofree; if not, we'd rather not
 	 * cache this. */
 	count = 0;
@@ -156,8 +157,7 @@ ensure_space(fz_context *ctx, unsigned int tofree)
 	/* If we ran out of items to search, then we can never free enough */
 	if (item == NULL)
 	{
-		fz_unlock(ctx, FZ_LOCK_ALLOC);
-		return 1;
+		return 0;
 	}
 
 	/* Actually free the items */
@@ -167,19 +167,30 @@ ensure_space(fz_context *ctx, unsigned int tofree)
 		prev = item->prev;
 		if (item->val->refs == 1)
 		{
-			/* Free this item */
+			/* Free this item. Evict has to drop the lock to
+			 * manage that, which could cause prev to be removed
+			 * in the meantime. To avoid that we bump its reference
+			 * count here. This may cause another simultaneous
+			 * evict process to fail to make enough space as prev is
+			 * pinned - but that will only happen if we're near to
+			 * the limit anyway, and it will only cause something to
+			 * not be cached. */
 			count += item->size;
-			fz_unlock(ctx, FZ_LOCK_ALLOC);
-			evict(ctx, item);
+			if (prev)
+				prev->val->refs++;
+			evict(ctx, item); /* Drops then retakes lock */
+			/* So the store has 1 reference to prev, as do we, so
+			 * no other evict process can have thrown prev away in
+			 * the meantime. So we are safe to just decrement its
+			 * reference count here. */
+			if (prev) --prev->val->refs;
 			if (count >= tofree)
-				return 0;
-			fz_lock(ctx, FZ_LOCK_ALLOC);
+				return count;
 		}
 	}
 
-	fz_unlock(ctx, FZ_LOCK_ALLOC);
-	return 0;
+	return count;
 }
 
 void
@@ -189,12 +200,23 @@ fz_store_item(fz_context *ctx, fz_obj *key, void *val_, unsigned int itemsize)
 	unsigned int size;
 	fz_storable *val = (fz_storable *)val_;
 	fz_store *store = ctx->store;
+	int indirect;
+	struct refkey refkey;
 
 	if (!store)
 		return;
 
 	fz_var(item);
 
+	/* Form the key before we take the lock */
+	indirect = fz_is_indirect(key);
+	if (indirect)
+	{
+		refkey.free = val->free;
+		refkey.num = fz_to_num(key);
+		refkey.gen = fz_to_gen(key);
+	}
+
 	/* If we fail for any reason, we swallow the exception and continue.
 	 * All that the above program will see is that we failed to store
 	 * the item. */
@@ -207,13 +229,21 @@ fz_store_item(fz_context *ctx, fz_obj *key, void *val_, unsigned int itemsize)
 		return;
 	}
 
-	fz_lock(ctx, FZ_LOCK_STORE);
-	size = store->size + itemsize;
-	if (store->max != FZ_STORE_UNLIMITED && size > store->max && ensure_space(ctx, size - store->max))
+	fz_lock(ctx, FZ_LOCK_ALLOC);
+	if (store->max != FZ_STORE_UNLIMITED)
 	{
-		fz_unlock(ctx, FZ_LOCK_STORE);
-		fz_free(ctx, item);
-		return;
+		/* Recompute size each pass; ensure_space may shrink store->size */
+		while ((size = store->size + itemsize) > store->max)
+		{
+			/* ensure_space may drop, then retake the lock */
+			if (ensure_space(ctx, size - store->max) == 0)
+			{
+				/* Failed to free any space */
+				fz_unlock(ctx, FZ_LOCK_ALLOC);
+				fz_free(ctx, item);
+				return;
+			}
+		}
 	}
 
 	store->size += itemsize;
@@ -223,28 +253,26 @@ fz_store_item(fz_context *ctx, fz_obj *key, void *val_, unsigned int itemsize)
 	item->next = NULL;
 
 	/* If we can index it fast, put it into the hash table */
-	if (fz_is_indirect(key))
+	if (indirect)
 	{
-		struct refkey refkey;
-		refkey.free = val->free;
-		refkey.num = fz_to_num(key);
-		refkey.gen = fz_to_gen(key);
+		fz_unlock(ctx, FZ_LOCK_ALLOC);
 		fz_try(ctx)
 		{
 			fz_hash_insert(ctx, store->hash, &refkey, item);
 		}
 		fz_catch(ctx)
 		{
-			fz_unlock(ctx, FZ_LOCK_STORE);
 			fz_free(ctx, item);
+			fz_lock(ctx, FZ_LOCK_ALLOC);
+			store->size -= itemsize;
+			fz_unlock(ctx, FZ_LOCK_ALLOC);
 			return;
 		}
+		fz_lock(ctx, FZ_LOCK_ALLOC);
 	}
 
 	/* Now we can never fail, bump the ref */
-	fz_lock(ctx, FZ_LOCK_ALLOC);
 	if (val->refs > 0)
 		val->refs++;
-	fz_unlock(ctx, FZ_LOCK_ALLOC);
 
 	/* Regardless of whether it's indexed, it goes into the linked list */
 	item->next = store->head;
 	if (item->next)
@@ -253,7 +281,7 @@ fz_store_item(fz_context *ctx, fz_obj *key, void *val_, unsigned int itemsize)
 		store->tail = item;
 	store->head = item;
 	item->prev = NULL;
-	fz_unlock(ctx, FZ_LOCK_STORE);
+	fz_unlock(ctx, FZ_LOCK_ALLOC);
 }
 
 void *
@@ -262,6 +290,7 @@ fz_find_item(fz_context *ctx, fz_store_free_fn *free, fz_obj *key)
 	struct refkey refkey;
 	fz_item *item;
 	fz_store *store = ctx->store;
+	int indirect;
 
 	if (!store)
 		return NULL;
@@ -269,13 +298,19 @@ fz_find_item(fz_context *ctx, fz_store_free_fn *free, fz_obj *key)
 	if (!key)
 		return NULL;
 
-	fz_lock(ctx, FZ_LOCK_STORE);
-	if (fz_is_indirect(key))
+	/* Form the key before we take the lock */
+	indirect = fz_is_indirect(key);
+	if (indirect)
 	{
-		/* We can find objects keyed on indirected objects quickly */
 		refkey.free = free;
 		refkey.num = fz_to_num(key);
 		refkey.gen = fz_to_gen(key);
+	}
+
+	fz_lock(ctx, FZ_LOCK_ALLOC);
+	if (indirect)
+	{
+		/* We can find objects keyed on indirected objects quickly */
 		item = fz_hash_find(ctx, store->hash, &refkey);
 	}
 	else
@@ -308,14 +343,12 @@ fz_find_item(fz_context *ctx, fz_store_free_fn *free, fz_obj *key)
 		item->prev = NULL;
 		store->head = item;
 		/* And bump the refcount before returning */
-		fz_lock(ctx, FZ_LOCK_ALLOC);
 		if (item->val->refs > 0)
 			item->val->refs++;
 		fz_unlock(ctx, FZ_LOCK_ALLOC);
-		fz_unlock(ctx, FZ_LOCK_STORE);
 		return (void *)item->val;
 	}
-	fz_unlock(ctx, FZ_LOCK_STORE);
+	fz_unlock(ctx, FZ_LOCK_ALLOC);
 
 	return NULL;
 }
@@ -326,15 +359,21 @@ fz_remove_item(fz_context *ctx, fz_store_free_fn *free, fz_obj *key)
 	struct refkey refkey;
 	fz_item *item;
 	fz_store *store = ctx->store;
-	int drop;
+	int drop, indirect;
 
-	fz_lock(ctx, FZ_LOCK_STORE);
-	if (fz_is_indirect(key))
+	/* Form the key before we take the lock */
+	indirect = fz_is_indirect(key);
+	if (indirect)
 	{
-		/* We can find objects keyed on indirect objects quickly */
 		refkey.free = free;
 		refkey.num = fz_to_num(key);
 		refkey.gen = fz_to_gen(key);
+	}
+
+	fz_lock(ctx, FZ_LOCK_ALLOC);
+	if (indirect)
+	{
+		/* We can find objects keyed on indirect objects quickly */
 		item = fz_hash_find(ctx, store->hash, &refkey);
 		if (item)
 			fz_hash_remove(ctx, store->hash, &refkey);
@@ -356,7 +395,6 @@ fz_remove_item(fz_context *ctx, fz_store_free_fn *free, fz_obj *key)
 			item->prev->next = item->next;
 		else
 			store->head = item->next;
-		fz_lock(ctx, FZ_LOCK_ALLOC);
 		drop = (item->val->refs > 0 && --item->val->refs == 0);
 		fz_unlock(ctx, FZ_LOCK_ALLOC);
 		if (drop)
@@ -364,26 +402,25 @@ fz_remove_item(fz_context *ctx, fz_store_free_fn *free, fz_obj *key)
 		fz_drop_obj(item->key);
 		fz_free(ctx, item);
 	}
-	fz_unlock(ctx, FZ_LOCK_STORE);
+	else
+		fz_unlock(ctx, FZ_LOCK_ALLOC);
 }
 
 void
 fz_empty_store(fz_context *ctx)
 {
-	fz_item *item, *next;
 	fz_store *store = ctx->store;
 
 	if (store == NULL)
 		return;
 
-	fz_lock(ctx, FZ_LOCK_STORE);
+	fz_lock(ctx, FZ_LOCK_ALLOC);
 	/* Run through all the items in the store */
-	for (item = store->head; item; item = next)
+	while (store->head)
 	{
-		next = item->next;
-		evict(ctx, item);
+		evict(ctx, store->head); /* Drops then retakes lock */
 	}
-	fz_unlock(ctx, FZ_LOCK_STORE);
+	fz_unlock(ctx, FZ_LOCK_ALLOC);
 }
 
 fz_store *
@@ -418,15 +455,16 @@ fz_free_store_context(fz_context *ctx)
 void
 fz_debug_store(fz_context *ctx)
 {
-	fz_item *item;
+	fz_item *item, *next;
 	fz_store *store = ctx->store;
 
 	printf("-- resource store contents --\n");
 
-	fz_lock(ctx, FZ_LOCK_STORE);
-	for (item = store->head; item; item = item->next)
+	fz_lock(ctx, FZ_LOCK_ALLOC);
+	for (item = store->head; item; item = next)
 	{
-		fz_lock(ctx, FZ_LOCK_ALLOC);
+		next = item->next;
+		if (next) next->val->refs++;
 		printf("store[*][refs=%d][size=%d] ", item->val->refs, item->size);
 		fz_unlock(ctx, FZ_LOCK_ALLOC);
 		if (fz_is_indirect(item->key))
@@ -435,10 +473,14 @@ fz_debug_store(fz_context *ctx)
 		}
 		else
 			fz_debug_obj(item->key);
 		printf(" = %p\n", item->val);
+		fz_lock(ctx, FZ_LOCK_ALLOC);
+		if (next) next->val->refs--;
 	}
-	fz_unlock(ctx, FZ_LOCK_STORE);
+	fz_unlock(ctx, FZ_LOCK_ALLOC);
 }
 
+/* This is now an n^2 algorithm - not ideal, but it'll only be bad if we are
+ * actually managing to scavenge lots of blocks back. */
 static int
 scavenge(fz_context *ctx, unsigned int tofree)
 {
@@ -454,10 +496,14 @@ scavenge(fz_context *ctx, unsigned int tofree)
 		{
 			/* Free this item */
 			count += item->size;
-			evict(ctx, item);
+			evict(ctx, item); /* Drops then retakes lock */
 			if (count >= tofree)
 				break;
+
+			/* Have to restart search again, as prev may no longer
+			 * be valid due to release of lock in evict. */
+			prev = store->tail;
 		}
 	}
 	/* Success is managing to evict any blocks */
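
The locking discipline the patch relies on - unlink under the lock, drop the
lock around the destructor, retake it, and pin the previous list node across
the gap - can be shown outside of fitz. Below is a minimal standalone sketch
under stated assumptions: it uses pthreads and invented names (node,
evict_node, make_space) rather than the MuPDF API, with a plain refcount in
place of fz_storable, so it illustrates the pattern only, not this code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy LRU node; refs and the list links are protected by `lock`. */
typedef struct node
{
	int refs;
	size_t size;
	struct node *prev, *next;
} node;

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static node *head, *tail;

/* Unlink and destroy one node. Called with the lock held; drops it
 * around the destructor (which may be slow, or re-enter the allocator)
 * and retakes it before returning, mirroring evict() in the patch. */
static void evict_node(node *n)
{
	if (n->next)
		n->next->prev = n->prev;
	else
		tail = n->prev;
	if (n->prev)
		n->prev->next = n->next;
	else
		head = n->next;
	pthread_mutex_unlock(&lock);
	free(n); /* destructor runs without the lock */
	pthread_mutex_lock(&lock);
}

/* Walk from the tail, evicting unreferenced nodes until `tofree`
 * bytes are reclaimed. Returns the number of bytes actually freed. */
static size_t make_space(size_t tofree)
{
	size_t count = 0;
	node *n, *prev;

	pthread_mutex_lock(&lock);
	for (n = tail; n && count < tofree; n = prev)
	{
		prev = n->prev;
		if (n->refs != 0)
			continue; /* in use elsewhere; leave it alone */
		count += n->size;
		if (prev)
			prev->refs++; /* pin prev across the lock gap */
		evict_node(n); /* drops, then retakes, the lock */
		if (prev)
			prev->refs--; /* our pin kept prev alive */
	}
	pthread_mutex_unlock(&lock);
	return count;
}

int main(void)
{
	for (int i = 0; i < 4; i++)
	{
		node *n = calloc(1, sizeof(*n));
		if (!n)
			return 1;
		n->size = 100;
		n->next = head;
		if (head)
			head->prev = n;
		else
			tail = n;
		head = n;
	}
	printf("reclaimed %zu bytes\n", make_space(250));
	return 0;
}

Without the pin on prev, a concurrent make_space could free prev while the
lock is dropped inside evict_node, leaving the first walker holding a dangling
pointer - exactly the hazard the comments in ensure_space describe.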