X-Git-Url: http://git.vrable.net/?a=blobdiff_plain;f=bluesky%2Fcache.c;h=8d2cda1a59e226ef7939e84ff44ef1aa6f05082a;hb=d7d143ef0ccf0f49a8fd23a3c76bc68c0b04e07e;hp=41348e22c0b369981899b1dd96ca1bdfee736c16;hpb=3175b881ccd14a39c4a206147cc99ecc80de3412;p=bluesky.git

diff --git a/bluesky/cache.c b/bluesky/cache.c
index 41348e2..8d2cda1 100644
--- a/bluesky/cache.c
+++ b/bluesky/cache.c
@@ -14,6 +14,7 @@
 #include "bluesky-private.h"
 
 #define WRITEBACK_DELAY (5 * 1000000)
+#define CACHE_CLEAN_DELAY (30 * 1000000)
 
 /* Filesystem caching and cache coherency. */
 
@@ -36,7 +37,14 @@ static void writeback_complete(gpointer a, gpointer i)
     g_mutex_unlock(inode->lock);
 }
 
-static void flushd_inode(gpointer key, gpointer value, gpointer user_data)
+/* Drop cached data for a given inode, if it is clean.  inode must be locked. */
+static void drop_caches(BlueSkyInode *inode)
+{
+    if (inode->type == BLUESKY_REGULAR)
+        bluesky_file_drop_cached(inode);
+}
+
+static void flushd_inode(gpointer value, gpointer user_data)
 {
     BlueSkyFS *fs = (BlueSkyFS *)user_data;
 
@@ -45,7 +53,29 @@ static void flushd_inode(gpointer key, gpointer value, gpointer user_data)
     g_mutex_lock(inode->lock);
 
     if (inode->change_count == inode->change_commit) {
+        uint64_t delay = bluesky_get_current_time() - inode->access_time;
+        if (delay >= CACHE_CLEAN_DELAY) {
+            drop_caches(inode);
+
+            /* If the only references are the one we hold and the one in the
+             * filesystem inum->inode hash table...  First check the refcount
+             * without the lock for speed, but if the check looks good verify
+             * it after taking the filesystem lock. */
+            if (inode->refcount == 2) {
+                g_mutex_lock(fs->lock);
+                if (inode->refcount == 2) {
+                    g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
+                          "Trying to drop inode %"PRIu64" from cache",
+                          inode->inum);
+                    if (g_hash_table_remove(fs->inodes, &inode->inum))
+                        bluesky_inode_unref(inode);
+                }
+                g_mutex_unlock(fs->lock);
+            }
+        }
+
         g_mutex_unlock(inode->lock);
+        bluesky_inode_unref(inode);
         return;
     }
 
@@ -53,6 +83,7 @@ static void flushd_inode(gpointer key, gpointer value, gpointer user_data)
         /* Waiting for an earlier writeback to finish, so don't start a new
          * writeback yet. */
         g_mutex_unlock(inode->lock);
+        bluesky_inode_unref(inode);
         return;
     }
 
@@ -60,6 +91,7 @@ static void flushd_inode(gpointer key, gpointer value, gpointer user_data)
     if (elapsed < WRITEBACK_DELAY) {
         /* Give a bit more time before starting writeback. */
         g_mutex_unlock(inode->lock);
+        bluesky_inode_unref(inode);
         return;
     }
 
@@ -81,15 +113,35 @@ static void flushd_inode(gpointer key, gpointer value, gpointer user_data)
     bluesky_store_async_unref(barrier);
 
     g_mutex_unlock(inode->lock);
+    bluesky_inode_unref(inode);
 }
 
 /* Scan through the cache for dirty data and start flushing it to stable
  * storage.  This does not guarantee that data is committed when it returns.
  * Instead, this can be called occasionally to ensure that dirty data is
- * gradually flushed. */
+ * gradually flushed.
+ *
+ * We do not want to hold the filesystem lock while flushing individual inodes,
+ * as that could lead to deadlock.  So first scan through the inode table to get
+ * a reference to all inodes, then process that queue of inodes after dropping
+ * the filesystem lock. */
+static void gather_inodes(gpointer key, gpointer value, gpointer user_data)
+{
+    GSList **list = (GSList **)user_data;
+    *list = g_slist_prepend(*list, value);
+    bluesky_inode_ref((BlueSkyInode *)value);
+}
+
 void bluesky_flushd_invoke(BlueSkyFS *fs)
 {
+    GSList *list = NULL;
+
     g_mutex_lock(fs->lock);
-    g_hash_table_foreach(fs->inodes, flushd_inode, fs);
+    g_hash_table_foreach(fs->inodes, gather_inodes, &list);
     g_mutex_unlock(fs->lock);
+
+    list = g_slist_reverse(list);
+    g_slist_foreach(list, flushd_inode, fs);
+
+    g_slist_free(list);
 }
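
The comment above notes that bluesky_flushd_invoke() only needs to be called occasionally to gradually flush dirty data.  A minimal sketch of one way a caller might drive it periodically is given below; the flushd_thread name, the one-second polling interval, and the use of g_thread_create() are illustrative assumptions, not part of this patch.

#include <glib.h>
#include "bluesky-private.h"

/* Hypothetical background thread (not in the patch): periodically scans the
 * inode cache by invoking the flush daemon entry point defined above. */
static gpointer flushd_thread(gpointer data)
{
    BlueSkyFS *fs = (BlueSkyFS *)data;

    while (TRUE) {
        bluesky_flushd_invoke(fs);
        g_usleep(1000000);      /* Wait roughly one second between scans. */
    }

    return NULL;
}

/* Possible use at mount time, assuming GLib threading has been initialized:
 *     g_thread_create(flushd_thread, fs, FALSE, NULL);
 */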