X-Git-Url: http://git.vrable.net/?a=blobdiff_plain;f=bluesky%2Fcache.c;h=00d0fec1d13ebd5a64eab26ca3dfd88cbb6d2341;hb=0e76979181d1e7b7bbaf24e7b196b58cea5d7879;hp=f953a6455a0736ee6cac142038a270404cbb25f7;hpb=0ef7fc934daee6ded318e3d52023521b758b295e;p=bluesky.git

diff --git a/bluesky/cache.c b/bluesky/cache.c
index f953a64..00d0fec 100644
--- a/bluesky/cache.c
+++ b/bluesky/cache.c
@@ -13,35 +13,304 @@
 #include "bluesky-private.h"
 
+#define WRITEBACK_DELAY (20 * 1000000)
+
 /* Filesystem caching and cache coherency. */
 
-static void flushd_inode(gpointer key, gpointer value, gpointer user_data)
+static void writeback_complete(gpointer a, gpointer i)
+{
+    BlueSkyInode *inode = (BlueSkyInode *)i;
+
+    if (bluesky_verbose) {
+        g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
+              "Writeback for inode %"PRIu64" complete", inode->inum);
+    }
+
+    g_mutex_lock(inode->lock);
+
+    inode->change_commit = inode->change_pending;
+    inode->change_pending = 0;
+    if (inode->change_count == inode->change_commit) {
+        /* If inode is no longer dirty... */
+        inode->change_time = 0;
+        g_mutex_lock(inode->fs->lock);
+        bluesky_list_unlink(&inode->fs->dirty_list, inode->dirty_list);
+        inode->dirty_list = NULL;
+        g_mutex_unlock(inode->fs->lock);
+    }
+
+    g_mutex_unlock(inode->lock);
+}
+
+#if 0
+static void flushd_inode(gpointer value, gpointer user_data)
 {
     BlueSkyFS *fs = (BlueSkyFS *)user_data;
 
     BlueSkyInode *inode = (BlueSkyInode *)value;
 
-    if (inode->change_count == inode->change_commit)
+    g_mutex_lock(inode->lock);
+
+    if (inode->change_count == inode->change_commit) {
+        uint64_t delay = bluesky_get_current_time() - inode->access_time;
+        if (delay >= CACHE_CLEAN_DELAY) {
+            drop_caches(inode);
+
+            /* If the only references are the one we hold and the one in the
+             * filesystem inum->inode hash table...  First check the refcount
+             * without the lock for speed, but if the check looks good verify
+             * it after taking the filesystem lock. */
+            if (inode->refcount == 2) {
+                g_mutex_lock(fs->lock);
+                if (inode->refcount == 2) {
+                    g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
+                          "Trying to drop inode %"PRIu64" from cache",
+                          inode->inum);
+                    if (g_hash_table_remove(fs->inodes, &inode->inum))
+                        bluesky_inode_unref(inode);
+                }
+                bluesky_list_unlink(&inode->fs->accessed_list,
+                                    inode->accessed_list);
+                inode->accessed_list = NULL;
+                bluesky_list_unlink(&inode->fs->dirty_list,
+                                    inode->dirty_list);
+                inode->dirty_list = NULL;
+                g_mutex_unlock(fs->lock);
+            }
+        }
+
+        g_mutex_unlock(inode->lock);
+        bluesky_inode_unref(inode);
         return;
+    }
+
+    if (inode->change_pending) {
+        /* Waiting for an earlier writeback to finish, so don't start a new
+         * writeback yet. */
+        g_mutex_unlock(inode->lock);
+        bluesky_inode_unref(inode);
+        return;
+    }
+
+    uint64_t elapsed = bluesky_get_current_time() - inode->change_time;
+    if (elapsed < WRITEBACK_DELAY) {
+        /* Give a bit more time before starting writeback. */
+        g_mutex_unlock(inode->lock);
+        bluesky_inode_unref(inode);
+        return;
+    }
+
+    inode->change_pending = inode->change_count;
 
     g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
           "Starting flush of inode %"PRIu64, inode->inum);
 
-    if (inode->type == BLUESKY_REGULAR)
-        bluesky_file_flush(inode);
-    bluesky_inode_flush(fs, inode);
+    /* Create a store barrier.  All operations part of the writeback will be
+     * added to this barrier, so when the barrier completes we know that the
+     * writeback is finished. */
+    BlueSkyStoreAsync *barrier = bluesky_store_async_new(fs->store);
+    barrier->op = STORE_OP_BARRIER;
+
+    bluesky_inode_start_sync(inode, barrier);
+
+    bluesky_store_async_add_notifier(barrier, writeback_complete, inode);
+    bluesky_store_async_submit(barrier);
+    bluesky_store_async_unref(barrier);
+
+    g_mutex_unlock(inode->lock);
+    bluesky_inode_unref(inode);
 }
+#endif
 
-/* Scan through the cache for dirty data and start flushing it to stable
- * storage.  This does not guarantee that data is committed when it returns.
- * Instead, this can be called occasionally to ensure that dirty data is
- * gradually flushed. */
-void bluesky_flushd_invoke(BlueSkyFS *fs)
+static void flushd_dirty_inode(BlueSkyInode *inode)
 {
-    g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
-          "Writeout process invoked");
+    BlueSkyFS *fs = inode->fs;
 
     g_mutex_lock(fs->lock);
-    g_hash_table_foreach(fs->inodes, flushd_inode, fs);
+    bluesky_list_unlink(&fs->dirty_list, inode->dirty_list);
+    inode->dirty_list = NULL;
     g_mutex_unlock(fs->lock);
+
+    /* Inode is clean; nothing to do. */
+    if (inode->change_count == inode->change_commit)
+        return;
+
+    /* Inode writeback is in progress; put back on the dirty list. */
+    if (inode->change_pending) {
+        /* Waiting for an earlier writeback to finish, so don't start a new
+         * writeback yet. */
+        g_mutex_lock(fs->lock);
+        inode->change_time = bluesky_get_current_time();
+        bluesky_list_unlink(&fs->dirty_list, inode->dirty_list);
+        inode->dirty_list = bluesky_list_prepend(&fs->dirty_list, inode);
+        g_mutex_unlock(fs->lock);
+        return;
+    }
+
+    if (bluesky_verbose) {
+        g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
+            "Starting flush of inode %"PRIu64, inode->inum);
+    }
+    inode->change_pending = inode->change_count;
+
+    /* Create a store barrier.  All operations part of the writeback will be
+     * added to this barrier, so when the barrier completes we know that the
+     * writeback is finished. */
+    BlueSkyStoreAsync *barrier = bluesky_store_async_new(fs->store);
+    barrier->op = STORE_OP_BARRIER;
+
+    bluesky_inode_start_sync(inode, barrier);
+
+    bluesky_store_async_add_notifier(barrier, writeback_complete, inode);
+    bluesky_store_async_submit(barrier);
+    bluesky_store_async_unref(barrier);
+}
+
+/* Try to flush dirty data to disk, either due to memory pressure or due to
+ * timeouts. */
+static void flushd_dirty(BlueSkyFS *fs)
+{
+    int64_t start_time = bluesky_get_current_time();
+    g_mutex_lock(fs->lock);
+
+    while (1) {
+        BlueSkyInode *inode;
+        if (fs->dirty_list.prev == NULL)
+            break;
+        inode = fs->dirty_list.prev->data;
+
+        if (bluesky_verbose) {
+            g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
+                  "Considering flushing inode %"PRIu64, inode->inum);
+        }
+
+        /* Stop processing dirty inodes if we both have enough memory available
+         * and the oldest inode is sufficiently new that it need not be flushed
+         * out. */
+        uint64_t elapsed = bluesky_get_current_time() - inode->change_time;
+        if (g_atomic_int_get(&fs->cache_dirty) < bluesky_watermark_low_dirty
+                && elapsed < WRITEBACK_DELAY)
+            break;
+        if (inode->change_time > start_time)
+            break;
+
+        bluesky_inode_ref(inode);
+
+        g_mutex_unlock(fs->lock);
+
+        g_mutex_lock(inode->lock);
+        flushd_dirty_inode(inode);
+        g_mutex_unlock(inode->lock);
+        bluesky_inode_unref(inode);
+
+        g_mutex_lock(fs->lock);
+    }
+
+    g_mutex_unlock(fs->lock);
+}
+
+/* Drop cached data for a given inode, if it is clean.  inode must be locked. */
+static void drop_caches(BlueSkyInode *inode)
+{
+    if (inode->type == BLUESKY_REGULAR)
+        bluesky_file_drop_cached(inode);
+}
+
+/* Drop clean data from the cache if needed due to memory pressure. */
+static void flushd_clean(BlueSkyFS *fs)
+{
+    g_mutex_lock(fs->lock);
+
+    size_t inode_count = g_hash_table_size(fs->inodes);
+    if (!inode_count)
+        inode_count = 1;
+
+    while (inode_count-- > 0) {
+        if (g_atomic_int_get(&fs->cache_total) < bluesky_watermark_medium_total)
+            break;
+
+        BlueSkyInode *inode;
+        if (fs->accessed_list.prev == NULL)
+            break;
+        inode = fs->accessed_list.prev->data;
+
+        if (bluesky_verbose) {
+            g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
+                  "Considering dropping cached data for inode %"PRIu64,
+                  inode->inum);
+        }
+
+        bluesky_inode_ref(inode);
+
+        g_mutex_unlock(fs->lock);
+
+        g_mutex_lock(inode->lock);
+
+        g_mutex_lock(fs->lock);
+        bluesky_list_unlink(&fs->accessed_list, inode->accessed_list);
+        inode->accessed_list = bluesky_list_prepend(&fs->accessed_list, inode);
+        g_mutex_unlock(fs->lock);
+
+        drop_caches(inode);
+
+        g_mutex_unlock(inode->lock);
+        bluesky_inode_unref(inode);
+
+        g_mutex_lock(fs->lock);
+    }
+
+    g_mutex_unlock(fs->lock);
+}
+
+/* Run the flush daemon for a single iteration; if it is already executing,
+ * return immediately. */
+static gpointer flushd_task(BlueSkyFS *fs)
+{
+    if (!g_mutex_trylock(fs->flushd_lock))
+        return NULL;
+    flushd_dirty(fs);
+    flushd_clean(fs);
+    g_mutex_unlock(fs->flushd_lock);
+
+    return NULL;
+}
+
+void bluesky_flushd_invoke(BlueSkyFS *fs)
+{
+    g_thread_create((GThreadFunc)flushd_task, fs, FALSE, NULL);
+}
+
+void bluesky_flushd_invoke_conditional(BlueSkyFS *fs)
+{
+    if (g_atomic_int_get(&fs->cache_dirty) < bluesky_watermark_high_dirty
+        && g_atomic_int_get(&fs->cache_total) < bluesky_watermark_high_total)
+        return;
+
+    if (bluesky_verbose) {
+        g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
+              "Too much data; invoking flushd: dirty=%d total=%d",
+              g_atomic_int_get(&fs->cache_dirty),
+              g_atomic_int_get(&fs->cache_total));
+    }
+
+    bluesky_flushd_invoke(fs);
+}
+
+/* Start a perpetually-running thread that flushes the cache occasionally. */
+static gpointer flushd_thread(BlueSkyFS *fs)
+{
+    while (TRUE) {
+        bluesky_flushd_invoke(fs);
+        struct timespec delay;
+        delay.tv_sec = 2;
+        delay.tv_nsec = 0;
+        nanosleep(&delay, NULL);
+    }
+
+    return NULL;
+}
+
+void bluesky_flushd_thread_launch(BlueSkyFS *fs)
+{
+    g_thread_create((GThreadFunc)flushd_thread, fs, FALSE, NULL);
 }