X-Git-Url: http://git.vrable.net/?a=blobdiff_plain;f=bluesky%2Fcache.c;h=58b7cd6f1ac79c6b67af07fc4ece5979f872312d;hb=e49fed25d000a51b316b480c1bfb1b00c1f5d345;hp=5f16ab0afc710afa059d2ad2d9ea77e56920ce0f;hpb=641411b0a7ce9afa309bea28c01320eda913b2d2;p=bluesky.git diff --git a/bluesky/cache.c b/bluesky/cache.c index 5f16ab0..58b7cd6 100644 --- a/bluesky/cache.c +++ b/bluesky/cache.c @@ -15,14 +15,22 @@ #define WRITEBACK_DELAY (20 * 1000000) -/* Filesystem caching and cache coherency. */ +/* Filesystem caching and cache coherency. There are actually a couple of + * different tasks that are performed here: + * - Forcing data to the log if needed to reclaim memory or simply if the + * data has been dirty in memory long enough. + * - Writing batches of data to the cloud. + */ +#if 0 static void writeback_complete(gpointer a, gpointer i) { BlueSkyInode *inode = (BlueSkyInode *)i; - g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG, - "Writeback for inode %"PRIu64" complete", inode->inum); + if (bluesky_verbose) { + g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG, + "Writeback for inode %"PRIu64" complete", inode->inum); + } g_mutex_lock(inode->lock); @@ -39,122 +47,27 @@ static void writeback_complete(gpointer a, gpointer i) g_mutex_unlock(inode->lock); } - -#if 0 -static void flushd_inode(gpointer value, gpointer user_data) -{ - BlueSkyFS *fs = (BlueSkyFS *)user_data; - - BlueSkyInode *inode = (BlueSkyInode *)value; - - g_mutex_lock(inode->lock); - - if (inode->change_count == inode->change_commit) { - uint64_t delay = bluesky_get_current_time() - inode->access_time; - if (delay >= CACHE_CLEAN_DELAY) { - drop_caches(inode); - - /* If the only references are the one we hold and the one in the - * filesystem inum->inode hash table... First check the refcount - * without the lock for speed, but if the check looks good verify - * it after taking the filesystem lock. */ - if (inode->refcount == 2) { - g_mutex_lock(fs->lock); - if (inode->refcount == 2) { - g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG, - "Trying to drop inode %"PRIu64" from cache", - inode->inum); - if (g_hash_table_remove(fs->inodes, &inode->inum)) - bluesky_inode_unref(inode); - } - bluesky_list_unlink(&inode->fs->accessed_list, - inode->accessed_list); - inode->accessed_list = NULL; - bluesky_list_unlink(&inode->fs->dirty_list, - inode->dirty_list); - inode->dirty_list = NULL; - g_mutex_unlock(fs->lock); - } - } - - g_mutex_unlock(inode->lock); - bluesky_inode_unref(inode); - return; - } - - if (inode->change_pending) { - /* Waiting for an earlier writeback to finish, so don't start a new - * writeback yet. */ - g_mutex_unlock(inode->lock); - bluesky_inode_unref(inode); - return; - } - - uint64_t elapsed = bluesky_get_current_time() - inode->change_time; - if (elapsed < WRITEBACK_DELAY) { - /* Give a bit more time before starting writeback. */ - g_mutex_unlock(inode->lock); - bluesky_inode_unref(inode); - return; - } - - inode->change_pending = inode->change_count; - - g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG, - "Starting flush of inode %"PRIu64, inode->inum); - - /* Create a store barrier. All operations part of the writeback will be - * added to this barrier, so when the barrier completes we know that the - * writeback is finished. 
*/ - BlueSkyStoreAsync *barrier = bluesky_store_async_new(fs->store); - barrier->op = STORE_OP_BARRIER; - - bluesky_inode_start_sync(inode, barrier); - - bluesky_store_async_add_notifier(barrier, writeback_complete, inode); - bluesky_store_async_submit(barrier); - bluesky_store_async_unref(barrier); - - g_mutex_unlock(inode->lock); - bluesky_inode_unref(inode); -} #endif static void flushd_dirty_inode(BlueSkyInode *inode) { BlueSkyFS *fs = inode->fs; + g_mutex_lock(fs->lock); + bluesky_list_unlink(&fs->dirty_list, inode->dirty_list); + inode->dirty_list = NULL; + g_mutex_unlock(fs->lock); + /* Inode is clean; nothing to do. */ if (inode->change_count == inode->change_commit) return; - /* Inode writeback is in progress; put back on the dirty list. */ - if (inode->change_pending) { - /* Waiting for an earlier writeback to finish, so don't start a new - * writeback yet. */ - g_mutex_lock(fs->lock); - inode->change_time = bluesky_get_current_time(); - bluesky_list_unlink(&fs->dirty_list, inode->dirty_list); - inode->dirty_list = bluesky_list_prepend(&fs->dirty_list, inode); - g_mutex_unlock(fs->lock); - return; + if (bluesky_verbose) { + g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG, + "Starting flush of inode %"PRIu64, inode->inum); } - g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG, - "Starting flush of inode %"PRIu64, inode->inum); - inode->change_pending = inode->change_count; - - /* Create a store barrier. All operations part of the writeback will be - * added to this barrier, so when the barrier completes we know that the - * writeback is finished. */ - BlueSkyStoreAsync *barrier = bluesky_store_async_new(fs->store); - barrier->op = STORE_OP_BARRIER; - - bluesky_inode_start_sync(inode, barrier); - - bluesky_store_async_add_notifier(barrier, writeback_complete, inode); - bluesky_store_async_submit(barrier); - bluesky_store_async_unref(barrier); + bluesky_inode_start_sync(inode); } /* Try to flush dirty data to disk, either due to memory pressure or due to @@ -170,7 +83,10 @@ static void flushd_dirty(BlueSkyFS *fs) break; inode = fs->dirty_list.prev->data; - g_print("Considering flushing inode %"PRIu64"\n", inode->inum); + if (bluesky_verbose) { + g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG, + "Considering flushing inode %"PRIu64, inode->inum); + } /* Stop processing dirty inodes if we both have enough memory available * and the oldest inode is sufficiently new that it need not be flushed @@ -184,9 +100,6 @@ static void flushd_dirty(BlueSkyFS *fs) bluesky_inode_ref(inode); - bluesky_list_unlink(&fs->dirty_list, fs->dirty_list.prev); - inode->dirty_list = NULL; - g_mutex_unlock(fs->lock); g_mutex_lock(inode->lock); @@ -207,7 +120,7 @@ static void drop_caches(BlueSkyInode *inode) bluesky_file_drop_cached(inode); } -/* Drop clean data fromt the cache if needed due to memory pressure. */ +/* Drop clean data from the cache if needed due to memory pressure. 
*/ static void flushd_clean(BlueSkyFS *fs) { g_mutex_lock(fs->lock); @@ -217,26 +130,35 @@ static void flushd_clean(BlueSkyFS *fs) inode_count = 1; while (inode_count-- > 0) { +#if 0 if (g_atomic_int_get(&fs->cache_total) < bluesky_watermark_medium_total) break; +#endif BlueSkyInode *inode; if (fs->accessed_list.prev == NULL) break; inode = fs->accessed_list.prev->data; - g_print("Considering dropping cached data for inode %"PRIu64"\n", - inode->inum); + if (bluesky_verbose) { + g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG, + "Considering dropping cached data for inode %"PRIu64, + inode->inum); + } bluesky_inode_ref(inode); - bluesky_list_unlink(&fs->accessed_list, fs->accessed_list.prev); - inode->accessed_list = bluesky_list_prepend(&fs->accessed_list, inode); - g_mutex_unlock(fs->lock); g_mutex_lock(inode->lock); + + g_mutex_lock(fs->lock); + bluesky_list_unlink(&fs->accessed_list, inode->accessed_list); + inode->accessed_list = bluesky_list_prepend(&fs->accessed_list, inode); + g_mutex_unlock(fs->lock); + drop_caches(inode); + g_mutex_unlock(inode->lock); bluesky_inode_unref(inode); @@ -253,6 +175,7 @@ static gpointer flushd_task(BlueSkyFS *fs) if (!g_mutex_trylock(fs->flushd_lock)) return NULL; flushd_dirty(fs); + bluesky_cloudlog_write_log(fs); flushd_clean(fs); g_mutex_unlock(fs->flushd_lock); @@ -270,10 +193,31 @@ void bluesky_flushd_invoke_conditional(BlueSkyFS *fs) && g_atomic_int_get(&fs->cache_total) < bluesky_watermark_high_total) return; - g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG, - "Too much data; invoking flushd: dirty=%d total=%d", - g_atomic_int_get(&fs->cache_dirty), - g_atomic_int_get(&fs->cache_total)); + if (bluesky_verbose) { + g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG, + "Too much data; invoking flushd: dirty=%d total=%d", + g_atomic_int_get(&fs->cache_dirty), + g_atomic_int_get(&fs->cache_total)); + } bluesky_flushd_invoke(fs); } + +/* Start a perpetually-running thread that flushes the cache occasionally. */ +static gpointer flushd_thread(BlueSkyFS *fs) +{ + while (TRUE) { + bluesky_flushd_invoke(fs); + struct timespec delay; + delay.tv_sec = 2; + delay.tv_nsec = 0; + nanosleep(&delay, NULL); + } + + return NULL; +} + +void bluesky_flushd_thread_launch(BlueSkyFS *fs) +{ + g_thread_create((GThreadFunc)flushd_thread, fs, FALSE, NULL); +}
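
The hunks above are followed here by a few small standalone sketches of the mechanisms this patch touches. Everything in them is plain C with GLib, and any struct, function, or parameter name that does not also appear in the diff is a placeholder.

flushd_dirty walks fs->dirty_list from its tail, i.e. starting with the inode that has been dirty the longest, and the comment in its hunk says the loop stops once enough memory is available and the oldest inode is recent enough that it does not need to be flushed yet. The condition itself sits in context lines the diff does not show, so the predicate below is only a plausible reconstruction from that comment and the WRITEBACK_DELAY constant at the top of the file; the dirty_watermark parameter name is invented.

#include <glib.h>

#define WRITEBACK_DELAY (20 * 1000000)   /* from the patch; 20 s if timestamps are microseconds */

/* Plausible reconstruction of the stopping rule described in flushd_dirty.
 * Times are in the same units as bluesky_get_current_time() in the removed
 * code, assumed here to be microseconds. */
static gboolean flushd_dirty_should_stop(gint cache_dirty,
                                         gint dirty_watermark,  /* placeholder name */
                                         guint64 now,
                                         guint64 oldest_change_time)
{
    /* Stop only when memory pressure is gone AND the oldest dirty inode has
     * not yet been dirty long enough to be forced out. */
    return cache_dirty <= dirty_watermark
        && now - oldest_change_time < WRITEBACK_DELAY;
}

Both halves have to hold for the loop to stop: the absence of memory pressure alone is not enough once data has been dirty for longer than WRITEBACK_DELAY, which is what keeps the writeback deadline even when the cache is small.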
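
flushd_clean makes one bounded pass over fs->accessed_list: it snapshots the list length, repeatedly takes the least-recently-used inode from the tail, drops its cached file data, and re-links the inode at the head, so each inode is visited at most once per pass (the patch also moves that re-linking so it happens while inode->lock is held). The sketch below shows just the rotation, with a GQueue standing in for BlueSky's own list helpers and a cached_item struct standing in for BlueSkyInode; locking, refcounting, and the currently #if 0'd watermark check are left out.

#include <glib.h>

/* Stand-in for a cached inode; the real BlueSkyInode carries much more. */
struct cached_item {
    guint64 inum;
    gboolean has_cached_data;
};

/* Sketch of the flushd_clean rotation: visit each item at most once per
 * pass, dropping cached data from the least-recently-used end and moving
 * the item back to the head of the list. */
static void clean_pass(GQueue *accessed_list)
{
    guint remaining = g_queue_get_length(accessed_list);
    while (remaining-- > 0) {
        struct cached_item *item = g_queue_pop_tail(accessed_list);
        if (item == NULL)
            break;
        if (item->has_cached_data) {
            g_print("dropping cached data for inode %" G_GUINT64_FORMAT "\n",
                    item->inum);
            item->has_cached_data = FALSE;   /* stands in for drop_caches() */
        }
        g_queue_push_head(accessed_list, item);
    }
}

int main(void)
{
    GQueue accessed_list = G_QUEUE_INIT;
    struct cached_item a = { 1, TRUE }, b = { 2, TRUE };
    g_queue_push_head(&accessed_list, &a);
    g_queue_push_head(&accessed_list, &b);
    clean_pass(&accessed_list);   /* drops data for inode 1, then inode 2 */
    return 0;
}

Re-linking at the head instead of removing the entry keeps the inode on the LRU list, so a later pass can look at it again once it has been accessed or dirtied in the meantime.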
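
Finally, the new flushd_thread / bluesky_flushd_thread_launch pair turns the flush daemon into a perpetual background loop, while flushd_task keeps its g_mutex_trylock(fs->flushd_lock) guard, so a pass triggered by the two-second timer and one triggered by bluesky_flushd_invoke_conditional cannot pile up: whichever arrives second simply skips its turn. Below is a minimal self-contained sketch of that pattern; flush_state and do_flush_work() are placeholders for BlueSkyFS and the flushd_dirty / bluesky_cloudlog_write_log / flushd_clean sequence, and the task is called directly because the body of bluesky_flushd_invoke() is not part of this diff.

/* Build with: gcc flush_sketch.c $(pkg-config --cflags --libs glib-2.0) */
#include <glib.h>
#include <time.h>

struct flush_state {        /* placeholder for BlueSkyFS */
    GMutex flushd_lock;     /* serializes flush passes, like fs->flushd_lock */
    gint cache_dirty;       /* placeholder dirty counter */
};

/* Placeholder for the real flush work. */
static void do_flush_work(struct flush_state *fs)
{
    g_print("flush pass, dirty=%d\n", g_atomic_int_get(&fs->cache_dirty));
}

/* Mirrors flushd_task: if another pass is already running, skip this one
 * rather than blocking behind it. */
static gpointer flush_task(gpointer data)
{
    struct flush_state *fs = data;
    if (!g_mutex_trylock(&fs->flushd_lock))
        return NULL;
    do_flush_work(fs);
    g_mutex_unlock(&fs->flushd_lock);
    return NULL;
}

/* Mirrors flushd_thread: rerun the flush task every two seconds, forever. */
static gpointer flush_thread(gpointer data)
{
    while (TRUE) {
        flush_task(data);
        struct timespec delay = { .tv_sec = 2, .tv_nsec = 0 };
        nanosleep(&delay, NULL);
    }
    return NULL;
}

int main(void)
{
    struct flush_state fs;
    g_mutex_init(&fs.flushd_lock);
    fs.cache_dirty = 0;
    /* Same call the patch uses; newer GLib spells this g_thread_new(). */
    g_thread_create(flush_thread, &fs, FALSE, NULL);
    g_usleep(7 * G_USEC_PER_SEC);    /* let a few passes run, then exit */
    return 0;
}

Passing FALSE for the joinable argument matches the patch: the thread is detached and simply runs for the life of the process.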