X-Git-Url: http://git.vrable.net/?a=blobdiff_plain;f=bluesky%2Fcache.c;h=a4b9a0cd6cca6f49b91218133ad3edb9add5bbfa;hb=4bd76c48487520c2b1c08ff67009f752e0bde941;hp=f953a6455a0736ee6cac142038a270404cbb25f7;hpb=0ef7fc934daee6ded318e3d52023521b758b295e;p=bluesky.git

diff --git a/bluesky/cache.c b/bluesky/cache.c
index f953a64..a4b9a0c 100644
--- a/bluesky/cache.c
+++ b/bluesky/cache.c
@@ -13,35 +13,286 @@
 #include "bluesky-private.h"
 
-/* Filesystem caching and cache coherency. */
+#define WRITEBACK_DELAY (20 * 1000000)
+#define CACHE_DROP_DELAY (20 * 1000000)
 
-static void flushd_inode(gpointer key, gpointer value, gpointer user_data)
+/* Filesystem caching and cache coherency. There are actually a couple of
+ * different tasks that are performed here:
+ *   - Forcing data to the log if needed to reclaim memory or simply if the
+ *     data has been dirty in memory long enough.
+ *   - Writing batches of data to the cloud.
+ */
+
+static void flushd_dirty_inode(BlueSkyInode *inode)
 {
-    BlueSkyFS *fs = (BlueSkyFS *)user_data;
+    BlueSkyFS *fs = inode->fs;
 
-    BlueSkyInode *inode = (BlueSkyInode *)value;
+    g_mutex_lock(fs->lock);
+    bluesky_list_unlink(&fs->unlogged_list, inode->unlogged_list);
+    inode->unlogged_list = NULL;
+    g_mutex_unlock(fs->lock);
 
+    /* Inode is clean; nothing to do. */
     if (inode->change_count == inode->change_commit)
         return;
 
-    g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
-          "Starting flush of inode %"PRIu64, inode->inum);
+    if (bluesky_verbose) {
+        g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
+              "Starting flush of inode %"PRIu64, inode->inum);
+    }
 
-    if (inode->type == BLUESKY_REGULAR)
-        bluesky_file_flush(inode);
-    bluesky_inode_flush(fs, inode);
+    bluesky_inode_start_sync(inode);
 }
 
-/* Scan through the cache for dirty data and start flushing it to stable
- * storage.  This does not guarantee that data is committed when it returns.
- * Instead, this can be called occasionally to ensure that dirty data is
- * gradually flushed. */
-void bluesky_flushd_invoke(BlueSkyFS *fs)
+/* Check whether memory usage may have dropped below critical thresholds for
+ * waking up waiting threads. */
+void flushd_check_wakeup(BlueSkyFS *fs)
 {
-    g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
-          "Writeout process invoked");
+    int dirty = g_atomic_int_get(&fs->cache_dirty);
+    dirty += g_atomic_int_get(&fs->cache_log_dirty);
+
+    if (dirty <= bluesky_watermark_high_dirty)
+        g_cond_broadcast(fs->flushd_cond);
+}
 
+/* Try to flush dirty data to disk, either due to memory pressure or due to
+ * timeouts. */
+static void flushd_dirty(BlueSkyFS *fs)
+{
+    int64_t start_time = bluesky_get_current_time();
     g_mutex_lock(fs->lock);
-    g_hash_table_foreach(fs->inodes, flushd_inode, fs);
+
+    while (1) {
+        BlueSkyInode *inode;
+        if (fs->unlogged_list.prev == NULL)
+            break;
+        inode = fs->unlogged_list.prev->data;
+
+        if (bluesky_verbose) {
+            g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
+                  "Considering flushing inode %"PRIu64, inode->inum);
+        }
+
+        /* Stop processing dirty inodes if we both have enough memory available
+         * and the oldest inode is sufficiently new that it need not be flushed
+         * out. */
+        uint64_t elapsed = bluesky_get_current_time() - inode->change_time;
+        if (g_atomic_int_get(&fs->cache_dirty) < bluesky_watermark_low_dirty
+                && elapsed < WRITEBACK_DELAY)
+            break;
+        if (inode->change_time > start_time)
+            break;
+
+        bluesky_inode_ref(inode);
+
+        g_mutex_unlock(fs->lock);
+
+        g_mutex_lock(inode->lock);
+        flushd_dirty_inode(inode);
+        g_mutex_unlock(inode->lock);
+        bluesky_inode_unref(inode);
+
+        g_mutex_lock(fs->lock);
+        flushd_check_wakeup(fs);
+    }
+
+    g_cond_broadcast(fs->flushd_cond);
+
     g_mutex_unlock(fs->lock);
 }
+
+/* Try to flush dirty data to the cloud.
+ * TODO: Rewrite this to work on cloud log items rather than inodes, so we can
+ * better track which logs are fully synchronized to the cloud and can be
+ * garbage collected if needed? */
+static void flushd_cloud(BlueSkyFS *fs)
+{
+    int64_t start_time = bluesky_get_current_time();
+    g_mutex_lock(fs->lock);
+
+    while (1) {
+        BlueSkyInode *inode;
+        if (fs->dirty_list.prev == NULL)
+            break;
+        inode = fs->dirty_list.prev->data;
+
+        if (bluesky_verbose) {
+            g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
+                  "Flushing inode %"PRIu64" to cloud", inode->inum);
+        }
+
+        /* Stop processing dirty inodes if we both have enough memory available
+         * and the oldest inode is sufficiently new that it need not be flushed
+         * out. */
+        uint64_t elapsed = bluesky_get_current_time() - inode->change_time;
+        if (g_atomic_int_get(&fs->cache_dirty) < bluesky_watermark_low_dirty
+                && elapsed < WRITEBACK_DELAY)
+            break;
+        if (inode->change_time > start_time)
+            break;
+
+        bluesky_inode_ref(inode);
+
+        g_mutex_unlock(fs->lock);
+
+        g_mutex_lock(inode->lock);
+        flushd_dirty_inode(inode);
+        g_mutex_lock(fs->lock);
+        bluesky_list_unlink(&fs->dirty_list, inode->dirty_list);
+        inode->dirty_list = NULL;
+        g_mutex_unlock(fs->lock);
+
+        BlueSkyCloudLog *log = inode->committed_item;
+        bluesky_cloudlog_ref(log);
+        g_mutex_unlock(inode->lock);
+
+        if (log != NULL)
+            bluesky_cloudlog_serialize(log, fs);
+        bluesky_inode_unref(inode);
+        bluesky_cloudlog_unref(log);
+
+        g_mutex_lock(fs->lock);
+    }
+
+    g_mutex_unlock(fs->lock);
+    bluesky_cloudlog_flush(fs);
+}
+
+/* Drop cached data for a given inode, if it is clean. inode must be locked. */
+static void drop_caches(BlueSkyInode *inode)
+{
+    if (inode->type == BLUESKY_REGULAR)
+        bluesky_file_drop_cached(inode);
+
+    BlueSkyCloudLog *log = inode->committed_item;
+    if (log != NULL) {
+        g_mutex_lock(log->lock);
+        if (log->data != NULL
+            && g_atomic_int_get(&log->data_lock_count) == 0
+            && (log->location_flags != 0))
+        {
+            bluesky_cloudlog_stats_update(log, -1);
+            bluesky_string_unref(log->data);
+            log->data = NULL;
+            bluesky_cloudlog_stats_update(log, 1);
+        }
+        g_mutex_unlock(log->lock);
+    }
+}
+
+/* Drop clean data from the cache if needed. Clean data should generally be
+ * memory-mapped from log file or similar, so the kernel can drop this clean
+ * data from memory for us and hence memory management isn't too important.
+ * Mainly, we'll want to drop references to data that hasn't been accessed in a
+ * while so that it is possible to reclaim log segments on disk. */
+static void flushd_clean(BlueSkyFS *fs)
+{
+    g_mutex_lock(fs->lock);
+
+    size_t inode_count = g_hash_table_size(fs->inodes);
+    if (!inode_count)
+        inode_count = 1;
+
+    while (inode_count-- > 0) {
+        BlueSkyInode *inode;
+        if (fs->accessed_list.prev == NULL)
+            break;
+        inode = fs->accessed_list.prev->data;
+
+        uint64_t elapsed = bluesky_get_current_time() - inode->access_time;
+        if (elapsed < CACHE_DROP_DELAY)
+            break;
+
+        if (bluesky_verbose) {
+            g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
+                  "Considering dropping cached data for inode %"PRIu64,
+                  inode->inum);
+        }
+
+        bluesky_inode_ref(inode);
+
+        g_mutex_unlock(fs->lock);
+
+        g_mutex_lock(inode->lock);
+
+        g_mutex_lock(fs->lock);
+        bluesky_list_unlink(&fs->accessed_list, inode->accessed_list);
+        inode->accessed_list = bluesky_list_prepend(&fs->accessed_list, inode);
+        g_mutex_unlock(fs->lock);
+
+        drop_caches(inode);
+
+        g_mutex_unlock(inode->lock);
+        bluesky_inode_unref(inode);
+
+        g_mutex_lock(fs->lock);
+    }
+
+    g_mutex_unlock(fs->lock);
+}
+
+/* Run the flush daemon for a single iteration, though if it is already
+ * executing returns immediately. */
+static gpointer flushd_task(BlueSkyFS *fs)
+{
+    if (!g_mutex_trylock(fs->flushd_lock))
+        return NULL;
+    flushd_dirty(fs);
+    flushd_cloud(fs);
+    flushd_clean(fs);
+    g_mutex_unlock(fs->flushd_lock);
+
+    return NULL;
+}
+
+void bluesky_flushd_invoke(BlueSkyFS *fs)
+{
+    g_thread_create((GThreadFunc)flushd_task, fs, FALSE, NULL);
+}
+
+void bluesky_flushd_invoke_conditional(BlueSkyFS *fs)
+{
+    if (g_atomic_int_get(&fs->cache_dirty) < bluesky_watermark_medium_dirty)
+        return;
+
+    if (bluesky_verbose) {
+        g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
+              "Too much data; invoking flushd: dirty=%d",
+              g_atomic_int_get(&fs->cache_dirty));
+    }
+
+    bluesky_flushd_invoke(fs);
+
+    /* If the system is under heavy memory pressure, actually delay execution
+     * so the flush daemon can catch up. */
+    while (g_atomic_int_get(&fs->cache_dirty)
+                + g_atomic_int_get(&fs->cache_log_dirty)
+           > bluesky_watermark_high_dirty) {
+        g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
+              "Waiting due to memory pressure, dirty=%d + %d",
+              g_atomic_int_get(&fs->cache_dirty),
+              g_atomic_int_get(&fs->cache_log_dirty));
+        g_mutex_lock(fs->lock);
+        g_cond_wait(fs->flushd_cond, fs->lock);
+        g_mutex_unlock(fs->lock);
+    }
+}
+
+/* Start a perpetually-running thread that flushes the cache occasionally. */
+static gpointer flushd_thread(BlueSkyFS *fs)
+{
+    while (TRUE) {
+        bluesky_flushd_invoke(fs);
+        struct timespec delay;
+        delay.tv_sec = 2;
+        delay.tv_nsec = 0;
+        nanosleep(&delay, NULL);
+    }
+
+    return NULL;
+}
+
+void bluesky_flushd_thread_launch(BlueSkyFS *fs)
+{
+    g_thread_create((GThreadFunc)flushd_thread, fs, FALSE, NULL);
+}