X-Git-Url: http://git.vrable.net/?a=blobdiff_plain;f=bluesky%2Fcache.c;h=d1c5c84c6725e5b6ac2a2cc8f323e386c777b336;hb=bdb05ff1ad95ab25e13934f66c83452ef00119fe;hp=4fe467118504d8b3795ea2c1750e006261b607c7;hpb=6abd55eff09d83999c7a84e0ee63bdc3100f7666;p=bluesky.git

diff --git a/bluesky/cache.c b/bluesky/cache.c
index 4fe4671..d1c5c84 100644
--- a/bluesky/cache.c
+++ b/bluesky/cache.c
@@ -13,75 +13,331 @@
 #include "bluesky-private.h"
 
-#define WRITEBACK_DELAY (5 * 1000000)
+#define WRITEBACK_DELAY (20 * 1000000)
+#define CACHE_DROP_DELAY (20 * 1000000)
 
-/* Filesystem caching and cache coherency. */
+/* Filesystem caching and cache coherency.  There are actually a couple of
+ * different tasks that are performed here:
+ *   - Forcing data to the log if needed to reclaim memory or simply if the
+ *     data has been dirty in memory long enough.
+ *   - Writing batches of data to the cloud.
+ */
 
-static void writeback_complete(gpointer a, gpointer i)
+static void flushd_dirty_inode(BlueSkyInode *inode)
 {
-    BlueSkyInode *inode = (BlueSkyInode *)i;
+    BlueSkyFS *fs = inode->fs;
 
-    g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
-          "Writeback for inode %"PRIu64" complete", inode->inum);
+    g_mutex_lock(fs->lock);
+    bluesky_list_unlink(&fs->unlogged_list, inode->unlogged_list);
+    inode->unlogged_list = NULL;
+    g_mutex_unlock(fs->lock);
 
-    g_mutex_lock(inode->lock);
+    /* Inode is clean; nothing to do. */
+    if (inode->change_count == inode->change_commit)
+        return;
 
-    inode->change_commit = inode->change_pending;
-    if (inode->change_count == inode->change_commit) {
-        /* If inode is no longer dirty... */
-        inode->change_time = 0;
-        inode->change_pending = 0;
+    if (bluesky_verbose) {
+        g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
+              "Starting flush of inode %"PRIu64, inode->inum);
     }
 
-    g_mutex_unlock(inode->lock);
+    bluesky_inode_start_sync(inode);
 }
 
-static void flushd_inode(gpointer key, gpointer value, gpointer user_data)
+/* Check whether memory usage may have dropped below critical thresholds for
+ * waking up waiting threads. */
+void flushd_check_wakeup(BlueSkyFS *fs)
 {
-    BlueSkyFS *fs = (BlueSkyFS *)user_data;
+    int dirty = g_atomic_int_get(&fs->cache_dirty);
+    dirty += g_atomic_int_get(&fs->cache_log_dirty);
 
-    BlueSkyInode *inode = (BlueSkyInode *)value;
+    if (dirty <= bluesky_watermark_high_dirty)
+        g_cond_broadcast(fs->flushd_cond);
+}
 
-    if (inode->change_count == inode->change_commit)
-        return;
+/* Try to flush dirty data to disk, either due to memory pressure or due to
+ * timeouts. */
+static void flushd_dirty(BlueSkyFS *fs)
+{
+    int64_t start_time = bluesky_get_current_time();
+    g_mutex_lock(fs->lock);
 
-    if (inode->change_pending) {
-        /* Waiting for an earlier writeback to finish, so don't start a new
-         * writeback yet. */
-        return;
+    while (1) {
+        BlueSkyInode *inode;
+        if (fs->unlogged_list.prev == NULL)
+            break;
+        inode = fs->unlogged_list.prev->data;
+
+        if (bluesky_verbose) {
+            g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
+                  "Considering flushing inode %"PRIu64, inode->inum);
+        }
+
+        /* Stop processing dirty inodes if we both have enough memory available
+         * and the oldest inode is sufficiently new that it need not be flushed
+         * out. */
+        uint64_t elapsed = bluesky_get_current_time() - inode->change_time;
+        if (g_atomic_int_get(&fs->cache_dirty) < bluesky_watermark_low_dirty
+                && elapsed < WRITEBACK_DELAY)
+            break;
+        if (inode->change_time > start_time)
+            break;
+
+        bluesky_inode_ref(inode);
+
+        g_mutex_unlock(fs->lock);
+
+        g_mutex_lock(inode->lock);
+        flushd_dirty_inode(inode);
+        g_mutex_unlock(inode->lock);
+        bluesky_inode_unref(inode);
+
+        g_mutex_lock(fs->lock);
+        flushd_check_wakeup(fs);
     }
 
-    uint64_t elapsed = bluesky_get_current_time() - inode->change_time;
-    if (elapsed < WRITEBACK_DELAY) {
-        /* Give a bit more time before starting writeback. */
-        return;
+    g_cond_broadcast(fs->flushd_cond);
+
+    g_mutex_unlock(fs->lock);
+}
+
+/* Try to flush dirty data to the cloud.  This will take a snapshot of the
+ * entire filesystem (though only point-in-time consistent for isolated inodes
+ * and not the filesystem as a whole) and ensure all data is written to the
+ * cloud.  When the write completes, we will allow old journal segments (those
+ * that were fully written _before_ the snapshot process started) to be garbage
+ * collected.  Newer journal segments can't be collected yet since they may
+ * still contain data which has not been written persistently to the cloud.
+ *
+ * Note that some of this code relies on the fact that only this thread of
+ * control (running flushd_cloud) is manipulating the inode map, and so
+ * concurrent updates to the inode map are prevented even without the
+ * filesystem lock held.  Take great care if allowing multi-threaded access to
+ * the inode map... */
+static void flushd_cloud(BlueSkyFS *fs)
+{
+    g_mutex_lock(fs->lock);
+
+    /* TODO: Locking?  Since we're reading a single variable this is probably
+     * atomic but a lock could be safer. */
+    BlueSkyCloudLog *marker = bluesky_log_get_commit_point(fs);
+    int journal_seq_start = fs->log->seq_num;
+
+    while (1) {
+        BlueSkyInode *inode;
+        if (fs->dirty_list.prev == NULL)
+            break;
+        inode = fs->dirty_list.prev->data;
+
+        if (bluesky_verbose) {
+            g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
+                  "Flushing inode %"PRIu64" to cloud", inode->inum);
+        }
+
+        bluesky_inode_ref(inode);
+
+        g_mutex_unlock(fs->lock);
+
+        g_mutex_lock(inode->lock);
+        g_assert(inode->change_cloud == inode->change_commit);
+        g_mutex_lock(fs->lock);
+        bluesky_list_unlink(&fs->dirty_list, inode->dirty_list);
+        inode->dirty_list = NULL;
+        g_mutex_unlock(fs->lock);
+
+        BlueSkyCloudLog *log = inode->committed_item;
+        inode->committed_item = NULL;
+        g_mutex_unlock(inode->lock);
+
+        if (log != NULL)
+            bluesky_cloudlog_serialize(log, fs);
+        bluesky_inode_unref(inode);
+        bluesky_cloudlog_unref(log);
+
+        g_mutex_lock(fs->lock);
     }
+    g_mutex_unlock(fs->lock);
+
+    /* Write out any updated inode map entries, so that all inodes just written
+     * can be located, and then a final commit record. */
+    BlueSkyCloudLog *commit_record = bluesky_inode_map_serialize(fs);
+    if (commit_record != NULL) {
+        bluesky_cloudlog_serialize(commit_record, fs);
+    } else {
+        g_print("No need for a checkpoint record...\n");
+    }
+
+    bluesky_cloudlog_flush(fs);
 
-    inode->change_pending = inode->change_count;
+    /* Wait until all segments have been written to the cloud, so that it
+     * becomes safe to free up journal segments. */
+    while (fs->log_state->pending_segments != NULL) {
+        SerializedRecord *segment
+            = (SerializedRecord *)fs->log_state->pending_segments->data;
+        g_mutex_lock(segment->lock);
+        while (!segment->complete)
+            g_cond_wait(segment->cond, segment->lock);
+        g_mutex_unlock(segment->lock);
 
-    g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
-          "Starting flush of inode %"PRIu64, inode->inum);
+        g_mutex_free(segment->lock);
+        g_cond_free(segment->cond);
+        g_free(segment);
 
-    /* Create a store barrier.  All operations part of the writeback will be
-     * added to this barrier, so when the barrier completes we know that the
-     * writeback is finished. */
-    BlueSkyStoreAsync *barrier = bluesky_store_async_new(fs->store);
-    barrier->op = STORE_OP_BARRIER;
+        fs->log_state->pending_segments
+            = g_list_delete_link(fs->log_state->pending_segments,
+                                 fs->log_state->pending_segments);
+    }
+
+    bluesky_log_write_commit_point(fs, marker);
+    bluesky_cloudlog_unref(commit_record);
+
+    g_print("All segments have been flushed, journal < %d is clean\n",
+            journal_seq_start);
 
-    bluesky_inode_start_sync(inode, barrier);
+    fs->log->journal_watermark = journal_seq_start;
 
-    bluesky_store_async_add_notifier(barrier, writeback_complete, inode);
-    bluesky_store_async_submit(barrier);
-    bluesky_store_async_unref(barrier);
+    bluesky_inode_map_minimize(fs);
 }
 
-/* Scan through the cache for dirty data and start flushing it to stable
- * storage.  This does not guarantee that data is committed when it returns.
- * Instead, this can be called occasionally to ensure that dirty data is
- * gradually flushed. */
-void bluesky_flushd_invoke(BlueSkyFS *fs)
+/* Drop cached data for a given inode, if it is clean.  inode must be locked. */
+static void drop_caches(BlueSkyInode *inode)
+{
+    if (inode->type == BLUESKY_REGULAR)
+        bluesky_file_drop_cached(inode);
+
+    BlueSkyCloudLog *log = inode->committed_item;
+    if (log != NULL) {
+        g_mutex_lock(log->lock);
+        if (log->data != NULL
+            && g_atomic_int_get(&log->data_lock_count) == 0
+            && (log->location_flags != 0))
+        {
+            bluesky_cloudlog_stats_update(log, -1);
+            bluesky_string_unref(log->data);
+            log->data = NULL;
+            bluesky_cloudlog_stats_update(log, 1);
+        }
+        if (log->location_flags & CLOUDLOG_CLOUD) {
+            log->location_flags &= ~CLOUDLOG_JOURNAL;
+        }
+        g_mutex_unlock(log->lock);
+    }
+}
+
+/* Drop clean data from the cache if needed.  Clean data should generally be
+ * memory-mapped from log file or similar, so the kernel can drop this clean
+ * data from memory for us and hence memory management isn't too important.
+ * Mainly, we'll want to drop references to data that hasn't been accessed in a
+ * while so that it is possible to reclaim log segments on disk. */
+static void flushd_clean(BlueSkyFS *fs)
 {
     g_mutex_lock(fs->lock);
-    g_hash_table_foreach(fs->inodes, flushd_inode, fs);
+
+    size_t inode_count = g_hash_table_size(fs->inodes);
+    if (!inode_count)
+        inode_count = 1;
+
+    while (inode_count-- > 0) {
+        BlueSkyInode *inode;
+        if (fs->accessed_list.prev == NULL)
+            break;
+        inode = fs->accessed_list.prev->data;
+
+        uint64_t elapsed = bluesky_get_current_time() - inode->access_time;
+        if (elapsed < CACHE_DROP_DELAY)
+            break;
+
+        if (bluesky_verbose) {
+            g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
+                  "Considering dropping cached data for inode %"PRIu64,
+                  inode->inum);
+        }
+
+        bluesky_inode_ref(inode);
+
+        g_mutex_unlock(fs->lock);
+
+        g_mutex_lock(inode->lock);
+
+        g_mutex_lock(fs->lock);
+        bluesky_list_unlink(&fs->accessed_list, inode->accessed_list);
+        inode->accessed_list = bluesky_list_prepend(&fs->accessed_list, inode);
+        g_mutex_unlock(fs->lock);
+
+        drop_caches(inode);
+
+        g_mutex_unlock(inode->lock);
+        bluesky_inode_unref(inode);
+
+        g_mutex_lock(fs->lock);
+    }
+
     g_mutex_unlock(fs->lock);
 }
+
+/* Run the flush daemon for a single iteration, though if it is already
+ * executing returns immediately. */
+static gpointer flushd_task(BlueSkyFS *fs)
+{
+    if (!g_mutex_trylock(fs->flushd_lock))
+        return NULL;
+    flushd_dirty(fs);
+    flushd_cloud(fs);
+    flushd_clean(fs);
+    bluesky_cachefile_gc(fs);
+    g_mutex_unlock(fs->flushd_lock);
+
+    return NULL;
+}
+
+void bluesky_flushd_invoke(BlueSkyFS *fs)
+{
+    g_thread_create((GThreadFunc)flushd_task, fs, FALSE, NULL);
+}
+
+void bluesky_flushd_invoke_conditional(BlueSkyFS *fs)
+{
+    if (g_atomic_int_get(&fs->cache_dirty) < bluesky_watermark_medium_dirty)
+        return;
+
+    if (bluesky_verbose) {
+        g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
+              "Too much data; invoking flushd: dirty=%d",
+              g_atomic_int_get(&fs->cache_dirty));
+    }
+
+    bluesky_flushd_invoke(fs);
+
+    /* If the system is under heavy memory pressure, actually delay execution
+     * so the flush daemon can catch up. */
+    while (g_atomic_int_get(&fs->cache_dirty)
+                + g_atomic_int_get(&fs->cache_log_dirty)
+           > bluesky_watermark_high_dirty) {
+        g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
+              "Waiting due to memory pressure, dirty=%d + %d",
+              g_atomic_int_get(&fs->cache_dirty),
+              g_atomic_int_get(&fs->cache_log_dirty));
+        g_mutex_lock(fs->lock);
+        g_cond_wait(fs->flushd_cond, fs->lock);
+        g_mutex_unlock(fs->lock);
+    }
+}
+
+/* Start a perpetually-running thread that flushes the cache occasionally. */
+static gpointer flushd_thread(BlueSkyFS *fs)
+{
+    while (TRUE) {
+        bluesky_flushd_invoke(fs);
+        struct timespec delay;
+        delay.tv_sec = 2;
+        delay.tv_nsec = 0;
+        nanosleep(&delay, NULL);
+    }
+
+    return NULL;
+}
+
+void bluesky_flushd_thread_launch(BlueSkyFS *fs)
+{
+    g_thread_create((GThreadFunc)flushd_thread, fs, FALSE, NULL);
+}
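For reference, the entry points this patch adds are expected to be driven from the rest of the filesystem roughly as sketched below.  This is an illustrative sketch rather than part of the patch: the example_* caller functions are hypothetical, while the bluesky_flushd_* functions are exactly those defined in cache.c above.

/* Illustrative usage sketch (not part of the patch).  The example_* functions
 * are hypothetical callers; the bluesky_flushd_* functions are those defined
 * in cache.c above. */
#include "bluesky-private.h"

void example_mount(BlueSkyFS *fs)
{
    /* At mount time, start the background thread that runs one flushd
     * iteration every couple of seconds (journal writeback, cloud flush,
     * clean-cache dropping, cache file GC). */
    bluesky_flushd_thread_launch(fs);
}

void example_after_write(BlueSkyFS *fs)
{
    /* After dirtying data, optionally kick the flush daemon early.  The call
     * returns immediately while dirty data is below
     * bluesky_watermark_medium_dirty, and blocks the caller while dirty data
     * exceeds bluesky_watermark_high_dirty so the daemon can catch up. */
    bluesky_flushd_invoke_conditional(fs);
}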