1 /* Blue Sky: File Systems in the Cloud
3 * Copyright (C) 2009 The Regents of the University of California
4 * Written by Michael Vrable <mvrable@cs.ucsd.edu>
14 #include "bluesky-private.h"
16 #define WRITEBACK_DELAY (5 * 1000000)
17 #define CACHE_CLEAN_DELAY (30 * 1000000)
19 /* Filesystem caching and cache coherency. */
21 static void writeback_complete(gpointer a, gpointer i)
23 BlueSkyInode *inode = (BlueSkyInode *)i;
25 g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
26 "Writeback for inode %"PRIu64" complete", inode->inum);
28 g_mutex_lock(inode->lock);
30 inode->change_commit = inode->change_pending;
31 if (inode->change_count == inode->change_commit) {
32 /* If inode is no longer dirty... */
33 inode->change_time = 0;
34 inode->change_pending = 0;
37 g_mutex_unlock(inode->lock);
40 /* Drop cached data for a given inode, if it is clean. inode must be locked. */
41 static void drop_caches(BlueSkyInode *inode)
43 if (inode->type == BLUESKY_REGULAR)
44 bluesky_file_drop_cached(inode);
47 static void flushd_inode(gpointer value, gpointer user_data)
49 BlueSkyFS *fs = (BlueSkyFS *)user_data;
51 BlueSkyInode *inode = (BlueSkyInode *)value;
53 g_mutex_lock(inode->lock);
55 if (inode->change_count == inode->change_commit) {
56 uint64_t delay = bluesky_get_current_time() - inode->access_time;
57 if (delay >= CACHE_CLEAN_DELAY)
59 g_mutex_unlock(inode->lock);
60 bluesky_inode_unref(inode);
64 if (inode->change_pending) {
65 /* Waiting for an earlier writeback to finish, so don't start a new
67 g_mutex_unlock(inode->lock);
68 bluesky_inode_unref(inode);
72 uint64_t elapsed = bluesky_get_current_time() - inode->change_time;
73 if (elapsed < WRITEBACK_DELAY) {
74 /* Give a bit more time before starting writeback. */
75 g_mutex_unlock(inode->lock);
76 bluesky_inode_unref(inode);
80 inode->change_pending = inode->change_count;
82 g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
83 "Starting flush of inode %"PRIu64, inode->inum);
85 /* Create a store barrier. All operations part of the writeback will be
86 * added to this barrier, so when the barrier completes we know that the
87 * writeback is finished. */
88 BlueSkyStoreAsync *barrier = bluesky_store_async_new(fs->store);
89 barrier->op = STORE_OP_BARRIER;
91 bluesky_inode_start_sync(inode, barrier);
93 bluesky_store_async_add_notifier(barrier, writeback_complete, inode);
94 bluesky_store_async_submit(barrier);
95 bluesky_store_async_unref(barrier);
97 g_mutex_unlock(inode->lock);
98 bluesky_inode_unref(inode);
101 /* Scan through the cache for dirty data and start flushing it to stable
102 * storage. This does not guarantee that data is committed when it returns.
103 * Instead, this can be called occasionally to ensure that dirty data is
106 * We do not want to hold the filesystem lock while flushing individual inodes,
107 * as that could lead to deadlock. So first scan through the inode table to get
108 * a reference to all inodes, then process that queue of inodes after dropping
109 * the filesystem lock. */
110 static void gather_inodes(gpointer key, gpointer value, gpointer user_data)
112 GSList **list = (GSList **)user_data;
113 *list = g_slist_prepend(*list, value);
114 bluesky_inode_ref((BlueSkyInode *)value);
/* Walk the in-memory inode table and hand each inode to flushd_inode, which
 * starts writeback of dirty data and drops stale clean caches.  Inodes are
 * gathered (and referenced) while holding fs->lock, then processed after the
 * lock is dropped, per the deadlock-avoidance note above.
 * NOTE(review): the declaration of `list` (presumably `GSList *list = NULL;`)
 * and any trailing cleanup such as g_slist_free are outside this view —
 * confirm against the full file. */
void bluesky_flushd_invoke(BlueSkyFS *fs)
    /* Snapshot the inode table under the filesystem lock; gather_inodes takes
     * a reference on each inode, released later by flushd_inode. */
    g_mutex_lock(fs->lock);
    g_hash_table_foreach(fs->inodes, gather_inodes, &list);
    g_mutex_unlock(fs->lock);
    /* Prepending built the list in reverse; restore table-iteration order,
     * then process each inode without holding fs->lock. */
    list = g_slist_reverse(list);
    g_slist_foreach(list, flushd_inode, fs);