/* Blue Sky: File Systems in the Cloud
 *
 * Copyright (C) 2009 The Regents of the University of California
 * Written by Michael Vrable <mvrable@cs.ucsd.edu>
 */

#include <stdint.h>
#include <inttypes.h>
#include <glib.h>

#include "bluesky-private.h"
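
/* Minimum age of a dirty inode before writeback starts.  Times here are in
 * the units returned by bluesky_get_current_time(); assuming microseconds,
 * this is 20 seconds. */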
#define WRITEBACK_DELAY (20 * 1000000)

/* Filesystem caching and cache coherency. */
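
/* A note on the dirty-tracking counters used below: change_count is
 * incremented on every modification to an inode, change_commit records the
 * version last written out to the store, and change_pending records the
 * version currently being written back (zero when no writeback is in
 * flight).  An inode is clean exactly when change_count == change_commit. */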

static void writeback_complete(gpointer a, gpointer i)
{
    BlueSkyInode *inode = (BlueSkyInode *)i;

    if (bluesky_verbose) {
        g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
              "Writeback for inode %"PRIu64" complete", inode->inum);
    }

    g_mutex_lock(inode->lock);

    inode->change_commit = inode->change_pending;
    inode->change_pending = 0;
    if (inode->change_count == inode->change_commit) {
        /* If inode is no longer dirty... */
        inode->change_time = 0;
        g_mutex_lock(inode->fs->lock);
        bluesky_list_unlink(&inode->fs->dirty_list, inode->dirty_list);
        inode->dirty_list = NULL;
        g_mutex_unlock(inode->fs->lock);
    }

    g_mutex_unlock(inode->lock);
}
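
/* Flush daemon worker for a single inode: if the inode is clean and has not
 * been accessed recently, try to drop it from the cache; if it is dirty and
 * old enough, start a writeback.  Takes ownership of one reference on the
 * inode, which is released before returning. */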
static void flushd_inode(gpointer value, gpointer user_data)
{
    BlueSkyFS *fs = (BlueSkyFS *)user_data;

    BlueSkyInode *inode = (BlueSkyInode *)value;

    g_mutex_lock(inode->lock);

    if (inode->change_count == inode->change_commit) {
        uint64_t delay = bluesky_get_current_time() - inode->access_time;
        if (delay >= CACHE_CLEAN_DELAY) {
            /* If the only references are the one we hold and the one in the
             * filesystem inum->inode hash table...  First check the refcount
             * without the lock for speed, but if the check looks good, verify
             * it after taking the filesystem lock. */
            if (inode->refcount == 2) {
                g_mutex_lock(fs->lock);
                if (inode->refcount == 2) {
                    g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
                          "Trying to drop inode %"PRIu64" from cache",
                          inode->inum);
                    if (g_hash_table_remove(fs->inodes, &inode->inum))
                        bluesky_inode_unref(inode);
                }
                bluesky_list_unlink(&inode->fs->accessed_list,
                                    inode->accessed_list);
                inode->accessed_list = NULL;
                bluesky_list_unlink(&inode->fs->dirty_list,
                                    inode->dirty_list);
                inode->dirty_list = NULL;
                g_mutex_unlock(fs->lock);
            }
        }

        g_mutex_unlock(inode->lock);
        bluesky_inode_unref(inode);
        return;
    }

    if (inode->change_pending) {
        /* Waiting for an earlier writeback to finish, so don't start a new
         * writeback yet. */
        g_mutex_unlock(inode->lock);
        bluesky_inode_unref(inode);
        return;
    }

    uint64_t elapsed = bluesky_get_current_time() - inode->change_time;
    if (elapsed < WRITEBACK_DELAY) {
        /* Give a bit more time before starting writeback. */
        g_mutex_unlock(inode->lock);
        bluesky_inode_unref(inode);
        return;
    }

    inode->change_pending = inode->change_count;

    g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
          "Starting flush of inode %"PRIu64, inode->inum);

    /* Create a store barrier.  All operations that are part of the writeback
     * will be added to this barrier, so when the barrier completes we know
     * that the writeback is finished. */
    BlueSkyStoreAsync *barrier = bluesky_store_async_new(fs->store);
    barrier->op = STORE_OP_BARRIER;

    bluesky_inode_start_sync(inode, barrier);

    bluesky_store_async_add_notifier(barrier, writeback_complete, inode);
    bluesky_store_async_submit(barrier);
    bluesky_store_async_unref(barrier);

    g_mutex_unlock(inode->lock);
    bluesky_inode_unref(inode);
}
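
/* Start writeback of a single dirty inode.  The caller must hold the inode
 * lock and is responsible for the inode reference. */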
static void flushd_dirty_inode(BlueSkyInode *inode)
{
    BlueSkyFS *fs = inode->fs;

    g_mutex_lock(fs->lock);
    bluesky_list_unlink(&fs->dirty_list, inode->dirty_list);
    inode->dirty_list = NULL;
    g_mutex_unlock(fs->lock);

    /* Inode is clean; nothing to do. */
    if (inode->change_count == inode->change_commit)
        return;

    /* Inode writeback is in progress; put back on the dirty list. */
    if (inode->change_pending) {
        /* Waiting for an earlier writeback to finish, so don't start a new
         * writeback yet. */
        g_mutex_lock(fs->lock);
        inode->change_time = bluesky_get_current_time();
        bluesky_list_unlink(&fs->dirty_list, inode->dirty_list);
        inode->dirty_list = bluesky_list_prepend(&fs->dirty_list, inode);
        g_mutex_unlock(fs->lock);
        return;
    }

    if (bluesky_verbose) {
        g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
              "Starting flush of inode %"PRIu64, inode->inum);
    }

    inode->change_pending = inode->change_count;

    /* Create a store barrier.  All operations that are part of the writeback
     * will be added to this barrier, so when the barrier completes we know
     * that the writeback is finished. */
    BlueSkyStoreAsync *barrier = bluesky_store_async_new(fs->store);
    barrier->op = STORE_OP_BARRIER;

    bluesky_inode_start_sync(inode, barrier);

    bluesky_store_async_add_notifier(barrier, writeback_complete, inode);
    bluesky_store_async_submit(barrier);
    bluesky_store_async_unref(barrier);
}

/* Try to flush dirty data to disk, either due to memory pressure or due to
 * timeouts. */
static void flushd_dirty(BlueSkyFS *fs)
{
    int64_t start_time = bluesky_get_current_time();
    g_mutex_lock(fs->lock);

    while (1) {
        BlueSkyInode *inode;
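        /* The dirty list keeps the most recently modified inodes at the head
         * (see bluesky_list_prepend above), so the tail holds the oldest
         * dirty inode. */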
        if (fs->dirty_list.prev == NULL)
            break;
        inode = fs->dirty_list.prev->data;

        if (bluesky_verbose) {
            g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
                  "Considering flushing inode %"PRIu64, inode->inum);
        }

        /* Stop processing dirty inodes once we have enough memory available
         * and the oldest inode is recent enough that it need not be flushed
         * out yet. */
        uint64_t elapsed = bluesky_get_current_time() - inode->change_time;
        if (g_atomic_int_get(&fs->cache_dirty) < bluesky_watermark_low_dirty
                && elapsed < WRITEBACK_DELAY)
            break;
        if (inode->change_time > start_time)
            break;

        bluesky_inode_ref(inode);

        g_mutex_unlock(fs->lock);

        g_mutex_lock(inode->lock);
        flushd_dirty_inode(inode);
        g_mutex_unlock(inode->lock);
        bluesky_inode_unref(inode);

        g_mutex_lock(fs->lock);
    }

    g_mutex_unlock(fs->lock);
}

/* Drop cached data for a given inode, if it is clean.  inode must be locked. */
static void drop_caches(BlueSkyInode *inode)
{
    if (inode->type == BLUESKY_REGULAR)
        bluesky_file_drop_cached(inode);
}

/* Drop clean data from the cache if needed due to memory pressure. */
static void flushd_clean(BlueSkyFS *fs)
{
    g_mutex_lock(fs->lock);

    size_t inode_count = g_hash_table_size(fs->inodes);
    if (!inode_count)
        inode_count = 1;

    while (inode_count-- > 0) {
        if (g_atomic_int_get(&fs->cache_total) < bluesky_watermark_medium_total)
            break;

        BlueSkyInode *inode;
        if (fs->accessed_list.prev == NULL)
            break;
        inode = fs->accessed_list.prev->data;

        if (bluesky_verbose) {
            g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
                  "Considering dropping cached data for inode %"PRIu64,
                  inode->inum);
        }

        bluesky_inode_ref(inode);

        g_mutex_unlock(fs->lock);

        g_mutex_lock(inode->lock);
        drop_caches(inode);

        g_mutex_lock(fs->lock);
        bluesky_list_unlink(&fs->accessed_list, inode->accessed_list);
        inode->accessed_list = bluesky_list_prepend(&fs->accessed_list, inode);
        g_mutex_unlock(fs->lock);

        g_mutex_unlock(inode->lock);
        bluesky_inode_unref(inode);

        g_mutex_lock(fs->lock);
    }

    g_mutex_unlock(fs->lock);
}

/* Run the flush daemon for a single iteration; if it is already executing,
 * return immediately. */
static gpointer flushd_task(BlueSkyFS *fs)
{
    if (!g_mutex_trylock(fs->flushd_lock))
        return NULL;
    flushd_dirty(fs);
    flushd_clean(fs);
    g_mutex_unlock(fs->flushd_lock);

    return NULL;
}
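
/* Launch one flush daemon iteration in a detached background thread; the
 * trylock in flushd_task ensures at most one iteration runs at a time. */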
void bluesky_flushd_invoke(BlueSkyFS *fs)
{
    g_thread_create((GThreadFunc)flushd_task, fs, FALSE, NULL);
}
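
/* Kick off the flush daemon only when cache usage exceeds a high watermark
 * for dirty or total data; below the watermarks, writeback is left to the
 * normal timeout-driven path. */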
void bluesky_flushd_invoke_conditional(BlueSkyFS *fs)
{
    if (g_atomic_int_get(&fs->cache_dirty) < bluesky_watermark_high_dirty
            && g_atomic_int_get(&fs->cache_total) < bluesky_watermark_high_total)
        return;

    if (bluesky_verbose) {
        g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
              "Too much data; invoking flushd: dirty=%d total=%d",
              g_atomic_int_get(&fs->cache_dirty),
              g_atomic_int_get(&fs->cache_total));
    }

    bluesky_flushd_invoke(fs);
}