/* Blue Sky: File Systems in the Cloud
 *
 * Copyright (C) 2009  The Regents of the University of California
 * Written by Michael Vrable <mvrable@cs.ucsd.edu>
 */

#include <stdint.h>
#include <inttypes.h>
#include <time.h>
#include <glib.h>

#include "bluesky-private.h"

#define WRITEBACK_DELAY (20 * 1000000)
#define CACHE_DROP_DELAY (20 * 1000000)
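
/* Both delays are expressed in the units returned by
 * bluesky_get_current_time() (microseconds), so each corresponds to roughly
 * 20 seconds. */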

/* Filesystem caching and cache coherency.  There are actually a couple of
 * different tasks that are performed here:
 *   - Forcing data to the log if needed to reclaim memory or simply if the
 *     data has been dirty in memory long enough.
 *   - Writing batches of data to the cloud.
 */
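
/* The work is split across the helpers below: flushd_dirty() pushes dirty
 * inodes out to the local journal, flushd_cloud() writes committed data to
 * the cloud, and flushd_clean() drops clean cached data.  flushd_task() runs
 * one pass of all three. */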

static void flushd_dirty_inode(BlueSkyInode *inode)
{
    BlueSkyFS *fs = inode->fs;

    g_mutex_lock(fs->lock);
    bluesky_list_unlink(&fs->unlogged_list, inode->unlogged_list);
    inode->unlogged_list = NULL;
    g_mutex_unlock(fs->lock);

    /* Inode is clean; nothing to do. */
    if (inode->change_count == inode->change_commit)
        return;

    if (bluesky_verbose) {
        g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
              "Starting flush of inode %"PRIu64, inode->inum);
    }
    bluesky_inode_start_sync(inode);
}

/* Check whether memory usage may have dropped below critical thresholds for
 * waking up waiting threads. */
void flushd_check_wakeup(BlueSkyFS *fs)
{
    int dirty = g_atomic_int_get(&fs->cache_dirty);
    dirty += g_atomic_int_get(&fs->cache_log_dirty);

    if (dirty <= bluesky_watermark_high_dirty)
        g_cond_broadcast(fs->flushd_cond);
}

/* Try to flush dirty data to disk, either due to memory pressure or due to
 * timeouts. */
static void flushd_dirty(BlueSkyFS *fs)
{
    int64_t start_time = bluesky_get_current_time();
    g_mutex_lock(fs->lock);

    while (1) {
        BlueSkyInode *inode;
        if (fs->unlogged_list.prev == NULL)
            break;
        inode = fs->unlogged_list.prev->data;
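
        /* The unlogged list is kept in modification order, so the tail of the
         * list holds the inode that has been dirty in memory the longest. */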

        if (bluesky_verbose) {
            g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
                  "Considering flushing inode %"PRIu64, inode->inum);
        }

        /* Stop processing dirty inodes if we both have enough memory available
         * and the oldest inode is sufficiently new that it need not be flushed
         * out. */
        uint64_t elapsed = bluesky_get_current_time() - inode->change_time;
        if (g_atomic_int_get(&fs->cache_dirty) < bluesky_watermark_low_dirty
                && elapsed < WRITEBACK_DELAY)
            break;
        if (inode->change_time > start_time)
            break;

        bluesky_inode_ref(inode);

        g_mutex_unlock(fs->lock);

        g_mutex_lock(inode->lock);
        flushd_dirty_inode(inode);
        g_mutex_unlock(inode->lock);
        bluesky_inode_unref(inode);

        g_mutex_lock(fs->lock);
        flushd_check_wakeup(fs);
    }

    g_cond_broadcast(fs->flushd_cond);

    g_mutex_unlock(fs->lock);
}

/* Try to flush dirty data to the cloud.  This will take a snapshot of the
 * entire filesystem (though only point-in-time consistent for isolated inodes
 * and not the filesystem as a whole) and ensure all data is written to the
 * cloud.  When the write completes, we will allow old journal segments (those
 * that were fully written _before_ the snapshot process started) to be garbage
 * collected.  Newer journal segments can't be collected yet since they may
 * still contain data which has not been written persistently to the cloud.
 *
 * Note that some of this code relies on the fact that only this thread of
 * control (running flushd_cloud) is manipulating the inode map, and so
 * concurrent updates to the inode map are prevented even without the
 * filesystem lock held.  Take great care if allowing multi-threaded access to
 * the inode map... */
static void flushd_cloud(BlueSkyFS *fs)
{
    g_mutex_lock(fs->lock);

    /* TODO: Locking?  Since we're reading a single variable this is probably
     * atomic but a lock could be safer. */
    BlueSkyCloudLog *marker = bluesky_log_get_commit_point(fs);
    int journal_seq_start = fs->log->seq_num;
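
    /* Remember the journal sequence number at the start of this pass: once the
     * cloud write completes, journal segments numbered below this point no
     * longer hold the only copy of any data and can be reclaimed. */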

    while (1) {
        BlueSkyInode *inode;
        if (fs->dirty_list.prev == NULL)
            break;
        inode = fs->dirty_list.prev->data;

        if (bluesky_verbose) {
            g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
                  "Flushing inode %"PRIu64" to cloud", inode->inum);
        }

        bluesky_inode_ref(inode);

        g_mutex_unlock(fs->lock);

        g_mutex_lock(inode->lock);
        g_assert(inode->change_cloud == inode->change_commit);
        g_mutex_lock(fs->lock);
        bluesky_list_unlink(&fs->dirty_list, inode->dirty_list);
        inode->dirty_list = NULL;
        g_mutex_unlock(fs->lock);

        BlueSkyCloudLog *log = inode->committed_item;
        inode->committed_item = NULL;
        g_mutex_unlock(inode->lock);
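
        /* The committed log item has been detached from the inode, so it can
         * be serialized for the cloud without continuing to hold the inode
         * lock. */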
        if (log != NULL)
            bluesky_cloudlog_serialize(log, fs);
        bluesky_inode_unref(inode);
        bluesky_cloudlog_unref(log);

        g_mutex_lock(fs->lock);
    }
    g_mutex_unlock(fs->lock);

    /* Write out any updated inode map entries, so that all inodes just written
     * can be located, and then a final commit record. */
    BlueSkyCloudLog *commit_record = bluesky_inode_map_serialize(fs);
    if (commit_record != NULL) {
        bluesky_cloudlog_serialize(commit_record, fs);
    } else {
        g_print("No need for a checkpoint record...\n");
    }

    bluesky_cloudlog_flush(fs);

    /* Wait until all segments have been written to the cloud, so that it
     * becomes safe to free up journal segments. */
    while (fs->log_state->pending_segments != NULL) {
        SerializedRecord *segment
            = (SerializedRecord *)fs->log_state->pending_segments->data;
        g_mutex_lock(segment->lock);
        while (!segment->complete)
            g_cond_wait(segment->cond, segment->lock);
        g_mutex_unlock(segment->lock);

        g_mutex_free(segment->lock);
        g_cond_free(segment->cond);

        fs->log_state->pending_segments
            = g_list_delete_link(fs->log_state->pending_segments,
                                 fs->log_state->pending_segments);
    }
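
    /* Every pending segment from this pass has reached the cloud; note the
     * commit point in the local journal. */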
    bluesky_log_write_commit_point(fs, marker);
    bluesky_cloudlog_unref(commit_record);

    g_print("All segments have been flushed, journal < %d is clean\n",
            journal_seq_start);

    fs->log->journal_watermark = journal_seq_start;
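
    /* Everything before journal_seq_start is now backed by the cloud, so the
     * journal code may garbage-collect those on-disk segments. */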

    bluesky_inode_map_minimize(fs);
}

/* Drop cached data for a given inode, if it is clean.  The inode must be
 * locked. */
static void drop_caches(BlueSkyInode *inode)
{
    if (inode->type == BLUESKY_REGULAR)
        bluesky_file_drop_cached(inode);

    BlueSkyCloudLog *log = inode->committed_item;
    if (log != NULL) {
        g_mutex_lock(log->lock);
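
        /* Only drop the cached block data if no one has it pinned in memory
         * (data_lock_count == 0) and it has already been stored somewhere
         * durable (location_flags != 0), so it can be re-read later if
         * needed. */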
        if (log->data != NULL
            && g_atomic_int_get(&log->data_lock_count) == 0
            && (log->location_flags != 0))
        {
            bluesky_cloudlog_stats_update(log, -1);
            bluesky_string_unref(log->data);
            log->data = NULL;
            bluesky_cloudlog_stats_update(log, 1);
        }
        g_mutex_unlock(log->lock);
    }
}

/* Drop clean data from the cache if needed.  Clean data should generally be
 * memory-mapped from the log file or similar, so the kernel can drop this
 * clean data from memory for us and hence memory management isn't too
 * important.  Mainly, we'll want to drop references to data that hasn't been
 * accessed in a while so that it is possible to reclaim log segments on
 * disk. */
static void flushd_clean(BlueSkyFS *fs)
{
    g_mutex_lock(fs->lock);

    size_t inode_count = g_hash_table_size(fs->inodes);

    while (inode_count-- > 0) {
        BlueSkyInode *inode;
        if (fs->accessed_list.prev == NULL)
            break;
        inode = fs->accessed_list.prev->data;

        uint64_t elapsed = bluesky_get_current_time() - inode->access_time;
        if (elapsed < CACHE_DROP_DELAY)
            break;

        if (bluesky_verbose) {
            g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
                  "Considering dropping cached data for inode %"PRIu64,
                  inode->inum);
        }

        bluesky_inode_ref(inode);

        g_mutex_unlock(fs->lock);

        g_mutex_lock(inode->lock);
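
        /* Move the inode to the head of the accessed list so that this scan,
         * which works backward from the tail, does not immediately revisit
         * it. */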
        g_mutex_lock(fs->lock);
        bluesky_list_unlink(&fs->accessed_list, inode->accessed_list);
        inode->accessed_list = bluesky_list_prepend(&fs->accessed_list, inode);
        g_mutex_unlock(fs->lock);

        drop_caches(inode);

        g_mutex_unlock(inode->lock);
        bluesky_inode_unref(inode);

        g_mutex_lock(fs->lock);
    }

    g_mutex_unlock(fs->lock);
}

/* Run the flush daemon for a single iteration; if it is already executing,
 * return immediately. */
static gpointer flushd_task(BlueSkyFS *fs)
{
    if (!g_mutex_trylock(fs->flushd_lock))
        return NULL;

    g_print("\nCloudlog cache: %d dirty, %d writeback, %d journal, %d cloud\n",
            g_atomic_int_get(&fs->cache_log_dirty),
            g_atomic_int_get(&fs->cache_log_writeback),
            g_atomic_int_get(&fs->cache_log_journal),
            g_atomic_int_get(&fs->cache_log_cloud));
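
    /* One pass of the flush daemon: push dirty inodes to the local journal,
     * write committed data to the cloud, then drop clean cached data and
     * garbage-collect cache files. */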
    flushd_dirty(fs);
    flushd_cloud(fs);
    flushd_clean(fs);
    bluesky_cachefile_gc(fs);
    g_mutex_unlock(fs->flushd_lock);

    return NULL;
}

void bluesky_flushd_invoke(BlueSkyFS *fs)
{
    g_thread_create((GThreadFunc)flushd_task, fs, FALSE, NULL);
}

void bluesky_flushd_invoke_conditional(BlueSkyFS *fs)
{
    if (g_atomic_int_get(&fs->cache_dirty) < bluesky_watermark_medium_dirty)
        return;

    if (bluesky_verbose) {
        g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
              "Too much data; invoking flushd: dirty=%d",
              g_atomic_int_get(&fs->cache_dirty));
    }

    bluesky_flushd_invoke(fs);

    /* If the system is under heavy memory pressure, actually delay execution
     * so the flush daemon can catch up. */
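    /* (The wait ends when flushd_check_wakeup() broadcasts fs->flushd_cond
     * after the amount of dirty data falls back below the high watermark.) */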
    while (g_atomic_int_get(&fs->cache_dirty)
                + g_atomic_int_get(&fs->cache_log_dirty)
           > bluesky_watermark_high_dirty) {
        g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
              "Waiting due to memory pressure, dirty=%d + %d",
              g_atomic_int_get(&fs->cache_dirty),
              g_atomic_int_get(&fs->cache_log_dirty));
        g_mutex_lock(fs->lock);
        g_cond_wait(fs->flushd_cond, fs->lock);
        g_mutex_unlock(fs->lock);
    }
}

/* Start a perpetually-running thread that flushes the cache occasionally. */
static gpointer flushd_thread(BlueSkyFS *fs)
{
    while (TRUE) {
        bluesky_flushd_invoke(fs);
        struct timespec delay = { 2, 0 };   /* pause between passes (assumed interval) */
        nanosleep(&delay, NULL);
    }

    return NULL;
}

void bluesky_flushd_thread_launch(BlueSkyFS *fs)
{
    g_thread_create((GThreadFunc)flushd_thread, fs, FALSE, NULL);
}