#include "bluesky-private.h"
-#define WRITEBACK_DELAY (5 * 1000000)
-#define CACHE_CLEAN_DELAY (30 * 1000000)
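+/* Delay, in microseconds, before dirty data is forced out to stable
+ * storage. */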
+#define WRITEBACK_DELAY (20 * 1000000)
/* Filesystem caching and cache coherency. */
g_mutex_unlock(inode->lock);
}
-/* Drop cached data for a given inode, if it is clean. inode must be locked. */
-static void drop_caches(BlueSkyInode *inode)
-{
- if (inode->type == BLUESKY_REGULAR)
- bluesky_file_drop_cached(inode);
-}
-
+#if 0
static void flushd_inode(gpointer value, gpointer user_data)
{
BlueSkyFS *fs = (BlueSkyFS *)user_data;
g_mutex_unlock(inode->lock);
bluesky_inode_unref(inode);
}
+#endif
-/* Scan through the cache for dirty data and start flushing it to stable
- * storage. This does not guarantee that data is committed when it returns.
- * Instead, this can be called occasionally to ensure that dirty data is
- * gradually flushed.
- *
- * We do not want to hold the filesystem lock while flushing individual inodes,
- * a that could lead to deadlock. So first scan through the inode table to get
- * a reference to all inodes, then process that queue of inodes after dropping
- * the filesystem lock. */
-static void gather_inodes(gpointer key, gpointer value, gpointer user_data)
+static void flushd_dirty_inode(BlueSkyInode *inode)
{
- GSList **list = (GSList **)user_data;
- *list = g_slist_prepend(*list, value);
- bluesky_inode_ref((BlueSkyInode *)value);
+ BlueSkyFS *fs = inode->fs;
+
+ /* Inode is clean; nothing to do. */
+ if (inode->change_count == inode->change_commit)
+ return;
+
+    /* A writeback is already in progress; don't start another until it
+     * finishes.  Instead, put the inode back on the dirty list so that it
+     * will be reconsidered later. */
+    if (inode->change_pending) {
+ g_mutex_lock(fs->lock);
+ inode->change_time = bluesky_get_current_time();
+ bluesky_list_unlink(&fs->dirty_list, inode->dirty_list);
+ inode->dirty_list = bluesky_list_prepend(&fs->dirty_list, inode);
+ g_mutex_unlock(fs->lock);
+ return;
+ }
+
+ g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
+ "Starting flush of inode %"PRIu64, inode->inum);
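+    /* Record which version of the inode is being written out; when the
+     * writeback completes, this lets us tell whether further changes
+     * arrived in the meantime. */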
+ inode->change_pending = inode->change_count;
+
+    /* Create a store barrier.  All operations that are part of the writeback
+     * are added to this barrier, so when the barrier completes we know the
+     * writeback is finished. */
+ BlueSkyStoreAsync *barrier = bluesky_store_async_new(fs->store);
+ barrier->op = STORE_OP_BARRIER;
+
+ bluesky_inode_start_sync(inode, barrier);
+
+ bluesky_store_async_add_notifier(barrier, writeback_complete, inode);
+ bluesky_store_async_submit(barrier);
+ bluesky_store_async_unref(barrier);
}
-void bluesky_flushd_invoke(BlueSkyFS *fs)
+/* Try to flush dirty data to disk, either due to memory pressure or due to
+ * timeouts. */
+static void flushd_dirty(BlueSkyFS *fs)
{
- GSList *list = NULL;
+ int64_t start_time = bluesky_get_current_time();
+ g_mutex_lock(fs->lock);
+
+ while (1) {
+ BlueSkyInode *inode;
+ if (fs->dirty_list.prev == NULL)
+ break;
+ inode = fs->dirty_list.prev->data;
+
+        g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
+              "Considering flushing inode %"PRIu64, inode->inum);
+
+        /* Stop processing dirty inodes if we have enough memory available
+         * and the oldest inode is recent enough that it does not yet need
+         * to be flushed out. */
+ uint64_t elapsed = bluesky_get_current_time() - inode->change_time;
+ if (g_atomic_int_get(&fs->cache_dirty) < bluesky_watermark_low_dirty
+ && elapsed < WRITEBACK_DELAY)
+ break;
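+        /* Stop once we reach inodes dirtied after this pass started, to
+         * avoid looping forever if inodes are redirtied while we work. */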
+ if (inode->change_time > start_time)
+ break;
+ bluesky_inode_ref(inode);
+
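+        /* Remove the inode from the dirty list; flushd_dirty_inode will put
+         * it back if the flush cannot proceed yet. */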
+ bluesky_list_unlink(&fs->dirty_list, fs->dirty_list.prev);
+ inode->dirty_list = NULL;
+
+ g_mutex_unlock(fs->lock);
+
+ g_mutex_lock(inode->lock);
+ flushd_dirty_inode(inode);
+ g_mutex_unlock(inode->lock);
+ bluesky_inode_unref(inode);
+
+ g_mutex_lock(fs->lock);
+ }
+
+ g_mutex_unlock(fs->lock);
+}
+
+/* Drop cached data for a given inode, if it is clean. inode must be locked. */
+static void drop_caches(BlueSkyInode *inode)
+{
+ if (inode->type == BLUESKY_REGULAR)
+ bluesky_file_drop_cached(inode);
+}
+
+/* Drop clean data from the cache if needed due to memory pressure. */
+static void flushd_clean(BlueSkyFS *fs)
+{
g_mutex_lock(fs->lock);
- g_hash_table_foreach(fs->inodes, gather_inodes, &list);
+
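+    /* Bound the scan by the current number of inodes so that the loop is
+     * guaranteed to terminate. */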
+ size_t inode_count = g_hash_table_size(fs->inodes);
+ if (!inode_count)
+ inode_count = 1;
+
+ while (inode_count-- > 0) {
+ if (g_atomic_int_get(&fs->cache_total) < bluesky_watermark_medium_total)
+ break;
+
+ BlueSkyInode *inode;
+ if (fs->accessed_list.prev == NULL)
+ break;
+ inode = fs->accessed_list.prev->data;
+
+        g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
+              "Considering dropping cached data for inode %"PRIu64,
+              inode->inum);
+
+ bluesky_inode_ref(inode);
+
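+        /* Move the inode from the tail (least recently accessed) to the
+         * head of the accessed list so it is not reconsidered during this
+         * pass. */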
+ bluesky_list_unlink(&fs->accessed_list, fs->accessed_list.prev);
+ inode->accessed_list = bluesky_list_prepend(&fs->accessed_list, inode);
+
+ g_mutex_unlock(fs->lock);
+
+ g_mutex_lock(inode->lock);
+ drop_caches(inode);
+ g_mutex_unlock(inode->lock);
+ bluesky_inode_unref(inode);
+
+ g_mutex_lock(fs->lock);
+ }
+
g_mutex_unlock(fs->lock);
+}
+
+/* Run the flush daemon for a single iteration; if it is already running,
+ * return immediately. */
+static gpointer flushd_task(BlueSkyFS *fs)
+{
+ if (!g_mutex_trylock(fs->flushd_lock))
+ return NULL;
+ flushd_dirty(fs);
+ flushd_clean(fs);
+ g_mutex_unlock(fs->flushd_lock);
- list = g_slist_reverse(list);
- g_slist_foreach(list, flushd_inode, fs);
+ return NULL;
+}
+
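+/* Launch a background thread to run one pass of the flush daemon. */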
+void bluesky_flushd_invoke(BlueSkyFS *fs)
+{
+ g_thread_create((GThreadFunc)flushd_task, fs, FALSE, NULL);
+}
+
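+/* Start the flush daemon, but only if cache usage is above the high
+ * watermarks for dirty or total data. */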
+void bluesky_flushd_invoke_conditional(BlueSkyFS *fs)
+{
+ if (g_atomic_int_get(&fs->cache_dirty) < bluesky_watermark_high_dirty
+ && g_atomic_int_get(&fs->cache_total) < bluesky_watermark_high_total)
+ return;
+
+ g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
+ "Too much data; invoking flushd: dirty=%d total=%d",
+ g_atomic_int_get(&fs->cache_dirty),
+ g_atomic_int_get(&fs->cache_total));
- g_slist_free(list);
+ bluesky_flushd_invoke(fs);
}