+
+/* Run the flush daemon for a single iteration, though if it is already
+ * executing returns immediately.
+ *
+ * flushd_lock is acquired with a trylock so that concurrent invocations do
+ * not queue up: if another thread is mid-pass, this call is a no-op.
+ * The signature matches GThreadFunc (gpointer return, always NULL) so the
+ * function can be handed directly to g_thread_create. */
+static gpointer flushd_task(BlueSkyFS *fs)
+{
+    if (!g_mutex_trylock(fs->flushd_lock))
+        return NULL;            /* a pass is already in progress elsewhere */
+    /* One pass, in order: flush dirty data, push to cloud storage, drop
+     * clean cache entries, then garbage-collect cache files.  (Phase
+     * semantics inferred from helper names -- helpers defined elsewhere.) */
+    flushd_dirty(fs);
+    flushd_cloud(fs);
+    flushd_clean(fs);
+    bluesky_cachefile_gc(fs);
+    g_mutex_unlock(fs->flushd_lock);
+
+    return NULL;
+}
+
+/* Kick off one asynchronous flush-daemon pass in a freshly-created thread.
+ * The thread is created non-joinable (third argument FALSE) so it reaps
+ * itself; flushd_task's trylock guarantees at most one pass runs at a time.
+ *
+ * NOTE(review): the GThread* return and GError out-parameter of
+ * g_thread_create are both discarded, so thread-creation failure is
+ * silent -- confirm that is acceptable here. */
+void bluesky_flushd_invoke(BlueSkyFS *fs)
+{
+    g_thread_create((GThreadFunc)flushd_task, fs, FALSE, NULL);
+}
+
+/* Invoke the flush daemon only if the cache holds enough dirty data to
+ * warrant it, and throttle the caller while memory pressure is high.
+ *
+ * If cache_dirty is below the medium watermark this is a no-op.  Otherwise
+ * a background flush pass is launched, and -- if the combined dirty count
+ * exceeds the high watermark -- the calling thread blocks on flushd_cond
+ * until the flush daemon signals that pressure has dropped. */
+void bluesky_flushd_invoke_conditional(BlueSkyFS *fs)
+{
+    /* Below the medium watermark there is nothing urgent to flush. */
+    if (g_atomic_int_get(&fs->cache_dirty) < bluesky_watermark_medium_dirty)
+        return;
+
+    if (bluesky_verbose) {
+        g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
+              "Too much data; invoking flushd: dirty=%d",
+              g_atomic_int_get(&fs->cache_dirty));
+    }
+
+    bluesky_flushd_invoke(fs);
+
+    /* If the system is under heavy memory pressure, actually delay execution
+     * so the flush daemon can catch up.
+     *
+     * NOTE(review): the dirty-count predicate is evaluated before fs->lock
+     * is taken, so a signal on flushd_cond arriving between the check and
+     * g_cond_wait could be missed until the next signal; this assumes
+     * flushd_cond is (re)signalled whenever the counters drop -- confirm
+     * against the flush daemon's signalling code. */
+    while (g_atomic_int_get(&fs->cache_dirty)
+                + g_atomic_int_get(&fs->cache_log_dirty)
+           > bluesky_watermark_high_dirty) {
+        g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
+              "Waiting due to memory pressure, dirty=%d + %d",
+              g_atomic_int_get(&fs->cache_dirty),
+              g_atomic_int_get(&fs->cache_log_dirty));
+        g_mutex_lock(fs->lock);
+        g_cond_wait(fs->flushd_cond, fs->lock);
+        g_mutex_unlock(fs->lock);
+    }
+}
+
+/* Start a perpetually-running thread that flushes the cache occasionally.
+ *
+ * Body of the background thread: every two seconds it launches one flush
+ * pass via bluesky_flushd_invoke.  Spawning a fresh worker each iteration
+ * is safe because flushd_task's trylock makes overlapping passes no-ops.
+ * GThreadFunc signature; the return statement is unreachable. */
+static gpointer flushd_thread(BlueSkyFS *fs)
+{
+    while (TRUE) {
+        bluesky_flushd_invoke(fs);
+        /* Sleep 2 s between passes (nanosleep interruption not retried;
+         * an early wakeup just shortens one interval). */
+        struct timespec delay;
+        delay.tv_sec = 2;
+        delay.tv_nsec = 0;
+        nanosleep(&delay, NULL);
+    }
+
+    return NULL;
+}
+
+/* Launch the perpetual flush-daemon thread (flushd_thread) for this
+ * filesystem.  Created non-joinable (FALSE) so it runs for the lifetime of
+ * the process; the GThread handle and creation errors are discarded. */
+void bluesky_flushd_thread_launch(BlueSkyFS *fs)
+{
+    g_thread_create((GThreadFunc)flushd_thread, fs, FALSE, NULL);
+}