BlueSkyFS *fs = inode->fs;
g_mutex_lock(fs->lock);
- bluesky_list_unlink(&fs->dirty_list, inode->dirty_list);
- inode->dirty_list = NULL;
+ bluesky_list_unlink(&fs->unlogged_list, inode->unlogged_list);
+ inode->unlogged_list = NULL;
g_mutex_unlock(fs->lock);
/* Inode is clean; nothing to do. */
int64_t start_time = bluesky_get_current_time();
g_mutex_lock(fs->lock);
+ while (1) {
+ BlueSkyInode *inode;
+ if (fs->unlogged_list.prev == NULL)
+ break;
+ inode = fs->unlogged_list.prev->data;
+
+ if (bluesky_verbose) {
+ g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
+ "Considering flushing inode %"PRIu64, inode->inum);
+ }
+
+ /* Stop processing dirty inodes if we both have enough memory available
+ * and the oldest inode is sufficiently new that it need not be flushed
+ * out. */
+ uint64_t elapsed = bluesky_get_current_time() - inode->change_time;
+ if (g_atomic_int_get(&fs->cache_dirty) < bluesky_watermark_low_dirty
+ && elapsed < WRITEBACK_DELAY)
+ break;
+ if (inode->change_time > start_time)
+ break;
+
+ bluesky_inode_ref(inode);
+
+ g_mutex_unlock(fs->lock);
+
+ g_mutex_lock(inode->lock);
+ flushd_dirty_inode(inode);
+ g_mutex_unlock(inode->lock);
+ bluesky_inode_unref(inode);
+
+ g_mutex_lock(fs->lock);
+ }
+
+ g_mutex_unlock(fs->lock);
+}
+
+/* Try to flush dirty data to the cloud. */
+static void flushd_cloud(BlueSkyFS *fs)
+{
+ int64_t start_time = bluesky_get_current_time();
+ g_mutex_lock(fs->lock);
+
while (1) {
BlueSkyInode *inode;
if (fs->dirty_list.prev == NULL)
if (bluesky_verbose) {
g_log("bluesky/flushd", G_LOG_LEVEL_DEBUG,
- "Considering flushing inode %"PRIu64, inode->inum);
+ "Flushing inode %"PRIu64" to cloud", inode->inum);
}
/* Stop processing dirty inodes if we both have enough memory available
g_mutex_lock(inode->lock);
flushd_dirty_inode(inode);
+ g_mutex_lock(fs->lock);
+ bluesky_list_unlink(&fs->dirty_list, inode->dirty_list);
+ inode->dirty_list = NULL;
+ g_mutex_unlock(fs->lock);
+
+ BlueSkyCloudLog *log = inode->committed_item;
+ bluesky_cloudlog_ref(log);
g_mutex_unlock(inode->lock);
+
+ if (log != NULL)
+ bluesky_cloudlog_serialize(log, fs);
bluesky_inode_unref(inode);
+    bluesky_cloudlog_unref(log);
g_mutex_lock(fs->lock);
}
g_mutex_unlock(fs->lock);
+ bluesky_cloudlog_flush(fs);
}
/* Drop cached data for a given inode, if it is clean. inode must be locked. */
if (!g_mutex_trylock(fs->flushd_lock))
return NULL;
flushd_dirty(fs);
- bluesky_cloudlog_write_log(fs);
+ flushd_cloud(fs);
flushd_clean(fs);
g_mutex_unlock(fs->flushd_lock);
}
BlueSkyCloudPointer bluesky_cloudlog_serialize(BlueSkyCloudLog *log,
- BlueSkyCloudLogState *state)
+ BlueSkyFS *fs)
{
+ BlueSkyCloudLogState *state = fs->log_state;
+
if (log->location_flags & CLOUDLOG_CLOUD) {
return log->location;
}
BlueSkyCloudLog *ref = g_array_index(log->links,
BlueSkyCloudLog *, i);
if (ref != NULL)
- bluesky_cloudlog_serialize(ref, state);
+ bluesky_cloudlog_serialize(ref, fs);
}
g_mutex_lock(log->lock);
log->location_flags |= CLOUDLOG_CLOUD;
g_mutex_unlock(log->lock);
+ if (state->data->len > CLOUDLOG_SEGMENT_SIZE)
+ bluesky_cloudlog_flush(fs);
+
return log->location;
}
while (state->inode_list != NULL) {
BlueSkyCloudLog *log = (BlueSkyCloudLog *)state->inode_list->data;
- bluesky_cloudlog_serialize(log, state);
+ bluesky_cloudlog_serialize(log, fs);
bluesky_cloudlog_unref(log);
state->inode_list = g_list_delete_link(state->inode_list,
state->inode_list);