Limit the number of concurrent log uploads to the cloud
[bluesky.git] / bluesky / cloudlog.c
index 0570c5d..4b49397 100644 (file)
@@ -17,6 +17,9 @@
 // no absolute guarantees on the size of a log segment.
 #define CLOUDLOG_SEGMENT_SIZE (4 << 20)
 
+// Maximum number of segments to attempt to upload concurrently
+int cloudlog_concurrent_uploads = 32;
+
 BlueSkyCloudID bluesky_cloudlog_new_id()
 {
     BlueSkyCloudID id;
@@ -389,6 +392,7 @@ BlueSkyCloudPointer bluesky_cloudlog_serialize(BlueSkyCloudLog *log,
             bluesky_cloudlog_serialize(ref, fs);
     }
 
+    /* FIXME: Should the lock be taken earlier? */
     g_mutex_lock(log->lock);
     bluesky_cloudlog_fetch(log);
     g_assert(log->data != NULL);
@@ -477,6 +481,13 @@ static void cloudlog_flush_complete(BlueSkyStoreAsync *async,
         g_slist_free(record->items);
         record->items = NULL;
         record->complete = TRUE;
+
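+        /* Release this upload's slot and wake any flusher waiting on the limit. */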
+        BlueSkyCloudLogState *state = record->fs->log_state;
+        g_mutex_lock(state->uploads_pending_lock);
+        state->uploads_pending--;
+        g_cond_broadcast(state->uploads_pending_cond);
+        g_mutex_unlock(state->uploads_pending_lock);
+
         g_cond_broadcast(record->cond);
     } else {
         g_print("Write should be resubmitted...\n");
@@ -497,38 +508,58 @@ static void cloudlog_flush_complete(BlueSkyStoreAsync *async,
 }
 
 /* Finish up a partially-written cloud log segment and flush it to storage. */
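+/* Encrypt a serialized log segment and upload it to the cloud store.  Runs in
+ * a background thread spawned by bluesky_cloudlog_flush(). */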
+static void cloud_flush_background(SerializedRecord *record)
+{
+    bluesky_cloudlog_encrypt(record->raw_data, record->fs->keys);
+    record->data = bluesky_string_new_from_gstring(record->raw_data);
+    record->raw_data = NULL;
+
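+    /* Submit the PUT asynchronously; completion is reported to cloudlog_flush_complete(). */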
+    BlueSkyStoreAsync *async = bluesky_store_async_new(record->fs->store);
+    async->op = STORE_OP_PUT;
+    async->key = record->key;
+    async->data = record->data;
+    bluesky_string_ref(record->data);
+    bluesky_store_async_submit(async);
+    bluesky_store_async_add_notifier(async,
+                                     (GFunc)cloudlog_flush_complete,
+                                     record);
+    bluesky_store_async_unref(async);
+}
+
 void bluesky_cloudlog_flush(BlueSkyFS *fs)
 {
     BlueSkyCloudLogState *state = fs->log_state;
     if (state->data == NULL || state->data->len == 0)
         return;
 
+    /* Throttle uploads: wait for a free slot before claiming one. */
+    g_mutex_lock(state->uploads_pending_lock);
+    while (state->uploads_pending >= cloudlog_concurrent_uploads)
+        g_cond_wait(state->uploads_pending_cond, state->uploads_pending_lock);
+    state->uploads_pending++;
+    g_mutex_unlock(state->uploads_pending_lock);
+
     /* TODO: Append some type of commit record to the log segment? */
 
     g_print("Serializing %zd bytes of data to cloud\n", state->data->len);
     SerializedRecord *record = g_new0(SerializedRecord, 1);
-    bluesky_cloudlog_encrypt(state->data, fs->keys);
-    record->data = bluesky_string_new_from_gstring(state->data);
+    record->fs = fs;
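+    /* Hand off the unencrypted buffer; encryption is deferred to the background thread. */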
+    record->raw_data = state->data;
+    record->data = NULL;
     record->items = state->writeback_list;
     record->lock = g_mutex_new();
     record->cond = g_cond_new();
     state->writeback_list = NULL;
 
-    BlueSkyStoreAsync *async = bluesky_store_async_new(fs->store);
-    async->op = STORE_OP_PUT;
-    async->key = g_strdup_printf("log-%08d-%08d",
-                                 state->location.directory,
-                                 state->location.sequence);
-    async->data = record->data;
-    bluesky_string_ref(record->data);
-    bluesky_store_async_submit(async);
-    bluesky_store_async_add_notifier(async,
-                                     (GFunc)cloudlog_flush_complete,
-                                     record);
-    bluesky_store_async_unref(async);
+    record->key = g_strdup_printf("log-%08d-%08d",
+                                  state->location.directory,
+                                  state->location.sequence);
 
     state->pending_segments = g_list_prepend(state->pending_segments, record);
 
+    /* Encryption of data and upload happen in the background, for additional
+     * parallelism when uploading large amounts of data. */
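+    /* The thread is detached; completion is tracked through uploads_pending and record->complete. */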
+    g_thread_create((GThreadFunc)cloud_flush_background, record, FALSE, NULL);
+
     state->location.sequence++;
     state->location.offset = 0;
     state->data = g_string_new("");