Add an option to disable aggregating reads in the proxy
[bluesky.git] / bluesky / cloudlog.c
index 0668da7..51e35c0 100644 (file)
@@ -17,6 +17,9 @@
 // no absolute guarantees on the size of a log segment.
 #define CLOUDLOG_SEGMENT_SIZE (4 << 20)
 
+// Maximum number of segments to attempt to upload concurrently
+int cloudlog_concurrent_uploads = 32;
+
 BlueSkyCloudID bluesky_cloudlog_new_id()
 {
     BlueSkyCloudID id;
@@ -189,12 +192,6 @@ void bluesky_cloudlog_unref_delayed(BlueSkyCloudLog *log)
         g_async_queue_push(log->fs->unref_queue, log);
 }
 
-void bluesky_cloudlog_threads_init(BlueSkyFS *fs)
-{
-    fs->unref_queue = g_async_queue_new();
-    g_thread_create(cloudlog_unref_thread, fs->unref_queue, FALSE, NULL);
-}
-
 /* Erase the information contained within the in-memory cloud log
  * representation.  This does not free up the item itself, but frees the data
  * and references to other log items and resets the type back to unknown.  If
@@ -268,6 +265,25 @@ BlueSkyCloudLog *bluesky_cloudlog_get(BlueSkyFS *fs, BlueSkyCloudID id)
     return item;
 }
 
+/* Work to fetch a cloudlog item in a background thread.  The item will be
+ * locked while the fetch is in progress and unlocked when it completes. */
+static GThreadPool *fetch_pool;
+
+/* Thread-pool worker for background fetches.  NOTE(review): this task does
+ * not itself call a fetch function; taking and immediately releasing
+ * item->lock only blocks until whichever thread holds the lock (presumably
+ * performing the actual fetch) finishes -- confirm the fetch is initiated
+ * elsewhere before/while this task runs.  The second parameter is the
+ * GThreadPool user_data slot, unused here. */
+static void background_fetch_task(gpointer p, gpointer unused)
+{
+    BlueSkyCloudLog *item = (BlueSkyCloudLog *)p;
+
+    /* Wait for any in-progress operation on the item to complete. */
+    g_mutex_lock(item->lock);
+    g_mutex_unlock(item->lock);
+    /* Drop the reference taken by bluesky_cloudlog_background_fetch. */
+    bluesky_cloudlog_unref(item);
+}
+
+/* Queue a cloudlog item for fetching on the background thread pool.  Takes a
+ * reference on the item for the duration of the queued work; the matching
+ * unref happens in background_fetch_task when the work item completes. */
+void bluesky_cloudlog_background_fetch(BlueSkyCloudLog *item)
+{
+    bluesky_cloudlog_ref(item);
+    g_thread_pool_push(fetch_pool, item, NULL);
+}
+
 /* Attempt to prefetch a cloud log item.  This does not guarantee that it will
  * be made available, but does make it more likely that a future call to
  * bluesky_cloudlog_fetch will complete quickly.  Item must be locked? */
@@ -276,6 +292,14 @@ void bluesky_cloudlog_prefetch(BlueSkyCloudLog *item)
     if (item->data != NULL)
         return;
 
+    /* When operating in a non log-structured mode, simply start a background
+     * fetch immediately when asked to prefetch. */
+    if (bluesky_options.disable_aggregation
+        || bluesky_options.disable_read_aggregation) {
+        bluesky_cloudlog_background_fetch(item);
+        return;
+    }
+
     /* TODO: Some of the code here is duplicated with bluesky_log_map_object.
      * Refactor to fix that. */
     BlueSkyFS *fs = item->fs;
@@ -316,7 +340,8 @@ void bluesky_cloudlog_prefetch(BlueSkyCloudLog *item)
             map->prefetches = bluesky_rangeset_new();
 
         gchar *id = bluesky_cloudlog_id_to_string(item->id);
-        g_print("Need to prefetch %s\n", id);
+        if (bluesky_verbose)
+            g_print("Need to prefetch %s\n", id);
         g_free(id);
 
         bluesky_rangeset_insert(map->prefetches,
@@ -325,7 +350,9 @@ void bluesky_cloudlog_prefetch(BlueSkyCloudLog *item)
 
         uint64_t start, length;
         bluesky_rangeset_get_extents(map->prefetches, &start, &length);
-        g_print("Range to prefetch: %"PRIu64" + %"PRIu64"\n", start, length);
+        if (bluesky_verbose)
+            g_print("Range to prefetch: %"PRIu64" + %"PRIu64"\n",
+                    start, length);
     }
 
     bluesky_cachefile_unref(map);
@@ -340,6 +367,10 @@ void bluesky_cloudlog_fetch(BlueSkyCloudLog *log)
     if (log->data != NULL)
         return;
 
+    BlueSkyProfile *profile = bluesky_profile_get();
+    if (profile != NULL)
+        bluesky_profile_add_event(profile, g_strdup_printf("Fetch log entry"));
+
     /* There are actually two cases: a full deserialization if we have not ever
      * read the object before, and a partial deserialization where the metadata
      * is already in memory and we just need to remap the data.  If the object
@@ -361,6 +392,8 @@ void bluesky_cloudlog_fetch(BlueSkyCloudLog *log)
         g_error("Unable to fetch cloudlog entry!");
     }
 
+    if (profile != NULL)
+        bluesky_profile_add_event(profile, g_strdup_printf("Fetch complete"));
     g_cond_broadcast(log->cond);
 }
 
@@ -380,6 +413,7 @@ BlueSkyCloudPointer bluesky_cloudlog_serialize(BlueSkyCloudLog *log,
             bluesky_cloudlog_serialize(ref, fs);
     }
 
+    /* FIXME: Ought lock to be taken earlier? */
     g_mutex_lock(log->lock);
     bluesky_cloudlog_fetch(log);
     g_assert(log->data != NULL);
@@ -436,8 +470,11 @@ BlueSkyCloudPointer bluesky_cloudlog_serialize(BlueSkyCloudLog *log,
     bluesky_cloudlog_ref(log);
     g_mutex_unlock(log->lock);
 
-    if (state->data->len > CLOUDLOG_SEGMENT_SIZE)
+    if (state->data->len > CLOUDLOG_SEGMENT_SIZE
+        || bluesky_options.disable_aggregation)
+    {
         bluesky_cloudlog_flush(fs);
+    }
 
     return log->location;
 }
@@ -468,6 +505,13 @@ static void cloudlog_flush_complete(BlueSkyStoreAsync *async,
         g_slist_free(record->items);
         record->items = NULL;
         record->complete = TRUE;
+
+        BlueSkyCloudLogState *state = record->fs->log_state;
+        g_mutex_lock(state->uploads_pending_lock);
+        state->uploads_pending--;
+        g_cond_broadcast(state->uploads_pending_cond);
+        g_mutex_unlock(state->uploads_pending_lock);
+
         g_cond_broadcast(record->cond);
     } else {
         g_print("Write should be resubmitted...\n");
@@ -476,6 +520,7 @@ static void cloudlog_flush_complete(BlueSkyStoreAsync *async,
         async2->op = STORE_OP_PUT;
         async2->key = g_strdup(async->key);
         async2->data = record->data;
+        async2->profile = async->profile;
         bluesky_string_ref(record->data);
         bluesky_store_async_submit(async2);
         bluesky_store_async_add_notifier(async2,
@@ -487,38 +532,58 @@ static void cloudlog_flush_complete(BlueSkyStoreAsync *async,
 }
 
 /* Finish up a partially-written cloud log segment and flush it to storage. */
+/* Background worker for bluesky_cloudlog_flush: encrypt the serialized log
+ * segment and submit the asynchronous PUT to cloud storage.  Runs in its own
+ * thread (spawned by bluesky_cloudlog_flush) so that encryption and upload of
+ * one segment can overlap with serialization of the next.  Completion is
+ * reported via the cloudlog_flush_complete notifier, which decrements the
+ * uploads_pending counter taken by the caller. */
+static void cloud_flush_background(SerializedRecord *record)
+{
+    /* Encrypt in place, then convert to a refcounted BlueSkyString; the raw
+     * GString is consumed and must not be used by the caller afterward. */
+    bluesky_cloudlog_encrypt(record->raw_data, record->fs->keys);
+    record->data = bluesky_string_new_from_gstring(record->raw_data);
+    record->raw_data = NULL;
+
+    BlueSkyStoreAsync *async = bluesky_store_async_new(record->fs->store);
+    async->op = STORE_OP_PUT;
+    /* NOTE(review): record->key is assigned without g_strdup, unlike the
+     * retry path in cloudlog_flush_complete which duplicates the key --
+     * confirm the async does not free key, or that record->key is not used
+     * after the async completes. */
+    async->key = record->key;
+    async->data = record->data;
+    /* Extra ref so record->data survives the async's release of its copy. */
+    bluesky_string_ref(record->data);
+    bluesky_store_async_submit(async);
+    bluesky_store_async_add_notifier(async,
+                                     (GFunc)cloudlog_flush_complete,
+                                     record);
+    bluesky_store_async_unref(async);
+}
+
 void bluesky_cloudlog_flush(BlueSkyFS *fs)
 {
     BlueSkyCloudLogState *state = fs->log_state;
     if (state->data == NULL || state->data->len == 0)
         return;
 
+    g_mutex_lock(state->uploads_pending_lock);
+    while (state->uploads_pending > cloudlog_concurrent_uploads)
+        g_cond_wait(state->uploads_pending_cond, state->uploads_pending_lock);
+    state->uploads_pending++;
+    g_mutex_unlock(state->uploads_pending_lock);
+
     /* TODO: Append some type of commit record to the log segment? */
 
     g_print("Serializing %zd bytes of data to cloud\n", state->data->len);
     SerializedRecord *record = g_new0(SerializedRecord, 1);
-    bluesky_cloudlog_encrypt(state->data, fs->keys);
-    record->data = bluesky_string_new_from_gstring(state->data);
+    record->fs = fs;
+    record->raw_data = state->data;
+    record->data = NULL;
     record->items = state->writeback_list;
     record->lock = g_mutex_new();
     record->cond = g_cond_new();
     state->writeback_list = NULL;
 
-    BlueSkyStoreAsync *async = bluesky_store_async_new(fs->store);
-    async->op = STORE_OP_PUT;
-    async->key = g_strdup_printf("log-%08d-%08d",
-                                 state->location.directory,
-                                 state->location.sequence);
-    async->data = record->data;
-    bluesky_string_ref(record->data);
-    bluesky_store_async_submit(async);
-    bluesky_store_async_add_notifier(async,
-                                     (GFunc)cloudlog_flush_complete,
-                                     record);
-    bluesky_store_async_unref(async);
+    record->key = g_strdup_printf("log-%08d-%08d",
+                                  state->location.directory,
+                                  state->location.sequence);
 
     state->pending_segments = g_list_prepend(state->pending_segments, record);
 
+    /* Encryption of data and upload happen in the background, for additional
+     * parallelism when uploading large amounts of data. */
+    g_thread_create((GThreadFunc)cloud_flush_background, record, FALSE, NULL);
+
     state->location.sequence++;
     state->location.offset = 0;
     state->data = g_string_new("");
@@ -548,10 +613,16 @@ void bluesky_cloudlog_encrypt(GString *segment, BlueSkyCryptKeys *keys)
 
 /* Make an decryption pass over a cloud log segment to decrypt items which were
  * encrypted.  Also computes a list of all offsets which at which valid
- * cloud log items are found and adds those offsets to items (if non-NULL). */
+ * cloud log items are found and adds those offsets to items (if non-NULL).
+ *
+ * If allow_unauth is set to true, then allow a limited set of unauthenticated
+ * items that may have been rewritten by a file system cleaner.  These include
+ * the checkpoint and inode map records only; other items must still pass
+ * authentication. */
 void bluesky_cloudlog_decrypt(char *segment, size_t len,
                               BlueSkyCryptKeys *keys,
-                              BlueSkyRangeset *items)
+                              BlueSkyRangeset *items,
+                              gboolean allow_unauth)
 {
     char *data = segment;
     size_t remaining_size = len;
@@ -565,9 +636,10 @@ void bluesky_cloudlog_decrypt(char *segment, size_t len,
                            + GUINT32_FROM_LE(header->size3);
         if (item_size > remaining_size)
             break;
-        if (bluesky_crypt_block_decrypt(data, item_size, keys)) {
+        if (bluesky_crypt_block_decrypt(data, item_size, keys, allow_unauth)) {
             if (items != NULL) {
-                g_print("  data item at %zx\n", offset);
+                if (bluesky_verbose)
+                    g_print("  data item at %zx\n", offset);
                 bluesky_rangeset_insert(items, offset, item_size,
                                         GINT_TO_POINTER(TRUE));
             }
@@ -584,3 +656,11 @@ void bluesky_cloudlog_decrypt(char *segment, size_t len,
         remaining_size -= item_size;
     }
 }
+
+/* Start the background threads used by the cloudlog layer: the delayed-unref
+ * worker fed by fs->unref_queue, and the thread pool used for background
+ * fetches of individual log items.  Must be called before any code uses
+ * bluesky_cloudlog_background_fetch (fetch_pool is NULL until then). */
+void bluesky_cloudlog_threads_init(BlueSkyFS *fs)
+{
+    fs->unref_queue = g_async_queue_new();
+    g_thread_create(cloudlog_unref_thread, fs->unref_queue, FALSE, NULL);
+    /* Up to 40 concurrent fetch workers; NOTE(review): this magic number is
+     * independent of cloudlog_concurrent_uploads (32) -- consider naming it. */
+    fetch_pool = g_thread_pool_new(background_fetch_task, NULL, 40, FALSE,
+                                   NULL);
+}