// no absolute guarantees on the size of a log segment.
#define CLOUDLOG_SEGMENT_SIZE (4 << 20)
+// Maximum number of segments to attempt to upload concurrently
+int cloudlog_concurrent_uploads = 32;
+
BlueSkyCloudID bluesky_cloudlog_new_id()
{
BlueSkyCloudID id;
g_async_queue_push(log->fs->unref_queue, log);
}
-void bluesky_cloudlog_threads_init(BlueSkyFS *fs)
-{
- fs->unref_queue = g_async_queue_new();
- g_thread_create(cloudlog_unref_thread, fs->unref_queue, FALSE, NULL);
-}
-
/* Erase the information contained within the in-memory cloud log
* representation. This does not free up the item itself, but frees the data
* and references to other log items and resets the type back to unknown. If
return item;
}
+/* Work to fetch a cloudlog item in a background thread. The item will be
+ * locked while the fetch is in progress and unlocked when it completes. */
+static GThreadPool *fetch_pool;
+
+static void background_fetch_task(gpointer p, gpointer unused)
+{
+ BlueSkyCloudLog *item = (BlueSkyCloudLog *)p;
+
+ g_mutex_lock(item->lock);
+ bluesky_cloudlog_fetch(item);
+ g_mutex_unlock(item->lock);
+ bluesky_cloudlog_unref(item);
+}
+
+void bluesky_cloudlog_background_fetch(BlueSkyCloudLog *item)
+{
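+ /* Take a reference so the item stays alive until a worker thread picks it
+ * up; background_fetch_task drops the reference once the fetch finishes. */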
+ bluesky_cloudlog_ref(item);
+ g_thread_pool_push(fetch_pool, item, NULL);
+}
+
/* Attempt to prefetch a cloud log item. This does not guarantee that it will
* be made available, but does make it more likely that a future call to
* bluesky_cloudlog_fetch will complete quickly. Item must be locked? */
if (item->data != NULL)
return;
+ /* When operating in a non log-structured mode, simply start a background
+ * fetch immediately when asked to prefetch. */
+ if (bluesky_options.disable_aggregation
+ || bluesky_options.disable_read_aggregation) {
+ bluesky_cloudlog_background_fetch(item);
+ return;
+ }
+
/* TODO: Some of the code here is duplicated with bluesky_log_map_object.
* Refactor to fix that. */
BlueSkyFS *fs = item->fs;
map->prefetches = bluesky_rangeset_new();
gchar *id = bluesky_cloudlog_id_to_string(item->id);
- g_print("Need to prefetch %s\n", id);
+ if (bluesky_verbose)
+ g_print("Need to prefetch %s\n", id);
g_free(id);
bluesky_rangeset_insert(map->prefetches,
uint64_t start, length;
bluesky_rangeset_get_extents(map->prefetches, &start, &length);
- g_print("Range to prefetch: %"PRIu64" + %"PRIu64"\n", start, length);
+ if (bluesky_verbose)
+ g_print("Range to prefetch: %"PRIu64" + %"PRIu64"\n",
+ start, length);
}
bluesky_cachefile_unref(map);
if (log->data != NULL)
return;
+ BlueSkyProfile *profile = bluesky_profile_get();
+ if (profile != NULL)
+ bluesky_profile_add_event(profile, g_strdup_printf("Fetch log entry"));
+
/* There are actually two cases: a full deserialization if we have not ever
* read the object before, and a partial deserialization where the metadata
* is already in memory and we just need to remap the data. If the object
g_error("Unable to fetch cloudlog entry!");
}
+ if (profile != NULL)
+ bluesky_profile_add_event(profile, g_strdup_printf("Fetch complete"));
g_cond_broadcast(log->cond);
}
bluesky_cloudlog_serialize(ref, fs);
}
+ /* FIXME: Should the lock be taken earlier? */
g_mutex_lock(log->lock);
bluesky_cloudlog_fetch(log);
g_assert(log->data != NULL);
bluesky_cloudlog_ref(log);
g_mutex_unlock(log->lock);
- if (state->data->len > CLOUDLOG_SEGMENT_SIZE)
+ if (state->data->len > CLOUDLOG_SEGMENT_SIZE
+ || bluesky_options.disable_aggregation)
+ {
bluesky_cloudlog_flush(fs);
+ }
return log->location;
}
g_slist_free(record->items);
record->items = NULL;
record->complete = TRUE;
+
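+ /* The upload of this segment has finished; release its slot and wake any
+ * writer blocked in bluesky_cloudlog_flush waiting for uploads_pending to
+ * drop. */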
+ BlueSkyCloudLogState *state = record->fs->log_state;
+ g_mutex_lock(state->uploads_pending_lock);
+ state->uploads_pending--;
+ g_cond_broadcast(state->uploads_pending_cond);
+ g_mutex_unlock(state->uploads_pending_lock);
+
g_cond_broadcast(record->cond);
} else {
g_print("Write should be resubmitted...\n");
async2->op = STORE_OP_PUT;
async2->key = g_strdup(async->key);
async2->data = record->data;
+ async2->profile = async->profile;
bluesky_string_ref(record->data);
bluesky_store_async_submit(async2);
bluesky_store_async_add_notifier(async2,
}
/* Finish up a partially-written cloud log segment and flush it to storage. */
+static void cloud_flush_background(SerializedRecord *record)
+{
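+ /* Runs in its own thread: encrypt the serialized segment, then submit the
+ * PUT to the store asynchronously; cloudlog_flush_complete is invoked as a
+ * notifier when the store operation completes. */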
+ bluesky_cloudlog_encrypt(record->raw_data, record->fs->keys);
+ record->data = bluesky_string_new_from_gstring(record->raw_data);
+ record->raw_data = NULL;
+
+ BlueSkyStoreAsync *async = bluesky_store_async_new(record->fs->store);
+ async->op = STORE_OP_PUT;
+ async->key = g_strdup(record->key);
+ async->data = record->data;
+ bluesky_string_ref(record->data);
+ bluesky_store_async_submit(async);
+ bluesky_store_async_add_notifier(async,
+ (GFunc)cloudlog_flush_complete,
+ record);
+ bluesky_store_async_unref(async);
+}
+
void bluesky_cloudlog_flush(BlueSkyFS *fs)
{
BlueSkyCloudLogState *state = fs->log_state;
if (state->data == NULL || state->data->len == 0)
return;
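+ /* Throttle segment uploads: block while the number of uploads already in
+ * flight exceeds cloudlog_concurrent_uploads, then claim a slot. */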
+ g_mutex_lock(state->uploads_pending_lock);
+ while (state->uploads_pending > cloudlog_concurrent_uploads)
+ g_cond_wait(state->uploads_pending_cond, state->uploads_pending_lock);
+ state->uploads_pending++;
+ g_mutex_unlock(state->uploads_pending_lock);
+
/* TODO: Append some type of commit record to the log segment? */
g_print("Serializing %zd bytes of data to cloud\n", state->data->len);
SerializedRecord *record = g_new0(SerializedRecord, 1);
- bluesky_cloudlog_encrypt(state->data, fs->keys);
- record->data = bluesky_string_new_from_gstring(state->data);
+ record->fs = fs;
+ record->raw_data = state->data;
+ record->data = NULL;
record->items = state->writeback_list;
record->lock = g_mutex_new();
record->cond = g_cond_new();
state->writeback_list = NULL;
- BlueSkyStoreAsync *async = bluesky_store_async_new(fs->store);
- async->op = STORE_OP_PUT;
- async->key = g_strdup_printf("log-%08d-%08d",
- state->location.directory,
- state->location.sequence);
- async->data = record->data;
- bluesky_string_ref(record->data);
- bluesky_store_async_submit(async);
- bluesky_store_async_add_notifier(async,
- (GFunc)cloudlog_flush_complete,
- record);
- bluesky_store_async_unref(async);
+ record->key = g_strdup_printf("log-%08d-%08d",
+ state->location.directory,
+ state->location.sequence);
state->pending_segments = g_list_prepend(state->pending_segments, record);
+ /* Encryption of data and upload happen in the background, for additional
+ * parallelism when uploading large amounts of data. */
+ g_thread_create((GThreadFunc)cloud_flush_background, record, FALSE, NULL);
+
state->location.sequence++;
state->location.offset = 0;
state->data = g_string_new("");
/* Make a decryption pass over a cloud log segment to decrypt items which were
* encrypted. Also computes a list of all offsets at which valid
- * cloud log items are found and adds those offsets to items (if non-NULL). */
+ * cloud log items are found and adds those offsets to items (if non-NULL).
+ *
+ * If allow_unauth is set to true, then allow a limited set of unauthenticated
+ * items that may have been rewritten by a file system cleaner. These include
+ * the checkpoint and inode map records only; other items must still pass
+ * authentication. */
void bluesky_cloudlog_decrypt(char *segment, size_t len,
BlueSkyCryptKeys *keys,
- BlueSkyRangeset *items)
+ BlueSkyRangeset *items,
+ gboolean allow_unauth)
{
char *data = segment;
size_t remaining_size = len;
+ GUINT32_FROM_LE(header->size3);
if (item_size > remaining_size)
break;
- if (bluesky_crypt_block_decrypt(data, item_size, keys)) {
+ if (bluesky_crypt_block_decrypt(data, item_size, keys, allow_unauth)) {
if (items != NULL) {
- g_print(" data item at %zx\n", offset);
+ if (bluesky_verbose)
+ g_print(" data item at %zx\n", offset);
bluesky_rangeset_insert(items, offset, item_size,
GINT_TO_POINTER(TRUE));
}
remaining_size -= item_size;
}
}
+
+void bluesky_cloudlog_threads_init(BlueSkyFS *fs)
+{
+ fs->unref_queue = g_async_queue_new();
+ g_thread_create(cloudlog_unref_thread, fs->unref_queue, FALSE, NULL);
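+ /* Thread pool (up to 40 workers) servicing fetch requests queued by
+ * bluesky_cloudlog_background_fetch. */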
+ fetch_pool = g_thread_pool_new(background_fetch_task, NULL, 40, FALSE,
+ NULL);
+}