X-Git-Url: http://git.vrable.net/?a=blobdiff_plain;f=bluesky%2Fcloudlog.c;h=51e35c030fe6c4defbf67fdfb21d3cb6dd49f09a;hb=4d0f19140e01c9119d9e73ba24f8ef04e0434a5a;hp=134f3d798600f839e8f52e01ba8b958c385800c9;hpb=ddaec40a37a5e65e53546b14632b1b0b35613264;p=bluesky.git diff --git a/bluesky/cloudlog.c b/bluesky/cloudlog.c index 134f3d7..51e35c0 100644 --- a/bluesky/cloudlog.c +++ b/bluesky/cloudlog.c @@ -17,6 +17,9 @@ // no absolute guarantees on the size of a log segment. #define CLOUDLOG_SEGMENT_SIZE (4 << 20) +// Maximum number of segments to attempt to upload concurrently +int cloudlog_concurrent_uploads = 32; + BlueSkyCloudID bluesky_cloudlog_new_id() { BlueSkyCloudID id; @@ -78,7 +81,7 @@ guint bluesky_cloudlog_hash(gconstpointer a) * before writing a batch to the cloud, handling indirection through items like * the inode map, etc. */ -BlueSkyCloudLog *bluesky_cloudlog_new(BlueSkyFS *fs) +BlueSkyCloudLog *bluesky_cloudlog_new(BlueSkyFS *fs, const BlueSkyCloudID *id) { BlueSkyCloudLog *log = g_new0(BlueSkyCloudLog, 1); @@ -86,13 +89,35 @@ BlueSkyCloudLog *bluesky_cloudlog_new(BlueSkyFS *fs) log->cond = g_cond_new(); log->fs = fs; log->type = LOGTYPE_UNKNOWN; - log->id = bluesky_cloudlog_new_id(); + if (id != NULL) + memcpy(&log->id, id, sizeof(BlueSkyCloudID)); + else + log->id = bluesky_cloudlog_new_id(); log->links = g_array_new(FALSE, TRUE, sizeof(BlueSkyCloudLog *)); g_atomic_int_set(&log->refcount, 1); return log; } +/* Helper function for updating memory usage statistics for a filesystem (the + * cache_log_* variables). This will increment (type=1) or decrement (type=-1) + * the counter associated with the current state of the cloud log item. The + * item should be locked or otherwise protected from concurrent access. 
*/ +void bluesky_cloudlog_stats_update(BlueSkyCloudLog *log, int type) +{ + BlueSkyFS *fs = log->fs; + + if (log->location_flags & CLOUDLOG_CLOUD) { + g_atomic_int_add(&fs->cache_log_cloud, type); + } else if (log->location_flags & CLOUDLOG_JOURNAL) { + g_atomic_int_add(&fs->cache_log_journal, type); + } else if (log->pending_write & CLOUDLOG_JOURNAL) { + g_atomic_int_add(&fs->cache_log_journal, type); + } else if (log->data != NULL) { + g_atomic_int_add(&fs->cache_log_dirty, type); + } +} + /* The reference held by the hash table does not count towards the reference * count. When a new object is created, it initially has a reference count of * 1 for the creator, and similarly fetching an item from the hash table will @@ -125,9 +150,13 @@ void bluesky_cloudlog_unref(BlueSkyCloudLog *log) return; } - g_hash_table_remove(fs->locations, &log->id); + if (!g_hash_table_remove(fs->locations, &log->id)) { + if (bluesky_verbose) + g_warning("Could not find and remove cloud log item from hash table!"); + } g_mutex_unlock(fs->lock); + bluesky_cloudlog_stats_update(log, -1); log->type = LOGTYPE_INVALID; g_mutex_free(log->lock); g_cond_free(log->cond); @@ -142,6 +171,55 @@ void bluesky_cloudlog_unref(BlueSkyCloudLog *log) } } +/* For locking reasons cloudlog unrefs may sometimes need to be performed in + * the future. We launch a thread for handling these delayed unreference + * requests. */ +static gpointer cloudlog_unref_thread(gpointer q) +{ + GAsyncQueue *queue = (GAsyncQueue *)q; + + while (TRUE) { + BlueSkyCloudLog *item = (BlueSkyCloudLog *)g_async_queue_pop(queue); + bluesky_cloudlog_unref(item); + } + + return NULL; +} + +void bluesky_cloudlog_unref_delayed(BlueSkyCloudLog *log) +{ + if (log != NULL) + g_async_queue_push(log->fs->unref_queue, log); +} + +/* Erase the information contained within the in-memory cloud log + * representation. 
This does not free up the item itself, but frees the data + * and references to other log items and resets the type back to unknown. If + * the object was written out to persistent storage, all state about it can be + * recovered by loading the object back in. The object must be locked before + * calling this function. */ +void bluesky_cloudlog_erase(BlueSkyCloudLog *log) +{ + g_assert(log->data_lock_count == 0); + + if (log->type == LOGTYPE_UNKNOWN) + return; + + log->type = LOGTYPE_UNKNOWN; + log->data_size = 0; + bluesky_string_unref(log->data); + log->data = NULL; + log->data_lock_count = 0; + + for (int i = 0; i < log->links->len; i++) { + BlueSkyCloudLog *c = g_array_index(log->links, + BlueSkyCloudLog *, i); + bluesky_cloudlog_unref(c); + } + g_array_unref(log->links); + log->links = g_array_new(FALSE, TRUE, sizeof(BlueSkyCloudLog *)); +} + /* Start a write of the object to the local log. */ void bluesky_cloudlog_sync(BlueSkyCloudLog *log) { @@ -150,29 +228,136 @@ void bluesky_cloudlog_sync(BlueSkyCloudLog *log) /* Add the given entry to the global hash table containing cloud log entries. * Takes ownership of the caller's reference. */ +void bluesky_cloudlog_insert_locked(BlueSkyCloudLog *log) +{ + g_hash_table_insert(log->fs->locations, &log->id, log); +} + void bluesky_cloudlog_insert(BlueSkyCloudLog *log) { g_mutex_lock(log->fs->lock); - g_hash_table_insert(log->fs->locations, &log->id, log); + bluesky_cloudlog_insert_locked(log); g_mutex_unlock(log->fs->lock); } -struct log_header { - char magic[4]; - uint32_t size; - BlueSkyCloudID id; - uint32_t pointer_count; -} __attribute__((packed)); +/* Look up the cloud log entry for the given ID. If create is TRUE and the + * item does not exist, create a special pending entry that can later be filled + * in when the real item is loaded. The returned item has a reference held. + * As a special case, if a null ID is provided then NULL is returned. 
*/ +BlueSkyCloudLog *bluesky_cloudlog_get(BlueSkyFS *fs, BlueSkyCloudID id) +{ + static BlueSkyCloudID id0 = {{0}}; -struct logref { - BlueSkyCloudID id; - BlueSkyCloudPointer location; -} __attribute__((packed)); + if (memcmp(&id, &id0, sizeof(BlueSkyCloudID)) == 0) + return NULL; + + g_mutex_lock(fs->lock); + BlueSkyCloudLog *item; + item = g_hash_table_lookup(fs->locations, &id); + if (item == NULL) { + item = bluesky_cloudlog_new(fs, &id); + bluesky_cloudlog_stats_update(item, 1); + bluesky_cloudlog_insert_locked(item); + } else { + bluesky_cloudlog_ref(item); + } + g_mutex_unlock(fs->lock); + return item; +} + +/* Work to fetch a cloudlog item in a background thread. The item will be + * locked while the fetch is in progress and unlocked when it completes. */ +static GThreadPool *fetch_pool; -struct log_footer { - char refmagic[4]; - struct logref refs[0]; -}; +static void background_fetch_task(gpointer p, gpointer unused) +{ + BlueSkyCloudLog *item = (BlueSkyCloudLog *)p; + + g_mutex_lock(item->lock); + g_mutex_unlock(item->lock); + bluesky_cloudlog_unref(item); +} + +void bluesky_cloudlog_background_fetch(BlueSkyCloudLog *item) +{ + bluesky_cloudlog_ref(item); + g_thread_pool_push(fetch_pool, item, NULL); +} + +/* Attempt to prefetch a cloud log item. This does not guarantee that it will + * be made available, but does make it more likely that a future call to + * bluesky_cloudlog_fetch will complete quickly. Item must be locked? */ +void bluesky_cloudlog_prefetch(BlueSkyCloudLog *item) +{ + if (item->data != NULL) + return; + + /* When operating in a non log-structured mode, simply start a background + * fetch immediately when asked to prefetch. */ + if (bluesky_options.disable_aggregation + || bluesky_options.disable_read_aggregation) { + bluesky_cloudlog_background_fetch(item); + return; + } + + /* TODO: Some of the code here is duplicated with bluesky_log_map_object. + * Refactor to fix that. 
*/ + BlueSkyFS *fs = item->fs; + BlueSkyCacheFile *map = NULL; + + /* First, check to see if the journal still contains a copy of the item and + * if so update the atime on the journal so it is likely to be kept around + * until we need it. */ + if ((item->location_flags | item->pending_write) & CLOUDLOG_JOURNAL) { + map = bluesky_cachefile_lookup(fs, -1, item->log_seq, TRUE); + if (map != NULL) { + map->atime = bluesky_get_current_time(); + bluesky_cachefile_unref(map); + g_mutex_unlock(map->lock); + return; + } + } + + item->location_flags &= ~CLOUDLOG_JOURNAL; + if (!(item->location_flags & CLOUDLOG_CLOUD)) + return; + + map = bluesky_cachefile_lookup(fs, + item->location.directory, + item->location.sequence, + FALSE); + if (map == NULL) + return; + + /* At this point, we have information about the log segment containing the + * item we need. If our item is already fetched, we have nothing to do + * except update the atime. If not, queue up a fetch of our object. */ + const BlueSkyRangesetItem *rangeitem; + rangeitem = bluesky_rangeset_lookup(map->items, + item->location.offset); + if (rangeitem == NULL) { + if (map->prefetches == NULL) + map->prefetches = bluesky_rangeset_new(); + + gchar *id = bluesky_cloudlog_id_to_string(item->id); + if (bluesky_verbose) + g_print("Need to prefetch %s\n", id); + g_free(id); + + bluesky_rangeset_insert(map->prefetches, + item->location.offset, + item->location.size, NULL); + + uint64_t start, length; + bluesky_rangeset_get_extents(map->prefetches, &start, &length); + if (bluesky_verbose) + g_print("Range to prefetch: %"PRIu64" + %"PRIu64"\n", + start, length); + } + + bluesky_cachefile_unref(map); + g_mutex_unlock(map->lock); +} /* Ensure that a cloud log item is loaded in memory, and if not read it in. * TODO: Make asynchronous, and make this also fetch from the cloud. 
Right now @@ -182,11 +367,33 @@ void bluesky_cloudlog_fetch(BlueSkyCloudLog *log) if (log->data != NULL) return; - g_assert(log->location_flags & CLOUDLOG_JOURNAL); + BlueSkyProfile *profile = bluesky_profile_get(); + if (profile != NULL) + bluesky_profile_add_event(profile, g_strdup_printf("Fetch log entry")); + + /* There are actually two cases: a full deserialization if we have not ever + * read the object before, and a partial deserialization where the metadata + * is already in memory and we just need to remap the data. If the object + * type has not yet been set, we'll need to read and parse the metadata. + * Once that is done, we can fall through the case of remapping the data + * itself. */ + if (log->type == LOGTYPE_UNKNOWN) { + BlueSkyRCStr *raw = bluesky_log_map_object(log, FALSE); + g_assert(raw != NULL); + bluesky_deserialize_cloudlog(log, raw->data, raw->len); + bluesky_string_unref(raw); + } + + /* At this point all metadata should be available and we need only remap + * the object data. */ + log->data = bluesky_log_map_object(log, TRUE); - log->data = bluesky_log_map_object(log->fs->log, log->log_seq, - log->log_offset, log->log_size); + if (log->data == NULL) { + g_error("Unable to fetch cloudlog entry!"); + } + if (profile != NULL) + bluesky_profile_add_event(profile, g_strdup_printf("Fetch complete")); g_cond_broadcast(log->cond); } @@ -195,7 +402,7 @@ BlueSkyCloudPointer bluesky_cloudlog_serialize(BlueSkyCloudLog *log, { BlueSkyCloudLogState *state = fs->log_state; - if (log->location_flags & CLOUDLOG_CLOUD) { + if ((log->location_flags | log->pending_write) & CLOUDLOG_CLOUD) { return log->location; } @@ -206,92 +413,254 @@ BlueSkyCloudPointer bluesky_cloudlog_serialize(BlueSkyCloudLog *log, bluesky_cloudlog_serialize(ref, fs); } + /* FIXME: Ought lock to be taken earlier? 
*/
     g_mutex_lock(log->lock);
     bluesky_cloudlog_fetch(log);
     g_assert(log->data != NULL);
 
+    bluesky_cloudlog_stats_update(log, -1);
+
+    GString *data1 = g_string_new("");
+    GString *data2 = g_string_new("");
+    GString *data3 = g_string_new("");
+    bluesky_serialize_cloudlog(log, data1, data2, data3);
+
     log->location = state->location;
     log->location.offset = state->data->len;
-    log->location.size
-        = sizeof(struct log_header) + sizeof(BlueSkyCloudID) * 0
-        + log->data->len;
-
-    struct log_header header;
-    memcpy(header.magic, "AgI ", 4);
-    header.size = GUINT32_TO_LE(log->location.size);
+    log->data_size = data1->len;
+
+    struct cloudlog_header header;
+    memcpy(header.magic, CLOUDLOG_MAGIC, 4);
+    memset(header.crypt_auth, 0, sizeof(header.crypt_auth)); /* memset is (ptr, value, count); args were swapped, zeroing 0 bytes */
+    memset(header.crypt_iv, 0, sizeof(header.crypt_iv)); /* ditto: uninitialized bytes would have been uploaded */
+    header.type = log->type + '0';
+    header.size1 = GUINT32_TO_LE(data1->len);
+    header.size2 = GUINT32_TO_LE(data2->len);
+    header.size3 = GUINT32_TO_LE(data3->len);
     header.id = log->id;
-    header.pointer_count = GUINT32_TO_LE(0);
+    header.inum = GUINT64_TO_LE(log->inum);
     g_string_append_len(state->data, (const char *)&header, sizeof(header));
-    g_string_append_len(state->data, log->data->data, log->data->len);
+    g_string_append_len(state->data, data1->str, data1->len);
+    g_string_append_len(state->data, data2->str, data2->len);
+    g_string_append_len(state->data, data3->str, data3->len);
+
+    log->location.size = state->data->len - log->location.offset;
+
+    g_string_free(data1, TRUE);
+    g_string_free(data2, TRUE);
+    g_string_free(data3, TRUE);
+
+    /* If the object we flushed was an inode, update the inode map. 
*/ + if (log->type == LOGTYPE_INODE) { + g_mutex_lock(fs->lock); + InodeMapEntry *entry = bluesky_inode_map_lookup(fs->inode_map, + log->inum, 1); + bluesky_cloudlog_unref_delayed(entry->item); + entry->item = log; + bluesky_cloudlog_ref(entry->item); + g_mutex_unlock(fs->lock); + } - log->location_flags |= CLOUDLOG_CLOUD; + /* TODO: We should mark the objects as committed on the cloud until the + * data is flushed and acknowledged. */ + log->pending_write |= CLOUDLOG_CLOUD; + bluesky_cloudlog_stats_update(log, 1); + state->writeback_list = g_slist_prepend(state->writeback_list, log); + bluesky_cloudlog_ref(log); g_mutex_unlock(log->lock); - if (state->data->len > CLOUDLOG_SEGMENT_SIZE) + if (state->data->len > CLOUDLOG_SEGMENT_SIZE + || bluesky_options.disable_aggregation) + { bluesky_cloudlog_flush(fs); + } return log->location; } -static void find_inodes(gpointer key, gpointer value, gpointer user_data) +static void cloudlog_flush_complete(BlueSkyStoreAsync *async, + SerializedRecord *record) { - BlueSkyCloudLogState *state = (BlueSkyCloudLogState *)user_data; - BlueSkyCloudLog *item = (BlueSkyCloudLog *)value; - - if (item->type != LOGTYPE_INODE) - return; + g_print("Write of %s to cloud complete, status = %d\n", + async->key, async->result); + + g_mutex_lock(record->lock); + if (async->result >= 0) { + while (record->items != NULL) { + BlueSkyCloudLog *item = (BlueSkyCloudLog *)record->items->data; + g_mutex_lock(item->lock); + bluesky_cloudlog_stats_update(item, -1); + item->pending_write &= ~CLOUDLOG_CLOUD; + item->location_flags |= CLOUDLOG_CLOUD; + bluesky_cloudlog_stats_update(item, 1); + g_mutex_unlock(item->lock); + bluesky_cloudlog_unref(item); + + record->items = g_slist_delete_link(record->items, record->items); + } - bluesky_cloudlog_ref(item); - state->inode_list = g_list_prepend(state->inode_list, item); + bluesky_string_unref(record->data); + record->data = NULL; + g_slist_free(record->items); + record->items = NULL; + record->complete = TRUE; + 

+            BlueSkyCloudLogState *state = record->fs->log_state;
+            g_mutex_lock(state->uploads_pending_lock);
+            state->uploads_pending--;
+            g_cond_broadcast(state->uploads_pending_cond);
+            g_mutex_unlock(state->uploads_pending_lock);
+
+            g_cond_broadcast(record->cond);
+        } else {
+            g_print("Write should be resubmitted...\n");
+
+            BlueSkyStoreAsync *async2 = bluesky_store_async_new(async->store);
+            async2->op = STORE_OP_PUT;
+            async2->key = g_strdup(async->key);
+            async2->data = record->data;
+            async2->profile = async->profile;
+            bluesky_string_ref(record->data);
+            bluesky_store_async_submit(async2);
+            bluesky_store_async_add_notifier(async2,
+                                             (GFunc)cloudlog_flush_complete,
+                                             record);
+            bluesky_store_async_unref(async2);
+        }
+        g_mutex_unlock(record->lock);
 }
 
 /* Finish up a partially-written cloud log segment and flush it to storage. */
+static void cloud_flush_background(SerializedRecord *record)
+{
+    bluesky_cloudlog_encrypt(record->raw_data, record->fs->keys);
+    record->data = bluesky_string_new_from_gstring(record->raw_data);
+    record->raw_data = NULL;
+
+    BlueSkyStoreAsync *async = bluesky_store_async_new(record->fs->store);
+    async->op = STORE_OP_PUT;
+    async->key = record->key;
+    async->data = record->data;
+    bluesky_string_ref(record->data);
+    bluesky_store_async_submit(async);
+    bluesky_store_async_add_notifier(async,
+                                     (GFunc)cloudlog_flush_complete,
+                                     record);
+    bluesky_store_async_unref(async);
+}
+
 void bluesky_cloudlog_flush(BlueSkyFS *fs)
 {
     BlueSkyCloudLogState *state = fs->log_state;
     if (state->data == NULL || state->data->len == 0)
         return;
 
+    g_mutex_lock(state->uploads_pending_lock);
+    while (state->uploads_pending >= cloudlog_concurrent_uploads) /* '>' allowed limit+1 concurrent uploads */
+        g_cond_wait(state->uploads_pending_cond, state->uploads_pending_lock);
+    state->uploads_pending++;
+    g_mutex_unlock(state->uploads_pending_lock);
+
+    /* TODO: Append some type of commit record to the log segment? 
*/ g_print("Serializing %zd bytes of data to cloud\n", state->data->len); + SerializedRecord *record = g_new0(SerializedRecord, 1); + record->fs = fs; + record->raw_data = state->data; + record->data = NULL; + record->items = state->writeback_list; + record->lock = g_mutex_new(); + record->cond = g_cond_new(); + state->writeback_list = NULL; - BlueSkyStoreAsync *async = bluesky_store_async_new(fs->store); - async->op = STORE_OP_PUT; - async->key = g_strdup_printf("log-%08d-%08d", - state->location.directory, - state->location.sequence); - async->data = bluesky_string_new_from_gstring(state->data); - bluesky_store_async_submit(async); - bluesky_store_async_wait(async); - bluesky_store_async_unref(async); + record->key = g_strdup_printf("log-%08d-%08d", + state->location.directory, + state->location.sequence); + + state->pending_segments = g_list_prepend(state->pending_segments, record); + + /* Encryption of data and upload happen in the background, for additional + * parallelism when uploading large amounts of data. */ + g_thread_create((GThreadFunc)cloud_flush_background, record, FALSE, NULL); state->location.sequence++; state->location.offset = 0; state->data = g_string_new(""); } -void bluesky_cloudlog_write_log(BlueSkyFS *fs) +/* Make an encryption pass over a cloud log segment to encrypt private data in + * it. 
*/ +void bluesky_cloudlog_encrypt(GString *segment, BlueSkyCryptKeys *keys) { - BlueSkyCloudLogState *state = fs->log_state; - if (state->data == NULL) - state->data = g_string_new(""); + char *data = segment->str; + size_t remaining_size = segment->len; + + while (remaining_size >= sizeof(struct cloudlog_header)) { + struct cloudlog_header *header = (struct cloudlog_header *)data; + size_t item_size = sizeof(struct cloudlog_header) + + GUINT32_FROM_LE(header->size1) + + GUINT32_FROM_LE(header->size2) + + GUINT32_FROM_LE(header->size3); + if (item_size > remaining_size) + break; + bluesky_crypt_block_encrypt(data, item_size, keys); - g_mutex_lock(fs->lock); - g_hash_table_foreach(fs->locations, find_inodes, state); - g_mutex_unlock(fs->lock); + data += item_size; + remaining_size -= item_size; + } +} - while (state->inode_list != NULL) { - BlueSkyCloudLog *log = (BlueSkyCloudLog *)state->inode_list->data; - bluesky_cloudlog_serialize(log, fs); - bluesky_cloudlog_unref(log); - state->inode_list = g_list_delete_link(state->inode_list, - state->inode_list); +/* Make an decryption pass over a cloud log segment to decrypt items which were + * encrypted. Also computes a list of all offsets which at which valid + * cloud log items are found and adds those offsets to items (if non-NULL). + * + * If allow_unauth is set to true, then allow a limited set of unauthenticated + * items that may have been rewritten by a file system cleaner. These include + * the checkpoint and inode map records only; other items must still pass + * authentication. 
*/ +void bluesky_cloudlog_decrypt(char *segment, size_t len, + BlueSkyCryptKeys *keys, + BlueSkyRangeset *items, + gboolean allow_unauth) +{ + char *data = segment; + size_t remaining_size = len; + size_t offset = 0; + + while (remaining_size >= sizeof(struct cloudlog_header)) { + struct cloudlog_header *header = (struct cloudlog_header *)data; + size_t item_size = sizeof(struct cloudlog_header) + + GUINT32_FROM_LE(header->size1) + + GUINT32_FROM_LE(header->size2) + + GUINT32_FROM_LE(header->size3); + if (item_size > remaining_size) + break; + if (bluesky_crypt_block_decrypt(data, item_size, keys, allow_unauth)) { + if (items != NULL) { + if (bluesky_verbose) + g_print(" data item at %zx\n", offset); + bluesky_rangeset_insert(items, offset, item_size, + GINT_TO_POINTER(TRUE)); + } + } else { + g_warning("Unauthenticated data at offset %zd", offset); + if (items != NULL) { + bluesky_rangeset_insert(items, offset, item_size, + GINT_TO_POINTER(TRUE)); + } + } - if (state->data->len > CLOUDLOG_SEGMENT_SIZE) - bluesky_cloudlog_flush(fs); + data += item_size; + offset += item_size; + remaining_size -= item_size; } +} - bluesky_cloudlog_flush(fs); +void bluesky_cloudlog_threads_init(BlueSkyFS *fs) +{ + fs->unref_queue = g_async_queue_new(); + g_thread_create(cloudlog_unref_thread, fs->unref_queue, FALSE, NULL); + fetch_pool = g_thread_pool_new(background_fetch_task, NULL, 40, FALSE, + NULL); }