X-Git-Url: http://git.vrable.net/?a=blobdiff_plain;f=bluesky%2Fcloudlog.c;h=40067fd3661f91d4ec9e45a9019a73d9043c8bd6;hb=a82b60b3b683840a7074110831bcbaa16a40f0eb;hp=d7cea634f50f6762675b94b647ac60f75bd59f1d;hpb=a8a9f2181e2e16d24d812ea1e7a7c8af42f0d2f1;p=bluesky.git

diff --git a/bluesky/cloudlog.c b/bluesky/cloudlog.c
index d7cea63..40067fd 100644
--- a/bluesky/cloudlog.c
+++ b/bluesky/cloudlog.c
@@ -13,13 +13,9 @@
 
 #include "bluesky-private.h"
 
-/* The locations hash table in the file system is used to map objects to their locations. Objects are named using 128- */
-
-typedef struct {
-    BlueSkyCloudID id;
-
-    BlueSkyCloudPointer *cloud_loc;
-} BlueSkyLocationEntry;
+// Rough size limit for a log segment. This is not a firm limit and there are
+// no absolute guarantees on the size of a log segment.
+#define CLOUDLOG_SEGMENT_SIZE (4 << 20)
 
 BlueSkyCloudID bluesky_cloudlog_new_id()
 {
@@ -91,12 +87,41 @@ BlueSkyCloudLog *bluesky_cloudlog_new(BlueSkyFS *fs)
     log->fs = fs;
     log->type = LOGTYPE_UNKNOWN;
     log->id = bluesky_cloudlog_new_id();
-    log->pointers = g_array_new(FALSE, TRUE, sizeof(BlueSkyCloudID));
+    log->links = g_array_new(FALSE, TRUE, sizeof(BlueSkyCloudLog *));
     g_atomic_int_set(&log->refcount, 1);
 
     return log;
 }
 
+/* Helper function for updating memory usage statistics for a filesystem (the
+ * cache_log_* variables). This will increment (type=1) or decrement (type=-1)
+ * the counter associated with the current state of the cloud log item. The
+ * item should be locked or otherwise protected from concurrent access. */
+void bluesky_cloudlog_stats_update(BlueSkyCloudLog *log, int type)
+{
+    BlueSkyFS *fs = log->fs;
+
+    if (log->location_flags & CLOUDLOG_CLOUD) {
+        g_atomic_int_add(&fs->cache_log_cloud, type);
+    } else if (log->location_flags & CLOUDLOG_JOURNAL) {
+        g_atomic_int_add(&fs->cache_log_journal, type);
+    } else if (log->pending_write & CLOUDLOG_JOURNAL) {
+        g_atomic_int_add(&fs->cache_log_journal, type);
+    } else if (log->data != NULL) {
+        g_atomic_int_add(&fs->cache_log_dirty, type);
+    }
+}
+
+/* The reference held by the hash table does not count towards the reference
+ * count. When a new object is created, it initially has a reference count of
+ * 1 for the creator, and similarly fetching an item from the hash table will
+ * also create a reference. If the reference count drops to zero,
+ * bluesky_cloudlog_unref attempts to remove the object from the hash
+ * table--but there is a potential race since another thread might read the
+ * object from the hash table at the same time. So an object with a reference
+ * count of zero may still be resurrected, in which case we need to abort the
+ * destruction. Once the object is gone from the hash table, and if the
+ * reference count is still zero, it can actually be deleted. */
 void bluesky_cloudlog_ref(BlueSkyCloudLog *log)
 {
     if (log == NULL)
@@ -111,7 +136,29 @@ void bluesky_cloudlog_unref(BlueSkyCloudLog *log)
         return;
 
     if (g_atomic_int_dec_and_test(&log->refcount)) {
-        g_print("Cloud log refcount dropped to zero.\n");
+        BlueSkyFS *fs = log->fs;
+
+        g_mutex_lock(fs->lock);
+        if (g_atomic_int_get(&log->refcount) > 0) {
+            g_mutex_unlock(fs->lock);
+            return;
+        }
+
+        g_hash_table_remove(fs->locations, &log->id);
+        g_mutex_unlock(fs->lock);
+
+        bluesky_cloudlog_stats_update(log, -1);
+        log->type = LOGTYPE_INVALID;
+        g_mutex_free(log->lock);
+        g_cond_free(log->cond);
+        for (int i = 0; i < log->links->len; i++) {
+            BlueSkyCloudLog *c = g_array_index(log->links,
+                                               BlueSkyCloudLog *, i);
+            bluesky_cloudlog_unref(c);
+        }
+        g_array_unref(log->links);
+        bluesky_string_unref(log->data);
+        g_free(log);
     }
 }
 
@@ -147,36 +194,67 @@ struct log_footer {
     struct logref refs[0];
 };
 
+/* Ensure that a cloud log item is loaded in memory, and if not read it in.
+ * TODO: Make asynchronous, and make this also fetch from the cloud. Right now
+ * we only read from the log. Log item must be locked. */
+void bluesky_cloudlog_fetch(BlueSkyCloudLog *log)
+{
+    if (log->data != NULL)
+        return;
+
+    if ((log->location_flags | log->pending_write) & CLOUDLOG_JOURNAL) {
+        bluesky_cloudlog_stats_update(log, -1);
+        log->data = bluesky_log_map_object(log->fs, -1, log->log_seq,
+                                           log->log_offset, log->log_size);
+        bluesky_cloudlog_stats_update(log, 1);
+    }
+
+    if (log->data == NULL && (log->location_flags & CLOUDLOG_CLOUD)) {
+        log->location_flags &= ~CLOUDLOG_JOURNAL;
+        bluesky_cloudlog_stats_update(log, -1);
+        log->data = bluesky_log_map_object(log->fs, log->location.directory,
+                                           log->location.sequence,
+                                           log->location.offset,
+                                           log->location.size);
+        bluesky_cloudlog_stats_update(log, 1);
+    }
+
+    if (log->data == NULL) {
+        g_error("Unable to fetch cloudlog entry!");
+    }
+
+    g_cond_broadcast(log->cond);
+}
+
 BlueSkyCloudPointer bluesky_cloudlog_serialize(BlueSkyCloudLog *log,
-                                               BlueSkyCloudLogState *state)
+                                               BlueSkyFS *fs)
 {
-    if (log->location_flags & CLOUDLOG_CLOUD) {
+    BlueSkyCloudLogState *state = fs->log_state;
+
+    if ((log->location_flags | log->pending_write) & CLOUDLOG_CLOUD) {
         return log->location;
     }
 
-    g_print("Flushing object %s to cloud...\n",
-            bluesky_cloudlog_id_to_string(log->id));
-
-    for (int i = 0; i < log->pointers->len; i++) {
-        BlueSkyCloudID id = g_array_index(log->pointers, BlueSkyCloudID, i);
-        g_print("    ...checking reference %s...\n",
-                bluesky_cloudlog_id_to_string(id));
-        g_mutex_lock(log->fs->lock);
-        BlueSkyCloudLog *log2
-            = (BlueSkyCloudLog *)g_hash_table_lookup(log->fs->locations, &id);
-        // TODO: refcount
-        g_mutex_unlock(log->fs->lock);
-        g_assert(log2 != NULL);
-        bluesky_cloudlog_serialize(log2, state);
+    for (int i = 0; i < log->links->len; i++) {
+        BlueSkyCloudLog *ref = g_array_index(log->links,
+                                             BlueSkyCloudLog *, i);
+        if (ref != NULL)
+            bluesky_cloudlog_serialize(ref, fs);
     }
 
+    g_mutex_lock(log->lock);
+    bluesky_cloudlog_fetch(log);
     g_assert(log->data != NULL);
 
+    bluesky_cloudlog_stats_update(log, -1);
+
+    /* TODO: Right now offset/size are set to the raw data, but we should add
+     * header parsing to the code which loads objects back in. */
     log->location = state->location;
-    log->location.offset = state->data->len;
-    log->location.size
-        = sizeof(struct log_header) + sizeof(BlueSkyCloudID) * 0
-        + log->data->len;
+    log->location.offset = state->data->len + sizeof(struct log_header);
+    log->location.size = log->data->len;
+    /* = sizeof(struct log_header) + sizeof(BlueSkyCloudID) * 0
+        + log->data->len; */
 
     struct log_header header;
     memcpy(header.magic, "AgI ", 4);
@@ -187,75 +265,107 @@ BlueSkyCloudPointer bluesky_cloudlog_serialize(BlueSkyCloudLog *log,
     g_string_append_len(state->data, (const char *)&header, sizeof(header));
     g_string_append_len(state->data, log->data->data, log->data->len);
 
-    log->location_flags |= CLOUDLOG_CLOUD;
-
-    return log->location;
-}
+    /* If the object we flushed was an inode, update the inode map. */
+    if (log->type == LOGTYPE_INODE) {
+        g_mutex_lock(fs->lock);
+        InodeMapEntry *entry = bluesky_inode_map_lookup(fs->inode_map,
+                                                        log->inum, 1);
+        entry->id = log->id;
+        entry->location = log->location;
+        g_mutex_unlock(fs->lock);
+    }
 
-static void find_inodes(gpointer key, gpointer value, gpointer user_data)
-{
-    BlueSkyCloudLogState *state = (BlueSkyCloudLogState *)user_data;
-    BlueSkyCloudLog *item = (BlueSkyCloudLog *)value;
+    /* TODO: We should mark the objects as committed on the cloud until the
+     * data is flushed and acknowledged. */
+    log->pending_write |= CLOUDLOG_CLOUD;
+    bluesky_cloudlog_stats_update(log, 1);
+    state->writeback_list = g_slist_prepend(state->writeback_list, log);
+    bluesky_cloudlog_ref(log);
+    g_mutex_unlock(log->lock);
 
-    if (item->type != LOGTYPE_INODE)
-        return;
+    if (state->data->len > CLOUDLOG_SEGMENT_SIZE)
+        bluesky_cloudlog_flush(fs);
 
-    bluesky_cloudlog_ref(item);
-    state->inode_list = g_list_prepend(state->inode_list, item);
+    return log->location;
 }
 
-void bluesky_cloudlog_write_log(BlueSkyFS *fs)
+static void cloudlog_flush_complete(BlueSkyStoreAsync *async,
+                                    SerializedRecord *record)
 {
-    g_print("Starting cloudlog write...\n");
-
-    BlueSkyCloudLogState *state = fs->log_state;
-    if (state->data == NULL)
-        state->data = g_string_new("");
-
-    g_mutex_lock(fs->lock);
-    g_hash_table_foreach(fs->locations, find_inodes, state);
-    g_mutex_unlock(fs->lock);
-
-    while (state->inode_list != NULL) {
-        BlueSkyCloudLog *log = (BlueSkyCloudLog *)state->inode_list->data;
-        bluesky_cloudlog_serialize(log, state);
-        state->inode_list = g_list_delete_link(state->inode_list,
-                                               state->inode_list);
-    }
+    g_print("Write of %s to cloud complete, status = %d\n",
+            async->key, async->result);
+
+    g_mutex_lock(record->lock);
+    if (async->result >= 0) {
+        while (record->items != NULL) {
+            BlueSkyCloudLog *item = (BlueSkyCloudLog *)record->items->data;
+            g_mutex_lock(item->lock);
+            bluesky_cloudlog_stats_update(item, -1);
+            item->pending_write &= ~CLOUDLOG_CLOUD;
+            item->location_flags |= CLOUDLOG_CLOUD;
+            bluesky_cloudlog_stats_update(item, 1);
+            g_mutex_unlock(item->lock);
+            bluesky_cloudlog_unref(item);
+
+            record->items = g_slist_delete_link(record->items, record->items);
+        }
 
-    if (state->data->len > 0) {
-        g_print("Serialized %zd bytes of data\n", state->data->len);
-
-        BlueSkyStoreAsync *async = bluesky_store_async_new(fs->store);
-        async->op = STORE_OP_PUT;
-        async->key = g_strdup_printf("log-%08d-%08d",
-                                     state->location.directory,
-                                     state->location.sequence);
-        async->data = bluesky_string_new_from_gstring(state->data);
-        bluesky_store_async_submit(async);
-        bluesky_store_async_wait(async);
-        bluesky_store_async_unref(async);
-
-        state->location.sequence++;
-        state->location.offset = 0;
+        bluesky_string_unref(record->data);
+        record->data = NULL;
+        g_slist_free(record->items);
+        record->items = NULL;
+        record->complete = TRUE;
+        g_cond_broadcast(record->cond);
+    } else {
+        g_print("Write should be resubmitted...\n");
+
+        BlueSkyStoreAsync *async2 = bluesky_store_async_new(async->store);
+        async2->op = STORE_OP_PUT;
+        async2->key = g_strdup(async->key);
+        async2->data = record->data;
+        bluesky_string_ref(record->data);
+        bluesky_store_async_submit(async2);
+        bluesky_store_async_add_notifier(async2,
+                                         (GFunc)cloudlog_flush_complete,
+                                         record);
+        bluesky_store_async_unref(async2);
    }
-
-    state->data = NULL;
+    g_mutex_unlock(record->lock);
 }
 
-/* Ensure that a cloud log item is loaded in memory, and if not read it in.
- * TODO: Make asynchronous, and make this also fetch from the cloud. Right now
- * we only read from the log. Log item must be locked. */
-void bluesky_cloudlog_fetch(BlueSkyCloudLog *log)
+/* Finish up a partially-written cloud log segment and flush it to storage. */
+void bluesky_cloudlog_flush(BlueSkyFS *fs)
 {
-    if (log->data != NULL)
+    BlueSkyCloudLogState *state = fs->log_state;
+    if (state->data == NULL || state->data->len == 0)
         return;
 
-    g_print("Re-mapping log entry %d/%d/%d...\n",
-            log->log_seq, log->log_offset, log->log_size);
-
-    g_assert(log->location_flags & CLOUDLOG_JOURNAL);
-
-    log->data = bluesky_log_map_object(log->fs->log, log->log_seq,
-                                       log->log_offset, log->log_size);
+    /* TODO: Append some type of commit record to the log segment? */
+
+    g_print("Serializing %zd bytes of data to cloud\n", state->data->len);
+    SerializedRecord *record = g_new0(SerializedRecord, 1);
+    record->data = bluesky_string_new_from_gstring(state->data);
+    record->items = state->writeback_list;
+    record->lock = g_mutex_new();
+    record->cond = g_cond_new();
+    state->writeback_list = NULL;
+
+    BlueSkyStoreAsync *async = bluesky_store_async_new(fs->store);
+    async->op = STORE_OP_PUT;
+    async->key = g_strdup_printf("log-%08d-%08d",
+                                 state->location.directory,
+                                 state->location.sequence);
+    async->data = record->data;
+    bluesky_string_ref(record->data);
+    bluesky_store_async_submit(async);
+    bluesky_store_async_add_notifier(async,
+                                     (GFunc)cloudlog_flush_complete,
+                                     record);
+    bluesky_store_async_unref(async);
+
+    state->pending_segments = g_list_prepend(state->pending_segments, record);
+
+    state->location.sequence++;
+    state->location.offset = 0;
+    state->data = g_string_new("");
 }
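
The comment added above bluesky_cloudlog_ref() describes the reference-counting protocol for cloud log items: the fs->locations hash table holds an uncounted pointer, so a count that reaches zero does not yet mean the object is dead; a concurrent lookup may resurrect it, and the count must be re-checked under the filesystem lock before the object is removed from the table and freed. The sketch below is a minimal, self-contained illustration of that pattern and is not taken from the BlueSky sources: the names Obj, obj_table, obj_get() and obj_unref() are invented, and C11 atomics plus a pthread mutex stand in for the GLib primitives that cloudlog.c actually uses.

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

typedef struct {
    atomic_int refcount;   /* counted references only; the table's pointer
                              is deliberately not counted */
} Obj;

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static Obj *obj_table;     /* stand-in for the fs->locations hash table */

/* Looking an object up in the table creates a new counted reference. */
static Obj *obj_get(void)
{
    pthread_mutex_lock(&table_lock);
    Obj *o = obj_table;
    if (o != NULL)
        atomic_fetch_add(&o->refcount, 1);   /* possible resurrection */
    pthread_mutex_unlock(&table_lock);
    return o;
}

static void obj_unref(Obj *o)
{
    if (atomic_fetch_sub(&o->refcount, 1) == 1) {
        /* The count hit zero, but a concurrent obj_get() may have
         * resurrected the object; re-check under the table lock. */
        pthread_mutex_lock(&table_lock);
        if (atomic_load(&o->refcount) > 0) {
            pthread_mutex_unlock(&table_lock);
            return;                          /* abort the destruction */
        }
        if (obj_table == o)
            obj_table = NULL;                /* remove from the table */
        pthread_mutex_unlock(&table_lock);
        free(o);                             /* now safe to delete */
    }
}

int main(void)
{
    Obj *o = calloc(1, sizeof(Obj));
    atomic_init(&o->refcount, 1);            /* creator's reference */
    obj_table = o;

    Obj *o2 = obj_get();                     /* table lookup adds a reference */
    obj_unref(o);                            /* drops to 1: object survives */
    obj_unref(o2);                           /* drops to 0: removed and freed */
    return 0;
}

In bluesky_cloudlog_unref() itself the same re-check is done with g_atomic_int_get() under fs->lock before the entry is dropped from fs->locations and its links array, data string, mutex, and condition variable are released.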