X-Git-Url: http://git.vrable.net/?a=blobdiff_plain;f=bluesky%2Fcloudlog.c;h=40067fd3661f91d4ec9e45a9019a73d9043c8bd6;hb=a82b60b3b683840a7074110831bcbaa16a40f0eb;hp=4c671becf7b2eeac7f4e1c9f285c20494052b50d;hpb=3380ff6d419c7d1370c95cf476520ed5398e5b6d;p=bluesky.git diff --git a/bluesky/cloudlog.c b/bluesky/cloudlog.c index 4c671be..40067fd 100644 --- a/bluesky/cloudlog.c +++ b/bluesky/cloudlog.c @@ -93,6 +93,25 @@ BlueSkyCloudLog *bluesky_cloudlog_new(BlueSkyFS *fs) return log; } +/* Helper function for updating memory usage statistics for a filesystem (the + * cache_log_* variables). This will increment (type=1) or decrement (type=-1) + * the counter associated with the current state of the cloud log item. The + * item should be locked or otherwise protected from concurrent access. */ +void bluesky_cloudlog_stats_update(BlueSkyCloudLog *log, int type) +{ + BlueSkyFS *fs = log->fs; + + if (log->location_flags & CLOUDLOG_CLOUD) { + g_atomic_int_add(&fs->cache_log_cloud, type); + } else if (log->location_flags & CLOUDLOG_JOURNAL) { + g_atomic_int_add(&fs->cache_log_journal, type); + } else if (log->pending_write & CLOUDLOG_JOURNAL) { + g_atomic_int_add(&fs->cache_log_journal, type); + } else if (log->data != NULL) { + g_atomic_int_add(&fs->cache_log_dirty, type); + } +} + /* The reference held by the hash table does not count towards the reference * count. When a new object is created, it initially has a reference count of * 1 for the creator, and similarly fetching an item from the hash table will @@ -128,6 +147,7 @@ void bluesky_cloudlog_unref(BlueSkyCloudLog *log) g_hash_table_remove(fs->locations, &log->id); g_mutex_unlock(fs->lock); + bluesky_cloudlog_stats_update(log, -1); log->type = LOGTYPE_INVALID; g_mutex_free(log->lock); g_cond_free(log->cond); @@ -182,10 +202,26 @@ void bluesky_cloudlog_fetch(BlueSkyCloudLog *log) if (log->data != NULL) return; - g_assert((log->location_flags | log->pending_write) & CLOUDLOG_JOURNAL); + if ((log->location_flags | log->pending_write) & CLOUDLOG_JOURNAL) { + bluesky_cloudlog_stats_update(log, -1); + log->data = bluesky_log_map_object(log->fs, -1, log->log_seq, + log->log_offset, log->log_size); + bluesky_cloudlog_stats_update(log, 1); + } + + if (log->data == NULL && (log->location_flags & CLOUDLOG_CLOUD)) { + log->location_flags &= ~CLOUDLOG_JOURNAL; + bluesky_cloudlog_stats_update(log, -1); + log->data = bluesky_log_map_object(log->fs, log->location.directory, + log->location.sequence, + log->location.offset, + log->location.size); + bluesky_cloudlog_stats_update(log, 1); + } - log->data = bluesky_log_map_object(log->fs->log, log->log_seq, - log->log_offset, log->log_size); + if (log->data == NULL) { + g_error("Unable to fetch cloudlog entry!"); + } g_cond_broadcast(log->cond); } @@ -195,7 +231,7 @@ BlueSkyCloudPointer bluesky_cloudlog_serialize(BlueSkyCloudLog *log, { BlueSkyCloudLogState *state = fs->log_state; - if (log->location_flags & CLOUDLOG_CLOUD) { + if ((log->location_flags | log->pending_write) & CLOUDLOG_CLOUD) { return log->location; } @@ -210,11 +246,15 @@ BlueSkyCloudPointer bluesky_cloudlog_serialize(BlueSkyCloudLog *log, bluesky_cloudlog_fetch(log); g_assert(log->data != NULL); + bluesky_cloudlog_stats_update(log, -1); + + /* TODO: Right now offset/size are set to the raw data, but we should add + * header parsing to the code which loads objects back in. 
*/ log->location = state->location; - log->location.offset = state->data->len; - log->location.size - = sizeof(struct log_header) + sizeof(BlueSkyCloudID) * 0 - + log->data->len; + log->location.offset = state->data->len + sizeof(struct log_header); + log->location.size = log->data->len; + /* = sizeof(struct log_header) + sizeof(BlueSkyCloudID) * 0 + + log->data->len; */ struct log_header header; memcpy(header.magic, "AgI ", 4); @@ -225,7 +265,22 @@ BlueSkyCloudPointer bluesky_cloudlog_serialize(BlueSkyCloudLog *log, g_string_append_len(state->data, (const char *)&header, sizeof(header)); g_string_append_len(state->data, log->data->data, log->data->len); - log->location_flags |= CLOUDLOG_CLOUD; + /* If the object we flushed was an inode, update the inode map. */ + if (log->type == LOGTYPE_INODE) { + g_mutex_lock(fs->lock); + InodeMapEntry *entry = bluesky_inode_map_lookup(fs->inode_map, + log->inum, 1); + entry->id = log->id; + entry->location = log->location; + g_mutex_unlock(fs->lock); + } + + /* TODO: We should mark the objects as committed on the cloud until the + * data is flushed and acknowledged. */ + log->pending_write |= CLOUDLOG_CLOUD; + bluesky_cloudlog_stats_update(log, 1); + state->writeback_list = g_slist_prepend(state->writeback_list, log); + bluesky_cloudlog_ref(log); g_mutex_unlock(log->lock); if (state->data->len > CLOUDLOG_SEGMENT_SIZE) @@ -234,6 +289,50 @@ BlueSkyCloudPointer bluesky_cloudlog_serialize(BlueSkyCloudLog *log, return log->location; } +static void cloudlog_flush_complete(BlueSkyStoreAsync *async, + SerializedRecord *record) +{ + g_print("Write of %s to cloud complete, status = %d\n", + async->key, async->result); + + g_mutex_lock(record->lock); + if (async->result >= 0) { + while (record->items != NULL) { + BlueSkyCloudLog *item = (BlueSkyCloudLog *)record->items->data; + g_mutex_lock(item->lock); + bluesky_cloudlog_stats_update(item, -1); + item->pending_write &= ~CLOUDLOG_CLOUD; + item->location_flags |= CLOUDLOG_CLOUD; + bluesky_cloudlog_stats_update(item, 1); + g_mutex_unlock(item->lock); + bluesky_cloudlog_unref(item); + + record->items = g_slist_delete_link(record->items, record->items); + } + + bluesky_string_unref(record->data); + record->data = NULL; + g_slist_free(record->items); + record->items = NULL; + record->complete = TRUE; + g_cond_broadcast(record->cond); + } else { + g_print("Write should be resubmitted...\n"); + + BlueSkyStoreAsync *async2 = bluesky_store_async_new(async->store); + async2->op = STORE_OP_PUT; + async2->key = g_strdup(async->key); + async2->data = record->data; + bluesky_string_ref(record->data); + bluesky_store_async_submit(async2); + bluesky_store_async_add_notifier(async2, + (GFunc)cloudlog_flush_complete, + record); + bluesky_store_async_unref(async2); + } + g_mutex_unlock(record->lock); +} + /* Finish up a partially-written cloud log segment and flush it to storage. */ void bluesky_cloudlog_flush(BlueSkyFS *fs) { @@ -244,17 +343,28 @@ void bluesky_cloudlog_flush(BlueSkyFS *fs) /* TODO: Append some type of commit record to the log segment? 
*/ g_print("Serializing %zd bytes of data to cloud\n", state->data->len); + SerializedRecord *record = g_new0(SerializedRecord, 1); + record->data = bluesky_string_new_from_gstring(state->data); + record->items = state->writeback_list; + record->lock = g_mutex_new(); + record->cond = g_cond_new(); + state->writeback_list = NULL; BlueSkyStoreAsync *async = bluesky_store_async_new(fs->store); async->op = STORE_OP_PUT; async->key = g_strdup_printf("log-%08d-%08d", state->location.directory, state->location.sequence); - async->data = bluesky_string_new_from_gstring(state->data); + async->data = record->data; + bluesky_string_ref(record->data); bluesky_store_async_submit(async); - bluesky_store_async_wait(async); + bluesky_store_async_add_notifier(async, + (GFunc)cloudlog_flush_complete, + record); bluesky_store_async_unref(async); + state->pending_segments = g_list_prepend(state->pending_segments, record); + state->location.sequence++; state->location.offset = 0; state->data = g_string_new("");
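
A quick way to sanity-check the new cache_log_* accounting is to dump the three counters that bluesky_cloudlog_stats_update() maintains. The sketch below is not part of this commit; it assumes the same headers as cloudlog.c and that the counters are plain gint members of BlueSkyFS (as the g_atomic_int_add() calls above imply). The function name is hypothetical.

/* Illustrative only: report the cloudlog cache counters maintained by
 * bluesky_cloudlog_stats_update().  Field names are taken from the diff
 * above; assumes the counters are gint members of BlueSkyFS. */
static void dump_cloudlog_stats(BlueSkyFS *fs)
{
    g_print("cloudlog cache: cloud=%d journal=%d dirty=%d\n",
            g_atomic_int_get(&fs->cache_log_cloud),
            g_atomic_int_get(&fs->cache_log_journal),
            g_atomic_int_get(&fs->cache_log_dirty));
}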
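
For reference, the segment object written by bluesky_cloudlog_flush() is keyed as "log-%08d-%08d" from the directory and sequence numbers of the current BlueSkyCloudPointer, so a reader can rebuild the key from any log->location. A minimal sketch, with a hypothetical helper name:

/* Illustrative only: build the storage key for the segment that holds a
 * given cloud log item, mirroring the g_strdup_printf() format used in
 * bluesky_cloudlog_flush().  The caller frees the returned string. */
static char *cloudlog_segment_key(BlueSkyCloudPointer location)
{
    return g_strdup_printf("log-%08d-%08d",
                           location.directory, location.sequence);
}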
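
Because the flush path now returns before the PUT completes (bluesky_store_async_wait() was replaced by a completion notifier), a caller that needs the old synchronous behavior has to block on the SerializedRecord itself. cloudlog_flush_complete() sets record->complete and broadcasts record->cond only once the write (including any resubmissions) has succeeded, so a wait loop like the following sketch works; the function name is hypothetical, while the lock/cond/complete fields are the ones introduced in this diff.

/* Illustrative only: block until a previously submitted log segment has
 * been written back to cloud storage.  Relies on cloudlog_flush_complete()
 * setting record->complete and broadcasting record->cond on success. */
static void cloudlog_wait_for_segment(SerializedRecord *record)
{
    g_mutex_lock(record->lock);
    while (!record->complete)
        g_cond_wait(record->cond, record->lock);
    g_mutex_unlock(record->lock);
}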