#include "bluesky-private.h"
-/* The locations hash table in the file system is used to map objects to their locations. Objects are named using 128- */
-
-typedef struct {
- BlueSkyCloudID id;
-
- BlueSkyCloudPointer *cloud_loc;
-} BlueSkyLocationEntry;
+// Rough size limit for a log segment. This is not a firm limit and there are
+// no absolute guarantees on the size of a log segment.
+#define CLOUDLOG_SEGMENT_SIZE (4 << 20)
BlueSkyCloudID bluesky_cloudlog_new_id()
{
log->fs = fs;
log->type = LOGTYPE_UNKNOWN;
log->id = bluesky_cloudlog_new_id();
- log->pointers = g_array_new(FALSE, TRUE, sizeof(BlueSkyCloudID));
+ log->links = g_array_new(FALSE, TRUE, sizeof(BlueSkyCloudLog *));
g_atomic_int_set(&log->refcount, 1);
return log;
}
+/* Helper function for updating memory usage statistics for a filesystem (the
+ * cache_log_* variables). This will increment (type=1) or decrement (type=-1)
+ * the counter associated with the current state of the cloud log item. The
+ * item should be locked or otherwise protected from concurrent access. */
+void bluesky_cloudlog_stats_update(BlueSkyCloudLog *log, int type)
+{
+    BlueSkyFS *fs = log->fs;
+
+    /* Exactly one counter is touched, chosen in priority order: an item
+     * already in the cloud counts as cloud; otherwise an item in the journal
+     * (or with a journal write pending) counts as journal; otherwise any
+     * item still holding in-memory data counts as dirty.  Callers bracket a
+     * state change with update(-1) / update(+1) so the item moves between
+     * counters. */
+    if (log->location_flags & CLOUDLOG_CLOUD) {
+        g_atomic_int_add(&fs->cache_log_cloud, type);
+    } else if (log->location_flags & CLOUDLOG_JOURNAL) {
+        g_atomic_int_add(&fs->cache_log_journal, type);
+    } else if (log->pending_write & CLOUDLOG_JOURNAL) {
+        g_atomic_int_add(&fs->cache_log_journal, type);
+    } else if (log->data != NULL) {
+        g_atomic_int_add(&fs->cache_log_dirty, type);
+    }
+}
+
/* The reference held by the hash table does not count towards the reference
* count. When a new object is created, it initially has a reference count of
* 1 for the creator, and similarly fetching an item from the hash table will
g_hash_table_remove(fs->locations, &log->id);
g_mutex_unlock(fs->lock);
+ bluesky_cloudlog_stats_update(log, -1);
log->type = LOGTYPE_INVALID;
g_mutex_free(log->lock);
g_cond_free(log->cond);
- g_array_unref(log->pointers);
+ for (int i = 0; i < log->links->len; i++) {
+ BlueSkyCloudLog *c = g_array_index(log->links,
+ BlueSkyCloudLog *, i);
+ bluesky_cloudlog_unref(c);
+ }
+ g_array_unref(log->links);
bluesky_string_unref(log->data);
g_free(log);
}
struct logref refs[0];
};
+/* Ensure that a cloud log item is loaded in memory, and if not read it in.
+ * TODO: Make asynchronous, and make this also fetch from the cloud. Right now
+ * we only read from the log. Log item must be locked. */
+void bluesky_cloudlog_fetch(BlueSkyCloudLog *log)
+{
+    if (log->data != NULL)
+        return;
+
+    /* Prefer the local journal copy (present, or with a write pending) over
+     * the cloud copy.  Stats are decremented before and re-incremented after
+     * the mapping since attaching data can change which cache counter the
+     * item falls under. */
+    if ((log->location_flags | log->pending_write) & CLOUDLOG_JOURNAL) {
+        bluesky_cloudlog_stats_update(log, -1);
+        log->data = bluesky_log_map_object(log->fs, -1, log->log_seq,
+                                           log->log_offset, log->log_size);
+        bluesky_cloudlog_stats_update(log, 1);
+    }
+
+    /* Fall back to the cloud location if the journal did not yield the data;
+     * the (now stale) journal flag is dropped before updating stats. */
+    if (log->data == NULL && (log->location_flags & CLOUDLOG_CLOUD)) {
+        log->location_flags &= ~CLOUDLOG_JOURNAL;
+        bluesky_cloudlog_stats_update(log, -1);
+        log->data = bluesky_log_map_object(log->fs, log->location.directory,
+                                           log->location.sequence,
+                                           log->location.offset,
+                                           log->location.size);
+        bluesky_cloudlog_stats_update(log, 1);
+    }
+
+    if (log->data == NULL) {
+        g_error("Unable to fetch cloudlog entry!");
+    }
+
+    /* Wake any threads blocked on this item's condition variable waiting
+     * for the data to appear. */
+    g_cond_broadcast(log->cond);
+}
+
+/* Serialize a cloud log item into the in-progress cloud segment
+ * (fs->log_state->data) and return its resulting cloud location.  Items
+ * already committed to the cloud are returned as-is.  Linked items are
+ * serialized first (recursively).  The item is put on the writeback list
+ * with an extra reference and marked with a pending cloud write; the
+ * segment is flushed once it grows past CLOUDLOG_SEGMENT_SIZE. */
BlueSkyCloudPointer bluesky_cloudlog_serialize(BlueSkyCloudLog *log,
-                                               BlueSkyCloudLogState *state)
+                                               BlueSkyFS *fs)
{
+    BlueSkyCloudLogState *state = fs->log_state;
+
    if (log->location_flags & CLOUDLOG_CLOUD) {
        return log->location;
    }
-    g_print("Flushing object %s to cloud...\n",
-            bluesky_cloudlog_id_to_string(log->id));
-
-    for (int i = 0; i < log->pointers->len; i++) {
-        BlueSkyCloudID id = g_array_index(log->pointers, BlueSkyCloudID, i);
-        g_print("  ...checking reference %s...\n",
-                bluesky_cloudlog_id_to_string(id));
-        g_mutex_lock(log->fs->lock);
-        BlueSkyCloudLog *log2
-            = (BlueSkyCloudLog *)g_hash_table_lookup(log->fs->locations, &id);
-        // TODO: refcount
-        bluesky_cloudlog_fetch(log2);
-        g_assert(log2 != NULL);
-        bluesky_cloudlog_ref(log2);
-        g_mutex_unlock(log->fs->lock);
-        bluesky_cloudlog_serialize(log2, state);
-        bluesky_cloudlog_unref(log2);
+    /* Serialize everything this item links to before the item itself. */
+    for (int i = 0; i < log->links->len; i++) {
+        BlueSkyCloudLog *ref = g_array_index(log->links,
+                                             BlueSkyCloudLog *, i);
+        if (ref != NULL)
+            bluesky_cloudlog_serialize(ref, fs);
    }
    g_mutex_lock(log->lock);
    bluesky_cloudlog_fetch(log);
    g_assert(log->data != NULL);
+    bluesky_cloudlog_stats_update(log, -1);
+
+    /* TODO: Right now offset/size are set to the raw data, but we should add
+     * header parsing to the code which loads objects back in. */
    log->location = state->location;
-    log->location.offset = state->data->len;
-    log->location.size
-        = sizeof(struct log_header) + sizeof(BlueSkyCloudID) * 0
-        + log->data->len;
+    log->location.offset = state->data->len + sizeof(struct log_header);
+    log->location.size = log->data->len;
+    /* = sizeof(struct log_header) + sizeof(BlueSkyCloudID) * 0
+       + log->data->len; */
    struct log_header header;
    memcpy(header.magic, "AgI ", 4);
    g_string_append_len(state->data, (const char *)&header, sizeof(header));
    g_string_append_len(state->data, log->data->data, log->data->len);
-    log->location_flags |= CLOUDLOG_CLOUD;
+    /* TODO: We should mark the objects as committed on the cloud until the
+     * data is flushed and acknowledged. */
+    log->pending_write |= CLOUDLOG_CLOUD;
+    bluesky_cloudlog_stats_update(log, 1);
+    /* The writeback list holds a reference to the item until the segment
+     * write completes (dropped in cloudlog_flush_complete). */
+    state->writeback_list = g_slist_prepend(state->writeback_list, log);
+    bluesky_cloudlog_ref(log);
    g_mutex_unlock(log->lock);
+    if (state->data->len > CLOUDLOG_SEGMENT_SIZE)
+        bluesky_cloudlog_flush(fs);
-static void find_inodes(gpointer key, gpointer value, gpointer user_data)
-{
-    BlueSkyCloudLogState *state = (BlueSkyCloudLogState *)user_data;
-    BlueSkyCloudLog *item = (BlueSkyCloudLog *)value;
-
-    if (item->type != LOGTYPE_INODE)
-        return;
-
-    bluesky_cloudlog_ref(item);
-    state->inode_list = g_list_prepend(state->inode_list, item);
+    return log->location;
}
-void bluesky_cloudlog_write_log(BlueSkyFS *fs)
+/* Notifier invoked when the asynchronous PUT of a cloud log segment
+ * finishes.  On success (result >= 0): each item in the record is marked as
+ * committed to the cloud (pending-write bit cleared, CLOUDLOG_CLOUD set,
+ * stats moved), its writeback reference is dropped, the record's data is
+ * released, and waiters on record->cond are woken.  On failure the same
+ * data is resubmitted under the same key with this notifier re-attached. */
+static void cloudlog_flush_complete(BlueSkyStoreAsync *async,
+                                    SerializedRecord *record)
{
-    BlueSkyCloudLogState *state = fs->log_state;
-    if (state->data == NULL)
-        state->data = g_string_new("");
-
-    g_mutex_lock(fs->lock);
-    g_hash_table_foreach(fs->locations, find_inodes, state);
-    g_mutex_unlock(fs->lock);
-
-    while (state->inode_list != NULL) {
-        BlueSkyCloudLog *log = (BlueSkyCloudLog *)state->inode_list->data;
-        bluesky_cloudlog_serialize(log, state);
-        bluesky_cloudlog_unref(log);
-        state->inode_list = g_list_delete_link(state->inode_list,
-                                               state->inode_list);
-    }
+    g_print("Write of %s to cloud complete, status = %d\n",
+            async->key, async->result);
+
+    g_mutex_lock(record->lock);
+    if (async->result >= 0) {
+        /* Promote every item in this segment from "pending cloud write" to
+         * "stored in the cloud", rebalancing the cache stats around the
+         * flag change. */
+        while (record->items != NULL) {
+            BlueSkyCloudLog *item = (BlueSkyCloudLog *)record->items->data;
+            g_mutex_lock(item->lock);
+            bluesky_cloudlog_stats_update(item, -1);
+            item->pending_write &= ~CLOUDLOG_CLOUD;
+            item->location_flags |= CLOUDLOG_CLOUD;
+            bluesky_cloudlog_stats_update(item, 1);
+            g_mutex_unlock(item->lock);
+            /* Drop the reference taken when the item was queued for
+             * writeback. */
+            bluesky_cloudlog_unref(item);
+
+            record->items = g_slist_delete_link(record->items, record->items);
+        }
-    if (state->data->len > 0) {
-        g_print("Serialized %zd bytes of data to cloud\n", state->data->len);
-
-        BlueSkyStoreAsync *async = bluesky_store_async_new(fs->store);
-        async->op = STORE_OP_PUT;
-        async->key = g_strdup_printf("log-%08d-%08d",
-                                     state->location.directory,
-                                     state->location.sequence);
-        async->data = bluesky_string_new_from_gstring(state->data);
-        bluesky_store_async_submit(async);
-        bluesky_store_async_wait(async);
-        bluesky_store_async_unref(async);
-
-        state->location.sequence++;
-        state->location.offset = 0;
+        bluesky_string_unref(record->data);
+        record->data = NULL;
+        g_slist_free(record->items);
+        record->items = NULL;
+        record->complete = TRUE;
+        g_cond_broadcast(record->cond);
+    } else {
+        /* Store reported an error: retry the PUT with the retained data and
+         * key, completing (or retrying again) via this same notifier. */
+        g_print("Write should be resubmitted...\n");
+
+        BlueSkyStoreAsync *async2 = bluesky_store_async_new(async->store);
+        async2->op = STORE_OP_PUT;
+        async2->key = g_strdup(async->key);
+        async2->data = record->data;
+        bluesky_string_ref(record->data);
+        bluesky_store_async_submit(async2);
+        bluesky_store_async_add_notifier(async2,
+                                         (GFunc)cloudlog_flush_complete,
+                                         record);
+        bluesky_store_async_unref(async2);
    }
-
-    state->data = NULL;
+    g_mutex_unlock(record->lock);
}
-/* Ensure that a cloud log item is loaded in memory, and if not read it in.
- * TODO: Make asynchronous, and make this also fetch from the cloud. Right now
- * we only read from the log. Log item must be locked. */
-void bluesky_cloudlog_fetch(BlueSkyCloudLog *log)
+/* Finish up a partially-written cloud log segment and flush it to storage.
+ * A no-op when no segment data has been accumulated.  The segment's bytes
+ * and the writeback list are handed off to a SerializedRecord, an
+ * asynchronous PUT is submitted (completion handled by
+ * cloudlog_flush_complete), and the log state is reset for the next
+ * segment. */
+void bluesky_cloudlog_flush(BlueSkyFS *fs)
{
-    if (log->data != NULL)
+    BlueSkyCloudLogState *state = fs->log_state;
+    if (state->data == NULL || state->data->len == 0)
        return;
-    g_print("Re-mapping log entry %d/%d/%d...\n",
-            log->log_seq, log->log_offset, log->log_size);
-
-    g_assert(log->location_flags & CLOUDLOG_JOURNAL);
-
-    log->data = bluesky_log_map_object(log->fs->log, log->log_seq,
-                                       log->log_offset, log->log_size);
-
-    g_cond_broadcast(log->cond);
+    /* TODO: Append some type of commit record to the log segment? */
+
+    g_print("Serializing %zd bytes of data to cloud\n", state->data->len);
+    /* Ownership of the accumulated segment bytes and of the writeback list
+     * (with its item references) transfers to the record. */
+    SerializedRecord *record = g_new0(SerializedRecord, 1);
+    record->data = bluesky_string_new_from_gstring(state->data);
+    record->items = state->writeback_list;
+    record->lock = g_mutex_new();
+    record->cond = g_cond_new();
+    state->writeback_list = NULL;
+
+    BlueSkyStoreAsync *async = bluesky_store_async_new(fs->store);
+    async->op = STORE_OP_PUT;
+    async->key = g_strdup_printf("log-%08d-%08d",
+                                 state->location.directory,
+                                 state->location.sequence);
+    async->data = record->data;
+    bluesky_string_ref(record->data);
+    bluesky_store_async_submit(async);
+    bluesky_store_async_add_notifier(async,
+                                     (GFunc)cloudlog_flush_complete,
+                                     record);
+    bluesky_store_async_unref(async);
+
+    state->pending_segments = g_list_prepend(state->pending_segments, record);
+
+    /* Start a fresh segment at the next sequence number. */
+    state->location.sequence++;
+    state->location.offset = 0;
+    state->data = g_string_new("");
}