* before writing a batch to the cloud, handling indirection through items like
* the inode map, etc. */
-BlueSkyCloudLog *bluesky_cloudlog_new(BlueSkyFS *fs)
+BlueSkyCloudLog *bluesky_cloudlog_new(BlueSkyFS *fs, const BlueSkyCloudID *id)
{
BlueSkyCloudLog *log = g_new0(BlueSkyCloudLog, 1);
log->cond = g_cond_new();
log->fs = fs;
log->type = LOGTYPE_UNKNOWN;
- log->id = bluesky_cloudlog_new_id();
+ if (id != NULL)
+ memcpy(&log->id, id, sizeof(BlueSkyCloudID));
+ else
+ log->id = bluesky_cloudlog_new_id();
log->links = g_array_new(FALSE, TRUE, sizeof(BlueSkyCloudLog *));
g_atomic_int_set(&log->refcount, 1);
return log;
}
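With the optional id argument, callers can either mint a fresh identifier or
pre-create an entry for an ID that is already known (say, one found in a
pointer from another log item). A minimal sketch of the two call patterns;
the fs and known_id variables here are hypothetical:

    /* Mint a brand-new item with a freshly generated ID. */
    BlueSkyCloudLog *fresh = bluesky_cloudlog_new(fs, NULL);

    /* Pre-create a placeholder for a known ID, to be filled in later. */
    BlueSkyCloudLog *stub = bluesky_cloudlog_new(fs, &known_id);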
+/* Helper function for updating memory usage statistics for a filesystem (the
+ * cache_log_* variables). This will increment (type=1) or decrement (type=-1)
+ * the counter associated with the current state of the cloud log item. The
+ * item should be locked or otherwise protected from concurrent access. */
+void bluesky_cloudlog_stats_update(BlueSkyCloudLog *log, int type)
+{
+ BlueSkyFS *fs = log->fs;
+
+ if (log->location_flags & CLOUDLOG_CLOUD) {
+ g_atomic_int_add(&fs->cache_log_cloud, type);
+ } else if (log->location_flags & CLOUDLOG_JOURNAL) {
+ g_atomic_int_add(&fs->cache_log_journal, type);
+ } else if (log->pending_write & CLOUDLOG_JOURNAL) {
+ g_atomic_int_add(&fs->cache_log_journal, type);
+ } else if (log->data != NULL) {
+ g_atomic_int_add(&fs->cache_log_dirty, type);
+ }
+}
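Because bluesky_cloudlog_stats_update classifies an item by its flags at call
time, every state transition must be bracketed by a decrement before the
change and an increment after it, as the hunks below do. A sketch of the
idiom (the particular transition shown is illustrative):

    g_mutex_lock(log->lock);
    bluesky_cloudlog_stats_update(log, -1);     /* leave the old counter */
    log->location_flags |= CLOUDLOG_JOURNAL;    /* some state change */
    bluesky_cloudlog_stats_update(log, 1);      /* enter the new counter */
    g_mutex_unlock(log->lock);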
+
/* The reference held by the hash table does not count towards the reference
* count. When a new object is created, it initially has a reference count of
* 1 for the creator, and similarly fetching an item from the hash table will
g_hash_table_remove(fs->locations, &log->id);
g_mutex_unlock(fs->lock);
+ bluesky_cloudlog_stats_update(log, -1);
log->type = LOGTYPE_INVALID;
g_mutex_free(log->lock);
g_cond_free(log->cond);
g_mutex_unlock(log->fs->lock);
}
-struct log_header {
- char magic[4];
- uint32_t size;
- BlueSkyCloudID id;
- uint32_t pointer_count;
-} __attribute__((packed));
-
-struct logref {
- BlueSkyCloudID id;
- BlueSkyCloudPointer location;
-} __attribute__((packed));
-
-struct log_footer {
- char refmagic[4];
- struct logref refs[0];
-};
+/* Look up the cloud log entry for the given ID. If the item does not exist,
+ * create a special pending entry that can later be filled in when the real
+ * item is loaded. The returned item has a reference held. As a special
+ * case, if a null ID is provided then NULL is returned. */
+BlueSkyCloudLog *bluesky_cloudlog_get(BlueSkyFS *fs, BlueSkyCloudID id)
+{
+ static BlueSkyCloudID id0 = {{0}};
+
+ if (memcmp(&id, &id0, sizeof(BlueSkyCloudID)) == 0)
+ return NULL;
+
+ g_mutex_lock(fs->lock);
+ BlueSkyCloudLog *item;
+ item = g_hash_table_lookup(fs->locations, &id);
+ if (item == NULL) {
+ item = bluesky_cloudlog_new(fs, &id);
+        /* Use the item's own copy of the ID as the key; a pointer to the
+         * stack parameter would dangle once this function returns. */
+        g_hash_table_insert(fs->locations, &item->id, item);
+ } else {
+ bluesky_cloudlog_ref(item);
+ }
+ g_mutex_unlock(fs->lock);
+ return item;
+}
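Since the returned item carries a reference, the caller must drop it when
done. A usage sketch, assuming the bluesky_cloudlog_unref seen elsewhere in
this file and a hypothetical some_id:

    BlueSkyCloudLog *item = bluesky_cloudlog_get(fs, some_id);
    if (item != NULL) {
        /* ... inspect or update the item ... */
        bluesky_cloudlog_unref(item);
    }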
/* Ensure that a cloud log item is loaded in memory, and if not, read it in.
* TODO: Make asynchronous, and make this also fetch from the cloud. Right now
if (log->data != NULL)
return;
- g_assert((log->location_flags | log->pending_write) & CLOUDLOG_JOURNAL);
+    /* There are actually two cases: a full deserialization if we have never
+ * read the object before, and a partial deserialization where the metadata
+ * is already in memory and we just need to remap the data. If the object
+ * type has not yet been set, we'll need to read and parse the metadata.
+     * Once that is done, we can fall through to the case of remapping the data
+ * itself. */
+ if (log->type == LOGTYPE_UNKNOWN) {
+ BlueSkyRCStr *raw = NULL;
+ if ((log->location_flags | log->pending_write) & CLOUDLOG_JOURNAL) {
+ raw = bluesky_log_map_object(log->fs, -1, log->log_seq,
+ log->log_offset, log->log_size);
+ }
+
+ if (raw == NULL && (log->location_flags & CLOUDLOG_CLOUD)) {
+ log->location_flags &= ~CLOUDLOG_JOURNAL;
+ raw = bluesky_log_map_object(log->fs,
+ log->location.directory,
+ log->location.sequence,
+ log->location.offset,
+ log->location.size);
+ }
+
+ g_assert(raw != NULL);
+ bluesky_deserialize_cloudlog(log, raw->data, raw->len);
+ bluesky_string_unref(raw);
+ }
+
+ /* At this point all metadata should be available and we need only remap
+ * the object data. */
+
+ int offset;
+ if ((log->location_flags | log->pending_write) & CLOUDLOG_JOURNAL) {
+ bluesky_cloudlog_stats_update(log, -1);
+ offset = log->log_offset + sizeof(struct log_header);
+ log->data = bluesky_log_map_object(log->fs, -1, log->log_seq,
+ offset, log->data_size);
+ bluesky_cloudlog_stats_update(log, 1);
+ }
- log->data = bluesky_log_map_object(log->fs->log, log->log_seq,
- log->log_offset, log->log_size);
+ if (log->data == NULL && (log->location_flags & CLOUDLOG_CLOUD)) {
+ log->location_flags &= ~CLOUDLOG_JOURNAL;
+ bluesky_cloudlog_stats_update(log, -1);
+ offset = log->location.offset + sizeof(struct cloudlog_header);
+ log->data = bluesky_log_map_object(log->fs, log->location.directory,
+ log->location.sequence,
+ offset, log->data_size);
+ bluesky_cloudlog_stats_update(log, 1);
+ }
+
+ if (log->data == NULL) {
+ g_error("Unable to fetch cloudlog entry!");
+ }
g_cond_broadcast(log->cond);
}
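Both paths above go through bluesky_log_map_object, with a directory argument
of -1 selecting the local journal rather than a cloud log directory. A
hypothetical wrapper making that convention explicit (not part of the patch):

    /* Map a region of the local journal; -1 is the "no cloud directory"
     * sentinel accepted by bluesky_log_map_object. */
    static BlueSkyRCStr *map_journal_object(BlueSkyFS *fs, int seq,
                                            int offset, int size)
    {
        return bluesky_log_map_object(fs, -1, seq, offset, size);
    }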
{
BlueSkyCloudLogState *state = fs->log_state;
- if (log->location_flags & CLOUDLOG_CLOUD) {
+ if ((log->location_flags | log->pending_write) & CLOUDLOG_CLOUD) {
return log->location;
}
bluesky_cloudlog_fetch(log);
g_assert(log->data != NULL);
+ bluesky_cloudlog_stats_update(log, -1);
+
+ GString *data1 = g_string_new("");
+ GString *data2 = g_string_new("");
+ GString *data3 = g_string_new("");
+ bluesky_serialize_cloudlog(log, data1, data2, data3);
+
+ /* TODO: Right now offset/size are set to the raw data, but we should add
+ * header parsing to the code which loads objects back in. */
log->location = state->location;
log->location.offset = state->data->len;
- log->location.size
- = sizeof(struct log_header) + sizeof(BlueSkyCloudID) * 0
- + log->data->len;
-
- struct log_header header;
- memcpy(header.magic, "AgI ", 4);
- header.size = GUINT32_TO_LE(log->location.size);
+ log->data_size = data1->len;
+
+ struct cloudlog_header header;
+ memcpy(header.magic, CLOUDLOG_MAGIC, 4);
+ header.type = log->type + '0';
+ header.size1 = GUINT32_TO_LE(data1->len);
+ header.size2 = GUINT32_TO_LE(data2->len);
+ header.size3 = GUINT32_TO_LE(data3->len);
header.id = log->id;
- header.pointer_count = GUINT32_TO_LE(0);
+ header.inum = GUINT64_TO_LE(log->inum);
g_string_append_len(state->data, (const char *)&header, sizeof(header));
- g_string_append_len(state->data, log->data->data, log->data->len);
+ g_string_append_len(state->data, data1->str, data1->len);
+ g_string_append_len(state->data, data2->str, data2->len);
+ g_string_append_len(state->data, data3->str, data3->len);
+
+ log->location.size = state->data->len - log->location.offset;
+
+ /* If the object we flushed was an inode, update the inode map. */
+ if (log->type == LOGTYPE_INODE) {
+ g_mutex_lock(fs->lock);
+ InodeMapEntry *entry = bluesky_inode_map_lookup(fs->inode_map,
+ log->inum, 1);
+ entry->id = log->id;
+ entry->location = log->location;
+ entry->item = log;
+ bluesky_cloudlog_ref(entry->item);
+ g_mutex_unlock(fs->lock);
+ }
- log->location_flags |= CLOUDLOG_CLOUD;
+    /* The objects are not marked as committed to the cloud until the data
+     * is flushed and acknowledged; until then the write is only pending. */
+ log->pending_write |= CLOUDLOG_CLOUD;
+ bluesky_cloudlog_stats_update(log, 1);
+ state->writeback_list = g_slist_prepend(state->writeback_list, log);
+ bluesky_cloudlog_ref(log);
g_mutex_unlock(log->lock);
    if (state->data->len > CLOUDLOG_SEGMENT_SIZE)
        bluesky_cloudlog_flush(fs);

    return log->location;
}
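The serializer emits a cloudlog_header followed by the three serialized
regions. The header definition lies outside this hunk; a plausible
reconstruction from the fields assigned above (field order, exact types, and
the CLOUDLOG_MAGIC value are assumptions):

    struct cloudlog_header {
        char magic[4];                  /* CLOUDLOG_MAGIC */
        uint8_t type;                   /* log->type + '0' */
        BlueSkyCloudID id;
        uint64_t inum;                  /* little-endian inode number */
        uint32_t size1, size2, size3;   /* little-endian region sizes */
    } __attribute__((packed));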
+static void cloudlog_flush_complete(BlueSkyStoreAsync *async,
+ SerializedRecord *record)
+{
+ g_print("Write of %s to cloud complete, status = %d\n",
+ async->key, async->result);
+
+ g_mutex_lock(record->lock);
+ if (async->result >= 0) {
+ while (record->items != NULL) {
+ BlueSkyCloudLog *item = (BlueSkyCloudLog *)record->items->data;
+ g_mutex_lock(item->lock);
+ bluesky_cloudlog_stats_update(item, -1);
+ item->pending_write &= ~CLOUDLOG_CLOUD;
+ item->location_flags |= CLOUDLOG_CLOUD;
+ bluesky_cloudlog_stats_update(item, 1);
+ g_mutex_unlock(item->lock);
+ bluesky_cloudlog_unref(item);
+
+ record->items = g_slist_delete_link(record->items, record->items);
+ }
+
+ bluesky_string_unref(record->data);
+ record->data = NULL;
+ g_slist_free(record->items);
+ record->items = NULL;
+ record->complete = TRUE;
+ g_cond_broadcast(record->cond);
+ } else {
+ g_print("Write should be resubmitted...\n");
+
+ BlueSkyStoreAsync *async2 = bluesky_store_async_new(async->store);
+ async2->op = STORE_OP_PUT;
+ async2->key = g_strdup(async->key);
+ async2->data = record->data;
+ bluesky_string_ref(record->data);
+ bluesky_store_async_submit(async2);
+ bluesky_store_async_add_notifier(async2,
+ (GFunc)cloudlog_flush_complete,
+ record);
+ bluesky_store_async_unref(async2);
+ }
+ g_mutex_unlock(record->lock);
+}
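The SerializedRecord shared by cloudlog_flush_complete and
bluesky_cloudlog_flush below is also defined outside this hunk; the fields
used suggest roughly the following shape (an assumption, not the verbatim
declaration):

    typedef struct {
        BlueSkyRCStr *data;     /* serialized segment contents */
        GSList *items;          /* BlueSkyCloudLog items awaiting writeback */
        gboolean complete;      /* set once the PUT is acknowledged */
        GMutex *lock;
        GCond *cond;            /* broadcast when complete becomes TRUE */
    } SerializedRecord;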
+
/* Finish up a partially-written cloud log segment and flush it to storage. */
void bluesky_cloudlog_flush(BlueSkyFS *fs)
{
/* TODO: Append some type of commit record to the log segment? */
g_print("Serializing %zd bytes of data to cloud\n", state->data->len);
+ SerializedRecord *record = g_new0(SerializedRecord, 1);
+ record->data = bluesky_string_new_from_gstring(state->data);
+ record->items = state->writeback_list;
+ record->lock = g_mutex_new();
+ record->cond = g_cond_new();
+ state->writeback_list = NULL;
BlueSkyStoreAsync *async = bluesky_store_async_new(fs->store);
async->op = STORE_OP_PUT;
async->key = g_strdup_printf("log-%08d-%08d",
state->location.directory,
state->location.sequence);
- async->data = bluesky_string_new_from_gstring(state->data);
+ async->data = record->data;
+ bluesky_string_ref(record->data);
bluesky_store_async_submit(async);
- bluesky_store_async_wait(async);
+ bluesky_store_async_add_notifier(async,
+ (GFunc)cloudlog_flush_complete,
+ record);
bluesky_store_async_unref(async);
+ state->pending_segments = g_list_prepend(state->pending_segments, record);
+
state->location.sequence++;
state->location.offset = 0;
state->data = g_string_new("");
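With the upload now asynchronous, a caller that needs durability can block on
the record's condition variable until cloudlog_flush_complete marks it done.
A minimal sketch of such a wait loop (illustrative, not part of the patch):

    g_mutex_lock(record->lock);
    while (!record->complete)
        g_cond_wait(record->cond, record->lock);
    g_mutex_unlock(record->lock);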