#include "bluesky-private.h"
-/* The locations hash table in the file system is used to map objects to their locations. Objects are named using 128- */
-
-typedef struct {
- BlueSkyCloudID id;
-
- BlueSkyCloudPointer *cloud_loc;
-} BlueSkyLocationEntry;
+// Rough size limit for a log segment. This is not a firm limit and there are
+// no absolute guarantees on the size of a log segment.
+#define CLOUDLOG_SEGMENT_SIZE (4 << 20)
BlueSkyCloudID bluesky_cloudlog_new_id()
{
log->fs = fs;
log->type = LOGTYPE_UNKNOWN;
log->id = bluesky_cloudlog_new_id();
- log->pointers = g_array_new(FALSE, TRUE, sizeof(BlueSkyCloudID));
+ log->links = g_array_new(FALSE, TRUE, sizeof(BlueSkyCloudLog *));
g_atomic_int_set(&log->refcount, 1);
return log;
}
+/* Helper function for updating memory usage statistics for a filesystem (the
+ * cache_log_* variables). This will increment (type=1) or decrement (type=-1)
+ * the counter associated with the current state of the cloud log item. The
+ * item should be locked or otherwise protected from concurrent access. */
+void bluesky_cloudlog_stats_update(BlueSkyCloudLog *log, int type)
+{
+ BlueSkyFS *fs = log->fs;
+
+ /* Exactly one counter is adjusted, chosen by the item's most authoritative
+ * location: already in the cloud, then in the journal (written or with a
+ * journal write pending), then dirty in-memory data. An item in none of
+ * these states is not counted at all. */
+ if (log->location_flags & CLOUDLOG_CLOUD) {
+ g_atomic_int_add(&fs->cache_log_cloud, type);
+ } else if (log->location_flags & CLOUDLOG_JOURNAL) {
+ g_atomic_int_add(&fs->cache_log_journal, type);
+ } else if (log->pending_write & CLOUDLOG_JOURNAL) {
+ g_atomic_int_add(&fs->cache_log_journal, type);
+ } else if (log->data != NULL) {
+ g_atomic_int_add(&fs->cache_log_dirty, type);
+ }
+}
+
+/* The reference held by the hash table does not count towards the reference
+ * count. When a new object is created, it initially has a reference count of
+ * 1 for the creator, and similarly fetching an item from the hash table will
+ * also create a reference. If the reference count drops to zero,
+ * bluesky_cloudlog_unref attempts to remove the object from the hash
+ * table--but there is a potential race since another thread might read the
+ * object from the hash table at the same time. So an object with a reference
+ * count of zero may still be resurrected, in which case we need to abort the
+ * destruction. Once the object is gone from the hash table, and if the
+ * reference count is still zero, it can actually be deleted. */
void bluesky_cloudlog_ref(BlueSkyCloudLog *log)
{
if (log == NULL)
return;
if (g_atomic_int_dec_and_test(&log->refcount)) {
- g_print("Cloud log refcount dropped to zero.\n");
+ BlueSkyFS *fs = log->fs;
+
+ g_mutex_lock(fs->lock);
+ if (g_atomic_int_get(&log->refcount) > 0) {
+ g_mutex_unlock(fs->lock);
+ return;
+ }
+
+ g_hash_table_remove(fs->locations, &log->id);
+ g_mutex_unlock(fs->lock);
+
+ bluesky_cloudlog_stats_update(log, -1);
+ log->type = LOGTYPE_INVALID;
+ g_mutex_free(log->lock);
+ g_cond_free(log->cond);
+ for (int i = 0; i < log->links->len; i++) {
+ BlueSkyCloudLog *c = g_array_index(log->links,
+ BlueSkyCloudLog *, i);
+ bluesky_cloudlog_unref(c);
+ }
+ g_array_unref(log->links);
+ bluesky_string_unref(log->data);
+ g_free(log);
}
}
struct logref refs[0];
};
+/* Ensure that a cloud log item is loaded in memory, and if not read it in.
+ * TODO: Make asynchronous, and make this also fetch from the cloud. Right now
+ * we only read from the log. Log item must be locked. */
+void bluesky_cloudlog_fetch(BlueSkyCloudLog *log)
+{
+ if (log->data != NULL)
+ return;
+
+ /* Prefer the local journal when the item is there (or a journal write is
+ * pending); otherwise map it from its cloud segment location. The stats
+ * counter is decremented before and re-incremented after because setting
+ * log->data can change which cache counter the item belongs to. */
+ if ((log->location_flags | log->pending_write) & CLOUDLOG_JOURNAL) {
+ bluesky_cloudlog_stats_update(log, -1);
+ /* NOTE(review): the -1 directory argument presumably selects the local
+ * journal rather than a cloud directory -- confirm against
+ * bluesky_log_map_object. */
+ log->data = bluesky_log_map_object(log->fs, -1, log->log_seq,
+ log->log_offset, log->log_size);
+ bluesky_cloudlog_stats_update(log, 1);
+ } else if (log->location_flags & CLOUDLOG_CLOUD) {
+ bluesky_cloudlog_stats_update(log, -1);
+ log->data = bluesky_log_map_object(log->fs, log->location.directory,
+ log->location.sequence,
+ log->location.offset,
+ log->location.size);
+ bluesky_cloudlog_stats_update(log, 1);
+ } else {
+ g_error("Unable to fetch cloudlog entry!");
+ }
+
+ /* Wake any threads blocked waiting on this item's data to arrive. */
+ g_cond_broadcast(log->cond);
+}
+
BlueSkyCloudPointer bluesky_cloudlog_serialize(BlueSkyCloudLog *log,
- BlueSkyCloudLogState *state)
+ BlueSkyFS *fs)
{
+ BlueSkyCloudLogState *state = fs->log_state;
+
+ /* If this item has already been written to the cloud its location is
+ * final; return it without re-serializing. */
 if (log->location_flags & CLOUDLOG_CLOUD) {
 return log->location;
 }
- g_print("Flushing object %s to cloud...\n",
- bluesky_cloudlog_id_to_string(log->id));
-
- for (int i = 0; i < log->pointers->len; i++) {
- BlueSkyCloudID id = g_array_index(log->pointers, BlueSkyCloudID, i);
- g_print(" ...checking reference %s...\n",
- bluesky_cloudlog_id_to_string(id));
- g_mutex_lock(log->fs->lock);
- BlueSkyCloudLog *log2
- = (BlueSkyCloudLog *)g_hash_table_lookup(log->fs->locations, &id);
- // TODO: refcount
- g_mutex_unlock(log->fs->lock);
- g_assert(log2 != NULL);
- bluesky_cloudlog_serialize(log2, state);
+ /* Recursively serialize any linked objects first, so everything this
+ * object references is in the cloud before the object itself. */
+ for (int i = 0; i < log->links->len; i++) {
+ BlueSkyCloudLog *ref = g_array_index(log->links,
+ BlueSkyCloudLog *, i);
+ if (ref != NULL)
+ bluesky_cloudlog_serialize(ref, fs);
 }
+ g_mutex_lock(log->lock);
+ /* Make sure the object data is resident in memory before copying it into
+ * the in-progress cloud log segment buffer. */
+ bluesky_cloudlog_fetch(log);
 g_assert(log->data != NULL);
+ /* The item's location state is about to change; bracket the update with a
+ * decrement of the old state's counter and an increment of the new. */
+ bluesky_cloudlog_stats_update(log, -1);
+
+ /* TODO: Right now offset/size are set to the raw data, but we should add
+ * header parsing to the code which loads objects back in. */
 log->location = state->location;
- log->location.offset = state->data->len;
- log->location.size
- = sizeof(struct log_header) + sizeof(BlueSkyCloudID) * 0
- + log->data->len;
+ log->location.offset = state->data->len + sizeof(struct log_header);
+ log->location.size = log->data->len;
+ /* = sizeof(struct log_header) + sizeof(BlueSkyCloudID) * 0
+ + log->data->len; */
 struct log_header header;
 memcpy(header.magic, "AgI ", 4);
 g_string_append_len(state->data, log->data->data, log->data->len);
 log->location_flags |= CLOUDLOG_CLOUD;
+ bluesky_cloudlog_stats_update(log, 1);
+ g_mutex_unlock(log->lock);
- return log->location;
-}
+ /* Once the segment buffer passes the (soft) size limit, flush it out to
+ * cloud storage and start a new segment. */
+ if (state->data->len > CLOUDLOG_SEGMENT_SIZE)
+ bluesky_cloudlog_flush(fs);
-static void find_inodes(gpointer key, gpointer value, gpointer user_data)
-{
- BlueSkyCloudLogState *state = (BlueSkyCloudLogState *)user_data;
- BlueSkyCloudLog *item = (BlueSkyCloudLog *)value;
-
- if (item->type != LOGTYPE_INODE)
- return;
-
- bluesky_cloudlog_ref(item);
- state->inode_list = g_list_prepend(state->inode_list, item);
+ return log->location;
}
-void bluesky_cloudlog_write_log(BlueSkyFS *fs)
+/* Finish up a partially-written cloud log segment and flush it to storage. */
+void bluesky_cloudlog_flush(BlueSkyFS *fs)
{
- g_print("Starting cloudlog write...\n");
-
 BlueSkyCloudLogState *state = fs->log_state;
- if (state->data == NULL)
- state->data = g_string_new("");
-
- g_mutex_lock(fs->lock);
- g_hash_table_foreach(fs->locations, find_inodes, state);
- g_mutex_unlock(fs->lock);
-
- while (state->inode_list != NULL) {
- BlueSkyCloudLog *log = (BlueSkyCloudLog *)state->inode_list->data;
- bluesky_cloudlog_serialize(log, state);
- state->inode_list = g_list_delete_link(state->inode_list,
- state->inode_list);
- }
-
- if (state->data->len > 0) {
- g_print("Serialized %zd bytes of data\n", state->data->len);
-
- BlueSkyStoreAsync *async = bluesky_store_async_new(fs->store);
- async->op = STORE_OP_PUT;
- async->key = g_strdup_printf("log-%08d-%08d",
- state->location.directory,
- state->location.sequence);
- async->data = bluesky_string_new_from_gstring(state->data);
- bluesky_store_async_submit(async);
- bluesky_store_async_wait(async);
- bluesky_store_async_unref(async);
-
- state->location.sequence++;
- state->location.offset = 0;
- }
-
- state->data = NULL;
-}
-
-/* Ensure that a cloud log item is loaded in memory, and if not read it in.
- * TODO: Make asynchronous, and make this also fetch from the cloud. Right now
- * we only read from the log. Log item must be locked. */
-void bluesky_cloudlog_fetch(BlueSkyCloudLog *log)
-{
- if (log->data != NULL)
+ /* Nothing buffered for the current segment, so there is nothing to do. */
+ if (state->data == NULL || state->data->len == 0)
 return;
- g_print("Re-mapping log entry %d/%d/%d...\n",
- log->log_seq, log->log_offset, log->log_size);
+ /* TODO: Append some type of commit record to the log segment? */
- g_assert(log->location_flags & CLOUDLOG_JOURNAL);
+ g_print("Serializing %zd bytes of data to cloud\n", state->data->len);
- log->data = bluesky_log_map_object(log->fs->log, log->log_seq,
- log->log_offset, log->log_size);
+ /* Upload the segment under a key naming its directory and sequence number.
+ * The PUT is submitted without waiting, making the flush asynchronous. */
+ BlueSkyStoreAsync *async = bluesky_store_async_new(fs->store);
+ async->op = STORE_OP_PUT;
+ async->key = g_strdup_printf("log-%08d-%08d",
+ state->location.directory,
+ state->location.sequence);
+ async->data = bluesky_string_new_from_gstring(state->data);
+ bluesky_store_async_submit(async);
+ //bluesky_store_async_wait(async);
+ /* NOTE(review): async is unreffed immediately after submit with no wait;
+ * presumably the store layer holds its own reference until the operation
+ * completes -- confirm against bluesky_store_async_submit. */
+ bluesky_store_async_unref(async);
- g_cond_broadcast(log->cond);
+ /* Start a fresh segment: advance the sequence number and reset the
+ * in-memory buffer. */
+ state->location.sequence++;
+ state->location.offset = 0;
+ state->data = g_string_new("");
}