+ g_print("Fetch of %s from cloud complete, status = %d\n",
+ async->key, async->result);
+
+ g_mutex_lock(cachefile->lock);
+ if (async->result >= 0) {
+ char *pathname = g_strdup_printf("%s/%s",
+ cachefile->log->log_directory,
+ cachefile->filename);
+ if (!g_file_set_contents(pathname, async->data->data, async->data->len,
+ NULL))
+ g_print("Error writing out fetched file to cache!\n");
+ g_free(pathname);
+
+ cachefile->fetching = FALSE;
+ cachefile->ready = TRUE;
+ } else {
+ g_print("Error fetching from cloud, retrying...\n");
+ cloudlog_fetch_start(cachefile);
+ }
+
+ bluesky_cachefile_unref(cachefile);
+ g_cond_broadcast(cachefile->cond);
+ g_mutex_unlock(cachefile->lock);
+}
+
+ /* Find the BlueSkyCacheFile object for the given journal or cloud log segment.
+  * Returns the object in the locked state and with a reference taken. */
+ BlueSkyCacheFile *bluesky_cachefile_lookup(BlueSkyFS *fs,
+                                            int clouddir, int log_seq)
+ {
+     if (page_size == 0) {
+         page_size = getpagesize();
+     }
+
+     BlueSkyLog *log = fs->log;
+
+     struct stat statbuf;
+     char logname[64];
+     int type;
+
+     /* clouddir < 0 requests a local journal segment; otherwise a cloud log
+      * segment within the numbered cloud directory. */
+     if (clouddir < 0) {
+         snprintf(logname, sizeof(logname), "journal-%08d", log_seq);
+         type = CLOUDLOG_JOURNAL;
+     } else {
+         snprintf(logname, sizeof(logname), "log-%08d-%08d", clouddir, log_seq);
+         type = CLOUDLOG_CLOUD;
+     }
+
+     BlueSkyCacheFile *map;
+     g_mutex_lock(log->mmap_lock);
+     map = g_hash_table_lookup(log->mmap_cache, logname);
+
+     if (map == NULL
+         && type == CLOUDLOG_JOURNAL
+         && fstatat(log->dirfd, logname, &statbuf, 0) < 0) {
+         /* A stale reference to a journal file which doesn't exist any longer
+          * because it was reclaimed.  Return NULL. */
+     } else if (map == NULL) {
+         g_print("Adding cache file %s\n", logname);
+
+         map = g_new0(BlueSkyCacheFile, 1);
+         map->fs = fs;
+         map->type = type;       /* fix: was redundantly assigned twice */
+         map->lock = g_mutex_new();
+         g_mutex_lock(map->lock);
+         map->cond = g_cond_new();
+         map->filename = g_strdup(logname);
+         map->log_seq = log_seq;
+         map->log = log;
+         g_atomic_int_set(&map->mapcount, 0);
+         g_atomic_int_set(&map->refcount, 0);
+
+         /* The table takes no ownership here: filename and struct are freed
+          * explicitly when the entry is reclaimed. */
+         g_hash_table_insert(log->mmap_cache, map->filename, map);
+
+         // If the log file is stored in the cloud, we may need to fetch it
+         if (clouddir >= 0)
+             cloudlog_fetch_start(map);
+     } else {
+         g_mutex_lock(map->lock);
+     }
+
+     g_mutex_unlock(log->mmap_lock);
+     if (map != NULL)
+         g_atomic_int_inc(&map->refcount);
+     return map;
+ }
+
+ /* Map an object out of a journal or cloud log segment.  Looks up (and, for
+  * cloud segments, waits for the fetch of) the backing cache file, mmaps it on
+  * first use, and returns a string referencing [log_offset, log_offset +
+  * log_size) of the mapping.  Returns NULL if the segment cannot be located,
+  * opened, or mapped. */
+ BlueSkyRCStr *bluesky_log_map_object(BlueSkyFS *fs, int log_dir,
+                                      int log_seq, int log_offset, int log_size)
+ {
+     if (page_size == 0) {
+         page_size = getpagesize();
+     }
+
+     BlueSkyLog *log = fs->log;
+     BlueSkyCacheFile *map = bluesky_cachefile_lookup(fs, log_dir, log_seq);
+
+     if (map == NULL) {
+         return NULL;
+     }
+
+     /* map is returned locked with a reference held; note that mmap_lock was
+      * already released inside bluesky_cachefile_lookup(). */
+     if (map->addr == NULL) {
+         /* If a fetch from the cloud is in progress, wait for it to finish. */
+         while (!map->ready && map->fetching) {
+             g_print("Waiting for log segment to be fetched from cloud...\n");
+             g_cond_wait(map->cond, map->lock);
+         }
+
+         int fd = openat(log->dirfd, map->filename, O_RDONLY);
+
+         if (fd < 0) {
+             fprintf(stderr, "Error opening logfile %s: %m\n", map->filename);
+             bluesky_cachefile_unref(map);
+             g_mutex_unlock(map->lock);
+             return NULL;
+         }
+
+         off_t length = lseek(fd, 0, SEEK_END);
+         void *addr = mmap(NULL, length, PROT_READ, MAP_SHARED, fd, 0);
+         close(fd);
+         /* BUG FIX: mmap failure returns MAP_FAILED, not NULL; the old code
+          * never checked and would have cached a bogus address. */
+         if (addr == MAP_FAILED) {
+             fprintf(stderr, "Error mapping logfile %s: %m\n", map->filename);
+             bluesky_cachefile_unref(map);
+             g_mutex_unlock(map->lock);
+             return NULL;
+         }
+         map->addr = (const char *)addr;
+
+         /* Keep cache-size accounting consistent with the new length. */
+         g_atomic_int_add(&log->disk_used, -(map->len / 1024));
+         map->len = length;
+         g_atomic_int_add(&log->disk_used, map->len / 1024);
+
+         g_print("Re-mapped log segment %d...\n", log_seq);
+         /* The mapping itself holds a reference on the cache file; it is
+          * dropped in bluesky_mmap_unref() when the map count reaches zero. */
+         g_atomic_int_inc(&map->refcount);
+     }
+
+     /* BUG FIX: the old code called g_mutex_unlock(log->mmap_lock) here, but
+      * mmap_lock is not held at this point (bluesky_cachefile_lookup released
+      * it); unlocking an unheld GMutex is undefined behavior. */
+
+     BlueSkyRCStr *str;
+     map->atime = bluesky_get_current_time();
+     str = bluesky_string_new_from_mmap(map, log_offset, log_size);
+     bluesky_cachefile_unref(map);
+     g_mutex_unlock(map->lock);
+     return str;
+ }
+
+ /* Drop one mapping reference to a cache file.  When the last mapping goes
+  * away the file is unmapped and the reference the mapping held on the cache
+  * file (taken in bluesky_log_map_object) is released. */
+ void bluesky_mmap_unref(BlueSkyCacheFile *mmap)
+ {
+ if (mmap == NULL)
+ return;
+
+ if (g_atomic_int_dec_and_test(&mmap->mapcount)) {
+ g_mutex_lock(mmap->lock);
+ // Re-check under the lock: another thread may have taken a new mapping
+ // (bumping mapcount) between the decrement above and acquiring the lock.
+ if (g_atomic_int_get(&mmap->mapcount) == 0) {
+ g_print("Unmapped log segment %d...\n", mmap->log_seq);
+ munmap((void *)mmap->addr, mmap->len);
+ mmap->addr = NULL;
+ // Release the reference held on behalf of the mapping itself.
+ g_atomic_int_add(&mmap->refcount, -1);
+ }
+ g_mutex_unlock(mmap->lock);
+ }
+ }
+
+ /* Scan through all currently-stored files in the journal/cache and garbage
+ * collect old unused ones, if needed.  (This describes bluesky_cachefile_gc
+ * below; the next two functions are its helper callbacks.) */
+ /* GHFunc callback: accumulate every cache-file entry of the hash table into
+  * the GList pointed to by user_data.  The key is unused. */
+ static void gather_cachefiles(gpointer key, gpointer value, gpointer user_data)
+ {
+     GList **list_out = (GList **)user_data;
+
+     (void)key;
+     *list_out = g_list_prepend(*list_out, value);
+ }
+
+ /* GCompareFunc: order cache files by last-access time, oldest first. */
+ static gint compare_cachefiles(gconstpointer a, gconstpointer b)
+ {
+     int64_t atime_a = ((BlueSkyCacheFile *)a)->atime;
+     int64_t atime_b = ((BlueSkyCacheFile *)b)->atime;
+
+     /* Branch-free three-way comparison: yields -1, 0, or 1. */
+     return (atime_a > atime_b) - (atime_a < atime_b);
+ }
+
+ /* Garbage-collect the journal/cache directory: walk all cache files from
+  * least- to most-recently used and delete reclaimable ones until disk usage
+  * drops back under the configured cache size.  Runs with mmap_lock held for
+  * the entire scan. */
+ void bluesky_cachefile_gc(BlueSkyFS *fs)
+ {
+ GList *files = NULL;
+
+ g_mutex_lock(fs->log->mmap_lock);
+ g_hash_table_foreach(fs->log->mmap_cache, gather_cachefiles, &files);
+
+ /* Sort based on atime. The atime should be stable since it shouldn't be
+ * updated except by threads which can grab the mmap_lock, which we already
+ * hold. */
+ files = g_list_sort(files, compare_cachefiles);
+
+ /* Walk the list of files, starting with the oldest, deleting files if
+ * possible until enough space has been reclaimed. */
+ g_print("\nScanning cache: (total size = %d kB)\n", fs->log->disk_used);
+ while (files != NULL) {
+ BlueSkyCacheFile *cachefile = (BlueSkyCacheFile *)files->data;
+ /* Try to lock the structure, but if the lock is held by another thread
+ * then we'll just skip the file on this pass. */
+ if (g_mutex_trylock(cachefile->lock)) {
+ int64_t age = bluesky_get_current_time() - cachefile->atime;
+ g_print("%s addr=%p mapcount=%d refcount=%d atime_age=%f",
+ cachefile->filename, cachefile->addr, cachefile->mapcount,
+ cachefile->refcount, age / 1e6);
+ if (cachefile->fetching)
+ g_print(" (fetching)");
+ g_print("\n");
+
+ /* A file may only be reclaimed when the cache is over budget and the
+ * file is both unreferenced and unmapped. */
+ gboolean deletion_candidate = FALSE;
+ if (g_atomic_int_get(&fs->log->disk_used)
+ > bluesky_options.cache_size
+ && g_atomic_int_get(&cachefile->refcount) == 0
+ && g_atomic_int_get(&cachefile->mapcount) == 0)
+ {
+ deletion_candidate = TRUE;
+ }
+
+ /* Don't allow journal files to be reclaimed until all data is
+ * known to be durably stored in the cloud. */
+ if (cachefile->type == CLOUDLOG_JOURNAL
+ && cachefile->log_seq >= fs->log->journal_watermark)
+ {
+ deletion_candidate = FALSE;
+ }
+
+ if (deletion_candidate) {
+ g_print(" ...deleting\n");
+ if (unlinkat(fs->log->dirfd, cachefile->filename, 0) < 0) {
+ fprintf(stderr, "Unable to unlink journal %s: %m\n",
+ cachefile->filename);
+ }
+
+ g_atomic_int_add(&fs->log->disk_used, -(cachefile->len / 1024));
+ /* NOTE(review): the table is keyed on cachefile->filename, which is
+ * then freed below; if mmap_cache was created with destroy-notify
+ * functions this would double-free — verify the table's setup. */
+ g_hash_table_remove(fs->log->mmap_cache, cachefile->filename);
+ g_mutex_unlock(cachefile->lock);
+ g_mutex_free(cachefile->lock);
+ g_cond_free(cachefile->cond);
+ g_free(cachefile->filename);
+ g_free(cachefile);
+ } else {
+ g_mutex_unlock(cachefile->lock);
+ }
+ }
+ files = g_list_delete_link(files, files);
+ }
+ /* files is always NULL here (the loop consumed every link), so this free
+ * is a no-op kept for symmetry. */
+ g_list_free(files);
+
+ g_mutex_unlock(fs->log->mmap_lock);
+ }
+
+/******************************* JOURNAL REPLAY *******************************
+ * The journal replay code is used to recover filesystem state after a
+ * filesystem restart. We first look for the most recent commit record in the
+ * journal, which indicates the point before which all data in the journal has
+ * also been committed to the cloud. Then, we read in all data in the log past
+ * that point.
+ */
+ /* Build a sorted list of the journal segment files ("journal-*") present in
+  * the given directory.  The caller owns the returned list and every string
+  * in it.  Returns NULL (after logging a warning) if the directory cannot be
+  * opened. */
+ static GList *directory_contents(const char *dirname)
+ {
+     GDir *dir = g_dir_open(dirname, 0, NULL);
+     if (dir == NULL) {
+         g_warning("Unable to open journal directory: %s", dirname);
+         return NULL;
+     }
+
+     GList *contents = NULL;
+     const gchar *entry;
+     while ((entry = g_dir_read_name(dir)) != NULL) {
+         if (strncmp(entry, "journal-", 8) == 0)
+             contents = g_list_prepend(contents, g_strdup(entry));
+     }
+     g_dir_close(dir);
+
+     return g_list_sort(contents, (GCompareFunc)strcmp);
+ }
+
+ /* Validate a single journal item at the given offset within a mapped journal
+  * segment: the item must fit inside the buffer, carry matching header and
+  * footer magic numbers, record its own offset correctly, and pass a CRC over
+  * the entire item.  Returns TRUE only for a fully-intact item. */
+ static gboolean validate_journal_item(const char *buf, size_t len, off_t offset)
+ {
+     const struct log_header *header;
+     const struct log_footer *footer;
+
+     if (offset + sizeof(struct log_header) + sizeof(struct log_footer) > len)
+         return FALSE;
+
+     header = (const struct log_header *)(buf + offset);
+     if (GUINT32_FROM_LE(header->magic) != HEADER_MAGIC)
+         return FALSE;
+     if (GUINT32_FROM_LE(header->offset) != offset)
+         return FALSE;
+
+     /* Sum the three payload sizes in a 64-bit type: on hosts where size_t is
+      * 32 bits, a corrupt header with huge size fields could otherwise wrap
+      * the sum and defeat the bounds checks below. */
+     uint64_t size = (uint64_t)GUINT32_FROM_LE(header->size1)
+                     + GUINT32_FROM_LE(header->size2)
+                     + GUINT32_FROM_LE(header->size3);
+     if (size > len)
+         return FALSE;
+
+     off_t footer_offset = offset + sizeof(struct log_header) + size;
+     if (footer_offset + sizeof(struct log_footer) > len)
+         return FALSE;
+     footer = (const struct log_footer *)(buf + footer_offset);
+
+     if (GUINT32_FROM_LE(footer->magic) != FOOTER_MAGIC)
+         return FALSE;
+
+     /* The CRC range covers header, payload, and footer (which includes the
+      * stored checksum), so an intact item checksums to the fixed validator
+      * constant. */
+     uint32_t crc = crc32c(BLUESKY_CRC32C_SEED, buf + offset,
+                           sizeof(struct log_header) + sizeof(struct log_footer)
+                           + size);
+     if (crc != BLUESKY_CRC32C_VALIDATOR) {
+         g_warning("Journal entry failed to validate: CRC %08x != %08x",
+                   crc, BLUESKY_CRC32C_VALIDATOR);
+         return FALSE;
+     }
+
+     return TRUE;
+ }
+
+ /* Scan through a journal segment to extract correctly-written items (those
+  * that pass sanity checks and have a valid checksum).  Every checkpoint
+  * record encountered overwrites *seq and *start_offset with the replay
+  * starting point it recorded, so the last valid checkpoint wins. */
+ static void bluesky_replay_scan_journal(const char *buf, size_t len,
+                                         uint32_t *seq, uint32_t *start_offset)
+ {
+     for (off_t pos = 0; validate_journal_item(buf, len, pos); ) {
+         const struct log_header *hdr = (const struct log_header *)(buf + pos);
+         size_t payload = GUINT32_FROM_LE(hdr->size1)
+                          + GUINT32_FROM_LE(hdr->size2)
+                          + GUINT32_FROM_LE(hdr->size3);
+
+         if (hdr->type - '0' == LOGTYPE_JOURNAL_CHECKPOINT) {
+             const uint32_t *fields
+                 = (const uint32_t *)((const char *)hdr
+                                      + sizeof(struct log_header));
+             *seq = GUINT32_FROM_LE(fields[0]);
+             *start_offset = GUINT32_FROM_LE(fields[1]);
+         }
+
+         pos += sizeof(struct log_header) + payload
+                + sizeof(struct log_footer);
+     }
+ }
+
+ /* Rebuild a BlueSkyCloudLog item's link array from raw journal payload data.
+  * The payload is three concatenated regions: len1 bytes of object data, len2
+  * bytes of serialized BlueSkyCloudID links, and len3 bytes of cloud pointers
+  * (the commented-out data1/data3 casts show the layout; data3 is presently
+  * unused here).  Any cached data string is dropped and the item is marked as
+  * residing in the journal. */
+ static void reload_item(BlueSkyCloudLog *log_item,
+ const char *data,
+ size_t len1, size_t len2, size_t len3)
+ {
+ BlueSkyFS *fs = log_item->fs;
+ /*const char *data1 = data;*/
+ const BlueSkyCloudID *data2
+ = (const BlueSkyCloudID *)(data + len1);
+ /*const BlueSkyCloudPointer *data3
+ = (const BlueSkyCloudPointer *)(data + len1 + len2);*/
+
+ bluesky_string_unref(log_item->data);
+ log_item->data = NULL;
+ log_item->location_flags = CLOUDLOG_JOURNAL;
+
+ // id0 is the all-zero ID, used below to recognize empty link slots.
+ BlueSkyCloudID id0;
+ memset(&id0, 0, sizeof(id0));
+
+ int link_count = len2 / sizeof(BlueSkyCloudID);
+ GArray *new_links = g_array_new(FALSE, TRUE, sizeof(BlueSkyCloudLog *));
+ for (int i = 0; i < link_count; i++) {
+ BlueSkyCloudID id = data2[i];
+ BlueSkyCloudLog *ref = NULL;
+ if (memcmp(&id, &id0, sizeof(BlueSkyCloudID)) != 0) {
+ // Resolve the ID to a live object, taking a reference if one exists.
+ g_mutex_lock(fs->lock);
+ ref = g_hash_table_lookup(fs->locations, &id);
+ if (ref != NULL) {
+ bluesky_cloudlog_ref(ref);
+ }
+ g_mutex_unlock(fs->lock);
+ }
+ g_array_append_val(new_links, ref);
+ }
+
+ /* Release the references held through the old link array before swapping
+ * in the newly-built one. */
+ for (int i = 0; i < log_item->links->len; i++) {
+ BlueSkyCloudLog *c = g_array_index(log_item->links,
+ BlueSkyCloudLog *, i);
+ bluesky_cloudlog_unref(c);
+ }
+ g_array_unref(log_item->links);
+ log_item->links = new_links;
+ }
+
+ /* Second replay pass over one journal segment: starting at start_offset,
+  * re-create the in-memory cloud log item for every valid journal entry and,
+  * for inode records, deserialize the inode state into the filesystem.  Each
+  * reloaded item is prepended to *objects with the reference taken by
+  * bluesky_cloudlog_get still held, so the caller can drop them all at the
+  * end of replay. */
+ static void bluesky_replay_scan_journal2(BlueSkyFS *fs, GList **objects,
+                                          int log_seq, int start_offset,
+                                          const char *buf, size_t len)
+ {
+     const struct log_header *header;
+     off_t offset = start_offset;
+
+     while (validate_journal_item(buf, len, offset)) {
+         header = (const struct log_header *)(buf + offset);
+         /* %jd with an intmax_t cast is the portable way to print off_t
+          * (the old %zd expects ssize_t). */
+         g_print("In replay found valid item at offset %jd\n",
+                 (intmax_t)offset);
+         size_t size1 = GUINT32_FROM_LE(header->size1);
+         size_t size = size1
+                       + GUINT32_FROM_LE(header->size2)
+                       + GUINT32_FROM_LE(header->size3);
+
+         BlueSkyCloudLog *log_item = bluesky_cloudlog_get(fs, header->id);
+         g_mutex_lock(log_item->lock);
+         *objects = g_list_prepend(*objects, log_item);
+
+         log_item->inum = GUINT64_FROM_LE(header->inum);
+         reload_item(log_item, buf + offset + sizeof(struct log_header),
+                     size1,
+                     GUINT32_FROM_LE(header->size2),
+                     GUINT32_FROM_LE(header->size3));
+         log_item->log_seq = log_seq;
+         log_item->log_offset = offset + sizeof(struct log_header);
+         /* BUG FIX: size1 is stored little-endian like every other header
+          * field; the old code assigned the raw header->size1 without the
+          * GUINT32_FROM_LE conversion, which is wrong on big-endian hosts. */
+         log_item->log_size = size1;
+
+         /* Temporarily attach a copy of the item's data so the inode
+          * deserialization below can read it. */
+         bluesky_string_unref(log_item->data);
+         log_item->data = bluesky_string_new(
+             g_memdup(buf + offset + sizeof(struct log_header), size1), size1);
+
+         /* For any inodes which were read from the journal, deserialize the
+          * inode information, overwriting any old inode data. */
+         if (header->type - '0' == LOGTYPE_INODE) {
+             uint64_t inum = GUINT64_FROM_LE(header->inum);
+             BlueSkyInode *inode;
+             g_mutex_lock(fs->lock);
+             inode = (BlueSkyInode *)g_hash_table_lookup(fs->inodes, &inum);
+             if (inode == NULL) {
+                 inode = bluesky_new_inode(inum, fs, BLUESKY_PENDING);
+                 inode->change_count = 0;
+                 bluesky_insert_inode(fs, inode);
+             }
+             g_mutex_lock(inode->lock);
+             bluesky_inode_free_resources(inode);
+             if (!bluesky_deserialize_inode(inode, log_item))
+                 g_print("Error deserializing inode %"PRIu64"\n", inum);
+             fs->next_inum = MAX(fs->next_inum, inum + 1);
+             /* Move the inode to the head of the accessed and dirty lists,
+              * clear its unlogged state (its data is now journaled), and make
+              * this item the committed version. */
+             bluesky_list_unlink(&fs->accessed_list, inode->accessed_list);
+             inode->accessed_list = bluesky_list_prepend(&fs->accessed_list,
+                                                         inode);
+             bluesky_list_unlink(&fs->dirty_list, inode->dirty_list);
+             inode->dirty_list = bluesky_list_prepend(&fs->dirty_list, inode);
+             bluesky_list_unlink(&fs->unlogged_list, inode->unlogged_list);
+             inode->unlogged_list = NULL;
+             inode->change_cloud = inode->change_commit;
+             bluesky_cloudlog_ref(log_item);
+             bluesky_cloudlog_unref(inode->committed_item);
+             inode->committed_item = log_item;
+             g_mutex_unlock(inode->lock);
+             g_mutex_unlock(fs->lock);
+         }
+         /* The data copy was only needed during deserialization. */
+         bluesky_string_unref(log_item->data);
+         log_item->data = NULL;
+         g_mutex_unlock(log_item->lock);
+
+         offset += sizeof(struct log_header) + size + sizeof(struct log_footer);
+     }
+ }
+
+void bluesky_replay(BlueSkyFS *fs)
+{
+ BlueSkyLog *log = fs->log;
+ GList *logfiles = directory_contents(log->log_directory);
+
+ /* Scan through log files in reverse order to find the most recent commit
+ * record. */
+ logfiles = g_list_reverse(logfiles);
+ uint32_t seq_num = 0, start_offset = 0;
+ while (logfiles != NULL) {
+ char *filename = g_strdup_printf("%s/%s", log->log_directory,
+ (char *)logfiles->data);
+ g_print("Scanning file %s\n", filename);
+ GMappedFile *map = g_mapped_file_new(filename, FALSE, NULL);
+ if (map == NULL) {
+ g_warning("Mapping logfile %s failed!\n", filename);
+ } else {
+ bluesky_replay_scan_journal(g_mapped_file_get_contents(map),
+ g_mapped_file_get_length(map),
+ &seq_num, &start_offset);
+ g_mapped_file_unref(map);
+ }
+ g_free(filename);
+
+ g_free(logfiles->data);
+ logfiles = g_list_delete_link(logfiles, logfiles);
+ if (seq_num != 0 || start_offset != 0)
+ break;
+ }
+ g_list_foreach(logfiles, (GFunc)g_free, NULL);
+ g_list_free(logfiles);
+
+ /* Now, scan forward starting from the given point in the log to
+ * reconstruct all filesystem state. As we reload objects we hold a
+ * reference to each loaded object. At the end we free all these
+ * references, so that any objects which were not linked into persistent
+ * filesystem data structures are freed. */
+ GList *objects = NULL;
+ while (TRUE) {
+ char *filename = g_strdup_printf("%s/journal-%08d",
+ log->log_directory, seq_num);
+ g_print("Replaying file %s from offset %d\n", filename, start_offset);
+ GMappedFile *map = g_mapped_file_new(filename, FALSE, NULL);
+ g_free(filename);
+ if (map == NULL) {
+ g_warning("Mapping logfile failed, assuming end of journal\n");
+ break;
+ }
+
+ bluesky_replay_scan_journal2(fs, &objects, seq_num, start_offset,
+ g_mapped_file_get_contents(map),
+ g_mapped_file_get_length(map));
+ g_mapped_file_unref(map);
+ seq_num++;
+ start_offset = 0;
+ }
+
+ while (objects != NULL) {
+ bluesky_cloudlog_unref((BlueSkyCloudLog *)objects->data);
+ objects = g_list_delete_link(objects, objects);
+ }