X-Git-Url: http://git.vrable.net/?a=blobdiff_plain;f=bluesky%2Flog.c;h=a3acf104239fe56e7a24b5d44c39d94d1aac5dc0;hb=d27b934a06369794d21a3eeaf86c55f942518956;hp=c3d0c2b77e490e541d518b34051e16671c89dd0d;hpb=7298b7a416aed5be1b82b54015c6944b9379eee6;p=bluesky.git diff --git a/bluesky/log.c b/bluesky/log.c index c3d0c2b..a3acf10 100644 --- a/bluesky/log.c +++ b/bluesky/log.c @@ -19,6 +19,7 @@ #include #include #include +#include #include "bluesky-private.h" @@ -35,7 +36,24 @@ // Rough size limit for a log segment. This is not a firm limit and there are // no absolute guarantees on the size of a log segment. -#define LOG_SEGMENT_SIZE (1 << 23) +#define LOG_SEGMENT_SIZE (1 << 22) + +#define HEADER_MAGIC 0x676f4c0a +#define FOOTER_MAGIC 0x2e435243 + +struct log_header { + uint32_t magic; // HEADER_MAGIC + uint8_t type; // Object type + '0' + uint32_t offset; // Starting byte offset of the log header + uint32_t size; // Size of the data item (bytes) + uint64_t inum; // Inode which owns this data, if any + BlueSkyCloudID id; // Object identifier +} __attribute__((packed)); + +struct log_footer { + uint32_t magic; // FOOTER_MAGIC + uint32_t crc; // Computed from log_header to log_footer.magic +} __attribute__((packed)); static void writebuf(int fd, const char *buf, size_t len) { @@ -50,6 +68,73 @@ static void writebuf(int fd, const char *buf, size_t len) } } +static void log_commit(BlueSkyLog *log) +{ + int batchsize = 0; + + if (log->fd < 0) + return; + + fdatasync(log->fd); + while (log->committed != NULL) { + BlueSkyCloudLog *item = (BlueSkyCloudLog *)log->committed->data; + g_mutex_lock(item->lock); + bluesky_cloudlog_stats_update(item, -1); + item->pending_write &= ~CLOUDLOG_JOURNAL; + item->location_flags |= CLOUDLOG_JOURNAL; + bluesky_cloudlog_stats_update(item, 1); + g_cond_signal(item->cond); + g_mutex_unlock(item->lock); + log->committed = g_slist_delete_link(log->committed, log->committed); + bluesky_cloudlog_unref(item); + batchsize++; + } + + if (bluesky_verbose && batchsize > 1) + g_print("Log batch size: %d\n", batchsize); +} + +static gboolean log_open(BlueSkyLog *log) +{ + char logname[64]; + + if (log->fd >= 0) { + log_commit(log); + close(log->fd); + log->seq_num++; + log->fd = -1; + } + + if (log->current_log != NULL) { + bluesky_cachefile_unref(log->current_log); + log->current_log = NULL; + } + + while (log->fd < 0) { + g_snprintf(logname, sizeof(logname), "journal-%08d", log->seq_num); + log->fd = openat(log->dirfd, logname, O_CREAT|O_WRONLY|O_EXCL, 0600); + if (log->fd < 0 && errno == EEXIST) { + fprintf(stderr, "Log file %s already exists...\n", logname); + log->seq_num++; + continue; + } else if (log->fd < 0) { + fprintf(stderr, "Error opening logfile %s: %m\n", logname); + return FALSE; + } + } + + log->current_log = bluesky_cachefile_lookup(log->fs, -1, log->seq_num); + g_assert(log->current_log != NULL); + g_mutex_unlock(log->current_log->lock); + + if (ftruncate(log->fd, LOG_SEGMENT_SIZE) < 0) { + fprintf(stderr, "Unable to truncate logfile %s: %m\n", logname); + } + fsync(log->fd); + fsync(log->dirfd); + return TRUE; +} + /* All log writes (at least for a single log) are made by one thread, so we * don't need to worry about concurrent access to the log file. Log items to * write are pulled off a queue (and so may be posted by any thread). @@ -66,67 +151,88 @@ static gpointer log_thread(gpointer d) { BlueSkyLog *log = (BlueSkyLog *)d; - /* If there are multiple log items to write, we may write more than one - * before calling fsync(). 
The committed list is used to track all the - * items that should be marked as committed once that final fsync() is - * done. */ - GSList *committed = NULL; - - int dirfd = open(log->log_directory, O_DIRECTORY); - if (dirfd < 0) { - fprintf(stderr, "Unable to open logging directory: %m\n"); - return NULL; - } - while (TRUE) { if (log->fd < 0) { - char logfile[64]; - g_snprintf(logfile, sizeof(logfile), "log-%08d", log->seq_num); - log->fd = openat(dirfd, logfile, O_CREAT|O_WRONLY|O_EXCL, 0600); - if (log->fd < 0 && errno == EEXIST) { - fprintf(stderr, "Log file %s already exists...\n", logfile); - log->seq_num++; - continue; - } else if (log->fd < 0) { - fprintf(stderr, "Error opening logfile %s: %m\n", logfile); + if (!log_open(log)) { return NULL; } - fsync(log->fd); - fsync(dirfd); } - BlueSkyLogItem *item = (BlueSkyLogItem *)g_async_queue_pop(log->queue); + BlueSkyCloudLog *item + = (BlueSkyCloudLog *)g_async_queue_pop(log->queue); g_mutex_lock(item->lock); - writebuf(log->fd, item->key, strlen(item->key)); - writebuf(log->fd, item->data->data, item->data->len); - committed = g_slist_prepend(committed, item); - - /* Force an fsync either if we will be closing this log segment and - * opening a new file, or if there are no other log items currently - * waiting to be written. */ - off_t logsize = lseek(log->fd, 0, SEEK_CUR); - if (logsize >= LOG_SEGMENT_SIZE - || g_async_queue_length(log->queue) <= 0) - { - int batchsize = 0; - fdatasync(log->fd); - while (committed != NULL) { - item = (BlueSkyLogItem *)committed->data; - item->committed = TRUE; - g_cond_signal(item->cond); - g_mutex_unlock(item->lock); - committed = g_slist_delete_link(committed, committed); - batchsize++; - } - /* if (batchsize > 1) - g_print("Log batch size: %d\n", batchsize); */ + g_assert(item->data != NULL); + + /* The item may have already been written to the journal... */ + if ((item->location_flags | item->pending_write) & CLOUDLOG_JOURNAL) { + g_mutex_unlock(item->lock); + bluesky_cloudlog_unref(item); + g_atomic_int_add(&item->data_lock_count, -1); + continue; } - if (logsize < 0 || logsize >= LOG_SEGMENT_SIZE) { - close(log->fd); - log->fd = -1; - log->seq_num++; + bluesky_cloudlog_stats_update(item, -1); + item->pending_write |= CLOUDLOG_JOURNAL; + bluesky_cloudlog_stats_update(item, 1); + + struct log_header header; + struct log_footer footer; + size_t size = sizeof(header) + sizeof(footer) + item->data->len; + off_t offset = 0; + if (log->fd >= 0) + offset = lseek(log->fd, 0, SEEK_CUR); + + /* Check whether the item would overflow the allocated journal size. + * If so, start a new log segment. We only allow oversized log + * segments if they contain a single log entry. 
*/ + if (offset + size >= LOG_SEGMENT_SIZE && offset > 0) { + log_open(log); + offset = 0; } + + header.magic = GUINT32_TO_LE(HEADER_MAGIC); + header.offset = GUINT32_TO_LE(offset); + header.size = GUINT32_TO_LE(item->data->len); + header.type = item->type + '0'; + header.id = item->id; + header.inum = GUINT64_TO_LE(item->inum); + footer.magic = GUINT32_TO_LE(FOOTER_MAGIC); + + uint32_t crc = BLUESKY_CRC32C_SEED; + + writebuf(log->fd, (const char *)&header, sizeof(header)); + crc = crc32c(crc, (const char *)&header, sizeof(header)); + + writebuf(log->fd, item->data->data, item->data->len); + crc = crc32c(crc, item->data->data, item->data->len); + + crc = crc32c(crc, (const char *)&footer, + sizeof(footer) - sizeof(uint32_t)); + footer.crc = crc32c_finalize(crc); + writebuf(log->fd, (const char *)&footer, sizeof(footer)); + + item->log_seq = log->seq_num; + item->log_offset = offset + sizeof(header); + item->log_size = item->data->len; + + offset += sizeof(header) + sizeof(footer) + item->data->len; + + /* Replace the log item's string data with a memory-mapped copy of the + * data, now that it has been written to the log file. (Even if it + * isn't yet on disk, it should at least be in the page cache and so + * available to memory map.) */ + bluesky_string_unref(item->data); + item->data = NULL; + bluesky_cloudlog_fetch(item); + + log->committed = g_slist_prepend(log->committed, item); + g_atomic_int_add(&item->data_lock_count, -1); + g_mutex_unlock(item->lock); + + /* Force an if there are no other log items currently waiting to be + * written. */ + if (g_async_queue_length(log->queue) <= 0) + log_commit(log); } return NULL; @@ -140,42 +246,492 @@ BlueSkyLog *bluesky_log_new(const char *log_directory) log->fd = -1; log->seq_num = 0; log->queue = g_async_queue_new(); + log->mmap_lock = g_mutex_new(); + log->mmap_cache = g_hash_table_new(g_str_hash, g_str_equal); + + log->dirfd = open(log->log_directory, O_DIRECTORY); + if (log->dirfd < 0) { + fprintf(stderr, "Unable to open logging directory: %m\n"); + return NULL; + } g_thread_create(log_thread, log, FALSE, NULL); return log; } -BlueSkyLogItem *bluesky_log_item_new() +void bluesky_log_item_submit(BlueSkyCloudLog *item, BlueSkyLog *log) { - BlueSkyLogItem *item = g_new(BlueSkyLogItem, 1); - item->committed = FALSE; - item->lock = g_mutex_new(); - item->cond = g_cond_new(); - item->key = NULL; - item->data = NULL; - return item; + bluesky_cloudlog_ref(item); + g_atomic_int_add(&item->data_lock_count, 1); + g_async_queue_push(log->queue, item); } -void bluesky_log_item_submit(BlueSkyLogItem *item, BlueSkyLog *log) +void bluesky_log_finish_all(GList *log_items) { - g_async_queue_push(log->queue, item); + while (log_items != NULL) { + BlueSkyCloudLog *item = (BlueSkyCloudLog *)log_items->data; + + g_mutex_lock(item->lock); + while ((item->pending_write & CLOUDLOG_JOURNAL)) + g_cond_wait(item->cond, item->lock); + g_mutex_unlock(item->lock); + bluesky_cloudlog_unref(item); + + log_items = g_list_delete_link(log_items, log_items); + } } -static void bluesky_log_item_free(BlueSkyLogItem *item) +/* Memory-map the given log object into memory (read-only) and return a pointer + * to it. 
*/ +static int page_size = 0; + +void bluesky_cachefile_unref(BlueSkyCacheFile *cachefile) { - g_free(item->key); - bluesky_string_unref(item->data); - g_mutex_free(item->lock); - g_cond_free(item->cond); - g_free(item); + g_atomic_int_add(&cachefile->refcount, -1); } -void bluesky_log_item_finish(BlueSkyLogItem *item) +static void cloudlog_fetch_complete(BlueSkyStoreAsync *async, + BlueSkyCacheFile *cachefile); + +static void cloudlog_fetch_start(BlueSkyCacheFile *cachefile) { - g_mutex_lock(item->lock); - while (!item->committed) - g_cond_wait(item->cond, item->lock); - g_mutex_unlock(item->lock); - bluesky_log_item_free(item); + g_atomic_int_inc(&cachefile->refcount); + cachefile->fetching = TRUE; + g_print("Starting fetch of %s from cloud\n", cachefile->filename); + BlueSkyStoreAsync *async = bluesky_store_async_new(cachefile->fs->store); + async->op = STORE_OP_GET; + async->key = g_strdup(cachefile->filename); + bluesky_store_async_add_notifier(async, + (GFunc)cloudlog_fetch_complete, + cachefile); + bluesky_store_async_submit(async); + bluesky_store_async_unref(async); +} + +static void cloudlog_fetch_complete(BlueSkyStoreAsync *async, + BlueSkyCacheFile *cachefile) +{ + g_print("Fetch of %s from cloud complete, status = %d\n", + async->key, async->result); + + g_mutex_lock(cachefile->lock); + if (async->result >= 0) { + char *pathname = g_strdup_printf("%s/%s", + cachefile->log->log_directory, + cachefile->filename); + if (!g_file_set_contents(pathname, async->data->data, async->data->len, + NULL)) + g_print("Error writing out fetched file to cache!\n"); + g_free(pathname); + + cachefile->fetching = FALSE; + cachefile->ready = TRUE; + } else { + g_print("Error fetching from cloud, retrying...\n"); + cloudlog_fetch_start(cachefile); + } + + bluesky_cachefile_unref(cachefile); + g_cond_broadcast(cachefile->cond); + g_mutex_unlock(cachefile->lock); +} + +/* Find the BlueSkyCacheFile object for the given journal or cloud log segment. + * Returns the object in the locked state and with a reference taken. */ +BlueSkyCacheFile *bluesky_cachefile_lookup(BlueSkyFS *fs, + int clouddir, int log_seq) +{ + if (page_size == 0) { + page_size = getpagesize(); + } + + BlueSkyLog *log = fs->log; + + struct stat statbuf; + char logname[64]; + int type; + + // A request for a local log file + if (clouddir < 0) { + sprintf(logname, "journal-%08d", log_seq); + type = CLOUDLOG_JOURNAL; + } else { + sprintf(logname, "log-%08d-%08d", clouddir, log_seq); + type = CLOUDLOG_CLOUD; + } + + BlueSkyCacheFile *map; + g_mutex_lock(log->mmap_lock); + map = g_hash_table_lookup(log->mmap_cache, logname); + + if (map == NULL + && type == CLOUDLOG_JOURNAL + && fstatat(log->dirfd, logname, &statbuf, 0) < 0) { + /* A stale reference to a journal file which doesn't exist any longer + * because it was reclaimed. Return NULL. 
*/ + } else if (map == NULL) { + g_print("Adding cache file %s\n", logname); + + map = g_new0(BlueSkyCacheFile, 1); + map->fs = fs; + map->type = type; + map->lock = g_mutex_new(); + map->type = type; + g_mutex_lock(map->lock); + map->cond = g_cond_new(); + map->filename = g_strdup(logname); + map->log_seq = log_seq; + map->log = log; + g_atomic_int_set(&map->mapcount, 0); + g_atomic_int_set(&map->refcount, 0); + + g_hash_table_insert(log->mmap_cache, map->filename, map); + + // If the log file is stored in the cloud, we may need to fetch it + if (clouddir >= 0) + cloudlog_fetch_start(map); + } else { + g_mutex_lock(map->lock); + } + + g_mutex_unlock(log->mmap_lock); + if (map != NULL) + g_atomic_int_inc(&map->refcount); + return map; +} + +BlueSkyRCStr *bluesky_log_map_object(BlueSkyFS *fs, int log_dir, + int log_seq, int log_offset, int log_size) +{ + if (page_size == 0) { + page_size = getpagesize(); + } + + BlueSkyLog *log = fs->log; + BlueSkyCacheFile *map = bluesky_cachefile_lookup(fs, log_dir, log_seq); + + if (map == NULL) { + return NULL; + } + + if (map->addr == NULL) { + while (!map->ready && map->fetching) { + g_print("Waiting for log segment to be fetched from cloud...\n"); + g_cond_wait(map->cond, map->lock); + } + + int fd = openat(log->dirfd, map->filename, O_RDONLY); + + if (fd < 0) { + fprintf(stderr, "Error opening logfile %s: %m\n", map->filename); + bluesky_cachefile_unref(map); + g_mutex_unlock(map->lock); + return NULL; + } + + off_t length = lseek(fd, 0, SEEK_END); + map->addr = (const char *)mmap(NULL, length, PROT_READ, MAP_SHARED, + fd, 0); + g_atomic_int_add(&log->disk_used, -(map->len / 1024)); + map->len = length; + g_atomic_int_add(&log->disk_used, map->len / 1024); + + g_print("Re-mapped log segment %d...\n", log_seq); + g_atomic_int_inc(&map->refcount); + + close(fd); + } + + g_mutex_unlock(log->mmap_lock); + + BlueSkyRCStr *str; + map->atime = bluesky_get_current_time(); + str = bluesky_string_new_from_mmap(map, log_offset, log_size); + bluesky_cachefile_unref(map); + g_mutex_unlock(map->lock); + return str; +} + +void bluesky_mmap_unref(BlueSkyCacheFile *mmap) +{ + if (mmap == NULL) + return; + + if (g_atomic_int_dec_and_test(&mmap->mapcount)) { + g_mutex_lock(mmap->lock); + if (g_atomic_int_get(&mmap->mapcount) == 0) { + g_print("Unmapped log segment %d...\n", mmap->log_seq); + munmap((void *)mmap->addr, mmap->len); + mmap->addr = NULL; + g_atomic_int_add(&mmap->refcount, -1); + } + g_mutex_unlock(mmap->lock); + } +} + +/* Scan through all currently-stored files in the journal/cache and garbage + * collect old unused ones, if needed. */ +static void gather_cachefiles(gpointer key, gpointer value, gpointer user_data) +{ + GList **files = (GList **)user_data; + *files = g_list_prepend(*files, value); +} + +static gint compare_cachefiles(gconstpointer a, gconstpointer b) +{ + int64_t ta, tb; + + ta = ((BlueSkyCacheFile *)a)->atime; + tb = ((BlueSkyCacheFile *)b)->atime; + if (ta < tb) + return -1; + else if (ta > tb) + return 1; + else + return 0; +} + +void bluesky_cachefile_gc(BlueSkyFS *fs) +{ + GList *files = NULL; + + g_mutex_lock(fs->log->mmap_lock); + g_hash_table_foreach(fs->log->mmap_cache, gather_cachefiles, &files); + + /* Sort based on atime. The atime should be stable since it shouln't be + * updated except by threads which can grab the mmap_lock, which we already + * hold. 
*/ + files = g_list_sort(files, compare_cachefiles); + + /* Walk the list of files, starting with the oldest, deleting files if + * possible until enough space has been reclaimed. */ + g_print("\nScanning cache: (total size = %d kB)\n", fs->log->disk_used); + while (files != NULL) { + BlueSkyCacheFile *cachefile = (BlueSkyCacheFile *)files->data; + /* Try to lock the structure, but if the lock is held by another thread + * then we'll just skip the file on this pass. */ + if (g_mutex_trylock(cachefile->lock)) { + int64_t age = bluesky_get_current_time() - cachefile->atime; + g_print("%s addr=%p mapcount=%d refcount=%d atime_age=%f", + cachefile->filename, cachefile->addr, cachefile->mapcount, + cachefile->refcount, age / 1e6); + if (cachefile->fetching) + g_print(" (fetching)"); + g_print("\n"); + + gboolean deletion_candidate = FALSE; + if (g_atomic_int_get(&fs->log->disk_used) + > bluesky_options.cache_size + && g_atomic_int_get(&cachefile->refcount) == 0 + && g_atomic_int_get(&cachefile->mapcount) == 0) + { + deletion_candidate = TRUE; + } + + /* Don't allow journal files to be reclaimed until all data is + * known to be durably stored in the cloud. */ + if (cachefile->type == CLOUDLOG_JOURNAL + && cachefile->log_seq >= fs->log->journal_watermark) + { + deletion_candidate = FALSE; + } + + if (deletion_candidate) { + g_print(" ...deleting\n"); + if (unlinkat(fs->log->dirfd, cachefile->filename, 0) < 0) { + fprintf(stderr, "Unable to unlink journal %s: %m\n", + cachefile->filename); + } + + g_atomic_int_add(&fs->log->disk_used, -(cachefile->len / 1024)); + g_hash_table_remove(fs->log->mmap_cache, cachefile->filename); + g_mutex_unlock(cachefile->lock); + g_mutex_free(cachefile->lock); + g_cond_free(cachefile->cond); + g_free(cachefile->filename); + g_free(cachefile); + } else { + g_mutex_unlock(cachefile->lock); + } + } + files = g_list_delete_link(files, files); + } + g_list_free(files); + + g_mutex_unlock(fs->log->mmap_lock); +} + +/******************************* JOURNAL REPLAY ******************************* + * The journal replay code is used to recover filesystem state after a + * filesystem restart. We first look for the most recent commit record in the + * journal, which indicates the point before which all data in the journal has + * also been committed to the cloud. Then, we read in all data in the log past + * that point. 
+ */ +static GList *directory_contents(const char *dirname) +{ + GList *contents = NULL; + GDir *dir = g_dir_open(dirname, 0, NULL); + if (dir == NULL) { + g_warning("Unable to open journal directory: %s", dirname); + return NULL; + } + + const gchar *file; + while ((file = g_dir_read_name(dir)) != NULL) { + if (strncmp(file, "journal-", 8) == 0) + contents = g_list_prepend(contents, g_strdup(file)); + } + g_dir_close(dir); + + contents = g_list_sort(contents, (GCompareFunc)strcmp); + + return contents; +} + +static gboolean validate_journal_item(const char *buf, size_t len, off_t offset) +{ + const struct log_header *header; + const struct log_footer *footer; + + if (offset + sizeof(struct log_header) + sizeof(struct log_footer) > len) + return FALSE; + + header = (const struct log_header *)(buf + offset); + if (GUINT32_FROM_LE(header->magic) != HEADER_MAGIC) + return FALSE; + if (GUINT32_FROM_LE(header->offset) != offset) + return FALSE; + size_t size = GUINT32_FROM_LE(header->size); + + off_t footer_offset = offset + sizeof(struct log_header) + size; + if (footer_offset + sizeof(struct log_footer) > len) + return FALSE; + footer = (const struct log_footer *)(buf + footer_offset); + + if (GUINT32_FROM_LE(footer->magic) != FOOTER_MAGIC) + return FALSE; + + uint32_t crc = crc32c(BLUESKY_CRC32C_SEED, buf + offset, + sizeof(struct log_header) + sizeof(struct log_footer) + + size); + if (crc != BLUESKY_CRC32C_VALIDATOR) { + g_warning("Journal entry failed to validate: CRC %08x != %08x", + crc, BLUESKY_CRC32C_VALIDATOR); + return FALSE; + } + + return TRUE; +} + +/* Scan through a journal segment to extract correctly-written items (those + * that pass sanity checks and have a valid checksum). */ +static void bluesky_replay_scan_journal(const char *buf, size_t len) +{ + const struct log_header *header; + off_t offset = 0; + + while (validate_journal_item(buf, len, offset)) { + header = (const struct log_header *)(buf + offset); + size_t size = GUINT32_FROM_LE(header->size); + offset += sizeof(struct log_header) + size + sizeof(struct log_footer); + } +} + +static void bluesky_replay_scan_journal2(BlueSkyFS *fs, GList **objects, + int log_seq, + const char *buf, size_t len) +{ + const struct log_header *header; + off_t offset = 0; + + while (validate_journal_item(buf, len, offset)) { + header = (const struct log_header *)(buf + offset); + g_print("In replay found valid item at offset %zd\n", offset); + size_t size = GUINT32_FROM_LE(header->size); + + g_mutex_lock(fs->lock); + BlueSkyCloudLog *log_item; + log_item = g_hash_table_lookup(fs->locations, &header->id); + if (log_item == NULL) { + log_item = bluesky_cloudlog_new(fs, &header->id); + g_hash_table_insert(fs->locations, &log_item->id, log_item); + g_mutex_lock(log_item->lock); + } else { + bluesky_cloudlog_ref(log_item); + g_mutex_lock(log_item->lock); + } + g_mutex_unlock(fs->lock); + *objects = g_list_prepend(*objects, log_item); + + bluesky_string_unref(log_item->data); + log_item->location_flags = CLOUDLOG_JOURNAL; + log_item->data = NULL; + log_item->log_seq = log_seq; + log_item->log_offset = offset + sizeof(struct log_header); + log_item->log_size = header->size; + g_mutex_unlock(log_item->lock); + + offset += sizeof(struct log_header) + size + sizeof(struct log_footer); + } +} + +void bluesky_replay(BlueSkyFS *fs) +{ + BlueSkyLog *log = fs->log; + GList *logfiles = directory_contents(log->log_directory); + + /* Scan through log files in reverse order to find the most recent commit + * record. 
*/ + logfiles = g_list_reverse(logfiles); + while (logfiles != NULL) { + char *filename = g_strdup_printf("%s/%s", log->log_directory, + (char *)logfiles->data); + g_print("Scanning file %s\n", filename); + GMappedFile *map = g_mapped_file_new(filename, FALSE, NULL); + if (map == NULL) { + g_warning("Mapping logfile %s failed!\n", filename); + } else { + bluesky_replay_scan_journal(g_mapped_file_get_contents(map), + g_mapped_file_get_length(map)); + g_mapped_file_unref(map); + } + g_free(filename); + + g_free(logfiles->data); + logfiles = g_list_delete_link(logfiles, logfiles); + } + g_list_foreach(logfiles, (GFunc)g_free, NULL); + g_list_free(logfiles); + + /* Now, scan forward starting from the given point in the log to + * reconstruct all filesystem state. As we reload objects we hold a + * reference to each loaded object. At the end we free all these + * references, so that any objects which were not linked into persistent + * filesystem data structures are freed. */ + GList *objects = NULL; + int seq_num = 0; + while (TRUE) { + char *filename = g_strdup_printf("%s/journal-%08d", + log->log_directory, seq_num); + g_print("Replaying file %s\n", filename); + GMappedFile *map = g_mapped_file_new(filename, FALSE, NULL); + g_free(filename); + if (map == NULL) { + g_warning("Mapping logfile failed, assuming end of journal\n"); + break; + } + + bluesky_replay_scan_journal2(fs, &objects, seq_num, + g_mapped_file_get_contents(map), + g_mapped_file_get_length(map)); + g_mapped_file_unref(map); + seq_num++; + } + + while (objects != NULL) { + bluesky_cloudlog_unref((BlueSkyCloudLog *)objects->data); + objects = g_list_delete_link(objects, objects); + } }
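
The patch above defines the journal's on-disk record format (struct log_header, the raw object data, struct log_footer) and splits the writing and checking of that format between log_thread() and validate_journal_item(). The standalone sketch below, which is not part of the patch, pulls the two halves together: it frames one record into a buffer and then validates it by recomputing the checksum and comparing it with the stored value. It substitutes a minimal bit-at-a-time CRC-32C (seed 0xffffffff, final bit inversion) for the project's crc32c()/crc32c_finalize() helpers, assumes a 16-byte BlueSkyCloudID and a little-endian host, and skips the residue-constant check used by the real code; those details are illustrative assumptions, not taken from the BlueSky source.

/* Illustrative sketch only -- not part of the patch.  Frames one journal
 * record (header + data + footer) the way log_thread() does, then checks it
 * the way validate_journal_item() does, except that the checksum is simply
 * recomputed and compared rather than tested against the project's
 * BLUESKY_CRC32C_VALIDATOR constant.  A little-endian host and a 16-byte
 * object identifier are assumed. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>

#define HEADER_MAGIC 0x676f4c0a
#define FOOTER_MAGIC 0x2e435243

struct cloud_id { uint8_t bytes[16]; };   /* stand-in for BlueSkyCloudID */

struct log_header {
    uint32_t magic;     /* HEADER_MAGIC */
    uint8_t  type;      /* object type + '0' */
    uint32_t offset;    /* starting byte offset of this header */
    uint32_t size;      /* size of the data item, in bytes */
    uint64_t inum;      /* owning inode, if any */
    struct cloud_id id;
} __attribute__((packed));

struct log_footer {
    uint32_t magic;     /* FOOTER_MAGIC */
    uint32_t crc;       /* covers header through footer.magic */
} __attribute__((packed));

/* Minimal bit-at-a-time CRC-32C, standing in for crc32c()/crc32c_finalize(). */
static uint32_t crc32c_update(uint32_t crc, const void *buf, size_t len)
{
    const uint8_t *p = buf;
    while (len--) {
        crc ^= *p++;
        for (int i = 0; i < 8; i++)
            crc = (crc >> 1) ^ (0x82f63b78u & -(crc & 1));
    }
    return crc;
}

/* Append one framed record to buf at *offset, as the journal writer does.
 * The caller must ensure the buffer has room for the whole record. */
static void frame_record(char *buf, size_t *offset, uint8_t type,
                         uint64_t inum, const void *data, uint32_t len)
{
    struct log_header h = { HEADER_MAGIC, (uint8_t)(type + '0'),
                            (uint32_t)*offset, len, inum, {{0}} };
    struct log_footer f = { FOOTER_MAGIC, 0 };

    uint32_t crc = 0xffffffffu;               /* assumed CRC seed */
    crc = crc32c_update(crc, &h, sizeof(h));
    crc = crc32c_update(crc, data, len);
    crc = crc32c_update(crc, &f, sizeof(f) - sizeof(uint32_t));
    f.crc = ~crc;                             /* assumed finalization */

    char *p = buf + *offset;
    memcpy(p, &h, sizeof(h));
    memcpy(p + sizeof(h), data, len);
    memcpy(p + sizeof(h) + len, &f, sizeof(f));
    *offset += sizeof(h) + len + sizeof(f);
}

/* Validate the record at the given offset by recomputing its checksum,
 * with the same bounds and magic-number checks as validate_journal_item(). */
static int record_valid(const char *buf, size_t buflen, size_t offset)
{
    struct log_header h;
    struct log_footer f;

    if (offset + sizeof(h) + sizeof(f) > buflen)
        return 0;
    memcpy(&h, buf + offset, sizeof(h));
    if (h.magic != HEADER_MAGIC || h.offset != offset)
        return 0;
    if (offset + sizeof(h) + h.size + sizeof(f) > buflen)
        return 0;
    memcpy(&f, buf + offset + sizeof(h) + h.size, sizeof(f));
    if (f.magic != FOOTER_MAGIC)
        return 0;

    uint32_t crc = crc32c_update(0xffffffffu, buf + offset,
                                 sizeof(h) + h.size + sizeof(f)
                                 - sizeof(uint32_t));
    return (uint32_t)~crc == f.crc;
}

int main(void)
{
    char journal[4096];
    size_t off = 0;
    frame_record(journal, &off, 3, 42, "hello, journal", 14);
    assert(record_valid(journal, sizeof(journal), 0));
    printf("%zu bytes framed; record at offset 0 validates\n", off);
    return 0;
}

The bounds checks mirror the replay code: a scan can advance record by record (header + size + footer) and stop at the first entry that fails validation, which is exactly how bluesky_replay_scan_journal() finds the end of good data after a crash.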
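
One detail of validate_journal_item() worth spelling out: it does not recompute the stored checksum and compare it directly; it runs crc32c over the entire record, stored CRC included, and compares the register against the fixed constant BLUESKY_CRC32C_VALIDATOR. That works because of a general property of reflected CRCs such as CRC-32C: appending the bit-inverted final CRC to any message, little-endian exactly as the footer lays it out in memory, always leaves the same residue in the register, independent of the message. The short demo below illustrates the property with the same stand-in CRC routine as above; it computes the constant at run time rather than asserting the project's particular value, since that value depends on the seed and finalization conventions of the real crc32c helpers.

/* Illustrative sketch only: why a full-record CRC of any valid journal entry
 * can be compared against a single fixed constant. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>

static uint32_t crc32c_update(uint32_t crc, const void *buf, size_t len)
{
    const uint8_t *p = buf;
    while (len--) {
        crc ^= *p++;
        for (int i = 0; i < 8; i++)
            crc = (crc >> 1) ^ (0x82f63b78u & -(crc & 1));
    }
    return crc;
}

/* CRC register after processing msg followed by its own finalized checksum,
 * appended little-endian just as the journal footer stores it. */
static uint32_t residue_of(const void *msg, size_t len)
{
    uint8_t rec[256];
    uint32_t fin = ~crc32c_update(0xffffffffu, msg, len);

    memcpy(rec, msg, len);
    rec[len + 0] = fin & 0xff;
    rec[len + 1] = (fin >> 8) & 0xff;
    rec[len + 2] = (fin >> 16) & 0xff;
    rec[len + 3] = (fin >> 24) & 0xff;
    return crc32c_update(0xffffffffu, rec, len + 4);
}

int main(void)
{
    uint32_t a = residue_of("inode 42", 8);
    uint32_t b = residue_of("a completely different journal record", 37);
    assert(a == b);   /* same residue, independent of the message */
    printf("full-record CRC of any valid record: 0x%08x\n", a);
    return 0;
}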