1 /* Blue Sky: File Systems in the Cloud
3 * Copyright (C) 2010 The Regents of the University of California
4 * Written by Michael Vrable <mvrable@cs.ucsd.edu>
10 #define _ATFILE_SOURCE
18 #include <sys/types.h>
24 #include "bluesky-private.h"
26 /* The logging layer for BlueSky. This is used to write filesystem changes
27 * durably to disk so that they can be recovered in the event of a system
/* The logging layer takes care of writing out a sequence of log records to
31 * disk. On disk, each record consists of a header, a data payload, and a
32 * footer. The footer contains a checksum of the record, meant to help with
33 * identifying corrupt log records (we would assume because the log record was
34 * only incompletely written out before a crash, which should only happen for
35 * log records that were not considered committed). */
37 // Rough size limit for a log segment. This is not a firm limit and there are
38 // no absolute guarantees on the size of a log segment.
39 #define LOG_SEGMENT_SIZE (1 << 22)
41 #define HEADER_MAGIC 0x676f4c0a
42 #define FOOTER_MAGIC 0x2e435243
/* Write exactly len bytes from buf to fd.  POSIX write() may transfer fewer
 * bytes than requested and may fail with EINTR when interrupted by a signal,
 * so loop until the whole buffer has been written.  Any other error is
 * treated as fatal (journal writes must not be silently dropped). */
static void writebuf(int fd, const char *buf, size_t len)
{
    while (len > 0) {
        ssize_t written;
        written = write(fd, buf, len);
        if (written < 0 && errno == EINTR)
            continue;           /* interrupted before any bytes: retry */
        g_assert(written >= 0);
        buf += written;
        len -= written;
    }
}
/* Mark every item on log->committed as durably written to the journal:
 * clear the pending-write and UNCOMMITTED flags, set the JOURNAL location
 * flag, wake any threads waiting on the item's condition variable, and drop
 * the reference the log thread held while the item was in flight. */
static void log_commit(BlueSkyLog *log)
    while (log->committed != NULL) {
        BlueSkyCloudLog *item = (BlueSkyCloudLog *)log->committed->data;
        g_mutex_lock(item->lock);
        /* Stats are decremented before and re-incremented after the flag
         * changes so the per-state counters stay consistent. */
        bluesky_cloudlog_stats_update(item, -1);
        item->pending_write &= ~CLOUDLOG_JOURNAL;
            = (item->location_flags & ~CLOUDLOG_UNCOMMITTED) | CLOUDLOG_JOURNAL;
        bluesky_cloudlog_stats_update(item, 1);
        /* Wake waiters in bluesky_log_finish_all() and similar. */
        g_cond_signal(item->cond);
        g_mutex_unlock(item->lock);
        log->committed = g_slist_delete_link(log->committed, log->committed);
        bluesky_cloudlog_unref(item);

    if (bluesky_verbose && batchsize > 1)
        g_print("Log batch size: %d\n", batchsize);
/* Open the next journal segment for writing.  Releases the cache-file
 * reference for the previous segment, creates journal-<seq_num> (O_EXCL so an
 * existing file is never clobbered), looks up its BlueSkyCacheFile entry, and
 * preallocates the file to LOG_SEGMENT_SIZE.  Returns whether the segment was
 * opened successfully. */
static gboolean log_open(BlueSkyLog *log)
    if (log->current_log != NULL) {
        bluesky_cachefile_unref(log->current_log);
        log->current_log = NULL;

    while (log->fd < 0) {
        g_snprintf(logname, sizeof(logname), "journal-%08d", log->seq_num);
        /* O_EXCL: fail rather than reuse a leftover journal file. */
        log->fd = openat(log->dirfd, logname, O_CREAT|O_WRONLY|O_EXCL, 0600);
        if (log->fd < 0 && errno == EEXIST) {
            fprintf(stderr, "Log file %s already exists...\n", logname);
        } else if (log->fd < 0) {
            fprintf(stderr, "Error opening logfile %s: %m\n", logname);

    /* -1 selects the journal namespace (vs. a cloud log directory). */
    log->current_log = bluesky_cachefile_lookup(log->fs, -1, log->seq_num);
    g_assert(log->current_log != NULL);
    g_mutex_unlock(log->current_log->lock);

    /* Preallocate the segment so later writes do not grow the file. */
    if (ftruncate(log->fd, LOG_SEGMENT_SIZE) < 0) {
        fprintf(stderr, "Unable to truncate logfile %s: %m\n", logname);
125 /* All log writes (at least for a single log) are made by one thread, so we
126 * don't need to worry about concurrent access to the log file. Log items to
127 * write are pulled off a queue (and so may be posted by any thread).
128 * fdatasync() is used to ensure the log items are stable on disk.
130 * The log is broken up into separate files, roughly of size LOG_SEGMENT_SIZE
131 * each. If a log segment is not currently open (log->fd is negative), a new
132 * one is created. Log segment filenames are assigned sequentially.
134 * Log replay ought to be implemented later, and ought to set the initial
135 * sequence number appropriately.
/* Dedicated journal-writer thread.  Pops items from log->queue and appends
 * each to the current segment as a header / three payload ranges / footer
 * record, protected end-to-end by a CRC32C checksum.  Since this is the only
 * writer, no locking of the log file itself is required. */
static gpointer log_thread(gpointer d)
    BlueSkyLog *log = (BlueSkyLog *)d;

        if (!log_open(log)) {

        BlueSkyCloudLog *item
            = (BlueSkyCloudLog *)g_async_queue_pop(log->queue);
        g_mutex_lock(item->lock);
        g_assert(item->data != NULL);

        /* The item may have already been written to the journal... */
        if ((item->location_flags | item->pending_write) & CLOUDLOG_JOURNAL) {
            g_mutex_unlock(item->lock);
            bluesky_cloudlog_unref(item);
            /* NOTE(review): item is dereferenced after the unref above; this
             * is safe only if another reference is still held here — confirm. */
            g_atomic_int_add(&item->data_lock_count, -1);

        bluesky_cloudlog_stats_update(item, -1);
        item->pending_write |= CLOUDLOG_JOURNAL;
        bluesky_cloudlog_stats_update(item, 1);

        /* Serialize the item into three separate byte ranges whose sizes are
         * recorded individually in the record header. */
        GString *data1 = g_string_new("");
        GString *data2 = g_string_new("");
        GString *data3 = g_string_new("");
        bluesky_serialize_cloudlog(item, data1, data2, data3);

        struct log_header header;
        struct log_footer footer;
        size_t size = sizeof(header) + sizeof(footer);
        size += data1->len + data2->len + data3->len;

        offset = lseek(log->fd, 0, SEEK_CUR);

        /* Check whether the item would overflow the allocated journal size.
         * If so, start a new log segment. We only allow oversized log
         * segments if they contain a single log entry. */
        if (offset + size >= LOG_SEGMENT_SIZE && offset > 0) {

        /* All multi-byte header/footer fields are stored little-endian. */
        header.magic = GUINT32_TO_LE(HEADER_MAGIC);
        header.offset = GUINT32_TO_LE(offset);
        header.size1 = GUINT32_TO_LE(data1->len);
        header.size2 = GUINT32_TO_LE(data2->len);
        header.size3 = GUINT32_TO_LE(data3->len);
        header.type = item->type + '0';
        header.id = item->id;
        header.inum = GUINT64_TO_LE(item->inum);
        footer.magic = GUINT32_TO_LE(FOOTER_MAGIC);

        /* The checksum covers header, payloads, and footer up to (but not
         * including) the CRC field itself. */
        uint32_t crc = BLUESKY_CRC32C_SEED;

        writebuf(log->fd, (const char *)&header, sizeof(header));
        crc = crc32c(crc, (const char *)&header, sizeof(header));

        writebuf(log->fd, data1->str, data1->len);
        crc = crc32c(crc, data1->str, data1->len);
        writebuf(log->fd, data2->str, data2->len);
        crc = crc32c(crc, data2->str, data2->len);
        writebuf(log->fd, data3->str, data3->len);
        crc = crc32c(crc, data3->str, data3->len);

        crc = crc32c(crc, (const char *)&footer,
                     sizeof(footer) - sizeof(uint32_t));
        footer.crc = crc32c_finalize(crc);
        writebuf(log->fd, (const char *)&footer, sizeof(footer));

        /* Remember where in the journal the item now lives, for later
         * mapping and for checkpoint records. */
        item->log_seq = log->seq_num;
        item->log_offset = offset;
        item->log_size = size;
        item->data_size = item->data->len;

        g_string_free(data1, TRUE);
        g_string_free(data2, TRUE);
        g_string_free(data3, TRUE);

        /* Replace the log item's string data with a memory-mapped copy of the
         * data, now that it has been written to the log file. (Even if it
         * isn't yet on disk, it should at least be in the page cache and so
         * available to memory map.) */
        bluesky_string_unref(item->data);
        bluesky_cloudlog_fetch(item);

        log->committed = g_slist_prepend(log->committed, item);
        g_atomic_int_add(&item->data_lock_count, -1);
        g_mutex_unlock(item->lock);

        /* Force a sync if there are no other log items currently waiting to
         * be written, so committed records become durable promptly. */
        if (g_async_queue_length(log->queue) <= 0)
/* Create and initialize the logging context for the given journal directory:
 * allocate the submission queue and mmap-cache structures, determine the next
 * journal sequence number from files already present, open the directory
 * file descriptor used for openat()/fstatat(), and start the writer thread. */
BlueSkyLog *bluesky_log_new(const char *log_directory)
    BlueSkyLog *log = g_new0(BlueSkyLog, 1);

    log->log_directory = g_strdup(log_directory);
    log->queue = g_async_queue_new();
    log->mmap_lock = g_mutex_new();
    /* Keyed by filename string; values are BlueSkyCacheFile entries. */
    log->mmap_cache = g_hash_table_new(g_str_hash, g_str_equal);

    /* Determine the highest-numbered log file, so that we can start writing
     * out new journal entries at the next sequence number. */
    GDir *dir = g_dir_open(log_directory, 0, NULL);
    while ((file = g_dir_read_name(dir)) != NULL) {
        if (strncmp(file, "journal-", 8) == 0) {
            log->seq_num = MAX(log->seq_num, atoi(&file[8]) + 1);

    g_print("Starting journal at sequence number %d\n", log->seq_num);

    log->dirfd = open(log->log_directory, O_DIRECTORY);
    if (log->dirfd < 0) {
        fprintf(stderr, "Unable to open logging directory: %m\n");

    /* Detached writer thread; runs log_thread() for the process lifetime. */
    g_thread_create(log_thread, log, FALSE, NULL);
281 void bluesky_log_item_submit(BlueSkyCloudLog *item, BlueSkyLog *log)
283 if (!(item->location_flags & CLOUDLOG_JOURNAL)) {
284 bluesky_cloudlog_ref(item);
285 item->location_flags |= CLOUDLOG_UNCOMMITTED;
286 g_atomic_int_add(&item->data_lock_count, 1);
287 g_async_queue_push(log->queue, item);
291 void bluesky_log_finish_all(GList *log_items)
293 while (log_items != NULL) {
294 BlueSkyCloudLog *item = (BlueSkyCloudLog *)log_items->data;
296 g_mutex_lock(item->lock);
297 while ((item->location_flags & CLOUDLOG_UNCOMMITTED))
298 g_cond_wait(item->cond, item->lock);
299 g_mutex_unlock(item->lock);
300 bluesky_cloudlog_unref(item);
302 log_items = g_list_delete_link(log_items, log_items);
306 /* Return a committed cloud log record that can be used as a watermark for how
307 * much of the journal has been written. */
308 BlueSkyCloudLog *bluesky_log_get_commit_point(BlueSkyFS *fs)
310 BlueSkyCloudLog *marker = bluesky_cloudlog_new(fs, NULL);
311 marker->type = LOGTYPE_JOURNAL_MARKER;
312 marker->data = bluesky_string_new(g_strdup(""), 0);
313 bluesky_cloudlog_stats_update(marker, 1);
314 bluesky_cloudlog_sync(marker);
316 g_mutex_lock(marker->lock);
317 while ((marker->pending_write & CLOUDLOG_JOURNAL))
318 g_cond_wait(marker->cond, marker->lock);
319 g_mutex_unlock(marker->lock);
324 void bluesky_log_write_commit_point(BlueSkyFS *fs, BlueSkyCloudLog *marker)
326 BlueSkyCloudLog *commit = bluesky_cloudlog_new(fs, NULL);
327 commit->type = LOGTYPE_JOURNAL_CHECKPOINT;
329 uint32_t seq, offset;
330 seq = GUINT32_TO_LE(marker->log_seq);
331 offset = GUINT32_TO_LE(marker->log_offset);
332 GString *loc = g_string_new("");
333 g_string_append_len(loc, (const gchar *)&seq, sizeof(seq));
334 g_string_append_len(loc, (const gchar *)&offset, sizeof(offset));
335 commit->data = bluesky_string_new_from_gstring(loc);
336 bluesky_cloudlog_stats_update(commit, 1);
337 bluesky_cloudlog_sync(commit);
339 g_mutex_lock(commit->lock);
340 while ((commit->location_flags & CLOUDLOG_UNCOMMITTED))
341 g_cond_wait(commit->cond, commit->lock);
342 g_mutex_unlock(commit->lock);
344 bluesky_cloudlog_unref(marker);
345 bluesky_cloudlog_unref(commit);
/* Memory-map the given log object into memory (read-only) and return a pointer
 * to the data. */
350 static int page_size = 0;
/* Drop one reference to a cached log-segment file.  Only the counter
 * decrement is done here; reclamation of zero-refcount entries presumably
 * happens elsewhere (e.g. a cache-cleanup pass) — confirm before relying
 * on any teardown occurring at this point. */
void bluesky_cachefile_unref(BlueSkyCacheFile *cachefile)
    g_atomic_int_add(&cachefile->refcount, -1);
357 static void cloudlog_fetch_complete(BlueSkyStoreAsync *async,
358 BlueSkyCacheFile *cachefile);
/* Kick off an asynchronous download of a cloud log segment into the local
 * cache.  Takes a reference on the cachefile which is released by the
 * completion callback, and marks the file as being fetched so readers can
 * wait on it. */
static void cloudlog_fetch_start(BlueSkyCacheFile *cachefile)
    g_atomic_int_inc(&cachefile->refcount);
    cachefile->fetching = TRUE;
    g_print("Starting fetch of %s from cloud\n", cachefile->filename);
    BlueSkyStoreAsync *async = bluesky_store_async_new(cachefile->fs->store);
    async->op = STORE_OP_GET;
    async->key = g_strdup(cachefile->filename);
    /* cloudlog_fetch_complete() runs when the GET finishes. */
    bluesky_store_async_add_notifier(async,
                                     (GFunc)cloudlog_fetch_complete,
    bluesky_store_async_submit(async);
    bluesky_store_async_unref(async);
/* Completion callback for cloudlog_fetch_start().  On success, decrypt the
 * downloaded segment and write it into the local cache directory, then mark
 * the cachefile ready.  On failure, retry the fetch.  In either case wake
 * any threads waiting for the segment and drop the fetch's reference. */
static void cloudlog_fetch_complete(BlueSkyStoreAsync *async,
                                    BlueSkyCacheFile *cachefile)
    g_print("Fetch of %s from cloud complete, status = %d\n",
            async->key, async->result);

    g_mutex_lock(cachefile->lock);
    if (async->result >= 0) {
        char *pathname = g_strdup_printf("%s/%s",
                                         cachefile->log->log_directory,
                                         cachefile->filename);
        /* Take a private copy before decrypting in place. */
        async->data = bluesky_string_dup(async->data);
        /* Decryption also populates cachefile->items with the byte ranges
         * of the individual log records found in the segment. */
        bluesky_cloudlog_decrypt(async->data->data, async->data->len,
                                 cachefile->fs->keys, cachefile->items);
        if (!g_file_set_contents(pathname, async->data->data, async->data->len,
            g_print("Error writing out fetched file to cache!\n");

        cachefile->fetching = FALSE;
        cachefile->ready = TRUE;
        g_print("Error fetching from cloud, retrying...\n");
        cloudlog_fetch_start(cachefile);

    bluesky_cachefile_unref(cachefile);
    g_cond_broadcast(cachefile->cond);
    g_mutex_unlock(cachefile->lock);
406 /* Find the BlueSkyCacheFile object for the given journal or cloud log segment.
407 * Returns the object in the locked state and with a reference taken. */
/* Look up (or create) the BlueSkyCacheFile for a journal segment
 * (clouddir < 0, per the caller in log_open) or a cloud log segment.
 * Returns the entry locked and with a reference taken; returns NULL for a
 * stale journal reference whose backing file has been reclaimed. */
BlueSkyCacheFile *bluesky_cachefile_lookup(BlueSkyFS *fs,
                                           int clouddir, int log_seq)
    if (page_size == 0) {
        page_size = getpagesize();

    BlueSkyLog *log = fs->log;

    // A request for a local log file
        sprintf(logname, "journal-%08d", log_seq);
        type = CLOUDLOG_JOURNAL;
        sprintf(logname, "log-%08d-%08d", clouddir, log_seq);
        type = CLOUDLOG_CLOUD;

    BlueSkyCacheFile *map;
    g_mutex_lock(log->mmap_lock);
    map = g_hash_table_lookup(log->mmap_cache, logname);

        && type == CLOUDLOG_JOURNAL
        && fstatat(log->dirfd, logname, &statbuf, 0) < 0) {
        /* A stale reference to a journal file which doesn't exist any longer
         * because it was reclaimed. Return NULL. */
    } else if (map == NULL) {
        g_print("Adding cache file %s\n", logname);

        /* Create a new cache entry, locked from birth so no other thread
         * can see it half-initialized once it is in the hash table. */
        map = g_new0(BlueSkyCacheFile, 1);
        map->lock = g_mutex_new();
        g_mutex_lock(map->lock);
        map->cond = g_cond_new();
        map->filename = g_strdup(logname);
        map->log_seq = log_seq;
        g_atomic_int_set(&map->mapcount, 0);
        g_atomic_int_set(&map->refcount, 0);
        /* Tracks which byte ranges of the segment are locally available. */
        map->items = bluesky_rangeset_new();
        /* Key is map->filename itself, so it must outlive the table entry. */
        g_hash_table_insert(log->mmap_cache, map->filename, map);

        // If the log file is stored in the cloud, we may need to fetch it
            cloudlog_fetch_start(map);
        g_mutex_lock(map->lock);

    g_mutex_unlock(log->mmap_lock);
    g_atomic_int_inc(&map->refcount);
471 /* The arguments are mostly straightforward. log_dir is -1 for access from the
472 * journal, and non-negative for access to a cloud log segment. map_data
473 * should be TRUE for the case that are mapping just the data of an item where
474 * we have already parsed the item headers; this surpresses the error when the
475 * access is not to the first bytes of the item. */
/* Return a reference-counted string backed by a memory-mapped view of one
 * log record (journal if log_dir < 0, cloud segment otherwise).  Waits for
 * an in-progress cloud fetch if necessary, maps the segment on first use,
 * and charges the mapped size against log->disk_used accounting. */
BlueSkyRCStr *bluesky_log_map_object(BlueSkyFS *fs, int log_dir,
                                     int log_seq, int log_offset, int log_size,
    if (page_size == 0) {
        page_size = getpagesize();

    BlueSkyLog *log = fs->log;
    BlueSkyCacheFile *map = bluesky_cachefile_lookup(fs, log_dir, log_seq);

    /* Log segments fetched from the cloud might only be partially-fetched.
     * Check whether the object we are interested in is available. */
    const BlueSkyRangesetItem *rangeitem;
    rangeitem = bluesky_rangeset_lookup(map->items, log_offset);
    if (rangeitem == NULL || rangeitem->start != log_offset) {
        g_warning("log-%d: Item at offset %d does not seem to be available\n", log_seq, log_offset);
        /* map_data callers have already parsed item headers, so access not
         * aligned to an item start is permitted if it fits in the range. */
        if (map_data && rangeitem != NULL
            && log_offset > rangeitem->start
            && log_size <= rangeitem->length - (log_offset - rangeitem->start))
            g_warning(" ...allowing access to middle of log item");

    if (map->addr == NULL) {
        /* Block until a pending cloud fetch has made the file available. */
        while (!map->ready && map->fetching) {
            g_print("Waiting for log segment to be fetched from cloud...\n");
            g_cond_wait(map->cond, map->lock);

        int fd = openat(log->dirfd, map->filename, O_RDONLY);
            fprintf(stderr, "Error opening logfile %s: %m\n", map->filename);
            bluesky_cachefile_unref(map);
            g_mutex_unlock(map->lock);

        /* Map the whole segment read-only; size taken from the file. */
        off_t length = lseek(fd, 0, SEEK_END);
        map->addr = (const char *)mmap(NULL, length, PROT_READ, MAP_SHARED,
        /* Update cache-usage accounting (KiB) for the remapped length. */
        g_atomic_int_add(&log->disk_used, -(map->len / 1024));
        g_atomic_int_add(&log->disk_used, map->len / 1024);
        g_print("Re-mapped log segment %d...\n", log_seq);
        g_atomic_int_inc(&map->refcount);

    map->atime = bluesky_get_current_time();
    str = bluesky_string_new_from_mmap(map, log_offset, log_size);
    bluesky_cachefile_unref(map);
    g_mutex_unlock(map->lock);
/* Release one mapping reference on a cached log segment.  When the map
 * count drops to zero the segment is unmapped and the refcount that the
 * mapping held is dropped.  The count is re-checked under the lock because
 * another thread may have re-mapped between the dec-and-test and locking. */
void bluesky_mmap_unref(BlueSkyCacheFile *mmap)
    if (g_atomic_int_dec_and_test(&mmap->mapcount)) {
        g_mutex_lock(mmap->lock);
        if (g_atomic_int_get(&mmap->mapcount) == 0) {
            g_print("Unmapped log segment %d...\n", mmap->log_seq);
            munmap((void *)mmap->addr, mmap->len);
            g_atomic_int_add(&mmap->refcount, -1);
        g_mutex_unlock(mmap->lock);
560 /******************************* JOURNAL REPLAY *******************************
561 * The journal replay code is used to recover filesystem state after a
562 * filesystem restart. We first look for the most recent commit record in the
563 * journal, which indicates the point before which all data in the journal has
564 * also been committed to the cloud. Then, we read in all data in the log past
/* Return a sorted list of the journal file names ("journal-*") found in
 * dirname.  Each element is a freshly-allocated string; the caller owns the
 * list and its contents. */
static GList *directory_contents(const char *dirname)
    GList *contents = NULL;
    GDir *dir = g_dir_open(dirname, 0, NULL);
        g_warning("Unable to open journal directory: %s", dirname);

    while ((file = g_dir_read_name(dir)) != NULL) {
        if (strncmp(file, "journal-", 8) == 0)
            contents = g_list_prepend(contents, g_strdup(file));

    /* Fixed-width zero-padded sequence numbers make strcmp order == numeric
     * order. */
    contents = g_list_sort(contents, (GCompareFunc)strcmp);
/* Validate the journal record starting at the given offset within a mapped
 * buffer of length len: bounds checks, header/footer magic numbers, the
 * self-recorded offset, and the CRC32C over the whole record.  Used during
 * replay to stop at the first incompletely-written (torn) record. */
static gboolean validate_journal_item(const char *buf, size_t len, off_t offset)
    const struct log_header *header;
    const struct log_footer *footer;

    /* Record must at least fit a header and footer within the buffer. */
    if (offset + sizeof(struct log_header) + sizeof(struct log_footer) > len)

    header = (const struct log_header *)(buf + offset);
    if (GUINT32_FROM_LE(header->magic) != HEADER_MAGIC)
    /* The header records its own offset; a mismatch means corruption. */
    if (GUINT32_FROM_LE(header->offset) != offset)
    size_t size = GUINT32_FROM_LE(header->size1)
                  + GUINT32_FROM_LE(header->size2)
                  + GUINT32_FROM_LE(header->size3);

    off_t footer_offset = offset + sizeof(struct log_header) + size;
    if (footer_offset + sizeof(struct log_footer) > len)
    footer = (const struct log_footer *)(buf + footer_offset);

    if (GUINT32_FROM_LE(footer->magic) != FOOTER_MAGIC)

    /* CRC covers header + payloads + footer (including the stored CRC, so a
     * valid record yields the fixed validator constant). */
    uint32_t crc = crc32c(BLUESKY_CRC32C_SEED, buf + offset,
                          sizeof(struct log_header) + sizeof(struct log_footer)
    if (crc != BLUESKY_CRC32C_VALIDATOR) {
        g_warning("Journal entry failed to validate: CRC %08x != %08x",
                  crc, BLUESKY_CRC32C_VALIDATOR);
625 /* Scan through a journal segment to extract correctly-written items (those
626 * that pass sanity checks and have a valid checksum). */
/* First replay pass over one journal segment: walk all valid records and,
 * for each checkpoint record found, update *seq / *start_offset with the
 * replay start position it encodes.  The last checkpoint in the segment
 * therefore wins. */
static void bluesky_replay_scan_journal(const char *buf, size_t len,
                                        uint32_t *seq, uint32_t *start_offset)
    const struct log_header *header;

    while (validate_journal_item(buf, len, offset)) {
        header = (const struct log_header *)(buf + offset);
        size_t size = GUINT32_FROM_LE(header->size1)
                      + GUINT32_FROM_LE(header->size2)
                      + GUINT32_FROM_LE(header->size3);

        /* Type is stored as an ASCII digit; see log_thread's '0' offset. */
        if (header->type - '0' == LOGTYPE_JOURNAL_CHECKPOINT) {
            /* Checkpoint payload: little-endian (sequence, offset) pair. */
            const uint32_t *data = (const uint32_t *)((const char *)header + sizeof(struct log_header));
            *seq = GUINT32_FROM_LE(data[0]);
            *start_offset = GUINT32_FROM_LE(data[1]);

        offset += sizeof(struct log_header) + size + sizeof(struct log_footer);
/* Rebuild an in-memory cloud-log item from its serialized journal bytes:
 * mark it as living in the journal and reconstruct its outgoing link array
 * from the IDs in the second payload range (len2), resolving each non-zero
 * ID through fs->locations and taking a reference on resolved targets. */
static void reload_item(BlueSkyCloudLog *log_item,
                        size_t len1, size_t len2, size_t len3)
    BlueSkyFS *fs = log_item->fs;
    /*const char *data1 = data;*/
    const BlueSkyCloudID *data2
        = (const BlueSkyCloudID *)(data + len1);
    /*const BlueSkyCloudPointer *data3
        = (const BlueSkyCloudPointer *)(data + len1 + len2);*/

    bluesky_cloudlog_stats_update(log_item, -1);
    bluesky_string_unref(log_item->data);
    log_item->data = NULL;
    log_item->location_flags = CLOUDLOG_JOURNAL;
    bluesky_cloudlog_stats_update(log_item, 1);

    /* id0 is the all-zero ID, used to recognize empty link slots. */
    memset(&id0, 0, sizeof(id0));

    int link_count = len2 / sizeof(BlueSkyCloudID);
    GArray *new_links = g_array_new(FALSE, TRUE, sizeof(BlueSkyCloudLog *));
    for (int i = 0; i < link_count; i++) {
        BlueSkyCloudID id = data2[i];
        BlueSkyCloudLog *ref = NULL;
        if (memcmp(&id, &id0, sizeof(BlueSkyCloudID)) != 0) {
            g_mutex_lock(fs->lock);
            ref = g_hash_table_lookup(fs->locations, &id);
                bluesky_cloudlog_ref(ref);
            g_mutex_unlock(fs->lock);
        /* Unresolved or zero IDs are kept as NULL placeholders. */
        g_array_append_val(new_links, ref);

    /* Drop references held through the old link array before replacing it. */
    for (int i = 0; i < log_item->links->len; i++) {
        BlueSkyCloudLog *c = g_array_index(log_item->links,
                                           BlueSkyCloudLog *, i);
        bluesky_cloudlog_unref(c);
    g_array_unref(log_item->links);
    log_item->links = new_links;
/* Second replay pass: starting at start_offset, reload every valid record
 * in the segment into in-memory cloud-log items, and for inode records also
 * deserialize the inode itself, overwriting any stale in-memory state.
 * Each reloaded item is prepended to *objects so the caller can drop the
 * references afterwards. */
static void bluesky_replay_scan_journal2(BlueSkyFS *fs, GList **objects,
                                         int log_seq, int start_offset,
                                         const char *buf, size_t len)
    const struct log_header *header;
    off_t offset = start_offset;

    while (validate_journal_item(buf, len, offset)) {
        header = (const struct log_header *)(buf + offset);
        g_print("In replay found valid item at offset %zd\n", offset);
        size_t size = GUINT32_FROM_LE(header->size1)
                      + GUINT32_FROM_LE(header->size2)
                      + GUINT32_FROM_LE(header->size3);

        BlueSkyCloudLog *log_item = bluesky_cloudlog_get(fs, header->id);
        g_mutex_lock(log_item->lock);
        *objects = g_list_prepend(*objects, log_item);

        log_item->inum = GUINT64_FROM_LE(header->inum);
        reload_item(log_item, buf + offset + sizeof(struct log_header),
                    GUINT32_FROM_LE(header->size1),
                    GUINT32_FROM_LE(header->size2),
                    GUINT32_FROM_LE(header->size3));
        log_item->log_seq = log_seq;
        log_item->log_offset = offset + sizeof(struct log_header);
        log_item->log_size = header->size1;

        /* Temporarily attach a private copy of the first payload range so
         * inode deserialization below can read it. */
        bluesky_string_unref(log_item->data);
        log_item->data = bluesky_string_new(g_memdup(buf + offset + sizeof(struct log_header), GUINT32_FROM_LE(header->size1)), GUINT32_FROM_LE(header->size1));

        /* For any inodes which were read from the journal, deserialize the
         * inode information, overwriting any old inode data. */
        if (header->type - '0' == LOGTYPE_INODE) {
            uint64_t inum = GUINT64_FROM_LE(header->inum);
            g_mutex_lock(fs->lock);
            inode = (BlueSkyInode *)g_hash_table_lookup(fs->inodes, &inum);
                inode = bluesky_new_inode(inum, fs, BLUESKY_PENDING);
                inode->change_count = 0;
                bluesky_insert_inode(fs, inode);
            g_mutex_lock(inode->lock);
            /* Discard old in-memory state before overwriting from the log. */
            bluesky_inode_free_resources(inode);
            if (!bluesky_deserialize_inode(inode, log_item))
                g_print("Error deserializing inode %"PRIu64"\n", inum);
            /* Inode numbering must resume past anything seen in the log. */
            fs->next_inum = MAX(fs->next_inum, inum + 1);
            bluesky_list_unlink(&fs->accessed_list, inode->accessed_list);
            inode->accessed_list = bluesky_list_prepend(&fs->accessed_list, inode);
            bluesky_list_unlink(&fs->dirty_list, inode->dirty_list);
            inode->dirty_list = bluesky_list_prepend(&fs->dirty_list, inode);
            bluesky_list_unlink(&fs->unlogged_list, inode->unlogged_list);
            inode->unlogged_list = NULL;
            inode->change_cloud = inode->change_commit;
            bluesky_cloudlog_ref(log_item);
            bluesky_cloudlog_unref(inode->committed_item);
            inode->committed_item = log_item;
            g_mutex_unlock(inode->lock);
            g_mutex_unlock(fs->lock);

        /* Done with the temporary payload copy. */
        bluesky_string_unref(log_item->data);
        log_item->data = NULL;
        g_mutex_unlock(log_item->lock);

        offset += sizeof(struct log_header) + size + sizeof(struct log_footer);
/* Replay the journal after a restart.  Pass one scans the journal files in
 * reverse order for the most recent checkpoint record, which gives the
 * sequence number and offset from which replay must start; pass two scans
 * forward from that point, reloading every record to rebuild filesystem
 * state.  References taken on reloaded objects are dropped at the end so
 * anything not linked into live structures is freed. */
void bluesky_replay(BlueSkyFS *fs)
    BlueSkyLog *log = fs->log;
    GList *logfiles = directory_contents(log->log_directory);

    /* Scan through log files in reverse order to find the most recent commit
     */
    logfiles = g_list_reverse(logfiles);
    uint32_t seq_num = 0, start_offset = 0;
    while (logfiles != NULL) {
        char *filename = g_strdup_printf("%s/%s", log->log_directory,
                                         (char *)logfiles->data);
        g_print("Scanning file %s\n", filename);
        GMappedFile *map = g_mapped_file_new(filename, FALSE, NULL);
            g_warning("Mapping logfile %s failed!\n", filename);
            bluesky_replay_scan_journal(g_mapped_file_get_contents(map),
                                        g_mapped_file_get_length(map),
                                        &seq_num, &start_offset);
            g_mapped_file_unref(map);

        g_free(logfiles->data);
        logfiles = g_list_delete_link(logfiles, logfiles);
        /* A non-zero position means a checkpoint was found; stop scanning. */
        if (seq_num != 0 || start_offset != 0)

    g_list_foreach(logfiles, (GFunc)g_free, NULL);
    g_list_free(logfiles);

    /* Now, scan forward starting from the given point in the log to
     * reconstruct all filesystem state. As we reload objects we hold a
     * reference to each loaded object. At the end we free all these
     * references, so that any objects which were not linked into persistent
     * filesystem data structures are freed. */
    GList *objects = NULL;
        char *filename = g_strdup_printf("%s/journal-%08d",
                                         log->log_directory, seq_num);
        g_print("Replaying file %s from offset %d\n", filename, start_offset);
        GMappedFile *map = g_mapped_file_new(filename, FALSE, NULL);
            /* Missing segment: the journal simply ends here. */
            g_warning("Mapping logfile failed, assuming end of journal\n");

        bluesky_replay_scan_journal2(fs, &objects, seq_num, start_offset,
                                     g_mapped_file_get_contents(map),
                                     g_mapped_file_get_length(map));
        g_mapped_file_unref(map);

    /* Drop the replay references; unreachable objects are freed here. */
    while (objects != NULL) {
        bluesky_cloudlog_unref((BlueSkyCloudLog *)objects->data);
        objects = g_list_delete_link(objects, objects);