1 /* Blue Sky: File Systems in the Cloud
3 * Copyright (C) 2010 The Regents of the University of California
4 * Written by Michael Vrable <mvrable@cs.ucsd.edu>
10 #define _ATFILE_SOURCE
18 #include <sys/types.h>
24 #include "bluesky-private.h"
26 /* The logging layer for BlueSky. This is used to write filesystem changes
27 * durably to disk so that they can be recovered in the event of a system
30 /* The logging layer takes care out writing out a sequence of log records to
31 * disk. On disk, each record consists of a header, a data payload, and a
32 * footer. The footer contains a checksum of the record, meant to help with
33 * identifying corrupt log records (we would assume because the log record was
34 * only incompletely written out before a crash, which should only happen for
35 * log records that were not considered committed). */
37 // Rough size limit for a log segment. This is not a firm limit and there are
38 // no absolute guarantees on the size of a log segment.
39 #define LOG_SEGMENT_SIZE (1 << 22)
41 #define HEADER_MAGIC 0x676f4c0a
42 #define FOOTER_MAGIC 0x2e435243
/* Write len bytes from buf to fd, retrying writes that are interrupted by a
 * signal (EINTR); any other write() failure trips the g_assert.
 * NOTE(review): intervening source lines are missing from this listing, so
 * the retry-loop structure around write() is not fully visible here. */
44 static void writebuf(int fd, const char *buf, size_t len)
48 written = write(fd, buf, len);
49 if (written < 0 && errno == EINTR)
51 g_assert(written >= 0);
/* Mark every item on log->committed as durably in the journal: clear the
 * journal bit in pending_write, set CLOUDLOG_JOURNAL (and clear
 * CLOUDLOG_UNCOMMITTED) in location_flags, signal waiters on the item's
 * condition variable, and drop the list's reference to each item.  Also
 * refreshes the journal file's disk-usage accounting from fstat().
 * Presumably called after the journal data has been synced to disk -- TODO
 * confirm against the (not visible) caller. */
57 static void log_commit(BlueSkyLog *log)
66 /* Update disk-space usage statistics for the journal file. */
67 g_atomic_int_add(&log->disk_used, -log->current_log->disk_used);
69 if (fstat(log->fd, &statbuf) >= 0) {
70 /* Convert from 512-byte blocks to 1-kB units */
71 log->current_log->disk_used = (statbuf.st_blocks + 1) / 2;
73 g_atomic_int_add(&log->disk_used, log->current_log->disk_used);
/* Flip each committed item's flags under its lock and wake any thread
 * blocked in bluesky_log_finish_all() waiting for the commit. */
75 while (log->committed != NULL) {
76 BlueSkyCloudLog *item = (BlueSkyCloudLog *)log->committed->data;
77 g_mutex_lock(item->lock);
78 bluesky_cloudlog_stats_update(item, -1);
79 item->pending_write &= ~CLOUDLOG_JOURNAL;
81 = (item->location_flags & ~CLOUDLOG_UNCOMMITTED) | CLOUDLOG_JOURNAL;
82 bluesky_cloudlog_stats_update(item, 1);
83 g_cond_signal(item->cond);
84 g_mutex_unlock(item->lock);
85 log->committed = g_slist_delete_link(log->committed, log->committed);
86 bluesky_cloudlog_unref(item);
90 if (bluesky_verbose && batchsize > 1)
91 g_print("Log batch size: %d\n", batchsize);
/* Open the next journal segment file for writing.  Releases the previous
 * segment's cache-file handle, creates "journal-%08d" (named by sequence
 * number) with O_CREAT|O_WRONLY|O_EXCL -- skipping over sequence numbers
 * whose files already exist -- registers a cache-file entry for the new
 * segment, and preallocates it to LOG_SEGMENT_SIZE with ftruncate().
 * Returns a gboolean; the success/failure return paths are not fully
 * visible in this listing. */
94 static gboolean log_open(BlueSkyLog *log)
105 if (log->current_log != NULL) {
106 bluesky_cachefile_unref(log->current_log);
107 log->current_log = NULL;
110 while (log->fd < 0) {
111 g_snprintf(logname, sizeof(logname), "journal-%08d", log->seq_num);
112 log->fd = openat(log->dirfd, logname, O_CREAT|O_WRONLY|O_EXCL, 0600);
113 if (log->fd < 0 && errno == EEXIST) {
114 fprintf(stderr, "Log file %s already exists...\n", logname);
117 } else if (log->fd < 0) {
118 fprintf(stderr, "Error opening logfile %s: %m\n", logname);
/* Register a cachefile entry so readers can locate this segment; the
 * lookup returns it locked, so drop the lock immediately. */
123 log->current_log = bluesky_cachefile_lookup(log->fs, -1, log->seq_num,
125 g_assert(log->current_log != NULL);
126 g_mutex_unlock(log->current_log->lock);
128 if (ftruncate(log->fd, LOG_SEGMENT_SIZE) < 0) {
129 fprintf(stderr, "Unable to truncate logfile %s: %m\n", logname);
136 /* All log writes (at least for a single log) are made by one thread, so we
137 * don't need to worry about concurrent access to the log file. Log items to
138 * write are pulled off a queue (and so may be posted by any thread).
139 * fdatasync() is used to ensure the log items are stable on disk.
141 * The log is broken up into separate files, roughly of size LOG_SEGMENT_SIZE
142 * each. If a log segment is not currently open (log->fd is negative), a new
143 * one is created. Log segment filenames are assigned sequentially.
145 * Log replay (implemented in the journal-replay section below) should set
146 * the initial sequence number appropriately.
/* Dedicated journal-writer thread.  Pops BlueSkyCloudLog items off
 * log->queue, serializes each one (log_header + three data sections +
 * CRC32C-protected log_footer) into the current journal segment, records
 * the item's journal location (sequence, offset, size), and then replaces
 * the item's in-memory data with a memory-mapped view of the bytes just
 * written.  Items that are already in the journal are skipped.  A new
 * segment is opened when the current one would exceed LOG_SEGMENT_SIZE. */
148 static gpointer log_thread(gpointer d)
150 BlueSkyLog *log = (BlueSkyLog *)d;
154 if (!log_open(log)) {
159 BlueSkyCloudLog *item
160 = (BlueSkyCloudLog *)g_async_queue_pop(log->queue);
161 g_mutex_lock(item->lock);
162 g_assert(item->data != NULL);
164 /* The item may have already been written to the journal... */
165 if ((item->location_flags | item->pending_write) & CLOUDLOG_JOURNAL) {
166 g_mutex_unlock(item->lock);
/* NOTE(review): item is still dereferenced on the line after the unref;
 * this is safe only if another reference (e.g. the data lock taken at
 * submit time) keeps the item alive -- confirm. */
167 bluesky_cloudlog_unref(item);
168 g_atomic_int_add(&item->data_lock_count, -1);
172 bluesky_cloudlog_stats_update(item, -1);
173 item->pending_write |= CLOUDLOG_JOURNAL;
174 bluesky_cloudlog_stats_update(item, 1);
176 GString *data1 = g_string_new("");
177 GString *data2 = g_string_new("");
178 GString *data3 = g_string_new("");
179 bluesky_serialize_cloudlog(item, data1, data2, data3);
181 struct log_header header;
182 struct log_footer footer;
183 size_t size = sizeof(header) + sizeof(footer);
184 size += data1->len + data2->len + data3->len;
187 offset = lseek(log->fd, 0, SEEK_CUR);
189 /* Check whether the item would overflow the allocated journal size.
190 * If so, start a new log segment. We only allow oversized log
191 * segments if they contain a single log entry. */
192 if (offset + size >= LOG_SEGMENT_SIZE && offset > 0) {
/* All multi-byte header/footer fields are stored little-endian. */
197 header.magic = GUINT32_TO_LE(HEADER_MAGIC);
198 header.offset = GUINT32_TO_LE(offset);
199 header.size1 = GUINT32_TO_LE(data1->len);
200 header.size2 = GUINT32_TO_LE(data2->len);
201 header.size3 = GUINT32_TO_LE(data3->len);
202 header.type = item->type + '0';
203 header.id = item->id;
204 header.inum = GUINT64_TO_LE(item->inum);
205 footer.magic = GUINT32_TO_LE(FOOTER_MAGIC);
/* The CRC covers the header, all three data sections, and the footer
 * up to (but excluding) the CRC field itself. */
207 uint32_t crc = BLUESKY_CRC32C_SEED;
209 writebuf(log->fd, (const char *)&header, sizeof(header));
210 crc = crc32c(crc, (const char *)&header, sizeof(header));
212 writebuf(log->fd, data1->str, data1->len);
213 crc = crc32c(crc, data1->str, data1->len);
214 writebuf(log->fd, data2->str, data2->len);
215 crc = crc32c(crc, data2->str, data2->len);
216 writebuf(log->fd, data3->str, data3->len);
217 crc = crc32c(crc, data3->str, data3->len);
219 crc = crc32c(crc, (const char *)&footer,
220 sizeof(footer) - sizeof(uint32_t));
221 footer.crc = crc32c_finalize(crc);
222 writebuf(log->fd, (const char *)&footer, sizeof(footer));
/* Remember where this item landed so readers can map it later. */
224 item->log_seq = log->seq_num;
225 item->log_offset = offset;
226 item->log_size = size;
227 item->data_size = item->data->len;
231 g_string_free(data1, TRUE);
232 g_string_free(data2, TRUE);
233 g_string_free(data3, TRUE);
235 /* Replace the log item's string data with a memory-mapped copy of the
236 * data, now that it has been written to the log file. (Even if it
237 * isn't yet on disk, it should at least be in the page cache and so
238 * available to memory map.) */
239 bluesky_string_unref(item->data);
241 bluesky_cloudlog_fetch(item);
243 log->committed = g_slist_prepend(log->committed, item);
244 g_atomic_int_add(&item->data_lock_count, -1);
245 g_mutex_unlock(item->lock);
247 /* Force a disk sync (fdatasync) if there are no other log items
 * currently waiting to be written. */
249 if (g_async_queue_length(log->queue) <= 0)
/* Allocate and initialize a BlueSkyLog rooted at log_directory: set up the
 * submission queue, mmap lock, and mmap cache, scan existing
 * "journal-NNNNNNNN" files to choose the next sequence number, open the
 * directory file descriptor, and launch the log-writer thread.  Returns the
 * new log (return statement not visible in this listing). */
256 BlueSkyLog *bluesky_log_new(const char *log_directory)
258 BlueSkyLog *log = g_new0(BlueSkyLog, 1);
260 log->log_directory = g_strdup(log_directory);
263 log->queue = g_async_queue_new();
264 log->mmap_lock = g_mutex_new();
265 log->mmap_cache = g_hash_table_new(g_str_hash, g_str_equal);
267 /* Determine the highest-numbered log file, so that we can start writing
268 * out new journal entries at the next sequence number. */
269 GDir *dir = g_dir_open(log_directory, 0, NULL);
272 while ((file = g_dir_read_name(dir)) != NULL) {
273 if (strncmp(file, "journal-", 8) == 0) {
274 log->seq_num = MAX(log->seq_num, atoi(&file[8]) + 1);
278 g_print("Starting journal at sequence number %d\n", log->seq_num);
281 log->dirfd = open(log->log_directory, O_DIRECTORY);
282 if (log->dirfd < 0) {
283 fprintf(stderr, "Unable to open logging directory: %m\n");
/* FALSE: the writer thread is not joinable; it runs for the process
 * lifetime. */
287 g_thread_create(log_thread, log, FALSE, NULL);
/* Queue a cloud-log item for journaling.  Items already resident in the
 * journal are ignored.  Otherwise: take a reference for the log thread,
 * mark the item CLOUDLOG_UNCOMMITTED, pin its in-memory data (data_lock),
 * and push it onto the writer thread's queue. */
292 void bluesky_log_item_submit(BlueSkyCloudLog *item, BlueSkyLog *log)
294 if (!(item->location_flags & CLOUDLOG_JOURNAL)) {
295 bluesky_cloudlog_ref(item);
296 item->location_flags |= CLOUDLOG_UNCOMMITTED;
297 g_atomic_int_add(&item->data_lock_count, 1);
298 g_async_queue_push(log->queue, item);
/* Block until every item on the list has been committed to the journal
 * (i.e. until its CLOUDLOG_UNCOMMITTED flag clears), dropping the list's
 * reference on each item as it completes.  Consumes and frees the list. */
302 void bluesky_log_finish_all(GList *log_items)
304 while (log_items != NULL) {
305 BlueSkyCloudLog *item = (BlueSkyCloudLog *)log_items->data;
307 g_mutex_lock(item->lock);
/* log_commit() signals item->cond once the flag is cleared. */
308 while ((item->location_flags & CLOUDLOG_UNCOMMITTED))
309 g_cond_wait(item->cond, item->lock);
310 g_mutex_unlock(item->lock);
311 bluesky_cloudlog_unref(item);
313 log_items = g_list_delete_link(log_items, log_items);
317 /* Return a committed cloud log record that can be used as a watermark for how
318 * much of the journal has been written. */
/* Implementation: write a dummy JOURNAL_MARKER record through the normal
 * sync path, then block until its journal write completes.  The marker is
 * returned to the caller with a reference held (the return statement is not
 * visible in this listing -- confirm). */
319 BlueSkyCloudLog *bluesky_log_get_commit_point(BlueSkyFS *fs)
321 BlueSkyCloudLog *marker = bluesky_cloudlog_new(fs, NULL);
322 marker->type = LOGTYPE_JOURNAL_MARKER;
323 marker->data = bluesky_string_new(g_strdup(""), 0);
324 bluesky_cloudlog_stats_update(marker, 1);
325 bluesky_cloudlog_sync(marker);
327 g_mutex_lock(marker->lock);
328 while ((marker->pending_write & CLOUDLOG_JOURNAL))
329 g_cond_wait(marker->cond, marker->lock);
330 g_mutex_unlock(marker->lock);
/* Append a JOURNAL_CHECKPOINT record recording the (sequence, offset)
 * journal location of the given marker -- stored as two little-endian
 * 32-bit integers -- wait for the checkpoint itself to commit, then release
 * both the marker and the checkpoint object. */
335 void bluesky_log_write_commit_point(BlueSkyFS *fs, BlueSkyCloudLog *marker)
337 BlueSkyCloudLog *commit = bluesky_cloudlog_new(fs, NULL);
338 commit->type = LOGTYPE_JOURNAL_CHECKPOINT;
340 uint32_t seq, offset;
341 seq = GUINT32_TO_LE(marker->log_seq);
342 offset = GUINT32_TO_LE(marker->log_offset);
343 GString *loc = g_string_new("");
344 g_string_append_len(loc, (const gchar *)&seq, sizeof(seq));
345 g_string_append_len(loc, (const gchar *)&offset, sizeof(offset));
346 commit->data = bluesky_string_new_from_gstring(loc);
347 bluesky_cloudlog_stats_update(commit, 1);
348 bluesky_cloudlog_sync(commit);
/* Wait until the checkpoint record itself is durable in the journal. */
350 g_mutex_lock(commit->lock);
351 while ((commit->location_flags & CLOUDLOG_UNCOMMITTED))
352 g_cond_wait(commit->cond, commit->lock);
353 g_mutex_unlock(commit->lock);
355 bluesky_cloudlog_unref(marker);
356 bluesky_cloudlog_unref(commit);
359 /* Memory-map the given log object into memory (read-only) and return a pointer
/* Cached page size, initialized lazily via getpagesize(). */
361 static int page_size = 0;
/* Drop one reference to a cache file.  Reclamation of zero-reference files
 * is handled elsewhere (not visible in this listing). */
363 void bluesky_cachefile_unref(BlueSkyCacheFile *cachefile)
365 g_atomic_int_add(&cachefile->refcount, -1);
368 static void cloudlog_fetch_start(BlueSkyCacheFile *cachefile);
370 /* Find the BlueSkyCacheFile object for the given journal or cloud log segment.
371 * Returns the object in the locked state and with a reference taken. */
/* clouddir < 0 selects a local journal segment ("journal-%08d"); otherwise a
 * cloud segment ("log-%08d-%08d").  A lookup for a journal file that no
 * longer exists on disk returns NULL (stale, reclaimed segment).  On a cache
 * miss a new entry is created, its backing file preallocated, and -- for
 * cloud segments with start_fetch set -- a background fetch is started. */
372 BlueSkyCacheFile *bluesky_cachefile_lookup(BlueSkyFS *fs,
373 int clouddir, int log_seq,
374 gboolean start_fetch)
376 if (page_size == 0) {
377 page_size = getpagesize();
380 BlueSkyLog *log = fs->log;
386 // A request for a local log file
388 sprintf(logname, "journal-%08d", log_seq);
389 type = CLOUDLOG_JOURNAL;
391 sprintf(logname, "log-%08d-%08d", clouddir, log_seq);
392 type = CLOUDLOG_CLOUD;
395 BlueSkyCacheFile *map;
396 g_mutex_lock(log->mmap_lock);
397 map = g_hash_table_lookup(log->mmap_cache, logname);
400 && type == CLOUDLOG_JOURNAL
401 && fstatat(log->dirfd, logname, &statbuf, 0) < 0) {
402 /* A stale reference to a journal file which doesn't exist any longer
403 * because it was reclaimed. Return NULL. */
404 } else if (map == NULL) {
406 g_print("Adding cache file %s\n", logname);
/* Cache miss: build a new entry, locked, and insert it keyed by its own
 * filename string (the hash table does not copy the key). */
408 map = g_new0(BlueSkyCacheFile, 1);
411 map->lock = g_mutex_new();
413 g_mutex_lock(map->lock);
414 map->cond = g_cond_new();
415 map->filename = g_strdup(logname);
416 map->log_seq = log_seq;
418 g_atomic_int_set(&map->mapcount, 0);
419 g_atomic_int_set(&map->refcount, 0);
420 map->items = bluesky_rangeset_new();
422 g_hash_table_insert(log->mmap_cache, map->filename, map);
424 int fd = openat(log->dirfd, logname, O_WRONLY | O_CREAT, 0600);
426 ftruncate(fd, 5 << 20); // FIXME
430 // If the log file is stored in the cloud, we may need to fetch it
431 if (clouddir >= 0 && start_fetch)
432 cloudlog_fetch_start(map);
434 g_mutex_lock(map->lock);
437 g_mutex_unlock(log->mmap_lock);
439 g_atomic_int_inc(&map->refcount);
/* pwrite() wrapper that logs a g_warning on failure rather than aborting.
 * NOTE(review): retry and partial-write handling are not fully visible in
 * this listing (intervening lines are missing). */
443 static void robust_pwrite(int fd, const char *buf, ssize_t count, off_t offset)
446 ssize_t written = pwrite(fd, buf, count, offset);
450 g_warning("pwrite failure: %m");
459 static void cloudlog_partial_fetch_complete(BlueSkyStoreAsync *async,
460 BlueSkyCacheFile *cachefile);
/* Start an asynchronous ranged GET from cloud storage for part of a cached
 * log segment; cloudlog_partial_fetch_complete runs as the notifier when it
 * finishes.  Takes a cachefile reference for the duration of the request
 * (released by the completion callback). */
462 static void cloudlog_partial_fetch_start(BlueSkyCacheFile *cachefile,
463 size_t offset, size_t length)
465 g_atomic_int_inc(&cachefile->refcount);
466 g_print("Starting fetch of %s from cloud\n", cachefile->filename);
467 BlueSkyStoreAsync *async = bluesky_store_async_new(cachefile->fs->store);
468 async->op = STORE_OP_GET;
469 async->key = g_strdup(cachefile->filename);
470 async->start = offset;
472 bluesky_store_async_add_notifier(async,
473 (GFunc)cloudlog_partial_fetch_complete,
475 bluesky_store_async_submit(async);
476 bluesky_store_async_unref(async);
/* Completion callback for a (possibly partial) cloud fetch.  On success the
 * fetched bytes are decrypted into individual items, and each item that does
 * not overlap data already present in the cache file is written into it; on
 * failure the same range is re-fetched.  Finally, disk-usage accounting for
 * the cache file is refreshed, the fetch's cachefile reference is dropped,
 * and any threads waiting on cachefile->cond are woken. */
479 static void cloudlog_partial_fetch_complete(BlueSkyStoreAsync *async,
480 BlueSkyCacheFile *cachefile)
482 g_print("Partial fetch of %s from cloud complete, status = %d\n",
483 async->key, async->result);
485 g_mutex_lock(cachefile->lock);
486 if (async->result >= 0) {
487 /* Decrypt items fetched and write valid items out to the local log,
488 * but only if they do not overlap existing objects. This will protect
489 * against an attack by the cloud provider where one valid object is
490 * moved to another offset and used to overwrite data that we already
492 BlueSkyRangeset *items = bluesky_rangeset_new();
493 int fd = openat(cachefile->log->dirfd, cachefile->filename, O_WRONLY);
495 async->data = bluesky_string_dup(async->data);
496 bluesky_cloudlog_decrypt(async->data->data, async->data->len,
497 cachefile->fs->keys, items);
498 uint64_t item_offset = 0;
500 const BlueSkyRangesetItem *item;
501 item = bluesky_rangeset_lookup_next(items, item_offset);
504 if (bluesky_verbose) {
505 g_print(" item offset from range request: %d\n",
506 (int)(item->start + async->start));
/* Only write the item out if its range can be inserted without
 * overlapping an already-cached item. */
508 if (bluesky_rangeset_insert(cachefile->items,
509 async->start + item->start,
510 item->length, item->data))
512 robust_pwrite(fd, async->data->data + item->start,
513 item->length, async->start + item->start);
515 g_print(" item overlaps existing data!\n");
517 item_offset = item->start + 1;
519 /* TODO: Iterate over items and merge into cached file. */
522 g_warning("Unable to open and write to cache file %s: %m",
523 cachefile->filename);
526 g_print("Error fetching from cloud, retrying...\n");
527 cloudlog_partial_fetch_start(cachefile, async->start, async->len);
530 /* Update disk-space usage statistics, since the writes above may have
531 * consumed more space. */
532 g_atomic_int_add(&cachefile->log->disk_used, -cachefile->disk_used);
534 if (fstatat(cachefile->log->dirfd, cachefile->filename, &statbuf, 0) >= 0) {
535 /* Convert from 512-byte blocks to 1-kB units */
536 cachefile->disk_used = (statbuf.st_blocks + 1) / 2;
538 g_atomic_int_add(&cachefile->log->disk_used, cachefile->disk_used);
540 bluesky_cachefile_unref(cachefile);
541 g_cond_broadcast(cachefile->cond);
542 g_mutex_unlock(cachefile->lock);
/* Start an asynchronous full fetch of a cloud log segment into the local
 * cache: mark the cachefile as fetching, take a reference for the request,
 * and reuse cloudlog_partial_fetch_complete as the completion notifier. */
545 static void cloudlog_fetch_start(BlueSkyCacheFile *cachefile)
547 g_atomic_int_inc(&cachefile->refcount);
548 cachefile->fetching = TRUE;
549 g_print("Starting fetch of %s from cloud\n", cachefile->filename);
550 BlueSkyStoreAsync *async = bluesky_store_async_new(cachefile->fs->store);
551 async->op = STORE_OP_GET;
552 async->key = g_strdup(cachefile->filename);
553 bluesky_store_async_add_notifier(async,
554 (GFunc)cloudlog_partial_fetch_complete,
556 bluesky_store_async_submit(async);
557 bluesky_store_async_unref(async);
560 /* The arguments are mostly straightforward. log_dir is -1 for access from the
561 * journal, and non-negative for access to a cloud log segment. map_data
562 * should be TRUE for the case that we are mapping just the data of an item
563 * where we have already parsed the item headers; this suppresses the error
564 * when the access is not to the first bytes of the item. */
/* Map a cloud-log item's bytes out of either the local journal or a cached
 * cloud segment.  Prefers the journal copy when one still exists; for cloud
 * segments the required range may have to be fetched first (blocking on
 * map->cond until available).  The backing file is mmap()ed read-only on
 * first use and the returned BlueSkyRCStr references that mapping. */
565 BlueSkyRCStr *bluesky_log_map_object(BlueSkyCloudLog *item, gboolean map_data)
567 BlueSkyFS *fs = item->fs;
568 BlueSkyLog *log = fs->log;
569 BlueSkyCacheFile *map = NULL;
570 BlueSkyRCStr *str = NULL;
572 size_t file_offset = 0, file_size = 0;
573 gboolean range_request = TRUE;
575 if (page_size == 0) {
576 page_size = getpagesize();
579 bluesky_cloudlog_stats_update(item, -1);
581 /* First, check to see if the journal still contains a copy of the item and
583 if ((item->location_flags | item->pending_write) & CLOUDLOG_JOURNAL) {
584 map = bluesky_cachefile_lookup(fs, -1, item->log_seq, TRUE);
586 location = CLOUDLOG_JOURNAL;
587 file_offset = item->log_offset;
588 file_size = item->log_size;
/* Journal copy unavailable: fall back to the cloud segment recorded in
 * item->location. */
592 if (location == 0 && (item->location_flags & CLOUDLOG_CLOUD)) {
593 item->location_flags &= ~CLOUDLOG_JOURNAL;
594 map = bluesky_cachefile_lookup(fs,
595 item->location.directory,
596 item->location.sequence,
599 g_warning("Unable to remap cloud log segment!");
602 location = CLOUDLOG_CLOUD;
603 file_offset = item->location.offset;
604 file_size = item->location.size;
607 /* Log segments fetched from the cloud might only be partially-fetched.
608 * Check whether the object we are interested in is available. */
609 if (location == CLOUDLOG_CLOUD) {
611 const BlueSkyRangesetItem *rangeitem;
612 rangeitem = bluesky_rangeset_lookup(map->items, file_offset);
613 if (rangeitem != NULL && (rangeitem->start != file_offset
614 || rangeitem->length != file_size)) {
615 g_warning("log-%d: Item offset %zd seems to be invalid!",
616 (int)item->location.sequence, file_offset);
619 if (rangeitem == NULL) {
620 if (bluesky_verbose) {
621 g_print("Item at offset 0x%zx not available, need to fetch.\n",
/* Widen the fetch to cover any pending prefetch requests plus the
 * range we need, then issue one combined fetch. */
625 uint64_t start = 0, length = 0, end;
626 if (map->prefetches != NULL)
627 bluesky_rangeset_get_extents(map->prefetches,
629 start = MIN(start, file_offset);
630 end = MAX(start + length, file_offset + file_size);
631 length = end - start;
632 cloudlog_partial_fetch_start(map, start, length);
633 if (map->prefetches != NULL) {
634 bluesky_rangeset_free(map->prefetches);
635 map->prefetches = NULL;
/* Block until the fetch-completion callback broadcasts map->cond. */
638 g_cond_wait(map->cond, map->lock);
639 } else if (rangeitem->start == file_offset
640 && rangeitem->length == file_size) {
642 g_print("Item now available.\n");
/* Lazily mmap() the whole backing file read-only on first access. */
648 if (map->addr == NULL) {
649 int fd = openat(log->dirfd, map->filename, O_RDONLY);
652 fprintf(stderr, "Error opening logfile %s: %m\n", map->filename);
656 off_t length = lseek(fd, 0, SEEK_END);
657 map->addr = (const char *)mmap(NULL, length, PROT_READ, MAP_SHARED,
661 g_atomic_int_inc(&map->refcount);
/* Skip past the per-item header to expose just the item's data bytes. */
667 if (location == CLOUDLOG_JOURNAL)
668 file_offset += sizeof(struct log_header);
670 file_offset += sizeof(struct cloudlog_header);
672 file_size = item->data_size;
674 str = bluesky_string_new_from_mmap(map, file_offset, file_size);
675 map->atime = bluesky_get_current_time();
678 bluesky_cachefile_unref(map);
679 g_mutex_unlock(map->lock);
681 bluesky_cloudlog_stats_update(item, 1);
/* Release one mapping reference on a cached log segment.  When the map
 * count reaches zero the mapping is munmap()ed -- the count is re-checked
 * under the lock to guard against a racing re-map -- and the mapping's
 * cachefile reference is dropped. */
685 void bluesky_mmap_unref(BlueSkyCacheFile *mmap)
690 if (g_atomic_int_dec_and_test(&mmap->mapcount)) {
691 g_mutex_lock(mmap->lock);
692 if (g_atomic_int_get(&mmap->mapcount) == 0) {
694 g_print("Unmapped log segment %d...\n", mmap->log_seq);
695 munmap((void *)mmap->addr, mmap->len);
697 g_atomic_int_add(&mmap->refcount, -1);
699 g_mutex_unlock(mmap->lock);
703 /******************************* JOURNAL REPLAY *******************************
704 * The journal replay code is used to recover filesystem state after a
705 * filesystem restart. We first look for the most recent commit record in the
706 * journal, which indicates the point before which all data in the journal has
707 * also been committed to the cloud. Then, we read in all data in the log past
/* Return a sorted list of the "journal-*" filenames in dirname, each a
 * newly-allocated string owned by the caller.  Emits a warning (and, per
 * the visible code, produces no entries) if the directory cannot be
 * opened. */
710 static GList *directory_contents(const char *dirname)
712 GList *contents = NULL;
713 GDir *dir = g_dir_open(dirname, 0, NULL);
715 g_warning("Unable to open journal directory: %s", dirname);
720 while ((file = g_dir_read_name(dir)) != NULL) {
721 if (strncmp(file, "journal-", 8) == 0)
722 contents = g_list_prepend(contents, g_strdup(file));
726 contents = g_list_sort(contents, (GCompareFunc)strcmp);
/* Sanity-check the journal record starting at offset within buf[0..len):
 * verify the header and footer magic numbers, that the header's stored
 * offset matches, that the whole record fits inside the buffer, and that
 * the CRC32C over the record validates.  A failure typically indicates a
 * torn write at the tail of the journal; the record is then rejected. */
731 static gboolean validate_journal_item(const char *buf, size_t len, off_t offset)
733 const struct log_header *header;
734 const struct log_footer *footer;
736 if (offset + sizeof(struct log_header) + sizeof(struct log_footer) > len)
739 header = (const struct log_header *)(buf + offset);
740 if (GUINT32_FROM_LE(header->magic) != HEADER_MAGIC)
742 if (GUINT32_FROM_LE(header->offset) != offset)
744 size_t size = GUINT32_FROM_LE(header->size1)
745 + GUINT32_FROM_LE(header->size2)
746 + GUINT32_FROM_LE(header->size3);
748 off_t footer_offset = offset + sizeof(struct log_header) + size;
749 if (footer_offset + sizeof(struct log_footer) > len)
751 footer = (const struct log_footer *)(buf + footer_offset);
753 if (GUINT32_FROM_LE(footer->magic) != FOOTER_MAGIC)
/* CRC over the entire record (including the stored CRC) must equal the
 * CRC32C residue constant. */
756 uint32_t crc = crc32c(BLUESKY_CRC32C_SEED, buf + offset,
757 sizeof(struct log_header) + sizeof(struct log_footer)
759 if (crc != BLUESKY_CRC32C_VALIDATOR) {
760 g_warning("Journal entry failed to validate: CRC %08x != %08x",
761 crc, BLUESKY_CRC32C_VALIDATOR);
768 /* Scan through a journal segment to extract correctly-written items (those
769 * that pass sanity checks and have a valid checksum). */
/* First replay pass: walk the segment front-to-back and, for each
 * JOURNAL_CHECKPOINT record found, record its stored (sequence, offset)
 * commit point into *seq / *start_offset.  The last checkpoint in the
 * segment therefore wins. */
770 static void bluesky_replay_scan_journal(const char *buf, size_t len,
771 uint32_t *seq, uint32_t *start_offset)
773 const struct log_header *header;
776 while (validate_journal_item(buf, len, offset)) {
777 header = (const struct log_header *)(buf + offset);
778 size_t size = GUINT32_FROM_LE(header->size1)
779 + GUINT32_FROM_LE(header->size2)
780 + GUINT32_FROM_LE(header->size3);
/* Checkpoint payload: two little-endian uint32s -- the journal
 * sequence number and offset of the commit point. */
782 if (header->type - '0' == LOGTYPE_JOURNAL_CHECKPOINT) {
783 const uint32_t *data = (const uint32_t *)((const char *)header + sizeof(struct log_header));
784 *seq = GUINT32_FROM_LE(data[0]);
785 *start_offset = GUINT32_FROM_LE(data[1]);
788 offset += sizeof(struct log_header) + size + sizeof(struct log_footer);
/* Rebuild an in-memory cloud-log item from its serialized journal bytes:
 * mark it journal-resident (dropping any in-memory data) and reconstruct
 * its outgoing-link array from the BlueSkyCloudID values in section 2 of
 * the serialized data.  An all-zero ID becomes a NULL link; known IDs are
 * resolved through fs->locations with a reference taken.  The item's old
 * link array is released and replaced. */
792 static void reload_item(BlueSkyCloudLog *log_item,
794 size_t len1, size_t len2, size_t len3)
796 BlueSkyFS *fs = log_item->fs;
797 /*const char *data1 = data;*/
798 const BlueSkyCloudID *data2
799 = (const BlueSkyCloudID *)(data + len1);
800 /*const BlueSkyCloudPointer *data3
801 = (const BlueSkyCloudPointer *)(data + len1 + len2);*/
803 bluesky_cloudlog_stats_update(log_item, -1);
804 bluesky_string_unref(log_item->data);
805 log_item->data = NULL;
806 log_item->location_flags = CLOUDLOG_JOURNAL;
807 bluesky_cloudlog_stats_update(log_item, 1);
/* id0 is the all-zero sentinel ID used to represent a NULL link. */
810 memset(&id0, 0, sizeof(id0));
812 int link_count = len2 / sizeof(BlueSkyCloudID);
813 GArray *new_links = g_array_new(FALSE, TRUE, sizeof(BlueSkyCloudLog *));
814 for (int i = 0; i < link_count; i++) {
815 BlueSkyCloudID id = data2[i];
816 BlueSkyCloudLog *ref = NULL;
817 if (memcmp(&id, &id0, sizeof(BlueSkyCloudID)) != 0) {
818 g_mutex_lock(fs->lock);
819 ref = g_hash_table_lookup(fs->locations, &id);
821 bluesky_cloudlog_ref(ref);
823 g_mutex_unlock(fs->lock);
825 g_array_append_val(new_links, ref);
/* Drop the references held by the old link array before swapping in
 * the newly built one. */
828 for (int i = 0; i < log_item->links->len; i++) {
829 BlueSkyCloudLog *c = g_array_index(log_item->links,
830 BlueSkyCloudLog *, i);
831 bluesky_cloudlog_unref(c);
833 g_array_unref(log_item->links);
834 log_item->links = new_links;
/* Second replay pass: walk every valid record from start_offset onward,
 * recreate the corresponding BlueSkyCloudLog item (recording its journal
 * location and reattaching its data), and -- for inode records --
 * deserialize the inode into the filesystem, replacing any older state and
 * updating next_inum plus the accessed/dirty/unlogged lists.  Each loaded
 * item is prepended to *objects with a reference held so the caller can
 * later release everything not linked into persistent structures. */
837 static void bluesky_replay_scan_journal2(BlueSkyFS *fs, GList **objects,
838 int log_seq, int start_offset,
839 const char *buf, size_t len)
841 const struct log_header *header;
842 off_t offset = start_offset;
844 while (validate_journal_item(buf, len, offset)) {
845 header = (const struct log_header *)(buf + offset);
846 g_print("In replay found valid item at offset %zd\n", offset);
847 size_t size = GUINT32_FROM_LE(header->size1)
848 + GUINT32_FROM_LE(header->size2)
849 + GUINT32_FROM_LE(header->size3);
851 BlueSkyCloudLog *log_item = bluesky_cloudlog_get(fs, header->id);
852 g_mutex_lock(log_item->lock);
853 *objects = g_list_prepend(*objects, log_item);
855 log_item->inum = GUINT64_FROM_LE(header->inum);
856 reload_item(log_item, buf + offset + sizeof(struct log_header),
857 GUINT32_FROM_LE(header->size1),
858 GUINT32_FROM_LE(header->size2),
859 GUINT32_FROM_LE(header->size3));
860 log_item->log_seq = log_seq;
861 log_item->log_offset = offset + sizeof(struct log_header);
/* NOTE(review): size1 is used raw here while every nearby read goes
 * through GUINT32_FROM_LE -- confirm this is correct on big-endian
 * hosts. */
862 log_item->log_size = header->size1;
864 bluesky_string_unref(log_item->data);
865 log_item->data = bluesky_string_new(g_memdup(buf + offset + sizeof(struct log_header), GUINT32_FROM_LE(header->size1)), GUINT32_FROM_LE(header->size1));
867 /* For any inodes which were read from the journal, deserialize the
868 * inode information, overwriting any old inode data. */
869 if (header->type - '0' == LOGTYPE_INODE) {
870 uint64_t inum = GUINT64_FROM_LE(header->inum);
872 g_mutex_lock(fs->lock);
873 inode = (BlueSkyInode *)g_hash_table_lookup(fs->inodes, &inum);
875 inode = bluesky_new_inode(inum, fs, BLUESKY_PENDING);
876 inode->change_count = 0;
877 bluesky_insert_inode(fs, inode);
879 g_mutex_lock(inode->lock);
880 bluesky_inode_free_resources(inode);
881 if (!bluesky_deserialize_inode(inode, log_item))
882 g_print("Error deserializing inode %"PRIu64"\n", inum);
883 fs->next_inum = MAX(fs->next_inum, inum + 1);
/* Move the inode to the front of the accessed and dirty lists; it is
 * journal-resident, so it no longer belongs on the unlogged list. */
884 bluesky_list_unlink(&fs->accessed_list, inode->accessed_list);
885 inode->accessed_list = bluesky_list_prepend(&fs->accessed_list, inode);
886 bluesky_list_unlink(&fs->dirty_list, inode->dirty_list);
887 inode->dirty_list = bluesky_list_prepend(&fs->dirty_list, inode);
888 bluesky_list_unlink(&fs->unlogged_list, inode->unlogged_list);
889 inode->unlogged_list = NULL;
890 inode->change_cloud = inode->change_commit;
891 bluesky_cloudlog_ref(log_item);
892 bluesky_cloudlog_unref(inode->committed_item);
893 inode->committed_item = log_item;
894 g_mutex_unlock(inode->lock);
895 g_mutex_unlock(fs->lock);
/* The mapped journal file backs the data from here on; drop the
 * temporary in-memory copy. */
897 bluesky_string_unref(log_item->data);
898 log_item->data = NULL;
899 g_mutex_unlock(log_item->lock);
901 offset += sizeof(struct log_header) + size + sizeof(struct log_footer);
905 void bluesky_replay(BlueSkyFS *fs)
907 BlueSkyLog *log = fs->log;
908 GList *logfiles = directory_contents(log->log_directory);
910 /* Scan through log files in reverse order to find the most recent commit
912 logfiles = g_list_reverse(logfiles);
913 uint32_t seq_num = 0, start_offset = 0;
914 while (logfiles != NULL) {
915 char *filename = g_strdup_printf("%s/%s", log->log_directory,
916 (char *)logfiles->data);
917 g_print("Scanning file %s\n", filename);
918 GMappedFile *map = g_mapped_file_new(filename, FALSE, NULL);
920 g_warning("Mapping logfile %s failed!\n", filename);
922 bluesky_replay_scan_journal(g_mapped_file_get_contents(map),
923 g_mapped_file_get_length(map),
924 &seq_num, &start_offset);
925 g_mapped_file_unref(map);
929 g_free(logfiles->data);
930 logfiles = g_list_delete_link(logfiles, logfiles);
931 if (seq_num != 0 || start_offset != 0)
934 g_list_foreach(logfiles, (GFunc)g_free, NULL);
935 g_list_free(logfiles);
937 /* Now, scan forward starting from the given point in the log to
938 * reconstruct all filesystem state. As we reload objects we hold a
939 * reference to each loaded object. At the end we free all these
940 * references, so that any objects which were not linked into persistent
941 * filesystem data structures are freed. */
942 GList *objects = NULL;
944 char *filename = g_strdup_printf("%s/journal-%08d",
945 log->log_directory, seq_num);
946 g_print("Replaying file %s from offset %d\n", filename, start_offset);
947 GMappedFile *map = g_mapped_file_new(filename, FALSE, NULL);
950 g_warning("Mapping logfile failed, assuming end of journal\n");
954 bluesky_replay_scan_journal2(fs, &objects, seq_num, start_offset,
955 g_mapped_file_get_contents(map),
956 g_mapped_file_get_length(map));
957 g_mapped_file_unref(map);
962 while (objects != NULL) {
963 bluesky_cloudlog_unref((BlueSkyCloudLog *)objects->data);
964 objects = g_list_delete_link(objects, objects);