1 /* Blue Sky: File Systems in the Cloud
3 * Copyright (C) 2010 The Regents of the University of California
4 * Written by Michael Vrable <mvrable@cs.ucsd.edu>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of the University nor the names of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #define _ATFILE_SOURCE
40 #include <sys/types.h>
46 #include "bluesky-private.h"
48 /* The logging layer for BlueSky. This is used to write filesystem changes
49 * durably to disk so that they can be recovered in the event of a system
52 /* The logging layer takes care of writing out a sequence of log records to
53 * disk. On disk, each record consists of a header, a data payload, and a
54 * footer. The footer contains a checksum of the record, meant to help with
55 * identifying corrupt log records (we would assume because the log record was
56 * only incompletely written out before a crash, which should only happen for
57 * log records that were not considered committed). */
59 #define HEADER_MAGIC 0x676f4c0a
60 #define FOOTER_MAGIC 0x2e435243
/* Read from fd into buf, retrying calls that fail with EINTR, and return
 * the total number of bytes read.  NOTE(review): the embedded line numbers
 * jump (62, 64, 66, 67), so the loop structure and return statement are
 * elided from this listing. */
62 static size_t readbuf(int fd, char *buf, size_t len)
64     size_t total_bytes = 0;
        /* A negative result with errno == EINTR means the read was
         * interrupted and should simply be retried. */
66         ssize_t bytes = read(fd, buf, len);
67         if (bytes < 0 && errno == EINTR)
/* Write len bytes from buf to fd, retrying writes interrupted by EINTR;
 * any other write failure trips the g_assert below (the journal writer
 * treats a failed write as fatal).  NOTE(review): lines are elided in
 * this listing, so the surrounding retry loop is not fully visible. */
78 static void writebuf(int fd, const char *buf, size_t len)
82         written = write(fd, buf, len);
83         if (written < 0 && errno == EINTR)
85         g_assert(written >= 0);
/* Mark every item on log->committed as durably stored in the journal:
 * clear CLOUDLOG_UNCOMMITTED, set CLOUDLOG_JOURNAL, wake threads blocked
 * on item->cond, and drop the reference taken when the item was queued.
 * Also refreshes the journal's disk-usage accounting from fstat().
 * NOTE(review): presumably called after the journal data has been synced
 * to disk -- the caller is not visible in this listing; confirm. */
91 static void log_commit(BlueSkyLog *log)
100     /* Update disk-space usage statistics for the journal file. */
101     g_atomic_int_add(&log->disk_used, -log->current_log->disk_used);
103     if (fstat(log->fd, &statbuf) >= 0) {
104         /* Convert from 512-byte blocks to 1-kB units */
105         log->current_log->disk_used = (statbuf.st_blocks + 1) / 2;
107     g_atomic_int_add(&log->disk_used, log->current_log->disk_used);
/* Flip each item's flags under its own lock so concurrent readers see a
 * consistent journaled state, then signal any waiters. */
109     while (log->committed != NULL) {
110         BlueSkyCloudLog *item = (BlueSkyCloudLog *)log->committed->data;
111         g_mutex_lock(item->lock);
112         bluesky_cloudlog_stats_update(item, -1);
113         item->pending_write &= ~CLOUDLOG_JOURNAL;
115             = (item->location_flags & ~CLOUDLOG_UNCOMMITTED) | CLOUDLOG_JOURNAL;
116         bluesky_cloudlog_stats_update(item, 1);
117         g_cond_signal(item->cond);
118         g_mutex_unlock(item->lock);
119         log->committed = g_slist_delete_link(log->committed, log->committed);
120         bluesky_cloudlog_unref(item);
124     if (bluesky_verbose && batchsize > 1)
125         g_print("Log batch size: %d\n", batchsize);
/* Open the next journal segment for writing: release the previous
 * segment's cachefile reference, create "journal-%08d" (sequence number
 * log->seq_num) with O_EXCL so an existing file is never clobbered,
 * register the segment in the cachefile table, and preallocate
 * LOG_SEGMENT_SIZE bytes with ftruncate.  Returns a gboolean success
 * flag (the return statements are elided from this listing). */
128 static gboolean log_open(BlueSkyLog *log)
139     if (log->current_log != NULL) {
140         bluesky_cachefile_unref(log->current_log);
141         log->current_log = NULL;
/* Loop until a segment file is created; on EEXIST presumably bump
 * seq_num and retry -- TODO confirm, the retry lines are elided. */
144     while (log->fd < 0) {
145         g_snprintf(logname, sizeof(logname), "journal-%08d", log->seq_num);
146         log->fd = openat(log->dirfd, logname, O_CREAT|O_WRONLY|O_EXCL, 0600);
147         if (log->fd < 0 && errno == EEXIST) {
148             fprintf(stderr, "Log file %s already exists...\n", logname);
151         } else if (log->fd < 0) {
152             fprintf(stderr, "Error opening logfile %s: %m\n", logname);
/* Register the new segment (-1 clouddir = local journal); the lookup
 * returns the cachefile locked, so unlock it before use. */
157     log->current_log = bluesky_cachefile_lookup(log->fs, -1, log->seq_num,
159     g_assert(log->current_log != NULL);
160     g_mutex_unlock(log->current_log->lock);
162     if (ftruncate(log->fd, LOG_SEGMENT_SIZE) < 0) {
163         fprintf(stderr, "Unable to truncate logfile %s: %m\n", logname);
170 /* All log writes (at least for a single log) are made by one thread, so we
171 * don't need to worry about concurrent access to the log file. Log items to
172 * write are pulled off a queue (and so may be posted by any thread).
173 * fdatasync() is used to ensure the log items are stable on disk.
175 * The log is broken up into separate files, roughly of size LOG_SEGMENT_SIZE
176 * each. If a log segment is not currently open (log->fd is negative), a new
177 * one is created. Log segment filenames are assigned sequentially.
179 * Log replay ought to be implemented later, and ought to set the initial
180 * sequence number appropriately.
/* Dedicated journal-writer thread.  Pops BlueSkyCloudLog items from
 * log->queue, serializes each into (header, data1/2/3, footer) with a
 * CRC32C over the whole record, appends it to the current journal
 * segment, records the item's journal location, and queues the item on
 * log->committed for log_commit().  NOTE(review): many lines are elided
 * in this listing (loop brackets, fdatasync/commit calls), so control
 * flow between the visible statements is partially inferred. */
182 static gpointer log_thread(gpointer d)
184     BlueSkyLog *log = (BlueSkyLog *)d;
188         if (!log_open(log)) {
193         BlueSkyCloudLog *item
194             = (BlueSkyCloudLog *)g_async_queue_pop(log->queue);
195         g_mutex_lock(item->lock);
196         g_assert(item->data != NULL);
198         /* The item may have already been written to the journal... */
199         if ((item->location_flags | item->pending_write) & CLOUDLOG_JOURNAL) {
200             g_mutex_unlock(item->lock);
201             bluesky_cloudlog_unref(item);
202             g_atomic_int_add(&item->data_lock_count, -1);
206         bluesky_cloudlog_stats_update(item, -1);
207         item->pending_write |= CLOUDLOG_JOURNAL;
208         bluesky_cloudlog_stats_update(item, 1);
/* Serialize the item into three byte strings (payload, links, pointers
 * per bluesky_serialize_cloudlog's contract -- defined elsewhere). */
210         GString *data1 = g_string_new("");
211         GString *data2 = g_string_new("");
212         GString *data3 = g_string_new("");
213         bluesky_serialize_cloudlog(item, data1, data2, data3);
215         struct log_header header;
216         struct log_footer footer;
217         size_t size = sizeof(header) + sizeof(footer);
218         size += data1->len + data2->len + data3->len;
221         offset = lseek(log->fd, 0, SEEK_CUR);
223         /* Check whether the item would overflow the allocated journal size.
224          * If so, start a new log segment.  We only allow oversized log
225          * segments if they contain a single log entry. */
226         if (offset + size >= LOG_SEGMENT_SIZE && offset > 0) {
/* All multi-byte header fields are stored little-endian on disk. */
231         header.magic = GUINT32_TO_LE(HEADER_MAGIC);
232         header.offset = GUINT32_TO_LE(offset);
233         header.size1 = GUINT32_TO_LE(data1->len);
234         header.size2 = GUINT32_TO_LE(data2->len);
235         header.size3 = GUINT32_TO_LE(data3->len);
236         header.type = item->type + '0';
237         header.id = item->id;
238         header.inum = GUINT64_TO_LE(item->inum);
239         footer.magic = GUINT32_TO_LE(FOOTER_MAGIC);
/* The CRC covers header + all three data sections + the footer up to
 * (but not including) the crc field itself. */
241         uint32_t crc = BLUESKY_CRC32C_SEED;
243         writebuf(log->fd, (const char *)&header, sizeof(header));
244         crc = crc32c(crc, (const char *)&header, sizeof(header));
246         writebuf(log->fd, data1->str, data1->len);
247         crc = crc32c(crc, data1->str, data1->len);
248         writebuf(log->fd, data2->str, data2->len);
249         crc = crc32c(crc, data2->str, data2->len);
250         writebuf(log->fd, data3->str, data3->len);
251         crc = crc32c(crc, data3->str, data3->len);
253         crc = crc32c(crc, (const char *)&footer,
254                      sizeof(footer) - sizeof(uint32_t));
255         footer.crc = crc32c_finalize(crc);
256         writebuf(log->fd, (const char *)&footer, sizeof(footer));
/* Remember where this item landed so it can be re-read later. */
258         item->log_seq = log->seq_num;
259         item->log_offset = offset;
260         item->log_size = size;
261         item->data_size = item->data->len;
265         g_string_free(data1, TRUE);
266         g_string_free(data2, TRUE);
267         g_string_free(data3, TRUE);
269         /* Replace the log item's string data with a memory-mapped copy of the
270          * data, now that it has been written to the log file.  (Even if it
271          * isn't yet on disk, it should at least be in the page cache and so
272          * available to memory map.) */
273         bluesky_string_unref(item->data);
275         bluesky_cloudlog_fetch(item);
277         log->committed = g_slist_prepend(log->committed, item);
278         g_atomic_int_add(&item->data_lock_count, -1);
279         g_mutex_unlock(item->lock);
281         /* Force a sync to disk if there are no other log items currently
         * waiting to be written (original comment truncated in listing). */
283         if (g_async_queue_length(log->queue) <= 0)
/* Create the journaling state for a filesystem: allocate the BlueSkyLog,
 * set up the submission queue and mmap cache, scan log_directory for the
 * highest existing "journal-"-prefixed file to choose the next sequence
 * number, open a directory fd for openat()-style access, and start the
 * background log_thread.  Returns the new BlueSkyLog. */
290 BlueSkyLog *bluesky_log_new(const char *log_directory)
292     BlueSkyLog *log = g_new0(BlueSkyLog, 1);
294     log->log_directory = g_strdup(log_directory);
297     log->queue = g_async_queue_new();
298     log->mmap_lock = g_mutex_new();
299     log->mmap_cache = g_hash_table_new(g_str_hash, g_str_equal);
301     /* Determine the highest-numbered log file, so that we can start writing
302      * out new journal entries at the next sequence number. */
303     GDir *dir = g_dir_open(log_directory, 0, NULL);
306     while ((file = g_dir_read_name(dir)) != NULL) {
307         if (strncmp(file, "journal-", 8) == 0) {
            /* file[8] is the start of the numeric suffix of "journal-%08d". */
308             log->seq_num = MAX(log->seq_num, atoi(&file[8]) + 1);
312     g_print("Starting journal at sequence number %d\n", log->seq_num);
315     log->dirfd = open(log->log_directory, O_DIRECTORY);
316     if (log->dirfd < 0) {
317         fprintf(stderr, "Unable to open logging directory: %m\n");
/* FALSE: the writer thread is not joinable; it runs for the process
 * lifetime. */
321     g_thread_create(log_thread, log, FALSE, NULL);
/* Queue a cloud-log item for journaling, unless it is already recorded in
 * the journal.  Takes a reference and a data lock on the item (both
 * released by log_thread), and marks it CLOUDLOG_UNCOMMITTED until
 * log_commit() confirms it is durable. */
326 void bluesky_log_item_submit(BlueSkyCloudLog *item, BlueSkyLog *log)
328     if (!(item->location_flags & CLOUDLOG_JOURNAL)) {
329         bluesky_cloudlog_ref(item);
330         item->location_flags |= CLOUDLOG_UNCOMMITTED;
        /* Pin item->data in memory until the journal write completes. */
331         g_atomic_int_add(&item->data_lock_count, 1);
332         g_async_queue_push(log->queue, item);
/* Block until every item in log_items has been committed to the journal
 * (CLOUDLOG_UNCOMMITTED cleared by log_commit, which signals item->cond),
 * dropping one reference per item and freeing the list as it goes.
 * Consumes the caller's list and item references. */
336 void bluesky_log_finish_all(GList *log_items)
338     while (log_items != NULL) {
339         BlueSkyCloudLog *item = (BlueSkyCloudLog *)log_items->data;
341         g_mutex_lock(item->lock);
342         while ((item->location_flags & CLOUDLOG_UNCOMMITTED))
343             g_cond_wait(item->cond, item->lock);
344         g_mutex_unlock(item->lock);
345         bluesky_cloudlog_unref(item);
347         log_items = g_list_delete_link(log_items, log_items);
351 /* Return a committed cloud log record that can be used as a watermark for how
352  * much of the journal has been written. */
/* Creates an empty LOGTYPE_JOURNAL_MARKER record, submits it through the
 * normal sync path, then blocks until the journal write is no longer
 * pending.  The returned marker carries the log_seq/log_offset of the
 * watermark (used later by bluesky_log_write_commit_point).  Caller owns
 * the returned reference. */
353 BlueSkyCloudLog *bluesky_log_get_commit_point(BlueSkyFS *fs)
355     BlueSkyCloudLog *marker = bluesky_cloudlog_new(fs, NULL);
356     marker->type = LOGTYPE_JOURNAL_MARKER;
357     marker->data = bluesky_string_new(g_strdup(""), 0);
358     bluesky_cloudlog_stats_update(marker, 1);
359     bluesky_cloudlog_sync(marker);
361     g_mutex_lock(marker->lock);
362     while ((marker->pending_write & CLOUDLOG_JOURNAL))
363         g_cond_wait(marker->cond, marker->lock);
364     g_mutex_unlock(marker->lock);
/* Write a LOGTYPE_JOURNAL_CHECKPOINT record whose payload is the
 * little-endian (sequence, offset) pair of the given marker record.
 * During replay this checkpoint tells us that everything before the
 * marker position was already pushed to the cloud.  Blocks until the
 * checkpoint itself is committed, then drops the references to both the
 * marker (consumed from the caller) and the checkpoint record. */
369 void bluesky_log_write_commit_point(BlueSkyFS *fs, BlueSkyCloudLog *marker)
371     BlueSkyCloudLog *commit = bluesky_cloudlog_new(fs, NULL);
372     commit->type = LOGTYPE_JOURNAL_CHECKPOINT;
374     uint32_t seq, offset;
375     seq = GUINT32_TO_LE(marker->log_seq);
376     offset = GUINT32_TO_LE(marker->log_offset);
377     GString *loc = g_string_new("");
378     g_string_append_len(loc, (const gchar *)&seq, sizeof(seq));
379     g_string_append_len(loc, (const gchar *)&offset, sizeof(offset));
380     commit->data = bluesky_string_new_from_gstring(loc);
381     bluesky_cloudlog_stats_update(commit, 1);
382     bluesky_cloudlog_sync(commit);
384     g_mutex_lock(commit->lock);
385     while ((commit->location_flags & CLOUDLOG_UNCOMMITTED))
386         g_cond_wait(commit->cond, commit->lock);
387     g_mutex_unlock(commit->lock);
389     bluesky_cloudlog_unref(marker);
390     bluesky_cloudlog_unref(commit);
393 /* Memory-map the given log object into memory (read-only) and return a pointer
/* Cached result of getpagesize(); initialized lazily by
 * bluesky_cachefile_lookup() and bluesky_log_map_object(). */
395 static int page_size = 0;
/* Drop one reference on a cachefile.  Only the counter decrement is
 * visible here; any reclamation logic is elided from this listing. */
397 void bluesky_cachefile_unref(BlueSkyCacheFile *cachefile)
399     g_atomic_int_add(&cachefile->refcount, -1);
402 static void cloudlog_fetch_start(BlueSkyCacheFile *cachefile);
404 /* Find the BlueSkyCacheFile object for the given journal or cloud log segment.
405  * Returns the object in the locked state and with a reference taken. */
/* clouddir < 0 selects a local journal segment ("journal-%08d"); otherwise
 * a cloud segment ("log-%08d-%08d").  May return NULL for a stale journal
 * reference (see below).  NOTE(review): lines are elided in this listing;
 * the if/else structure around the name formatting is partially inferred. */
406 BlueSkyCacheFile *bluesky_cachefile_lookup(BlueSkyFS *fs,
407                                            int clouddir, int log_seq,
408                                            gboolean start_fetch)
410     if (page_size == 0) {
411         page_size = getpagesize();
414     BlueSkyLog *log = fs->log;
420         // A request for a local log file
422         sprintf(logname, "journal-%08d", log_seq);
423         type = CLOUDLOG_JOURNAL;
425         sprintf(logname, "log-%08d-%08d", clouddir, log_seq);
426         type = CLOUDLOG_CLOUD;
429     BlueSkyCacheFile *map;
/* mmap_lock protects the name -> cachefile hash table. */
430     g_mutex_lock(log->mmap_lock);
431     map = g_hash_table_lookup(log->mmap_cache, logname);
434         && type == CLOUDLOG_JOURNAL
435         && fstatat(log->dirfd, logname, &statbuf, 0) < 0) {
436         /* A stale reference to a journal file which doesn't exist any longer
437          * because it was reclaimed.  Return NULL. */
438     } else if (map == NULL) {
440         g_print("Adding cache file %s\n", logname);
/* Not cached yet: create and register a new cachefile entry, returned
 * locked as the contract above requires. */
442         map = g_new0(BlueSkyCacheFile, 1);
445         map->lock = g_mutex_new();
447         g_mutex_lock(map->lock);
448         map->cond = g_cond_new();
449         map->filename = g_strdup(logname);
450         map->log_dir = clouddir;
451         map->log_seq = log_seq;
453         g_atomic_int_set(&map->mapcount, 0);
454         g_atomic_int_set(&map->refcount, 0);
455         map->items = bluesky_rangeset_new();
457         g_hash_table_insert(log->mmap_cache, map->filename, map);
/* Ensure a backing file exists locally so fetched ranges can be
 * pwrite()n into it; size is a placeholder per the FIXME. */
459         int fd = openat(log->dirfd, logname, O_WRONLY | O_CREAT, 0600);
461             ftruncate(fd, 5 << 20);     // FIXME
465         g_mutex_lock(map->lock);
469     /* If the log file is stored in the cloud and has not been fully fetched,
470      * we may need to initiate a fetch now. */
471     if (clouddir >= 0 && start_fetch && !map->complete && !map->fetching)
472         cloudlog_fetch_start(map);
474     g_mutex_unlock(log->mmap_lock);
476         g_atomic_int_inc(&map->refcount);
/* pwrite() wrapper that warns on failure; presumably retries partial or
 * interrupted writes in lines elided from this listing -- confirm against
 * the full source. */
480 static void robust_pwrite(int fd, const char *buf, ssize_t count, off_t offset)
483         ssize_t written = pwrite(fd, buf, count, offset);
487             g_warning("pwrite failure: %m");
496 static void cloudlog_partial_fetch_complete(BlueSkyStoreAsync *async,
497 BlueSkyCacheFile *cachefile);
/* Kick off an asynchronous ranged GET of [offset, offset+length) from the
 * cloud store for this cachefile.  Takes a cachefile reference which is
 * released by cloudlog_partial_fetch_complete (registered below as the
 * completion notifier). */
499 static void cloudlog_partial_fetch_start(BlueSkyCacheFile *cachefile,
500                                          size_t offset, size_t length)
502     g_atomic_int_inc(&cachefile->refcount);
504         g_print("Starting partial fetch of %s from cloud (%zd + %zd)\n",
505                 cachefile->filename, offset, length);
506     BlueSkyStoreAsync *async = bluesky_store_async_new(cachefile->fs->store);
507     async->op = STORE_OP_GET;
508     async->key = g_strdup(cachefile->filename);
509     async->start = offset;
511     async->profile = bluesky_profile_get();
512     bluesky_store_async_add_notifier(async,
513                                      (GFunc)cloudlog_partial_fetch_complete,
515     bluesky_store_async_submit(async);
516     bluesky_store_async_unref(async);
/* Completion notifier for (partial or full) cloud fetches.  On success,
 * decrypts the fetched bytes, validates each contained item against the
 * cachefile's known ranges, and writes non-overlapping items into the
 * local cache file; on failure, re-issues the fetch.  Finally refreshes
 * disk-usage accounting, drops the reference taken at fetch start, and
 * broadcasts cachefile->cond to wake waiters (e.g. bluesky_log_map_object).
 * NOTE(review): elided lines hide some branch structure here. */
519 static void cloudlog_partial_fetch_complete(BlueSkyStoreAsync *async,
520                                             BlueSkyCacheFile *cachefile)
522     if (bluesky_verbose || async->result != 0)
523         g_print("Fetch of %s from cloud complete, status = %d\n",
524                 async->key, async->result);
526     g_mutex_lock(cachefile->lock);
527     if (async->result >= 0) {
528         if (async->len == 0) {
            /* A zero-length range reply means the whole object was
             * returned (no partial range was requested). */
530                 g_print("Complete object was fetched.\n");
531             cachefile->complete = TRUE;
534         /* Decrypt items fetched and write valid items out to the local log,
535          * but only if they do not overlap existing objects.  This will protect
536          * against an attack by the cloud provider where one valid object is
537          * moved to another offset and used to overwrite data that we already
539         BlueSkyRangeset *items = bluesky_rangeset_new();
540         int fd = openat(cachefile->log->dirfd, cachefile->filename, O_WRONLY);
542             gboolean allow_unauth;
543             async->data = bluesky_string_dup(async->data);
            /* Cleaner-directory segments may contain unauthenticated
             * (cleaner-rewritten) data. */
544             allow_unauth = cachefile->log_dir == BLUESKY_CLOUD_DIR_CLEANER;
545             bluesky_cloudlog_decrypt(async->data->data, async->data->len,
546                                      cachefile->fs->keys, items, allow_unauth);
547             uint64_t item_offset = 0;
549                 const BlueSkyRangesetItem *item;
550                 item = bluesky_rangeset_lookup_next(items, item_offset);
553                 if (bluesky_verbose) {
554                     g_print("  item offset from range request: %d\n",
555                             (int)(item->start + async->start));
                /* rangeset_insert fails on overlap, which is how an
                 * overwrite attempt is detected and rejected. */
557                 if (bluesky_rangeset_insert(cachefile->items,
558                                             async->start + item->start,
559                                             item->length, item->data))
561                     robust_pwrite(fd, async->data->data + item->start,
562                                   item->length, async->start + item->start);
564                     g_print("    item overlaps existing data!\n");
566                 item_offset = item->start + 1;
568             /* TODO: Iterate over items and merge into cached file. */
571             g_warning("Unable to open and write to cache file %s: %m",
572                       cachefile->filename);
575         bluesky_rangeset_free(items);
577         g_print("Error fetching %s from cloud, retrying...\n", async->key);
578         cloudlog_partial_fetch_start(cachefile, async->start, async->len);
581     /* Update disk-space usage statistics, since the writes above may have
582      * consumed more space. */
583     g_atomic_int_add(&cachefile->log->disk_used, -cachefile->disk_used);
585     if (fstatat(cachefile->log->dirfd, cachefile->filename, &statbuf, 0) >= 0) {
586         /* Convert from 512-byte blocks to 1-kB units */
587         cachefile->disk_used = (statbuf.st_blocks + 1) / 2;
589     g_atomic_int_add(&cachefile->log->disk_used, cachefile->disk_used);
591     bluesky_cachefile_unref(cachefile);
592     g_cond_broadcast(cachefile->cond);
593     g_mutex_unlock(cachefile->lock);
/* Start an asynchronous fetch of an entire log segment from the cloud.
 * Sets cachefile->fetching so bluesky_cachefile_lookup won't start a
 * duplicate fetch; completion is handled by cloudlog_partial_fetch_complete
 * (shared with the ranged-fetch path).  Takes a cachefile reference that
 * the completion callback releases. */
596 static void cloudlog_fetch_start(BlueSkyCacheFile *cachefile)
598     g_atomic_int_inc(&cachefile->refcount);
599     cachefile->fetching = TRUE;
601         g_print("Starting fetch of %s from cloud\n", cachefile->filename);
602     BlueSkyStoreAsync *async = bluesky_store_async_new(cachefile->fs->store);
603     async->op = STORE_OP_GET;
604     async->key = g_strdup(cachefile->filename);
605     async->profile = bluesky_profile_get();
606     bluesky_store_async_add_notifier(async,
607                                      (GFunc)cloudlog_partial_fetch_complete,
609     bluesky_store_async_submit(async);
610     bluesky_store_async_unref(async);
613 /* Map and return a read-only version of a byte range from a cached file.  The
614  * CacheFile object must be locked. */
/* Three paths: (1) range already covered by an existing mapping -- reuse
 * it; (2) no mapping yet -- mmap the whole file and slice; (3) mapping
 * exists but is too short -- fall back to a plain read into heap memory.
 * NOTE(review): error-return lines are elided from this listing. */
615 BlueSkyRCStr *bluesky_cachefile_map_raw(BlueSkyCacheFile *cachefile,
616                                         off_t offset, size_t size)
618     cachefile->atime = bluesky_get_current_time();
620     /* Easy case: the needed data is already in memory */
621     if (cachefile->addr != NULL && offset + size <= cachefile->len)
622         return bluesky_string_new_from_mmap(cachefile, offset, size);
624     int fd = openat(cachefile->log->dirfd, cachefile->filename, O_RDONLY);
626         fprintf(stderr, "Error opening logfile %s: %m\n",
627                 cachefile->filename);
631     off_t length = lseek(fd, 0, SEEK_END);
632     if (offset + size > length) {
637     /* File is not mapped in memory.  Map the entire file in, then return a
638      * pointer to just the required data. */
639     if (cachefile->addr == NULL) {
640         cachefile->addr = (const char *)mmap(NULL, length, PROT_READ,
642         cachefile->len = length;
        /* The mapping itself holds a reference on the cachefile. */
643         g_atomic_int_inc(&cachefile->refcount);
646         return bluesky_string_new_from_mmap(cachefile, offset, size);
649     /* Otherwise, the file was mapped in but doesn't cover the data we need.
650      * This shouldn't happen much, if at all, but if it does just read the data
651      * we need directly from the file.  We lose memory-management benefits of
652      * using mmapped data, but otherwise this works. */
653     char *buf = g_malloc(size);
654     size_t actual_size = readbuf(fd, buf, size);
656     if (actual_size != size) {
660     return bluesky_string_new(buf, size);
664 /* The arguments are mostly straightforward.  log_dir is -1 for access from the
665  * journal, and non-negative for access to a cloud log segment.  map_data
666  * should be TRUE for the case where we are mapping just the data of an item
667  * where we have already parsed the item headers; this suppresses the error
668  * when the access is not to the first bytes of the item. */
/* Map a cloud-log item's bytes into memory, preferring the local journal
 * copy; falls back to the (possibly partially-fetched) cloud segment,
 * triggering and waiting on a ranged fetch when the item's bytes are not
 * yet cached locally.  NOTE(review): elided lines hide parts of the
 * retry/loop structure here. */
669 BlueSkyRCStr *bluesky_log_map_object(BlueSkyCloudLog *item, gboolean map_data)
671     BlueSkyFS *fs = item->fs;
672     BlueSkyCacheFile *map = NULL;
673     BlueSkyRCStr *str = NULL;
675     size_t file_offset = 0, file_size = 0;
676     gboolean range_request = bluesky_options.full_segment_fetches
679     if (page_size == 0) {
680         page_size = getpagesize();
683     bluesky_cloudlog_stats_update(item, -1);
685     /* First, check to see if the journal still contains a copy of the item and
687     if ((item->location_flags | item->pending_write) & CLOUDLOG_JOURNAL) {
688         map = bluesky_cachefile_lookup(fs, -1, item->log_seq, TRUE);
690             location = CLOUDLOG_JOURNAL;
691             file_offset = item->log_offset;
692             file_size = item->log_size;
/* Journal copy unavailable: fall back to the cloud segment recorded in
 * item->location, and stop claiming a journal copy exists. */
696     if (location == 0 && (item->location_flags & CLOUDLOG_CLOUD)) {
697         item->location_flags &= ~CLOUDLOG_JOURNAL;
698         map = bluesky_cachefile_lookup(fs,
699                                        item->location.directory,
700                                        item->location.sequence,
703             g_warning("Unable to remap cloud log segment!");
706         location = CLOUDLOG_CLOUD;
707         file_offset = item->location.offset;
708         file_size = item->location.size;
711     /* Log segments fetched from the cloud might only be partially-fetched.
712      * Check whether the object we are interested in is available. */
713     if (location == CLOUDLOG_CLOUD) {
715             const BlueSkyRangesetItem *rangeitem;
716             rangeitem = bluesky_rangeset_lookup(map->items, file_offset);
            /* A range starting at our offset but with the wrong extent
             * means corrupted/moved data, not merely missing data. */
717             if (rangeitem != NULL && (rangeitem->start != file_offset
718                                       || rangeitem->length != file_size)) {
719                 g_warning("log-%d: Item offset %zd seems to be invalid!",
720                           (int)item->location.sequence, file_offset);
723             if (rangeitem == NULL) {
724                 if (bluesky_verbose) {
725                     g_print("Item at offset 0x%zx not available, need to fetch.\n",
/* Widen the fetch to cover any accumulated prefetch hints, then issue it
 * and wait on map->cond for cloudlog_partial_fetch_complete's broadcast. */
729                 uint64_t start = file_offset, length = file_size, end;
730                 if (map->prefetches != NULL)
731                     bluesky_rangeset_get_extents(map->prefetches,
733                 start = MIN(start, file_offset);
734                 end = MAX(start + length, file_offset + file_size);
735                 length = end - start;
736                 cloudlog_partial_fetch_start(map, start, length);
737                 if (map->prefetches != NULL) {
738                     bluesky_rangeset_free(map->prefetches);
739                     map->prefetches = NULL;
742                 g_cond_wait(map->cond, map->lock);
743             } else if (rangeitem->start == file_offset
744                        && rangeitem->length == file_size) {
746                     g_print("Item %zd now available.\n", file_offset);
/* Skip past the on-disk record header so we map only the item's data
 * payload (header type depends on where the item lives). */
753     if (location == CLOUDLOG_JOURNAL)
754         file_offset += sizeof(struct log_header);
756         file_offset += sizeof(struct cloudlog_header);
758     file_size = item->data_size;
760     str = bluesky_cachefile_map_raw(map, file_offset, file_size);
763     bluesky_cachefile_unref(map);
764     g_mutex_unlock(map->lock);
766     bluesky_cloudlog_stats_update(item, 1);
/* Release one mapping reference on a cachefile.  When the map count drops
 * to zero, unmap the file's memory (re-checking the count under the lock,
 * since another thread may have re-mapped in between) and drop the
 * refcount the mapping held. */
770 void bluesky_mmap_unref(BlueSkyCacheFile *mmap)
775     if (g_atomic_int_dec_and_test(&mmap->mapcount)) {
776         g_mutex_lock(mmap->lock);
777         if (mmap->addr != NULL && g_atomic_int_get(&mmap->mapcount) == 0) {
779             g_print("Unmapped log segment %d...\n", mmap->log_seq);
780             munmap((void *)mmap->addr, mmap->len);
782             g_atomic_int_add(&mmap->refcount, -1);
784         g_mutex_unlock(mmap->lock);
786     g_assert(g_atomic_int_get(&mmap->mapcount) >= 0);
789 /******************************* JOURNAL REPLAY *******************************
790 * The journal replay code is used to recover filesystem state after a
791 * filesystem restart. We first look for the most recent commit record in the
792 * journal, which indicates the point before which all data in the journal has
793 * also been committed to the cloud. Then, we read in all data in the log past
/* Return a sorted list (g_strdup'd names, caller frees) of all
 * "journal-"-prefixed files in dirname; used by replay to find journal
 * segments in sequence order.  Returns an empty/NULL list if the
 * directory cannot be opened. */
796 static GList *directory_contents(const char *dirname)
798     GList *contents = NULL;
799     GDir *dir = g_dir_open(dirname, 0, NULL);
801         g_warning("Unable to open journal directory: %s", dirname);
806     while ((file = g_dir_read_name(dir)) != NULL) {
807         if (strncmp(file, "journal-", 8) == 0)
808             contents = g_list_prepend(contents, g_strdup(file));
/* Lexicographic sort == numeric sort here, since names share the
 * zero-padded "journal-%08d" format. */
812     contents = g_list_sort(contents, (GCompareFunc)strcmp);
/* Validate a single journal record at the given offset within buf (a
 * fully-mapped journal segment of len bytes): bounds checks, header and
 * footer magic numbers, a self-consistent stored offset, and a CRC32C
 * over the whole record.  Returns TRUE only for a fully valid record;
 * a failure here during replay marks the end of usable log data. */
817 static gboolean validate_journal_item(const char *buf, size_t len, off_t offset)
819     const struct log_header *header;
820     const struct log_footer *footer;
    /* Cheap structural checks before touching the record contents. */
822     if (offset + sizeof(struct log_header) + sizeof(struct log_footer) > len)
825     header = (const struct log_header *)(buf + offset);
826     if (GUINT32_FROM_LE(header->magic) != HEADER_MAGIC)
828     if (GUINT32_FROM_LE(header->offset) != offset)
830     size_t size = GUINT32_FROM_LE(header->size1)
831                    + GUINT32_FROM_LE(header->size2)
832                    + GUINT32_FROM_LE(header->size3);
834     off_t footer_offset = offset + sizeof(struct log_header) + size;
835     if (footer_offset + sizeof(struct log_footer) > len)
837     footer = (const struct log_footer *)(buf + footer_offset);
839     if (GUINT32_FROM_LE(footer->magic) != FOOTER_MAGIC)
/* The record stores a finalized CRC; recomputing over the whole record,
 * CRC field included, must produce the fixed validator constant. */
842     uint32_t crc = crc32c(BLUESKY_CRC32C_SEED, buf + offset,
843                           sizeof(struct log_header) + sizeof(struct log_footer)
845     if (crc != BLUESKY_CRC32C_VALIDATOR) {
846         g_warning("Journal entry failed to validate: CRC %08x != %08x",
847                   crc, BLUESKY_CRC32C_VALIDATOR);
854 /* Scan through a journal segment to extract correctly-written items (those
855  * that pass sanity checks and have a valid checksum). */
/* First replay pass: walk valid records in order and, for each
 * LOGTYPE_JOURNAL_CHECKPOINT found, record the (sequence, offset) it
 * names via the seq/start_offset out-parameters.  The last checkpoint in
 * the segment wins, giving the replay starting point. */
856 static void bluesky_replay_scan_journal(const char *buf, size_t len,
857                                         uint32_t *seq, uint32_t *start_offset)
859     const struct log_header *header;
862     while (validate_journal_item(buf, len, offset)) {
863         header = (const struct log_header *)(buf + offset);
864         size_t size = GUINT32_FROM_LE(header->size1)
865                        + GUINT32_FROM_LE(header->size2)
866                        + GUINT32_FROM_LE(header->size3);
        /* type is stored offset by '0' (see log_thread's header.type). */
868         if (header->type - '0' == LOGTYPE_JOURNAL_CHECKPOINT) {
            /* Checkpoint payload: two little-endian uint32s, the
             * journal sequence number and offset of the commit point. */
869             const uint32_t *data = (const uint32_t *)((const char *)header + sizeof(struct log_header));
870             *seq = GUINT32_FROM_LE(data[0]);
871             *start_offset = GUINT32_FROM_LE(data[1]);
874         offset += sizeof(struct log_header) + size + sizeof(struct log_footer);
/* Rebuild a cloud-log item's in-memory state from raw journal bytes
 * during replay: mark it as living in the journal, and reconstruct its
 * links array from the serialized ID list (data section 2), resolving
 * each non-zero ID through fs->locations.  data points at the start of
 * the item's serialized sections; len1/len2/len3 are their sizes. */
878 static void reload_item(BlueSkyCloudLog *log_item,
880                         size_t len1, size_t len2, size_t len3)
882     BlueSkyFS *fs = log_item->fs;
883     /*const char *data1 = data;*/
884     const BlueSkyCloudID *data2
885         = (const BlueSkyCloudID *)(data + len1);
886     /*const BlueSkyCloudPointer *data3
887         = (const BlueSkyCloudPointer *)(data + len1 + len2);*/
889     bluesky_cloudlog_stats_update(log_item, -1);
890     bluesky_string_unref(log_item->data);
891     log_item->data = NULL;
892     log_item->location_flags = CLOUDLOG_JOURNAL;
893     bluesky_cloudlog_stats_update(log_item, 1);
/* id0 is the all-zero ID used as a "no link" sentinel below. */
896     memset(&id0, 0, sizeof(id0));
898     int link_count = len2 / sizeof(BlueSkyCloudID);
899     GArray *new_links = g_array_new(FALSE, TRUE, sizeof(BlueSkyCloudLog *));
900     for (int i = 0; i < link_count; i++) {
901         BlueSkyCloudID id = data2[i];
902         BlueSkyCloudLog *ref = NULL;
903         if (memcmp(&id, &id0, sizeof(BlueSkyCloudID)) != 0) {
904             g_mutex_lock(fs->lock);
905             ref = g_hash_table_lookup(fs->locations, &id);
907                 bluesky_cloudlog_ref(ref);
909             g_mutex_unlock(fs->lock);
911         g_array_append_val(new_links, ref);
/* Swap in the rebuilt links, dropping references held by the old array. */
914     for (int i = 0; i < log_item->links->len; i++) {
915         BlueSkyCloudLog *c = g_array_index(log_item->links,
916                                            BlueSkyCloudLog *, i);
917         bluesky_cloudlog_unref(c);
919     g_array_unref(log_item->links);
920     log_item->links = new_links;
/* Second replay pass: starting at start_offset within journal segment
 * log_seq, reload every valid record into the filesystem.  Each record's
 * cloud-log item is reconstructed (reload_item), its journal location is
 * recorded, and inode records additionally get deserialized into (new or
 * existing) BlueSkyInode objects.  Reloaded items are prepended to
 * *objects so the caller can drop the extra references afterwards. */
923 static void bluesky_replay_scan_journal2(BlueSkyFS *fs, GList **objects,
924                                          int log_seq, int start_offset,
925                                          const char *buf, size_t len)
927     const struct log_header *header;
928     off_t offset = start_offset;
930     while (validate_journal_item(buf, len, offset)) {
931         header = (const struct log_header *)(buf + offset);
932         g_print("In replay found valid item at offset %zd\n", offset);
933         size_t size = GUINT32_FROM_LE(header->size1)
934                        + GUINT32_FROM_LE(header->size2)
935                        + GUINT32_FROM_LE(header->size3);
937         BlueSkyCloudLog *log_item = bluesky_cloudlog_get(fs, header->id);
938         g_mutex_lock(log_item->lock);
939         *objects = g_list_prepend(*objects, log_item);
941         log_item->inum = GUINT64_FROM_LE(header->inum);
942         reload_item(log_item, buf + offset + sizeof(struct log_header),
943                     GUINT32_FROM_LE(header->size1),
944                     GUINT32_FROM_LE(header->size2),
945                     GUINT32_FROM_LE(header->size3));
946         log_item->log_seq = log_seq;
947         log_item->log_offset = offset + sizeof(struct log_header);
948         log_item->log_size = header->size1;
/* Temporarily hold a heap copy of the item's payload (section 1) so
 * inode deserialization below can read it; dropped again afterwards. */
950         bluesky_string_unref(log_item->data);
951         log_item->data = bluesky_string_new(g_memdup(buf + offset + sizeof(struct log_header), GUINT32_FROM_LE(header->size1)), GUINT32_FROM_LE(header->size1));
953         /* For any inodes which were read from the journal, deserialize the
954          * inode information, overwriting any old inode data. */
955         if (header->type - '0' == LOGTYPE_INODE) {
956             uint64_t inum = GUINT64_FROM_LE(header->inum);
958             g_mutex_lock(fs->lock);
959             inode = (BlueSkyInode *)g_hash_table_lookup(fs->inodes, &inum);
961                 inode = bluesky_new_inode(inum, fs, BLUESKY_PENDING);
962                 inode->change_count = 0;
963                 bluesky_insert_inode(fs, inode);
965             g_mutex_lock(inode->lock);
966             bluesky_inode_free_resources(inode);
967             if (!bluesky_deserialize_inode(inode, log_item))
968                 g_print("Error deserializing inode %"PRIu64"\n", inum);
969             fs->next_inum = MAX(fs->next_inum, inum + 1);
/* Refresh LRU/dirty bookkeeping: the inode is journaled (dirty w.r.t.
 * the cloud) but no longer unlogged. */
970             bluesky_list_unlink(&fs->accessed_list, inode->accessed_list);
971             inode->accessed_list = bluesky_list_prepend(&fs->accessed_list, inode);
972             bluesky_list_unlink(&fs->dirty_list, inode->dirty_list);
973             inode->dirty_list = bluesky_list_prepend(&fs->dirty_list, inode);
974             bluesky_list_unlink(&fs->unlogged_list, inode->unlogged_list);
975             inode->unlogged_list = NULL;
976             inode->change_cloud = inode->change_commit;
977             bluesky_cloudlog_ref(log_item);
978             bluesky_cloudlog_unref(inode->committed_item);
979             inode->committed_item = log_item;
980             g_mutex_unlock(inode->lock);
981             g_mutex_unlock(fs->lock);
983         bluesky_string_unref(log_item->data);
984         log_item->data = NULL;
985         g_mutex_unlock(log_item->lock);
987         offset += sizeof(struct log_header) + size + sizeof(struct log_footer);
991 void bluesky_replay(BlueSkyFS *fs)
993 BlueSkyLog *log = fs->log;
994 GList *logfiles = directory_contents(log->log_directory);
996 /* Scan through log files in reverse order to find the most recent commit
998 logfiles = g_list_reverse(logfiles);
999 uint32_t seq_num = 0, start_offset = 0;
1000 while (logfiles != NULL) {
1001 char *filename = g_strdup_printf("%s/%s", log->log_directory,
1002 (char *)logfiles->data);
1003 g_print("Scanning file %s\n", filename);
1004 GMappedFile *map = g_mapped_file_new(filename, FALSE, NULL);
1006 g_warning("Mapping logfile %s failed!\n", filename);
1008 bluesky_replay_scan_journal(g_mapped_file_get_contents(map),
1009 g_mapped_file_get_length(map),
1010 &seq_num, &start_offset);
1011 g_mapped_file_unref(map);
1015 g_free(logfiles->data);
1016 logfiles = g_list_delete_link(logfiles, logfiles);
1017 if (seq_num != 0 || start_offset != 0)
1020 g_list_foreach(logfiles, (GFunc)g_free, NULL);
1021 g_list_free(logfiles);
1023 /* Now, scan forward starting from the given point in the log to
1024 * reconstruct all filesystem state. As we reload objects we hold a
1025 * reference to each loaded object. At the end we free all these
1026 * references, so that any objects which were not linked into persistent
1027 * filesystem data structures are freed. */
1028 GList *objects = NULL;
1030 char *filename = g_strdup_printf("%s/journal-%08d",
1031 log->log_directory, seq_num);
1032 g_print("Replaying file %s from offset %d\n", filename, start_offset);
1033 GMappedFile *map = g_mapped_file_new(filename, FALSE, NULL);
1036 g_warning("Mapping logfile failed, assuming end of journal\n");
1040 bluesky_replay_scan_journal2(fs, &objects, seq_num, start_offset,
1041 g_mapped_file_get_contents(map),
1042 g_mapped_file_get_length(map));
1043 g_mapped_file_unref(map);
1048 while (objects != NULL) {
1049 bluesky_cloudlog_unref((BlueSkyCloudLog *)objects->data);
1050 objects = g_list_delete_link(objects, objects);