/* Blue Sky: File Systems in the Cloud
 *
 * Copyright (C) 2009  The Regents of the University of California
 * Written by Michael Vrable <mvrable@cs.ucsd.edu>
 */

#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <glib.h>

#include "bluesky-private.h"
// Rough size limit for a log segment.  This is not a firm limit and there are
// no absolute guarantees on the size of a log segment.
#define CLOUDLOG_SEGMENT_SIZE (4 << 20)
BlueSkyCloudID bluesky_cloudlog_new_id()
{
    BlueSkyCloudID id;
    bluesky_crypt_random_bytes((uint8_t *)&id.bytes, sizeof(id));
    return id;
}

gchar *bluesky_cloudlog_id_to_string(BlueSkyCloudID id)
{
    char buf[sizeof(BlueSkyCloudID) * 2 + 1];

    for (int i = 0; i < sizeof(BlueSkyCloudID); i++) {
        sprintf(&buf[2*i], "%02x", (uint8_t)(id.bytes[i]));
    }

    return g_strdup(buf);
}
BlueSkyCloudID bluesky_cloudlog_id_from_string(const gchar *idstr)
{
    BlueSkyCloudID id;
    memset(&id, 0, sizeof(id));
    for (int i = 0; i < 2*sizeof(BlueSkyCloudID); i++) {
        char c = idstr[i];
        if (c == '\0') {
            g_warning("Short cloud id: %s\n", idstr);
            break;
        }

        int val = 0;
        if (c >= '0' && c <= '9')
            val = c - '0';
        else if (c >= 'a' && c <= 'f')
            val = c - 'a' + 10;
        else
            g_warning("Bad character in cloud id: %s\n", idstr);
        id.bytes[i / 2] += val << (i % 2 ? 0 : 4);
    }
    return id;
}
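
/* Illustrative sketch, not part of the original file: the two conversion
 * helpers above round-trip an ID through its lowercase hexadecimal text form.
 * The example function name is hypothetical. */
G_GNUC_UNUSED static void example_id_round_trip(void)
{
    BlueSkyCloudID id = bluesky_cloudlog_new_id();

    gchar *text = bluesky_cloudlog_id_to_string(id);
    BlueSkyCloudID parsed = bluesky_cloudlog_id_from_string(text);

    g_assert(memcmp(&id, &parsed, sizeof(BlueSkyCloudID)) == 0);
    g_free(text);
}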
gboolean bluesky_cloudlog_equal(gconstpointer a, gconstpointer b)
{
    BlueSkyCloudID *id1 = (BlueSkyCloudID *)a, *id2 = (BlueSkyCloudID *)b;

    return memcmp(id1, id2, sizeof(BlueSkyCloudID)) == 0;
}
guint bluesky_cloudlog_hash(gconstpointer a)
{
    BlueSkyCloudID *id = (BlueSkyCloudID *)a;

    // Assume that the bits in the ID are randomly chosen, so that any subset
    // of the bits can be used as a hash key.
    return *(guint *)(&id->bytes);
}
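
/* Illustrative sketch (an assumption, not taken from this file): the hash and
 * equality callbacks above have the GHashFunc/GEqualFunc shapes needed for a
 * GHashTable keyed by pointers to BlueSkyCloudID values, such as the
 * fs->locations table used throughout this file.  The real table is built
 * elsewhere in BlueSky; this example only shows the intended pairing. */
G_GNUC_UNUSED static GHashTable *example_cloudlog_table_new(void)
{
    /* Keys are &log->id pointers owned by the items themselves, so no
     * key/value destroy functions are supplied here. */
    return g_hash_table_new(bluesky_cloudlog_hash, bluesky_cloudlog_equal);
}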
/* Formatting of cloud log segments.  This handles grouping items together
 * before writing a batch to the cloud, handling indirection through items
 * like the inode map, etc. */
BlueSkyCloudLog *bluesky_cloudlog_new(BlueSkyFS *fs, const BlueSkyCloudID *id)
{
    BlueSkyCloudLog *log = g_new0(BlueSkyCloudLog, 1);

    log->lock = g_mutex_new();
    log->cond = g_cond_new();
    log->fs = fs;
    log->type = LOGTYPE_UNKNOWN;
    if (id != NULL)
        memcpy(&log->id, id, sizeof(BlueSkyCloudID));
    else
        log->id = bluesky_cloudlog_new_id();
    log->links = g_array_new(FALSE, TRUE, sizeof(BlueSkyCloudLog *));
    g_atomic_int_set(&log->refcount, 1);

    return log;
}
/* Helper function for updating memory usage statistics for a filesystem (the
 * cache_log_* variables).  This will increment (type=1) or decrement
 * (type=-1) the counter associated with the current state of the cloud log
 * item.  The item should be locked or otherwise protected from concurrent
 * access. */
void bluesky_cloudlog_stats_update(BlueSkyCloudLog *log, int type)
{
    BlueSkyFS *fs = log->fs;

    if (log->location_flags & CLOUDLOG_CLOUD) {
        g_atomic_int_add(&fs->cache_log_cloud, type);
    } else if (log->location_flags & CLOUDLOG_JOURNAL) {
        g_atomic_int_add(&fs->cache_log_journal, type);
    } else if (log->pending_write & CLOUDLOG_JOURNAL) {
        g_atomic_int_add(&fs->cache_log_journal, type);
    } else if (log->data != NULL) {
        g_atomic_int_add(&fs->cache_log_dirty, type);
    }
}
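
/* Illustrative sketch, not part of the original file: because the counter is
 * chosen from the item's current state, callers that change that state
 * bracket the change with a decrement (leave the old state) and an increment
 * (enter the new state) while holding the item's lock.  The same pattern
 * appears in cloudlog_flush_complete() later in this file; the function name
 * here is hypothetical. */
G_GNUC_UNUSED static void example_mark_committed(BlueSkyCloudLog *log)
{
    g_mutex_lock(log->lock);
    bluesky_cloudlog_stats_update(log, -1);     /* leave the old state */
    log->pending_write &= ~CLOUDLOG_CLOUD;      /* ...change the state... */
    log->location_flags |= CLOUDLOG_CLOUD;
    bluesky_cloudlog_stats_update(log, 1);      /* enter the new state */
    g_mutex_unlock(log->lock);
}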
/* The reference held by the hash table does not count towards the reference
 * count.  When a new object is created, it initially has a reference count of
 * 1 for the creator, and similarly fetching an item from the hash table will
 * also create a reference.  If the reference count drops to zero,
 * bluesky_cloudlog_unref attempts to remove the object from the hash table,
 * but there is a potential race since another thread might read the object
 * from the hash table at the same time.  So an object with a reference count
 * of zero may still be resurrected, in which case we need to abort the
 * destruction.  Once the object is gone from the hash table, and if the
 * reference count is still zero, it can actually be deleted. */
void bluesky_cloudlog_ref(BlueSkyCloudLog *log)
{
    if (log == NULL)
        return;

    g_atomic_int_inc(&log->refcount);
}
void bluesky_cloudlog_unref(BlueSkyCloudLog *log)
{
    if (log == NULL)
        return;

    if (g_atomic_int_dec_and_test(&log->refcount)) {
        BlueSkyFS *fs = log->fs;

        g_mutex_lock(fs->lock);
        if (g_atomic_int_get(&log->refcount) > 0) {
            g_mutex_unlock(fs->lock);
            return;
        }

        if (!g_hash_table_remove(fs->locations, &log->id)) {
            g_warning("Could not find and remove cloud log item from hash table!");
        }
        g_mutex_unlock(fs->lock);

        bluesky_cloudlog_stats_update(log, -1);
        log->type = LOGTYPE_INVALID;
        g_mutex_free(log->lock);
        g_cond_free(log->cond);
        for (int i = 0; i < log->links->len; i++) {
            BlueSkyCloudLog *c = g_array_index(log->links,
                                               BlueSkyCloudLog *, i);
            bluesky_cloudlog_unref(c);
        }
        g_array_unref(log->links);
        bluesky_string_unref(log->data);
        g_free(log);
    }
}
/* For locking reasons cloudlog unrefs may sometimes need to be performed in
 * the future.  We launch a thread for handling these delayed unreference
 * requests. */
static gpointer cloudlog_unref_thread(gpointer q)
{
    GAsyncQueue *queue = (GAsyncQueue *)q;

    while (TRUE) {
        BlueSkyCloudLog *item = (BlueSkyCloudLog *)g_async_queue_pop(queue);
        bluesky_cloudlog_unref(item);
    }

    return NULL;
}
void bluesky_cloudlog_unref_delayed(BlueSkyCloudLog *log)
{
    if (log != NULL)
        g_async_queue_push(log->fs->unref_queue, log);
}
void bluesky_cloudlog_threads_init(BlueSkyFS *fs)
{
    fs->unref_queue = g_async_queue_new();
    g_thread_create(cloudlog_unref_thread, fs->unref_queue, FALSE, NULL);
}
/* Erase the information contained within the in-memory cloud log
 * representation.  This does not free up the item itself, but frees the data
 * and references to other log items and resets the type back to unknown.  If
 * the object was written out to persistent storage, all state about it can be
 * recovered by loading the object back in.  The object must be locked before
 * calling this function. */
void bluesky_cloudlog_erase(BlueSkyCloudLog *log)
{
    g_assert(log->data_lock_count == 0);

    if (log->type == LOGTYPE_UNKNOWN)
        return;

    log->type = LOGTYPE_UNKNOWN;
    log->data_size = 0;
    bluesky_string_unref(log->data);
    log->data = NULL;
    log->data_lock_count = 0;

    for (int i = 0; i < log->links->len; i++) {
        BlueSkyCloudLog *c = g_array_index(log->links,
                                           BlueSkyCloudLog *, i);
        bluesky_cloudlog_unref(c);
    }
    g_array_unref(log->links);
    log->links = g_array_new(FALSE, TRUE, sizeof(BlueSkyCloudLog *));
}
/* Start a write of the object to the local log. */
void bluesky_cloudlog_sync(BlueSkyCloudLog *log)
{
    bluesky_log_item_submit(log, log->fs->log);
}
/* Add the given entry to the global hash table containing cloud log entries.
 * Takes ownership of the caller's reference. */
void bluesky_cloudlog_insert_locked(BlueSkyCloudLog *log)
{
    g_hash_table_insert(log->fs->locations, &log->id, log);
}

void bluesky_cloudlog_insert(BlueSkyCloudLog *log)
{
    g_mutex_lock(log->fs->lock);
    bluesky_cloudlog_insert_locked(log);
    g_mutex_unlock(log->fs->lock);
}
/* Look up the cloud log entry for the given ID.  If the item does not exist,
 * create a special pending entry that can later be filled in when the real
 * item is loaded.  The returned item has a reference held.  As a special
 * case, if a null ID is provided then NULL is returned. */
BlueSkyCloudLog *bluesky_cloudlog_get(BlueSkyFS *fs, BlueSkyCloudID id)
{
    static BlueSkyCloudID id0 = {{0}};

    if (memcmp(&id, &id0, sizeof(BlueSkyCloudID)) == 0)
        return NULL;

    g_mutex_lock(fs->lock);
    BlueSkyCloudLog *item;
    item = g_hash_table_lookup(fs->locations, &id);
    if (item == NULL) {
        item = bluesky_cloudlog_new(fs, &id);
        bluesky_cloudlog_stats_update(item, 1);
        bluesky_cloudlog_insert_locked(item);
    } else {
        bluesky_cloudlog_ref(item);
    }
    g_mutex_unlock(fs->lock);

    return item;
}
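
/* Illustrative sketch, not part of the original file: the typical lifecycle
 * of a lookup.  bluesky_cloudlog_get() returns an item with one reference
 * held for the caller; the item's own lock must be held while fetching or
 * using its data, and the reference is dropped when the caller is done.  This
 * assumes the item is backed by the journal or cloud so that the fetch can
 * succeed; the function name is hypothetical. */
G_GNUC_UNUSED static void example_lookup_and_release(BlueSkyFS *fs,
                                                     BlueSkyCloudID id)
{
    BlueSkyCloudLog *item = bluesky_cloudlog_get(fs, id);
    if (item == NULL)
        return;                     /* a null ID yields NULL */

    g_mutex_lock(item->lock);
    bluesky_cloudlog_fetch(item);   /* ensure item->data is mapped */
    /* ... use item->data here ... */
    g_mutex_unlock(item->lock);

    bluesky_cloudlog_unref(item);
}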
/* Attempt to prefetch a cloud log item.  This does not guarantee that it will
 * be made available, but does make it more likely that a future call to
 * bluesky_cloudlog_fetch will complete quickly.  Item must be locked? */
void bluesky_cloudlog_prefetch(BlueSkyCloudLog *item)
{
    if (item->data != NULL)
        return;

    /* TODO: Some of the code here is duplicated with bluesky_log_map_object.
     * Refactor to fix that. */
    BlueSkyFS *fs = item->fs;
    BlueSkyCacheFile *map = NULL;

    /* First, check to see if the journal still contains a copy of the item
     * and if so update the atime on the journal so it is likely to be kept
     * around until we need it. */
    if ((item->location_flags | item->pending_write) & CLOUDLOG_JOURNAL) {
        map = bluesky_cachefile_lookup(fs, -1, item->log_seq, TRUE);
        if (map != NULL) {
            map->atime = bluesky_get_current_time();
            bluesky_cachefile_unref(map);
            g_mutex_unlock(map->lock);
            return;
        }
    }

    item->location_flags &= ~CLOUDLOG_JOURNAL;
    if (!(item->location_flags & CLOUDLOG_CLOUD))
        return;

    map = bluesky_cachefile_lookup(fs,
                                   item->location.directory,
                                   item->location.sequence,
                                   FALSE);
    if (map == NULL)
        return;

    /* At this point, we have information about the log segment containing the
     * item we need.  If our item is already fetched, we have nothing to do
     * except update the atime.  If not, queue up a fetch of our object. */
    const BlueSkyRangesetItem *rangeitem;
    rangeitem = bluesky_rangeset_lookup(map->items,
                                        item->location.offset);
    if (rangeitem == NULL) {
        if (map->prefetches == NULL)
            map->prefetches = bluesky_rangeset_new();

        gchar *id = bluesky_cloudlog_id_to_string(item->id);
        g_print("Need to prefetch %s\n", id);
        g_free(id);

        bluesky_rangeset_insert(map->prefetches,
                                item->location.offset,
                                item->location.size, NULL);

        uint64_t start, length;
        bluesky_rangeset_get_extents(map->prefetches, &start, &length);
        g_print("Range to prefetch: %"PRIu64" + %"PRIu64"\n",
                start, length);
    }

    bluesky_cachefile_unref(map);
    g_mutex_unlock(map->lock);
}
/* Ensure that a cloud log item is loaded in memory, and if not read it in.
 * TODO: Make asynchronous, and make this also fetch from the cloud.  Right
 * now we only read from the log.  Log item must be locked. */
void bluesky_cloudlog_fetch(BlueSkyCloudLog *log)
{
    if (log->data != NULL)
        return;

    BlueSkyProfile *profile = bluesky_profile_get();
    bluesky_profile_add_event(profile, g_strdup_printf("Fetch log entry"));

    /* There are actually two cases: a full deserialization if we have not
     * ever read the object before, and a partial deserialization where the
     * metadata is already in memory and we just need to remap the data.  If
     * the object type has not yet been set, we'll need to read and parse the
     * metadata.  Once that is done, we can fall through to the case of
     * remapping the data itself. */
    if (log->type == LOGTYPE_UNKNOWN) {
        BlueSkyRCStr *raw = bluesky_log_map_object(log, FALSE);
        g_assert(raw != NULL);
        bluesky_deserialize_cloudlog(log, raw->data, raw->len);
        bluesky_string_unref(raw);
    }

    /* At this point all metadata should be available and we need only remap
     * the object data. */
    log->data = bluesky_log_map_object(log, TRUE);

    if (log->data == NULL) {
        g_error("Unable to fetch cloudlog entry!");
    }

    bluesky_profile_add_event(profile, g_strdup_printf("Fetch complete"));
    g_cond_broadcast(log->cond);
}
BlueSkyCloudPointer bluesky_cloudlog_serialize(BlueSkyCloudLog *log,
                                               BlueSkyFS *fs)
{
    BlueSkyCloudLogState *state = fs->log_state;

    if ((log->location_flags | log->pending_write) & CLOUDLOG_CLOUD) {
        return log->location;
    }

    for (int i = 0; i < log->links->len; i++) {
        BlueSkyCloudLog *ref = g_array_index(log->links,
                                             BlueSkyCloudLog *, i);
        bluesky_cloudlog_serialize(ref, fs);
    }

    /* FIXME: Ought lock to be taken earlier? */
    g_mutex_lock(log->lock);
    bluesky_cloudlog_fetch(log);
    g_assert(log->data != NULL);

    bluesky_cloudlog_stats_update(log, -1);

    GString *data1 = g_string_new("");
    GString *data2 = g_string_new("");
    GString *data3 = g_string_new("");
    bluesky_serialize_cloudlog(log, data1, data2, data3);

    log->location = state->location;
    log->location.offset = state->data->len;
    log->data_size = data1->len;

    struct cloudlog_header header;
    memcpy(header.magic, CLOUDLOG_MAGIC, 4);
    memset(header.crypt_auth, 0, sizeof(header.crypt_auth));
    memset(header.crypt_iv, 0, sizeof(header.crypt_iv));
    header.type = log->type + '0';
    header.size1 = GUINT32_TO_LE(data1->len);
    header.size2 = GUINT32_TO_LE(data2->len);
    header.size3 = GUINT32_TO_LE(data3->len);
    memcpy(&header.id, &log->id, sizeof(BlueSkyCloudID));
    header.inum = GUINT64_TO_LE(log->inum);

    g_string_append_len(state->data, (const char *)&header, sizeof(header));
    g_string_append_len(state->data, data1->str, data1->len);
    g_string_append_len(state->data, data2->str, data2->len);
    g_string_append_len(state->data, data3->str, data3->len);

    log->location.size = state->data->len - log->location.offset;

    g_string_free(data1, TRUE);
    g_string_free(data2, TRUE);
    g_string_free(data3, TRUE);

    /* If the object we flushed was an inode, update the inode map. */
    if (log->type == LOGTYPE_INODE) {
        g_mutex_lock(fs->lock);
        InodeMapEntry *entry = bluesky_inode_map_lookup(fs->inode_map,
                                                        log->inum, 1);
        bluesky_cloudlog_unref_delayed(entry->item);
        entry->item = log;
        bluesky_cloudlog_ref(entry->item);
        g_mutex_unlock(fs->lock);
    }

    /* TODO: We should not mark the objects as committed to the cloud until
     * the data is flushed and acknowledged. */
    log->pending_write |= CLOUDLOG_CLOUD;
    bluesky_cloudlog_stats_update(log, 1);
    state->writeback_list = g_slist_prepend(state->writeback_list, log);
    bluesky_cloudlog_ref(log);
    g_mutex_unlock(log->lock);

    if (state->data->len > CLOUDLOG_SEGMENT_SIZE)
        bluesky_cloudlog_flush(fs);

    return log->location;
}
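
/* Illustrative sketch, not part of the original file: the writeback code
 * elsewhere in BlueSky is assumed to drive serialization roughly as shown
 * here.  Serializing an item appends it (and, recursively, the items it links
 * to) to the in-progress cloud log segment; an explicit flush then pushes the
 * partially filled segment to the storage backend.  The function name is
 * hypothetical. */
G_GNUC_UNUSED static void example_serialize_and_flush(BlueSkyCloudLog *log,
                                                      BlueSkyFS *fs)
{
    /* Records the item's directory/sequence/offset/size within the segment. */
    BlueSkyCloudPointer where = bluesky_cloudlog_serialize(log, fs);
    (void)where;

    /* bluesky_cloudlog_serialize() flushes automatically once the segment
     * exceeds CLOUDLOG_SEGMENT_SIZE; a manual flush is only needed when the
     * remaining data should be written out immediately. */
    bluesky_cloudlog_flush(fs);
}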
static void cloudlog_flush_complete(BlueSkyStoreAsync *async,
                                    SerializedRecord *record)
{
    g_print("Write of %s to cloud complete, status = %d\n",
            async->key, async->result);

    g_mutex_lock(record->lock);
    if (async->result >= 0) {
        while (record->items != NULL) {
            BlueSkyCloudLog *item = (BlueSkyCloudLog *)record->items->data;
            g_mutex_lock(item->lock);
            bluesky_cloudlog_stats_update(item, -1);
            item->pending_write &= ~CLOUDLOG_CLOUD;
            item->location_flags |= CLOUDLOG_CLOUD;
            bluesky_cloudlog_stats_update(item, 1);
            g_mutex_unlock(item->lock);
            bluesky_cloudlog_unref(item);

            record->items = g_slist_delete_link(record->items, record->items);
        }

        bluesky_string_unref(record->data);
        record->data = NULL;

        g_slist_free(record->items);
        record->items = NULL;
        record->complete = TRUE;
        g_cond_broadcast(record->cond);
    } else {
        g_print("Write should be resubmitted...\n");

        BlueSkyStoreAsync *async2 = bluesky_store_async_new(async->store);
        async2->op = STORE_OP_PUT;
        async2->key = g_strdup(async->key);
        async2->data = record->data;
        async2->profile = async->profile;
        bluesky_string_ref(record->data);
        bluesky_store_async_submit(async2);
        bluesky_store_async_add_notifier(async2,
                                         (GFunc)cloudlog_flush_complete,
                                         record);
        bluesky_store_async_unref(async2);
    }
    g_mutex_unlock(record->lock);
}
/* Finish up a partially-written cloud log segment and flush it to storage. */
void bluesky_cloudlog_flush(BlueSkyFS *fs)
{
    BlueSkyCloudLogState *state = fs->log_state;
    if (state->data == NULL || state->data->len == 0)
        return;

    /* TODO: Append some type of commit record to the log segment? */

    g_print("Serializing %zd bytes of data to cloud\n", state->data->len);
    SerializedRecord *record = g_new0(SerializedRecord, 1);
    bluesky_cloudlog_encrypt(state->data, fs->keys);
    record->data = bluesky_string_new_from_gstring(state->data);
    record->items = state->writeback_list;
    record->lock = g_mutex_new();
    record->cond = g_cond_new();
    state->writeback_list = NULL;

    BlueSkyStoreAsync *async = bluesky_store_async_new(fs->store);
    async->op = STORE_OP_PUT;
    async->key = g_strdup_printf("log-%08d-%08d",
                                 state->location.directory,
                                 state->location.sequence);
    async->data = record->data;
    bluesky_string_ref(record->data);
    bluesky_store_async_submit(async);
    bluesky_store_async_add_notifier(async,
                                     (GFunc)cloudlog_flush_complete,
                                     record);
    bluesky_store_async_unref(async);

    state->pending_segments = g_list_prepend(state->pending_segments, record);

    state->location.sequence++;
    state->location.offset = 0;
    state->data = g_string_new("");
}
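
/* Illustrative sketch, not part of the original file: the key under which a
 * segment is stored combines the cloud directory and sequence number, so
 * directory 0, sequence 3 maps to "log-00000000-00000003".  The helper name
 * is hypothetical; the caller would free the result with g_free(). */
G_GNUC_UNUSED static gchar *example_segment_key(int directory, int sequence)
{
    return g_strdup_printf("log-%08d-%08d", directory, sequence);
}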
/* Make an encryption pass over a cloud log segment to encrypt private data in
 * it. */
void bluesky_cloudlog_encrypt(GString *segment, BlueSkyCryptKeys *keys)
{
    char *data = segment->str;
    size_t remaining_size = segment->len;

    while (remaining_size >= sizeof(struct cloudlog_header)) {
        struct cloudlog_header *header = (struct cloudlog_header *)data;
        size_t item_size = sizeof(struct cloudlog_header)
                            + GUINT32_FROM_LE(header->size1)
                            + GUINT32_FROM_LE(header->size2)
                            + GUINT32_FROM_LE(header->size3);
        if (item_size > remaining_size)
            break;
        bluesky_crypt_block_encrypt(data, item_size, keys);

        data += item_size;
        remaining_size -= item_size;
    }
}
/* Make a decryption pass over a cloud log segment to decrypt items which were
 * encrypted.  Also computes a list of all offsets at which valid cloud log
 * items are found and adds those offsets to items (if non-NULL).
 *
 * If allow_unauth is set to true, then allow a limited set of unauthenticated
 * items that may have been rewritten by a file system cleaner.  These include
 * the checkpoint and inode map records only; other items must still pass
 * authentication. */
void bluesky_cloudlog_decrypt(char *segment, size_t len,
                              BlueSkyCryptKeys *keys,
                              BlueSkyRangeset *items,
                              gboolean allow_unauth)
{
    char *data = segment;
    size_t remaining_size = len;
    size_t offset = 0;

    while (remaining_size >= sizeof(struct cloudlog_header)) {
        struct cloudlog_header *header = (struct cloudlog_header *)data;
        size_t item_size = sizeof(struct cloudlog_header)
                            + GUINT32_FROM_LE(header->size1)
                            + GUINT32_FROM_LE(header->size2)
                            + GUINT32_FROM_LE(header->size3);
        if (item_size > remaining_size)
            break;
        if (bluesky_crypt_block_decrypt(data, item_size, keys, allow_unauth)) {
            if (items != NULL) {
                g_print("  data item at %zx\n", offset);
                bluesky_rangeset_insert(items, offset, item_size,
                                        GINT_TO_POINTER(TRUE));
            }
        } else {
            g_warning("Unauthenticated data at offset %zd", offset);
            if (items != NULL) {
                bluesky_rangeset_insert(items, offset, item_size,
                                        GINT_TO_POINTER(TRUE));
            }
        }

        data += item_size;
        offset += item_size;
        remaining_size -= item_size;
    }
}
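
/* Illustrative sketch, not part of the original file: decrypting a log
 * segment after it has been downloaded from the cloud.  `buf` and `len` stand
 * in for the raw segment contents obtained elsewhere; the rangeset collects
 * the extents of the items that passed decryption and authentication.  The
 * function name is hypothetical, and rangeset cleanup is omitted here since
 * real callers keep the rangeset around. */
G_GNUC_UNUSED static void example_decrypt_segment(char *buf, size_t len,
                                                  BlueSkyCryptKeys *keys)
{
    BlueSkyRangeset *items = bluesky_rangeset_new();

    /* Reject unauthenticated (cleaner-rewritten) records in this example. */
    bluesky_cloudlog_decrypt(buf, len, keys, items, FALSE);

    uint64_t start, length;
    bluesky_rangeset_get_extents(items, &start, &length);
    g_print("Valid items span offset %"PRIu64", length %"PRIu64"\n",
            start, length);
}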