return;
}
- g_hash_table_remove(fs->locations, &log->id);
+ if (!g_hash_table_remove(fs->locations, &log->id)) {
+ if (bluesky_verbose)
+ g_warning("Could not find and remove cloud log item from hash table!");
+ }
g_mutex_unlock(fs->lock);
bluesky_cloudlog_stats_update(log, -1);
}
}
+/* For locking reasons, cloudlog unrefs must sometimes be deferred until
+ * later. We launch a thread to handle these delayed unreference
+ * requests. */
+static gpointer cloudlog_unref_thread(gpointer q)
+{
+ GAsyncQueue *queue = (GAsyncQueue *)q;
+
+ while (TRUE) {
+ BlueSkyCloudLog *item = (BlueSkyCloudLog *)g_async_queue_pop(queue);
+ bluesky_cloudlog_unref(item);
+ }
+
+ return NULL;
+}
+
+void bluesky_cloudlog_unref_delayed(BlueSkyCloudLog *log)
+{
+ if (log != NULL)
+ g_async_queue_push(log->fs->unref_queue, log);
+}
+
+void bluesky_cloudlog_threads_init(BlueSkyFS *fs)
+{
+ fs->unref_queue = g_async_queue_new();
+ g_thread_create(cloudlog_unref_thread, fs->unref_queue, FALSE, NULL);
+}
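
As a usage sketch (hypothetical caller, not part of this change): a thread that already holds fs->lock cannot drop a reference directly, because the final bluesky_cloudlog_unref may itself need fs->lock to remove the item from fs->locations, as in the hunk at the top of this diff; pushing the unref onto the queue instead avoids the self-deadlock.

    /* Hypothetical sketch: dropping a reference while fs->lock is held.
     * Calling bluesky_cloudlog_unref() here directly could self-deadlock,
     * since the final unref removes the item from fs->locations under
     * fs->lock.  Queueing defers the unref to the dedicated thread. */
    static void example_drop_while_locked(BlueSkyFS *fs, BlueSkyCloudLog *log)
    {
        g_mutex_lock(fs->lock);
        /* ... update shared state that previously referenced log ... */
        bluesky_cloudlog_unref_delayed(log);
        g_mutex_unlock(fs->lock);
    }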
+
+/* Erase the information contained within the in-memory cloud log
+ * representation. This does not free the item itself; it frees the data and
+ * the references to other log items, and resets the type back to unknown.
+ * If the object was written out to persistent storage, all of its state can
+ * be recovered by loading the object back in. The object must be locked
+ * before calling this function. */
+void bluesky_cloudlog_erase(BlueSkyCloudLog *log)
+{
+ g_assert(log->data_lock_count == 0);
+
+ if (log->type == LOGTYPE_UNKNOWN)
+ return;
+
+ log->type = LOGTYPE_UNKNOWN;
+ log->data_size = 0;
+ bluesky_string_unref(log->data);
+ log->data = NULL;
+ log->data_lock_count = 0;
+
+ for (int i = 0; i < log->links->len; i++) {
+ BlueSkyCloudLog *c = g_array_index(log->links,
+ BlueSkyCloudLog *, i);
+ bluesky_cloudlog_unref(c);
+ }
+ g_array_unref(log->links);
+ log->links = g_array_new(FALSE, TRUE, sizeof(BlueSkyCloudLog *));
+}
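
A sketch of how a cache-eviction path might use this (example_evict and the exact flag test are illustrative assumptions): only an item that is not data-locked and has already been persisted to the journal or the cloud can safely be erased, since bluesky_cloudlog_fetch can later reconstruct it.

    /* Hypothetical eviction sketch: discard the in-memory contents of an
     * item that is already persisted, so a later fetch can reload it. */
    static void example_evict(BlueSkyCloudLog *log)
    {
        g_mutex_lock(log->lock);
        if (log->data_lock_count == 0
            && (log->location_flags & (CLOUDLOG_JOURNAL | CLOUDLOG_CLOUD)))
        {
            bluesky_cloudlog_erase(log);
        }
        g_mutex_unlock(log->lock);
    }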
+
/* Start a write of the object to the local log. */
void bluesky_cloudlog_sync(BlueSkyCloudLog *log)
{
/* Add the given entry to the global hash table containing cloud log entries.
* Takes ownership of the caller's reference. */
+void bluesky_cloudlog_insert_locked(BlueSkyCloudLog *log)
+{
+ g_hash_table_insert(log->fs->locations, &log->id, log);
+}
+
void bluesky_cloudlog_insert(BlueSkyCloudLog *log)
{
g_mutex_lock(log->fs->lock);
- g_hash_table_insert(log->fs->locations, &log->id, log);
+ bluesky_cloudlog_insert_locked(log);
g_mutex_unlock(log->fs->lock);
}
item = g_hash_table_lookup(fs->locations, &id);
if (item == NULL) {
item = bluesky_cloudlog_new(fs, &id);
- g_hash_table_insert(fs->locations, &id, item);
+ bluesky_cloudlog_stats_update(item, 1);
+ bluesky_cloudlog_insert_locked(item);
} else {
bluesky_cloudlog_ref(item);
}
return item;
}
+/* Attempt to prefetch a cloud log item. This does not guarantee that the
+ * item will be made available, but it does make a future call to
+ * bluesky_cloudlog_fetch more likely to complete quickly. TODO: Confirm
+ * whether the item must be locked by the caller. */
+void bluesky_cloudlog_prefetch(BlueSkyCloudLog *item)
+{
+ if (item->data != NULL)
+ return;
+
+ /* TODO: Some of the code here is duplicated with bluesky_log_map_object.
+ * Refactor to fix that. */
+ BlueSkyFS *fs = item->fs;
+ BlueSkyCacheFile *map = NULL;
+
+ /* First, check to see if the journal still contains a copy of the item and
+ * if so update the atime on the journal so it is likely to be kept around
+ * until we need it. */
+ if ((item->location_flags | item->pending_write) & CLOUDLOG_JOURNAL) {
+ map = bluesky_cachefile_lookup(fs, -1, item->log_seq, TRUE);
+ if (map != NULL) {
+ map->atime = bluesky_get_current_time();
+ bluesky_cachefile_unref(map);
+ g_mutex_unlock(map->lock);
+ return;
+ }
+ }
+
+ item->location_flags &= ~CLOUDLOG_JOURNAL;
+ if (!(item->location_flags & CLOUDLOG_CLOUD))
+ return;
+
+ map = bluesky_cachefile_lookup(fs,
+ item->location.directory,
+ item->location.sequence,
+ FALSE);
+ if (map == NULL)
+ return;
+
+ /* At this point, we have information about the log segment containing the
+ * item we need. If our item is already fetched, we have nothing to do
+ * except update the atime. If not, queue up a fetch of our object. */
+ const BlueSkyRangesetItem *rangeitem;
+ rangeitem = bluesky_rangeset_lookup(map->items,
+ item->location.offset);
+ if (rangeitem == NULL) {
+ if (map->prefetches == NULL)
+ map->prefetches = bluesky_rangeset_new();
+
+ gchar *id = bluesky_cloudlog_id_to_string(item->id);
+ if (bluesky_verbose)
+ g_print("Need to prefetch %s\n", id);
+ g_free(id);
+
+ bluesky_rangeset_insert(map->prefetches,
+ item->location.offset,
+ item->location.size, NULL);
+
+ uint64_t start, length;
+ bluesky_rangeset_get_extents(map->prefetches, &start, &length);
+ if (bluesky_verbose)
+ g_print("Range to prefetch: %"PRIu64" + %"PRIu64"\n",
+ start, length);
+ }
+
+ bluesky_cachefile_unref(map);
+ g_mutex_unlock(map->lock);
+}
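
A read-ahead sketch showing intended use (the caller and the BlueSkyBlock field names are assumptions): when a sequential read is detected, prefetching the refs for upcoming blocks makes the later bluesky_cloudlog_fetch calls likely to hit an already-warm cache file.

    /* Hypothetical read-ahead sketch.  The inode->blocks array of
     * BlueSkyBlock and its ref field are assumed names. */
    static void example_readahead(BlueSkyInode *inode, guint start, guint count)
    {
        for (guint i = start; i < start + count && i < inode->blocks->len; i++) {
            BlueSkyBlock *b = &g_array_index(inode->blocks, BlueSkyBlock, i);
            if (b->ref != NULL) {
                g_mutex_lock(b->ref->lock);   /* per tentative locking rule */
                bluesky_cloudlog_prefetch(b->ref);
                g_mutex_unlock(b->ref->lock);
            }
        }
    }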
+
/* Ensure that a cloud log item is loaded in memory, and if not read it in.
* TODO: Make asynchronous, and make this also fetch from the cloud. Right now
* we only read from the log. Log item must be locked. */
if (log->data != NULL)
return;
+ BlueSkyProfile *profile = bluesky_profile_get();
+ if (profile != NULL)
+ bluesky_profile_add_event(profile, g_strdup("Fetch log entry"));
+
/* There are actually two cases: a full deserialization if we have not ever
* read the object before, and a partial deserialization where the metadata
* is already in memory and we just need to remap the data. If the object
* Once that is done, we can fall through the case of remapping the data
* itself. */
if (log->type == LOGTYPE_UNKNOWN) {
- BlueSkyRCStr *raw = NULL;
- if ((log->location_flags | log->pending_write) & CLOUDLOG_JOURNAL) {
- raw = bluesky_log_map_object(log->fs, -1, log->log_seq,
- log->log_offset, log->log_size);
- }
-
- if (raw == NULL && (log->location_flags & CLOUDLOG_CLOUD)) {
- log->location_flags &= ~CLOUDLOG_JOURNAL;
- raw = bluesky_log_map_object(log->fs,
- log->location.directory,
- log->location.sequence,
- log->location.offset,
- log->location.size);
- }
-
+ BlueSkyRCStr *raw = bluesky_log_map_object(log, FALSE);
g_assert(raw != NULL);
bluesky_deserialize_cloudlog(log, raw->data, raw->len);
bluesky_string_unref(raw);
/* At this point all metadata should be available and we need only remap
* the object data. */
-
- int offset;
- if ((log->location_flags | log->pending_write) & CLOUDLOG_JOURNAL) {
- bluesky_cloudlog_stats_update(log, -1);
- offset = log->log_offset + sizeof(struct log_header);
- log->data = bluesky_log_map_object(log->fs, -1, log->log_seq,
- offset, log->data_size);
- bluesky_cloudlog_stats_update(log, 1);
- }
-
- if (log->data == NULL && (log->location_flags & CLOUDLOG_CLOUD)) {
- log->location_flags &= ~CLOUDLOG_JOURNAL;
- bluesky_cloudlog_stats_update(log, -1);
- offset = log->location.offset + sizeof(struct cloudlog_header);
- log->data = bluesky_log_map_object(log->fs, log->location.directory,
- log->location.sequence,
- offset, log->data_size);
- bluesky_cloudlog_stats_update(log, 1);
- }
+ log->data = bluesky_log_map_object(log, TRUE);
if (log->data == NULL) {
g_error("Unable to fetch cloudlog entry!");
}
+ if (profile != NULL)
+ bluesky_profile_add_event(profile, g_strdup("Fetch complete"));
g_cond_broadcast(log->cond);
}
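
The two hunks above fold the journal-first/cloud-fallback mapping logic into bluesky_log_map_object, which now takes the log item itself and a flag selecting a full-object or data-only mapping. A minimal sketch of the assumed new interface, reconstructed from the removed code (map_range is a hypothetical stand-in for the old five-argument primitive; the stats bookkeeping the old code did around data remapping is omitted):

    /* Sketch of the assumed reworked interface, not the real implementation.
     * data_only == FALSE maps the full serialized object; TRUE maps just the
     * payload past the header.  Journal items use struct log_header, cloud
     * items struct cloudlog_header, as in the code this replaces. */
    static BlueSkyRCStr *map_range(BlueSkyFS *fs, int directory, int sequence,
                                   off_t offset, size_t size);

    BlueSkyRCStr *bluesky_log_map_object(BlueSkyCloudLog *log, gboolean data_only)
    {
        BlueSkyRCStr *str = NULL;

        if ((log->location_flags | log->pending_write) & CLOUDLOG_JOURNAL) {
            off_t off = log->log_offset
                        + (data_only ? sizeof(struct log_header) : 0);
            str = map_range(log->fs, -1, log->log_seq, off,
                            data_only ? log->data_size : log->log_size);
        }
        if (str == NULL && (log->location_flags & CLOUDLOG_CLOUD)) {
            log->location_flags &= ~CLOUDLOG_JOURNAL;
            off_t off = log->location.offset
                        + (data_only ? sizeof(struct cloudlog_header) : 0);
            str = map_range(log->fs, log->location.directory,
                            log->location.sequence, off,
                            data_only ? log->data_size : log->location.size);
        }
        return str;
    }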
bluesky_cloudlog_serialize(ref, fs);
}
+ /* FIXME: Should this lock be taken earlier? */
g_mutex_lock(log->lock);
bluesky_cloudlog_fetch(log);
g_assert(log->data != NULL);
GString *data3 = g_string_new("");
bluesky_serialize_cloudlog(log, data1, data2, data3);
- /* TODO: Right now offset/size are set to the raw data, but we should add
- * header parsing to the code which loads objects back in. */
log->location = state->location;
log->location.offset = state->data->len;
log->data_size = data1->len;
struct cloudlog_header header;
memcpy(header.magic, CLOUDLOG_MAGIC, 4);
+ memset(header.crypt_auth, 0, sizeof(header.crypt_auth));
+ memset(header.crypt_iv, 0, sizeof(header.crypt_iv));
header.type = log->type + '0';
header.size1 = GUINT32_TO_LE(data1->len);
header.size2 = GUINT32_TO_LE(data2->len);
header.size3 = GUINT32_TO_LE(data3->len);
header.id = log->id;
+ header.inum = GUINT64_TO_LE(log->inum);
g_string_append_len(state->data, (const char *)&header, sizeof(header));
g_string_append_len(state->data, data1->str, data1->len);
log->location.size = state->data->len - log->location.offset;
+ g_string_free(data1, TRUE);
+ g_string_free(data2, TRUE);
+ g_string_free(data3, TRUE);
+
/* If the object we flushed was an inode, update the inode map. */
if (log->type == LOGTYPE_INODE) {
g_mutex_lock(fs->lock);
InodeMapEntry *entry = bluesky_inode_map_lookup(fs->inode_map,
log->inum, 1);
- entry->id = log->id;
- entry->location = log->location;
+ bluesky_cloudlog_unref_delayed(entry->item);
+ entry->item = log;
+ bluesky_cloudlog_ref(entry->item);
g_mutex_unlock(fs->lock);
}
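
The hunk above changes what an inode map entry stores: rather than copying the flushed inode's id and location, it now pins the cloud log item itself with a counted reference, and the old entry's reference is released through the delayed-unref queue because fs->lock is already held here. A sketch of the entry's assumed shape after the change (illustrative only):

    /* Assumed shape of InodeMapEntry after this change; the inum field is a
     * hypothetical key.  The old id/location copies are replaced by a
     * reference dropped via bluesky_cloudlog_unref_delayed(). */
    typedef struct {
        uint64_t inum;              /* inode number (hypothetical) */
        BlueSkyCloudLog *item;      /* referenced log item for the inode */
    } InodeMapEntry;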
async2->op = STORE_OP_PUT;
async2->key = g_strdup(async->key);
async2->data = record->data;
+ async2->profile = async->profile;
bluesky_string_ref(record->data);
bluesky_store_async_submit(async2);
bluesky_store_async_add_notifier(async2,
g_print("Serializing %zd bytes of data to cloud\n", state->data->len);
SerializedRecord *record = g_new0(SerializedRecord, 1);
+ bluesky_cloudlog_encrypt(state->data, fs->keys);
record->data = bluesky_string_new_from_gstring(state->data);
record->items = state->writeback_list;
record->lock = g_mutex_new();
state->location.offset = 0;
state->data = g_string_new("");
}
+
+/* Make an encryption pass over a cloud log segment to encrypt private data in
+ * it. */
+void bluesky_cloudlog_encrypt(GString *segment, BlueSkyCryptKeys *keys)
+{
+ char *data = segment->str;
+ size_t remaining_size = segment->len;
+
+ while (remaining_size >= sizeof(struct cloudlog_header)) {
+ struct cloudlog_header *header = (struct cloudlog_header *)data;
+ size_t item_size = sizeof(struct cloudlog_header)
+ + GUINT32_FROM_LE(header->size1)
+ + GUINT32_FROM_LE(header->size2)
+ + GUINT32_FROM_LE(header->size3);
+ if (item_size > remaining_size)
+ break;
+ bluesky_crypt_block_encrypt(data, item_size, keys);
+
+ data += item_size;
+ remaining_size -= item_size;
+ }
+}
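
The encryption and decryption passes use the same per-item walk, which depends on the size fields in each item header remaining readable after encryption. A possible shared helper (hypothetical refactor, not part of this change) would make that walk explicit:

    /* Hypothetical refactor sketch: iterate over the items in a segment,
     * stopping at a truncated trailing item.  The callback returns FALSE
     * to end the walk early. */
    typedef gboolean (*CloudlogItemFunc)(char *item, size_t item_size,
                                         size_t offset, gpointer user_data);

    static void cloudlog_foreach_item(char *data, size_t len,
                                      CloudlogItemFunc func, gpointer user_data)
    {
        size_t offset = 0;
        while (len - offset >= sizeof(struct cloudlog_header)) {
            struct cloudlog_header *header
                = (struct cloudlog_header *)(data + offset);
            size_t item_size = sizeof(struct cloudlog_header)
                                + GUINT32_FROM_LE(header->size1)
                                + GUINT32_FROM_LE(header->size2)
                                + GUINT32_FROM_LE(header->size3);
            if (item_size > len - offset)
                break;
            if (!func(data + offset, item_size, offset, user_data))
                break;
            offset += item_size;
        }
    }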
+
+/* Make a decryption pass over a cloud log segment to decrypt items which
+ * were encrypted. Also computes a list of all offsets at which valid cloud
+ * log items are found, and adds those offsets to items (if non-NULL).
+ *
+ * If allow_unauth is set to true, then allow a limited set of unauthenticated
+ * items that may have been rewritten by a file system cleaner. These include
+ * the checkpoint and inode map records only; other items must still pass
+ * authentication. */
+void bluesky_cloudlog_decrypt(char *segment, size_t len,
+ BlueSkyCryptKeys *keys,
+ BlueSkyRangeset *items,
+ gboolean allow_unauth)
+{
+ char *data = segment;
+ size_t remaining_size = len;
+ size_t offset = 0;
+
+ while (remaining_size >= sizeof(struct cloudlog_header)) {
+ struct cloudlog_header *header = (struct cloudlog_header *)data;
+ size_t item_size = sizeof(struct cloudlog_header)
+ + GUINT32_FROM_LE(header->size1)
+ + GUINT32_FROM_LE(header->size2)
+ + GUINT32_FROM_LE(header->size3);
+ if (item_size > remaining_size)
+ break;
+ if (bluesky_crypt_block_decrypt(data, item_size, keys, allow_unauth)) {
+ if (items != NULL) {
+ if (bluesky_verbose)
+ g_print(" data item at %zx\n", offset);
+ bluesky_rangeset_insert(items, offset, item_size,
+ GINT_TO_POINTER(TRUE));
+ }
+ } else {
+ g_warning("Unauthenticated data at offset %zd", offset);
+ if (items != NULL) {
+ bluesky_rangeset_insert(items, offset, item_size,
+ GINT_TO_POINTER(TRUE));
+ }
+ }
+
+ data += item_size;
+ offset += item_size;
+ remaining_size -= item_size;
+ }
+}
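
A usage sketch for the decryption pass (example_load_segment is hypothetical): a reader that has downloaded a cloud log segment decrypts it in place and collects the valid item extents into a rangeset, much as bluesky_cloudlog_prefetch consults map->items above. allow_unauth is TRUE here on the assumption that the segment may contain cleaner-rewritten checkpoint or inode map records.

    /* Hypothetical sketch: decrypt a freshly downloaded segment and record
     * the extents of the items it contains for later lookups. */
    static BlueSkyRangeset *example_load_segment(BlueSkyFS *fs,
                                                 char *buf, size_t len)
    {
        BlueSkyRangeset *items = bluesky_rangeset_new();
        bluesky_cloudlog_decrypt(buf, len, fs->keys, items, TRUE);
        return items;
    }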