cachefile->filename);
async->data = bluesky_string_dup(async->data);
bluesky_cloudlog_decrypt(async->data->data, async->data->len,
- cachefile->fs->keys);
+ cachefile->fs->keys, cachefile->items);
if (!g_file_set_contents(pathname, async->data->data, async->data->len,
NULL))
g_print("Error writing out fetched file to cache!\n");
map->log = log;
g_atomic_int_set(&map->mapcount, 0);
g_atomic_int_set(&map->refcount, 0);
+ map->items = bluesky_rangeset_new();
g_hash_table_insert(log->mmap_cache, map->filename, map);
return map;
}
+/* The arguments are mostly straightforward. log_dir is -1 for access from the
+ * journal, and non-negative for access to a cloud log segment. map_data
+ * should be TRUE for the case that we are mapping just the data of an item where
+ * we have already parsed the item headers; this suppresses the error when the
+ * access is not to the first bytes of the item. */
BlueSkyRCStr *bluesky_log_map_object(BlueSkyFS *fs, int log_dir,
- int log_seq, int log_offset, int log_size)
+ int log_seq, int log_offset, int log_size,
+ gboolean map_data)
{
if (page_size == 0) {
page_size = getpagesize();
return NULL;
}
+ /* Log segments fetched from the cloud might only be partially-fetched.
+ * Check whether the object we are interested in is available. */
+ if (log_dir >= 0) {
+ const BlueSkyRangesetItem *rangeitem;
+ rangeitem = bluesky_rangeset_lookup(map->items, log_offset);
+ if (rangeitem == NULL || rangeitem->start != log_offset) {
+ g_warning("log-%d: Item at offset %d does not seem to be available\n", log_seq, log_offset);
+ }
+ if (map_data && rangeitem != NULL
+ && log_offset > rangeitem->start
+ && log_size <= rangeitem->length - (log_offset - rangeitem->start))
+ {
+ g_warning(" ...allowing access to middle of log item");
+ }
+ }
+
if (map->addr == NULL) {
while (!map->ready && map->fetching) {
g_print("Waiting for log segment to be fetched from cloud...\n");