Implement fetching of cloud log items via range requests.
[bluesky.git] / bluesky / log.c
index 38606c1..88b95df 100644 (file)
@@ -110,7 +110,8 @@ static gboolean log_open(BlueSkyLog *log)
         }
     }
 
-    log->current_log = bluesky_cachefile_lookup(log->fs, -1, log->seq_num);
+    log->current_log = bluesky_cachefile_lookup(log->fs, -1, log->seq_num,
+                                                FALSE);
     g_assert(log->current_log != NULL);
     g_mutex_unlock(log->current_log->lock);
 
@@ -354,59 +355,13 @@ void bluesky_cachefile_unref(BlueSkyCacheFile *cachefile)
     g_atomic_int_add(&cachefile->refcount, -1);
 }
 
-static void cloudlog_fetch_complete(BlueSkyStoreAsync *async,
-                                    BlueSkyCacheFile *cachefile);
-
-static void cloudlog_fetch_start(BlueSkyCacheFile *cachefile)
-{
-    g_atomic_int_inc(&cachefile->refcount);
-    cachefile->fetching = TRUE;
-    g_print("Starting fetch of %s from cloud\n", cachefile->filename);
-    BlueSkyStoreAsync *async = bluesky_store_async_new(cachefile->fs->store);
-    async->op = STORE_OP_GET;
-    async->key = g_strdup(cachefile->filename);
-    bluesky_store_async_add_notifier(async,
-                                     (GFunc)cloudlog_fetch_complete,
-                                     cachefile);
-    bluesky_store_async_submit(async);
-    bluesky_store_async_unref(async);
-}
-
-static void cloudlog_fetch_complete(BlueSkyStoreAsync *async,
-                                    BlueSkyCacheFile *cachefile)
-{
-    g_print("Fetch of %s from cloud complete, status = %d\n",
-            async->key, async->result);
-
-    g_mutex_lock(cachefile->lock);
-    if (async->result >= 0) {
-        char *pathname = g_strdup_printf("%s/%s",
-                                         cachefile->log->log_directory,
-                                         cachefile->filename);
-        async->data = bluesky_string_dup(async->data);
-        bluesky_cloudlog_decrypt(async->data->data, async->data->len,
-                                 cachefile->fs->keys, cachefile->items);
-        if (!g_file_set_contents(pathname, async->data->data, async->data->len,
-                                 NULL))
-            g_print("Error writing out fetched file to cache!\n");
-        g_free(pathname);
-
-        cachefile->fetching = FALSE;
-        cachefile->ready = TRUE;
-    } else {
-        g_print("Error fetching from cloud, retrying...\n");
-        cloudlog_fetch_start(cachefile);
-    }
-
-    bluesky_cachefile_unref(cachefile);
-    g_cond_broadcast(cachefile->cond);
-    g_mutex_unlock(cachefile->lock);
-}
+static void cloudlog_fetch_start(BlueSkyCacheFile *cachefile);
 
 /* Find the BlueSkyCacheFile object for the given journal or cloud log segment.
  * Returns the object in the locked state and with a reference taken. */
 BlueSkyCacheFile *bluesky_cachefile_lookup(BlueSkyFS *fs,
-                                           int clouddir, int log_seq)
+                                           int clouddir, int log_seq,
+                                           gboolean start_fetch)
 {
     if (page_size == 0) {
         page_size = getpagesize();
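The lookup above returns the cachefile locked and with a reference taken, so
every caller must eventually drop both.  A minimal caller sketch (illustrative
only, not part of the patch; assumes an existing BlueSkyFS *fs and journal
sequence number seq):

    BlueSkyCacheFile *cf = bluesky_cachefile_lookup(fs, -1, seq, FALSE);
    if (cf != NULL) {
        /* ... examine or update cf while holding cf->lock ... */
        bluesky_cachefile_unref(cf);    /* drop the reference from the lookup */
        g_mutex_unlock(cf->lock);       /* then release the lock */
    }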
@@ -455,8 +410,14 @@ BlueSkyCacheFile *bluesky_cachefile_lookup(BlueSkyFS *fs,
 
         g_hash_table_insert(log->mmap_cache, map->filename, map);
 
+        /* Create the cache file and extend it to the full (estimated)
+         * segment size so that it can be mmapped even before all of its
+         * contents have been fetched. */
+        int fd = openat(log->dirfd, logname, O_WRONLY | O_CREAT, 0600);
+        if (fd >= 0) {
+            ftruncate(fd, 5 << 20);     // FIXME: hard-coded segment size
+            close(fd);
+        }
+
         // If the log file is stored in the cloud, we may need to fetch it
-        if (clouddir >= 0)
+        if (clouddir >= 0 && start_fetch)
             cloudlog_fetch_start(map);
     } else {
         g_mutex_lock(map->lock);
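Since the file is created empty and then immediately extended with ftruncate,
it is sparse on typical Unix filesystems: the 5 MB reservation consumes no
disk space until fetched ranges are actually written.  A quick check of this
(illustrative sketch, not part of the patch; reuses log->dirfd and logname
from the code above):

    struct stat st;
    int cfd = openat(log->dirfd, logname, O_RDONLY);
    if (cfd >= 0 && fstat(cfd, &st) == 0)
        g_print("apparent size: %jd bytes, allocated: %jd bytes\n",
                (intmax_t)st.st_size, (intmax_t)st.st_blocks * 512);
    if (cfd >= 0)
        close(cfd);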
@@ -468,55 +429,164 @@ BlueSkyCacheFile *bluesky_cachefile_lookup(BlueSkyFS *fs,
     return map;
 }
 
+/* Write the full buffer at the given offset, retrying interrupted (EINTR)
+ * and short writes; on an unrecoverable error a warning is logged and the
+ * rest of the write abandoned. */
+static void robust_pwrite(int fd, const char *buf, ssize_t count, off_t offset)
+{
+    while (count > 0) {
+        ssize_t written = pwrite(fd, buf, count, offset);
+        if (written < 0) {
+            if (errno == EINTR)
+                continue;
+            g_warning("pwrite failure: %m");
+            return;
+        }
+        buf += written;
+        count -= written;
+        offset += written;
+    }
+}
+
+static void cloudlog_partial_fetch_complete(BlueSkyStoreAsync *async,
+                                            BlueSkyCacheFile *cachefile);
+
+/* Kick off an asynchronous fetch of a byte range of a cloud log segment.
+ * Completion is handled by cloudlog_partial_fetch_complete below. */
+static void cloudlog_partial_fetch_start(BlueSkyCacheFile *cachefile,
+                                         size_t offset, size_t length)
+{
+    g_atomic_int_inc(&cachefile->refcount);
+    g_print("Starting fetch of %s from cloud\n", cachefile->filename);
+    BlueSkyStoreAsync *async = bluesky_store_async_new(cachefile->fs->store);
+    async->op = STORE_OP_GET;
+    async->key = g_strdup(cachefile->filename);
+    async->start = offset;
+    async->len = length;
+    bluesky_store_async_add_notifier(async,
+                                     (GFunc)cloudlog_partial_fetch_complete,
+                                     cachefile);
+    bluesky_store_async_submit(async);
+    bluesky_store_async_unref(async);
+}
+
+/* Notifier run when a fetch (partial or whole-segment) completes.  Validated
+ * items are written into the local cache file; on error the same range is
+ * requested again. */
+static void cloudlog_partial_fetch_complete(BlueSkyStoreAsync *async,
+                                            BlueSkyCacheFile *cachefile)
+{
+    g_print("Partial fetch of %s from cloud complete, status = %d\n",
+            async->key, async->result);
+
+    g_mutex_lock(cachefile->lock);
+    if (async->result >= 0) {
+        /* Decrypt the fetched items and write the valid ones out to the
+         * local log, but only if they do not overlap existing objects.
+         * This protects against an attack in which the cloud provider
+         * moves one valid object to another offset to overwrite data that
+         * we have already fetched. */
+        BlueSkyRangeset *items = bluesky_rangeset_new();
+        int fd = openat(cachefile->log->dirfd, cachefile->filename, O_WRONLY);
+        if (fd >= 0) {
+            async->data = bluesky_string_dup(async->data);
+            bluesky_cloudlog_decrypt(async->data->data, async->data->len,
+                                     cachefile->fs->keys, items);
+            uint64_t item_offset = 0;
+            while (TRUE) {
+                const BlueSkyRangesetItem *item;
+                item = bluesky_rangeset_lookup_next(items, item_offset);
+                if (item == NULL)
+                    break;
+                g_print("  item offset from range request: %d\n",
+                        (int)(item->start + async->start));
+                if (bluesky_rangeset_insert(cachefile->items,
+                                            async->start + item->start,
+                                            item->length, item->data))
+                {
+                    robust_pwrite(fd, async->data->data + item->start,
+                                  item->length, async->start + item->start);
+                } else {
+                    g_print("    item overlaps existing data!\n");
+                }
+                item_offset = item->start + 1;
+            }
+            /* TODO: Iterate over items and merge into cached file. */
+            close(fd);
+        } else {
+            g_warning("Unable to open and write to cache file %s: %m",
+                      cachefile->filename);
+        }
+    } else {
+        g_print("Error fetching from cloud, retrying...\n");
+        cloudlog_partial_fetch_start(cachefile, async->start, async->len);
+    }
+
+    bluesky_cachefile_unref(cachefile);
+    g_cond_broadcast(cachefile->cond);
+    g_mutex_unlock(cachefile->lock);
+}
+
+/* Fetch an entire cloud log segment.  The result is handled by
+ * cloudlog_partial_fetch_complete, which treats it as a single range
+ * starting at offset zero. */
+static void cloudlog_fetch_start(BlueSkyCacheFile *cachefile)
+{
+    g_atomic_int_inc(&cachefile->refcount);
+    cachefile->fetching = TRUE;
+    g_print("Starting fetch of %s from cloud\n", cachefile->filename);
+    BlueSkyStoreAsync *async = bluesky_store_async_new(cachefile->fs->store);
+    async->op = STORE_OP_GET;
+    async->key = g_strdup(cachefile->filename);
+    bluesky_store_async_add_notifier(async,
+                                     (GFunc)cloudlog_partial_fetch_complete,
+                                     cachefile);
+    bluesky_store_async_submit(async);
+    bluesky_store_async_unref(async);
+}
+
-/* The arguments are mostly straightforward.  log_dir is -1 for access from the
- * journal, and non-negative for access to a cloud log segment.  map_data
- * should be TRUE for the case that are mapping just the data of an item where
- * we have already parsed the item headers; this surpresses the error when the
- * access is not to the first bytes of the item. */
+/* Map the contents of a log item into memory, fetching it from the cloud
+ * first if necessary.  map_data should be TRUE when we are mapping just the
+ * data of an item whose headers we have already parsed; in that case the
+ * mapping is advanced past the item header. */
-BlueSkyRCStr *bluesky_log_map_object(BlueSkyFS *fs, int log_dir,
-                                     int log_seq, int log_offset, int log_size,
-                                     gboolean map_data)
+BlueSkyRCStr *bluesky_log_map_object(BlueSkyCloudLog *item, gboolean map_data)
 {
+    BlueSkyFS *fs = item->fs;
+    BlueSkyLog *log = fs->log;
+    BlueSkyCacheFile *map = NULL;
+    BlueSkyRCStr *str = NULL;
+    int location = 0;
+    size_t file_offset = 0, file_size = 0;
+    gboolean range_request = TRUE;
+
     if (page_size == 0) {
         page_size = getpagesize();
     }
 
-    BlueSkyLog *log = fs->log;
-    BlueSkyCacheFile *map = bluesky_cachefile_lookup(fs, log_dir, log_seq);
+    bluesky_cloudlog_stats_update(item, -1);
 
-    if (map == NULL) {
-        return NULL;
+    /* First, check to see if the journal still contains a copy of the item and
+     * if so use that. */
+    if ((item->location_flags | item->pending_write) & CLOUDLOG_JOURNAL) {
+        map = bluesky_cachefile_lookup(fs, -1, item->log_seq, TRUE);
+        if (map != NULL) {
+            location = CLOUDLOG_JOURNAL;
+            file_offset = item->log_offset;
+            file_size = item->log_size;
+        }
     }
 
-    /* Log segments fetched from the cloud might only be partially-fetched.
-     * Check whether the object we are interested in is available. */
-    if (log_dir >= 0) {
-        const BlueSkyRangesetItem *rangeitem;
-        rangeitem = bluesky_rangeset_lookup(map->items, log_offset);
-        if (rangeitem == NULL || rangeitem->start != log_offset) {
-            g_warning("log-%d: Item at offset %d does not seem to be available\n", log_seq, log_offset);
-        }
-        if (map_data && rangeitem != NULL
-            && log_offset > rangeitem->start
-            && log_size <= rangeitem->length - (log_offset - rangeitem->start))
-        {
-            g_warning("  ...allowing access to middle of log item");
+    if (location == 0 && (item->location_flags & CLOUDLOG_CLOUD)) {
+        item->location_flags &= ~CLOUDLOG_JOURNAL;
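+        /* With range requests enabled, start_fetch is FALSE: rather than
+         * prefetching the whole segment, the item's exact byte range is
+         * requested on demand below. */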
+        map = bluesky_cachefile_lookup(fs,
+                                       item->location.directory,
+                                       item->location.sequence,
+                                       !range_request);
+        if (map == NULL) {
+            g_warning("Unable to remap cloud log segment!");
+            goto exit1;
         }
+        location = CLOUDLOG_CLOUD;
+        file_offset = item->location.offset;
+        file_size = item->location.size;
     }
 
     if (map->addr == NULL) {
-        while (!map->ready && map->fetching) {
-            g_print("Waiting for log segment to be fetched from cloud...\n");
-            g_cond_wait(map->cond, map->lock);
-        }
-
         int fd = openat(log->dirfd, map->filename, O_RDONLY);
 
         if (fd < 0) {
             fprintf(stderr, "Error opening logfile %s: %m\n", map->filename);
-            bluesky_cachefile_unref(map);
-            g_mutex_unlock(map->lock);
-            return NULL;
+            goto exit2;
         }
 
         off_t length = lseek(fd, 0, SEEK_END);
@@ -526,17 +596,55 @@ BlueSkyRCStr *bluesky_log_map_object(BlueSkyFS *fs, int log_dir,
         map->len = length;
         g_atomic_int_add(&log->disk_used, map->len / 1024);
 
-        g_print("Re-mapped log segment %d...\n", log_seq);
         g_atomic_int_inc(&map->refcount);
 
         close(fd);
     }
 
-    BlueSkyRCStr *str;
+    /* Log segments fetched from the cloud might only be partially-fetched.
+     * Check whether the object we are interested in is available. */
+    if (location == CLOUDLOG_CLOUD) {
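+        /* Wait until the item's exact byte range appears in the rangeset.
+         * Each completed fetch broadcasts on map->cond, which wakes us to
+         * re-check. */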
+        while (TRUE) {
+            const BlueSkyRangesetItem *rangeitem;
+            rangeitem = bluesky_rangeset_lookup(map->items, file_offset);
+            if (rangeitem != NULL && (rangeitem->start != file_offset
+                                      || rangeitem->length != file_size)) {
+                g_warning("log-%d: Item offset %zd seems to be invalid!",
+                          (int)item->location.sequence, file_offset);
+                goto exit2;
+            }
+            if (rangeitem == NULL) {
+                g_print("Item at offset 0x%zx not available, need to fetch.\n",
+                        file_offset);
+                if (range_request)
+                    cloudlog_partial_fetch_start(map, file_offset, file_size);
+                g_cond_wait(map->cond, map->lock);
+            } else if (rangeitem->start == file_offset
+                       && rangeitem->length == file_size) {
+                g_print("Item now available.\n");
+                break;
+            }
+        }
+    }
+
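+    /* When mapping only the payload, advance past the item header; journal
+     * entries and cloud log items use different header formats. */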
+    if (map_data) {
+        if (location == CLOUDLOG_JOURNAL)
+            file_offset += sizeof(struct log_header);
+        else
+            file_offset += sizeof(struct cloudlog_header);
+
+        file_size = item->data_size;
+    }
+    str = bluesky_string_new_from_mmap(map, file_offset, file_size);
     map->atime = bluesky_get_current_time();
-    str = bluesky_string_new_from_mmap(map, log_offset, log_size);
+
+    g_print("Returning item at offset 0x%zx.\n", file_offset);
+
+exit2:
     bluesky_cachefile_unref(map);
     g_mutex_unlock(map->lock);
+exit1:
+    bluesky_cloudlog_stats_update(item, 1);
     return str;
 }
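The rangeset insert in cloudlog_partial_fetch_complete is what enforces the
no-overlap rule described in the comment there.  A standalone sketch of the
behavior the code relies on (illustrative only; assumes bluesky_rangeset_insert
tolerates a NULL data pointer and returns FALSE for any range that overlaps an
existing entry):

    BlueSkyRangeset *rs = bluesky_rangeset_new();
    g_assert(bluesky_rangeset_insert(rs, 0, 512, NULL));     /* first item accepted */
    g_assert(!bluesky_rangeset_insert(rs, 256, 512, NULL));  /* overlap rejected */
    g_assert(bluesky_rangeset_insert(rs, 512, 128, NULL));   /* adjacent range accepted */

Because a valid object relocated by a malicious provider fails the insert, it
is never written over bytes already present in the cache file.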