Implement a (dumb) cache garbage collector.
diff --git a/bluesky/log.c b/bluesky/log.c
index cec46cb..976e32e 100644
@@ -36,7 +36,7 @@
 
 // Rough size limit for a log segment.  This is not a firm limit and there are
 // no absolute guarantees on the size of a log segment.
-#define LOG_SEGMENT_SIZE (1 << 23)
+#define LOG_SEGMENT_SIZE (1 << 24)
 
 #define HEADER_MAGIC 0x676f4c0a
 #define FOOTER_MAGIC 0x2e435243
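
For reference, 1 << 23 is 8 MiB and 1 << 24 is 16 MiB, so the hunk above doubles the rough size target for a log segment.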
@@ -77,8 +77,10 @@ static void log_commit(BlueSkyLog *log)
     while (log->committed != NULL) {
         BlueSkyCloudLog *item = (BlueSkyCloudLog *)log->committed->data;
         g_mutex_lock(item->lock);
+        bluesky_cloudlog_stats_update(item, -1);
         item->pending_write &= ~CLOUDLOG_JOURNAL;
         item->location_flags |= CLOUDLOG_JOURNAL;
+        bluesky_cloudlog_stats_update(item, 1);
         g_cond_signal(item->cond);
         g_mutex_unlock(item->lock);
         log->committed = g_slist_delete_link(log->committed, log->committed);
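
The subtract-then-add bracketing above suggests that bluesky_cloudlog_stats_update() buckets each item by its current pending_write/location_flags state, so the item has to be removed from the statistics under its old state before the flags change and re-added under the new one. A minimal sketch of such a flag-bucketed counter, using assumed counters that are not part of this commit:

    /* Hypothetical sketch only; the real bluesky_cloudlog_stats_update() is
     * defined elsewhere in the tree. */
    static gint stats_journal_pending;     /* assumed counters */
    static gint stats_journal_resident;

    void bluesky_cloudlog_stats_update(BlueSkyCloudLog *item, int delta)
    {
        if (item->pending_write & CLOUDLOG_JOURNAL)
            g_atomic_int_add(&stats_journal_pending, delta);
        if (item->location_flags & CLOUDLOG_JOURNAL)
            g_atomic_int_add(&stats_journal_resident, delta);
    }

Called with delta = -1 before a flag change and delta = +1 after it, the counters always reflect each item's current state.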
@@ -102,7 +104,7 @@ static gboolean log_open(BlueSkyLog *log)
     }
 
     while (log->fd < 0) {
-        g_snprintf(logname, sizeof(logname), "log-%08d", log->seq_num);
+        g_snprintf(logname, sizeof(logname), "journal-%08d", log->seq_num);
         log->fd = openat(log->dirfd, logname, O_CREAT|O_WRONLY|O_EXCL, 0600);
         if (log->fd < 0 && errno == EEXIST) {
             fprintf(stderr, "Log file %s already exists...\n", logname);
@@ -158,7 +160,9 @@ static gpointer log_thread(gpointer d)
             continue;
         }
 
+        bluesky_cloudlog_stats_update(item, -1);
         item->pending_write |= CLOUDLOG_JOURNAL;
+        bluesky_cloudlog_stats_update(item, 1);
 
         struct log_header header;
         struct log_footer footer;
@@ -200,6 +204,14 @@ static gpointer log_thread(gpointer d)
 
         offset += sizeof(header) + sizeof(footer) + item->data->len;
 
+        /* Replace the log item's string data with a memory-mapped copy of the
+         * data, now that it has been written to the log file.  (Even if it
+         * isn't yet on disk, it should at least be in the page cache and so
+         * available to memory map.) */
+        bluesky_string_unref(item->data);
+        item->data = NULL;
+        bluesky_cloudlog_fetch(item);
+
         log->committed  = g_slist_prepend(log->committed, item);
         g_atomic_int_add(&item->data_lock_count, -1);
         g_mutex_unlock(item->lock);
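
The comment above describes the intent of this hunk: once the data has been appended to the journal, the private heap copy is dropped and bluesky_cloudlog_fetch() re-reads it, which for journal entries means mapping the freshly written file. A self-contained sketch of that write-then-map pattern using plain POSIX calls (the function name and signature are illustrative, not part of BlueSky):

    #include <sys/types.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Append a buffer to a log file opened O_RDWR, then return a read-only
     * mapping of the bytes just written so the heap copy can be freed.
     * Error handling is abbreviated. */
    static const char *append_and_map(int fd, const void *buf, size_t len,
                                      off_t *offset_out)
    {
        off_t offset = lseek(fd, 0, SEEK_END);
        if (write(fd, buf, len) != (ssize_t)len)
            return NULL;              /* data now sits at least in the page cache */

        long page = sysconf(_SC_PAGESIZE);
        off_t map_start = offset - (offset % page);   /* mmap offsets are page-aligned */
        const char *base = mmap(NULL, (size_t)(offset - map_start) + len,
                                PROT_READ, MAP_SHARED, fd, map_start);
        if (base == MAP_FAILED)
            return NULL;

        *offset_out = offset;
        return base + (offset - map_start);           /* the newly written bytes */
    }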
@@ -222,7 +234,7 @@ BlueSkyLog *bluesky_log_new(const char *log_directory)
     log->seq_num = 0;
     log->queue = g_async_queue_new();
     log->mmap_lock = g_mutex_new();
-    log->mmap_cache = g_hash_table_new(NULL, NULL);
+    log->mmap_cache = g_hash_table_new(g_str_hash, g_str_equal);
 
     log->dirfd = open(log->log_directory, O_DIRECTORY);
     if (log->dirfd < 0) {
@@ -261,72 +273,244 @@ void bluesky_log_finish_all(GList *log_items)
  * to it. */
 static int page_size = 0;
 
-BlueSkyRCStr *bluesky_log_map_object(BlueSkyLog *log,
-                                     int log_seq, int log_offset, int log_size)
+void bluesky_cachefile_unref(BlueSkyCacheFile *cachefile)
+{
+    g_atomic_int_add(&cachefile->refcount, -1);
+}
+
+static void cloudlog_fetch_complete(BlueSkyStoreAsync *async,
+                                    BlueSkyCacheFile *cachefile)
+{
+    g_print("Fetch of %s from cloud complete, status = %d\n",
+            async->key, async->result);
+
+    if (async->result < 0)
+        return;
+
+    g_mutex_lock(cachefile->lock);
+    char *pathname = g_strdup_printf("%s/%s", cachefile->log->log_directory,
+                                     cachefile->filename);
+    if (!g_file_set_contents(pathname, async->data->data, async->data->len,
+                             NULL))
+    {
+        g_print("Error writing out fetched file to cache!\n");
+    }
+    g_free(pathname);
+    cachefile->fetching = FALSE;
+    cachefile->ready = TRUE;
+    bluesky_cachefile_unref(cachefile);
+    g_cond_broadcast(cachefile->cond);
+    g_mutex_unlock(cachefile->lock);
+}
+
+/* Find the BlueSkyCacheFile object for the given journal or cloud log segment.
+ * Returns the object in the locked state and with a reference taken. */
+BlueSkyCacheFile *bluesky_cachefile_lookup(BlueSkyFS *fs,
+                                           int clouddir, int log_seq)
 {
     if (page_size == 0) {
         page_size = getpagesize();
     }
 
-    BlueSkyMmap *map;
+    BlueSkyLog *log = fs->log;
+
+    char logname[64];
+    int type;
+
+    // A request for a local log file
+    if (clouddir < 0) {
+        sprintf(logname, "journal-%08d", log_seq);
+        type = CLOUDLOG_JOURNAL;
+    } else {
+        sprintf(logname, "log-%08d-%08d", clouddir, log_seq);
+        type = CLOUDLOG_CLOUD;
+    }
+
+    BlueSkyCacheFile *map;
     g_mutex_lock(log->mmap_lock);
-    map = g_hash_table_lookup(log->mmap_cache, GINT_TO_POINTER(log_seq));
+    map = g_hash_table_lookup(log->mmap_cache, logname);
 
     if (map == NULL) {
-        char logname[64];
-        g_snprintf(logname, sizeof(logname), "log-%08d", log_seq);
-        int fd = openat(log->dirfd, logname, O_RDONLY);
+        /* TODO: stat() call */
+        map = g_new0(BlueSkyCacheFile, 1);
+        map->lock = g_mutex_new();
+        map->type = type;
+        g_mutex_lock(map->lock);
+        map->cond = g_cond_new();
+        map->filename = g_strdup(logname);
+        map->log_seq = log_seq;
+        map->log = log;
+        g_atomic_int_set(&map->mapcount, 0);
+        g_atomic_int_set(&map->refcount, 0);
+
+        g_hash_table_insert(log->mmap_cache, map->filename, map);
+
+        // If the log file is stored in the cloud, we may need to fetch it
+        if (clouddir >= 0) {
+            g_atomic_int_inc(&map->refcount);
+            map->fetching = TRUE;
+            g_print("Starting fetch of %s from cloud\n", logname);
+            BlueSkyStoreAsync *async = bluesky_store_async_new(fs->store);
+            async->op = STORE_OP_GET;
+            async->key = g_strdup(logname);
+            bluesky_store_async_add_notifier(async,
+                                             (GFunc)cloudlog_fetch_complete,
+                                             map);
+            bluesky_store_async_submit(async);
+            bluesky_store_async_unref(async);
+        }
+    } else {
+        g_mutex_lock(map->lock);
+    }
+
+    g_mutex_unlock(log->mmap_lock);
+    g_atomic_int_inc(&map->refcount);
+    return map;
+}
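
bluesky_cachefile_lookup() returns with map->lock held and a reference already taken, so callers must drop both when done. A short sketch of the expected calling pattern (variables are illustrative; -1 selects the journal rather than a cloud log directory, as above):

    BlueSkyCacheFile *cachefile = bluesky_cachefile_lookup(fs, -1, seq);
    if (cachefile != NULL) {
        /* ... examine or use the cache file while its lock is held ... */
        bluesky_cachefile_unref(cachefile);
        g_mutex_unlock(cachefile->lock);
    }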
+
+BlueSkyRCStr *bluesky_log_map_object(BlueSkyFS *fs, int log_dir,
+                                     int log_seq, int log_offset, int log_size)
+{
+    if (page_size == 0) {
+        page_size = getpagesize();
+    }
+
+    BlueSkyLog *log = fs->log;
+    BlueSkyCacheFile *map = bluesky_cachefile_lookup(fs, log_dir, log_seq);
+
+    if (map == NULL) {
+        return NULL;
+    }
+
+    if (map->addr == NULL) {
+        while (!map->ready && map->fetching) {
+            g_print("Waiting for log segment to be fetched from cloud...\n");
+            g_cond_wait(map->cond, map->lock);
+        }
+
+        int fd = openat(log->dirfd, map->filename, O_RDONLY);
 
         if (fd < 0) {
-            fprintf(stderr, "Error opening logfile %s: %m\n", logname);
-            g_mutex_unlock(log->mmap_lock);
+            fprintf(stderr, "Error opening logfile %s: %m\n", map->filename);
+            bluesky_cachefile_unref(map);
+            g_mutex_unlock(map->lock);
             return NULL;
         }
 
-        map = g_new0(BlueSkyMmap, 1);
-
         off_t length = lseek(fd, 0, SEEK_END);
-        map->log_seq = log_seq;
         map->addr = (const char *)mmap(NULL, length, PROT_READ, MAP_SHARED,
                                        fd, 0);
+        g_atomic_int_add(&log->disk_used, -(map->len / 1024));
         map->len = length;
-        map->log = log;
-        g_atomic_int_set(&map->refcount, 0);
+        g_atomic_int_add(&log->disk_used, map->len / 1024);
 
-        g_hash_table_insert(log->mmap_cache, GINT_TO_POINTER(log_seq), map);
+        g_print("Re-mapped log segment %d...\n", log_seq);
+        g_atomic_int_inc(&map->refcount);
 
         close(fd);
     }
 
-    g_mutex_unlock(log->mmap_lock);
 
-    return bluesky_string_new_from_mmap(map, log_offset, log_size);
+    BlueSkyRCStr *str;
+    map->atime = bluesky_get_current_time();
+    str = bluesky_string_new_from_mmap(map, log_offset, log_size);
+    bluesky_cachefile_unref(map);
+    g_mutex_unlock(map->lock);
+    return str;
 }
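
With the new signature, callers identify a segment by (log_dir, log_seq) instead of a sequence number alone. A hedged usage sketch, following the conventions visible above (log_dir = -1 for the local journal, a non-negative value for a cloud log directory; seq, offset, and size are placeholders):

    BlueSkyRCStr *data = bluesky_log_map_object(fs, -1, seq, offset, size);
    if (data != NULL) {
        /* data->data / data->len give access to the mapped bytes */
        bluesky_string_unref(data);
    }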
 
-void bluesky_mmap_unref(BlueSkyMmap *mmap)
+void bluesky_mmap_unref(BlueSkyCacheFile *mmap)
 {
     if (mmap == NULL)
         return;
 
-    if (g_atomic_int_dec_and_test(&mmap->refcount)) {
-        /* There is a potential race condition here: the BlueSkyLog contains a
-         * hash table of currently-existing BlueSkyMmap objects, which does not
-         * hold a reference.  Some other thread might grab a new reference to
-         * this object after reading it from the hash table.  So, before
-         * destruction we need to grab the lock for the hash table, then check
-         * the reference count again.  If it is still zero, we can proceed with
-         * object destruction. */
-        BlueSkyLog *log = mmap->log;
-        g_mutex_lock(log->mmap_lock);
-        if (g_atomic_int_get(&mmap->refcount) > 0) {
-            g_mutex_unlock(log->mmap_lock);
-            return;
+    if (g_atomic_int_dec_and_test(&mmap->mapcount)) {
+        g_mutex_lock(mmap->lock);
+        if (g_atomic_int_get(&mmap->mapcount) == 0) {
+            g_print("Unmapped log segment %d...\n", mmap->log_seq);
+            munmap((void *)mmap->addr, mmap->len);
+            mmap->addr = NULL;
+            g_atomic_int_add(&mmap->refcount, -1);
         }
-
-        g_hash_table_remove(log->mmap_cache, GINT_TO_POINTER(mmap->log_seq));
-        munmap((void *)mmap->addr, mmap->len);
-        g_free(mmap);
-        g_mutex_unlock(log->mmap_lock);
+        g_mutex_unlock(mmap->lock);
     }
 }
 
+/* Scan through all currently-stored files in the journal/cache and garbage
+ * collect old unused ones, if needed. */
+static void gather_cachefiles(gpointer key, gpointer value, gpointer user_data)
+{
+    GList **files = (GList **)user_data;
+    *files = g_list_prepend(*files, value);
+}
+
+static gint compare_cachefiles(gconstpointer a, gconstpointer b)
+{
+    int64_t ta, tb;
+
+    ta = ((BlueSkyCacheFile *)a)->atime;
+    tb = ((BlueSkyCacheFile *)b)->atime;
+    if (ta < tb)
+        return -1;
+    else if (ta > tb)
+        return 1;
+    else
+        return 0;
+}
+
+void bluesky_cachefile_gc(BlueSkyFS *fs)
+{
+    GList *files = NULL;
+
+    g_mutex_lock(fs->log->mmap_lock);
+    g_hash_table_foreach(fs->log->mmap_cache, gather_cachefiles, &files);
+
+    /* Sort based on atime.  The atime should be stable since it shouldn't be
+     * updated except by threads which can grab the mmap_lock, which we already
+     * hold. */
+    files = g_list_sort(files, compare_cachefiles);
+
+    /* Walk the list of files, starting with the oldest, deleting files if
+     * possible until enough space has been reclaimed. */
+    g_print("\nScanning cache: (total size = %d kB)\n", fs->log->disk_used);
+    while (files != NULL) {
+        BlueSkyCacheFile *cachefile = (BlueSkyCacheFile *)files->data;
+        /* Try to lock the structure, but if the lock is held by another thread
+         * then we'll just skip the file on this pass. */
+        if (g_mutex_trylock(cachefile->lock)) {
+            int64_t age = bluesky_get_current_time() - cachefile->atime;
+            g_print("%s addr=%p mapcount=%d refcount=%d atime_age=%f",
+                    cachefile->filename, cachefile->addr, cachefile->mapcount,
+                    cachefile->refcount, age / 1e6);
+            if (cachefile->fetching)
+                g_print(" (fetching)");
+            g_print("\n");
+
+            if (g_atomic_int_get(&cachefile->refcount) == 0
+                && g_atomic_int_get(&cachefile->mapcount) == 0
+                && cachefile->type == CLOUDLOG_CLOUD /* FIXME: journals too */)
+            {
+                g_print("   ...deleting\n");
+                if (unlinkat(fs->log->dirfd, cachefile->filename, 0) < 0) {
+                    fprintf(stderr, "Unable to unlink cache file %s: %m\n",
+                            cachefile->filename);
+                }
+
+                g_hash_table_remove(fs->log->mmap_cache, cachefile->filename);
+                g_mutex_unlock(cachefile->lock);
+                g_mutex_free(cachefile->lock);
+                g_cond_free(cachefile->cond);
+                g_free(cachefile->filename);
+                g_free(cachefile);
+            } else {
+                g_mutex_unlock(cachefile->lock);
+            }
+        }
+        files = g_list_delete_link(files, files);
+    }
+    g_list_free(files);
+
+    g_mutex_unlock(fs->log->mmap_lock);
+}
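
The collector itself is the only thing added here; its call site is not part of this diff. One plausible way to drive it, sketched with a hypothetical threshold constant (neither the trigger point nor CACHE_SOFT_LIMIT_KB appears in this commit):

    #define CACHE_SOFT_LIMIT_KB (256 * 1024)   /* assumed 256 MB soft limit */

    static void maybe_collect_cache(BlueSkyFS *fs)
    {
        /* disk_used is maintained in kilobytes by the mapping code above. */
        if (g_atomic_int_get(&fs->log->disk_used) > CACHE_SOFT_LIMIT_KB)
            bluesky_cachefile_gc(fs);
    }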