g_mutex_lock(item->lock);
bluesky_cloudlog_stats_update(item, -1);
item->pending_write &= ~CLOUDLOG_JOURNAL;
- item->location_flags |= CLOUDLOG_JOURNAL;
+ item->location_flags
+ = (item->location_flags & ~CLOUDLOG_UNCOMMITTED) | CLOUDLOG_JOURNAL;
bluesky_cloudlog_stats_update(item, 1);
g_cond_signal(item->cond);
g_mutex_unlock(item->lock);
log->mmap_lock = g_mutex_new();
log->mmap_cache = g_hash_table_new(g_str_hash, g_str_equal);
+ /* Determine the highest-numbered log file, so that we can start writing
+ * out new journal entries at the next sequence number. */
+ GDir *dir = g_dir_open(log_directory, 0, NULL);
+ if (dir != NULL) {
+ const gchar *file;
+ while ((file = g_dir_read_name(dir)) != NULL) {
+ if (strncmp(file, "journal-", 8) == 0) {
+ log->seq_num = MAX(log->seq_num, atoi(&file[8]) + 1);
+ }
+ }
+ g_dir_close(dir);
+ g_print("Starting journal at sequence number %d\n", log->seq_num);
+ }
+
log->dirfd = open(log->log_directory, O_DIRECTORY);
if (log->dirfd < 0) {
fprintf(stderr, "Unable to open logging directory: %m\n");
void bluesky_log_item_submit(BlueSkyCloudLog *item, BlueSkyLog *log)
{
- bluesky_cloudlog_ref(item);
- g_atomic_int_add(&item->data_lock_count, 1);
- g_async_queue_push(log->queue, item);
+ /* Skip items already recorded in the journal; re-submitting would
+ * double-count the reference and the data lock taken below. */
+ if (!(item->location_flags & CLOUDLOG_JOURNAL)) {
+ bluesky_cloudlog_ref(item);
+ /* Mark the item as not yet durably written; the writer clears this
+ * flag (and signals item->cond) once the journal write completes,
+ * which is what bluesky_log_finish_all waits on. */
+ item->location_flags |= CLOUDLOG_UNCOMMITTED;
+ g_atomic_int_add(&item->data_lock_count, 1);
+ g_async_queue_push(log->queue, item);
+ }
}
void bluesky_log_finish_all(GList *log_items)
BlueSkyCloudLog *item = (BlueSkyCloudLog *)log_items->data;
g_mutex_lock(item->lock);
- while ((item->pending_write & CLOUDLOG_JOURNAL))
+ while ((item->location_flags & CLOUDLOG_UNCOMMITTED))
g_cond_wait(item->cond, item->lock);
g_mutex_unlock(item->lock);
bluesky_cloudlog_unref(item);
BlueSkyCloudLog *marker = bluesky_cloudlog_new(fs, NULL);
marker->type = LOGTYPE_JOURNAL_MARKER;
marker->data = bluesky_string_new(g_strdup(""), 0);
+ bluesky_cloudlog_stats_update(marker, 1);
bluesky_cloudlog_sync(marker);
g_mutex_lock(marker->lock);
g_string_append_len(loc, (const gchar *)&seq, sizeof(seq));
g_string_append_len(loc, (const gchar *)&offset, sizeof(offset));
commit->data = bluesky_string_new_from_gstring(loc);
+ bluesky_cloudlog_stats_update(commit, 1);
bluesky_cloudlog_sync(commit);
g_mutex_lock(commit->lock);
- while ((commit->pending_write & CLOUDLOG_JOURNAL))
+ while ((commit->location_flags & CLOUDLOG_UNCOMMITTED))
g_cond_wait(commit->cond, commit->lock);
g_mutex_unlock(commit->lock);
char *pathname = g_strdup_printf("%s/%s",
cachefile->log->log_directory,
cachefile->filename);
+ async->data = bluesky_string_dup(async->data);
+ bluesky_cloudlog_decrypt(async->data->data, async->data->len,
+ cachefile->fs->keys, cachefile->items);
if (!g_file_set_contents(pathname, async->data->data, async->data->len,
NULL))
g_print("Error writing out fetched file to cache!\n");
map->log = log;
g_atomic_int_set(&map->mapcount, 0);
g_atomic_int_set(&map->refcount, 0);
+ map->items = bluesky_rangeset_new();
g_hash_table_insert(log->mmap_cache, map->filename, map);
return map;
}
+/* The arguments are mostly straightforward. log_dir is -1 for access from the
+ * journal, and non-negative for access to a cloud log segment. map_data
+ * should be TRUE in the case where we are mapping just the data of an item
+ * whose headers we have already parsed; this suppresses the error when the
+ * access is not to the first bytes of the item. */
BlueSkyRCStr *bluesky_log_map_object(BlueSkyFS *fs, int log_dir,
- int log_seq, int log_offset, int log_size)
+ int log_seq, int log_offset, int log_size,
+ gboolean map_data)
{
if (page_size == 0) {
page_size = getpagesize();
return NULL;
}
+ /* Log segments fetched from the cloud might only be partially-fetched.
+ * Check whether the object we are interested in is available. */
+ if (log_dir >= 0) {
+ const BlueSkyRangesetItem *rangeitem;
+ rangeitem = bluesky_rangeset_lookup(map->items, log_offset);
+ if (rangeitem == NULL || rangeitem->start != log_offset) {
+ g_warning("log-%d: Item at offset %d does not seem to be available\n", log_seq, log_offset);
+ }
+ if (map_data && rangeitem != NULL
+ && log_offset > rangeitem->start
+ && log_size <= rangeitem->length - (log_offset - rangeitem->start))
+ {
+ g_warning(" ...allowing access to middle of log item");
+ }
+ }
+
if (map->addr == NULL) {
while (!map->ready && map->fetching) {
g_print("Waiting for log segment to be fetched from cloud...\n");
close(fd);
}
- g_mutex_unlock(log->mmap_lock);
-
BlueSkyRCStr *str;
map->atime = bluesky_get_current_time();
str = bluesky_string_new_from_mmap(map, log_offset, log_size);
}
}
-/* Scan through all currently-stored files in the journal/cache and garbage
- * collect old unused ones, if needed. */
-static void gather_cachefiles(gpointer key, gpointer value, gpointer user_data)
-{
- GList **files = (GList **)user_data;
- *files = g_list_prepend(*files, value);
-}
-
-static gint compare_cachefiles(gconstpointer a, gconstpointer b)
-{
- int64_t ta, tb;
-
- ta = ((BlueSkyCacheFile *)a)->atime;
- tb = ((BlueSkyCacheFile *)b)->atime;
- if (ta < tb)
- return -1;
- else if (ta > tb)
- return 1;
- else
- return 0;
-}
-
-void bluesky_cachefile_gc(BlueSkyFS *fs)
-{
- GList *files = NULL;
-
- g_mutex_lock(fs->log->mmap_lock);
- g_hash_table_foreach(fs->log->mmap_cache, gather_cachefiles, &files);
-
- /* Sort based on atime. The atime should be stable since it shouln't be
- * updated except by threads which can grab the mmap_lock, which we already
- * hold. */
- files = g_list_sort(files, compare_cachefiles);
-
- /* Walk the list of files, starting with the oldest, deleting files if
- * possible until enough space has been reclaimed. */
- g_print("\nScanning cache: (total size = %d kB)\n", fs->log->disk_used);
- while (files != NULL) {
- BlueSkyCacheFile *cachefile = (BlueSkyCacheFile *)files->data;
- /* Try to lock the structure, but if the lock is held by another thread
- * then we'll just skip the file on this pass. */
- if (g_mutex_trylock(cachefile->lock)) {
- int64_t age = bluesky_get_current_time() - cachefile->atime;
- g_print("%s addr=%p mapcount=%d refcount=%d atime_age=%f",
- cachefile->filename, cachefile->addr, cachefile->mapcount,
- cachefile->refcount, age / 1e6);
- if (cachefile->fetching)
- g_print(" (fetching)");
- g_print("\n");
-
- gboolean deletion_candidate = FALSE;
- if (g_atomic_int_get(&fs->log->disk_used)
- > bluesky_options.cache_size
- && g_atomic_int_get(&cachefile->refcount) == 0
- && g_atomic_int_get(&cachefile->mapcount) == 0)
- {
- deletion_candidate = TRUE;
- }
-
- /* Don't allow journal files to be reclaimed until all data is
- * known to be durably stored in the cloud. */
- if (cachefile->type == CLOUDLOG_JOURNAL
- && cachefile->log_seq >= fs->log->journal_watermark)
- {
- deletion_candidate = FALSE;
- }
-
- if (deletion_candidate) {
- g_print(" ...deleting\n");
- if (unlinkat(fs->log->dirfd, cachefile->filename, 0) < 0) {
- fprintf(stderr, "Unable to unlink journal %s: %m\n",
- cachefile->filename);
- }
-
- g_atomic_int_add(&fs->log->disk_used, -(cachefile->len / 1024));
- g_hash_table_remove(fs->log->mmap_cache, cachefile->filename);
- g_mutex_unlock(cachefile->lock);
- g_mutex_free(cachefile->lock);
- g_cond_free(cachefile->cond);
- g_free(cachefile->filename);
- g_free(cachefile);
- } else {
- g_mutex_unlock(cachefile->lock);
- }
- }
- files = g_list_delete_link(files, files);
- }
- g_list_free(files);
-
- g_mutex_unlock(fs->log->mmap_lock);
-}
-
/******************************* JOURNAL REPLAY *******************************
* The journal replay code is used to recover filesystem state after a
* filesystem restart. We first look for the most recent commit record in the
/*const BlueSkyCloudPointer *data3
= (const BlueSkyCloudPointer *)(data + len1 + len2);*/
+ bluesky_cloudlog_stats_update(log_item, -1);
bluesky_string_unref(log_item->data);
log_item->data = NULL;
log_item->location_flags = CLOUDLOG_JOURNAL;
+ bluesky_cloudlog_stats_update(log_item, 1);
BlueSkyCloudID id0;
memset(&id0, 0, sizeof(id0));