// Rough size limit for a log segment. This is not a firm limit and there are
// no absolute guarantees on the size of a log segment.
-#define LOG_SEGMENT_SIZE (1 << 23)
+#define LOG_SEGMENT_SIZE (1 << 24)
#define HEADER_MAGIC 0x676f4c0a
#define FOOTER_MAGIC 0x2e435243
while (log->committed != NULL) {
BlueSkyCloudLog *item = (BlueSkyCloudLog *)log->committed->data;
g_mutex_lock(item->lock);
+ bluesky_cloudlog_stats_update(item, -1);
item->pending_write &= ~CLOUDLOG_JOURNAL;
item->location_flags |= CLOUDLOG_JOURNAL;
+ bluesky_cloudlog_stats_update(item, 1);
g_cond_signal(item->cond);
g_mutex_unlock(item->lock);
log->committed = g_slist_delete_link(log->committed, log->committed);
+ bluesky_cloudlog_unref(item);
batchsize++;
}
if ((item->location_flags | item->pending_write) & CLOUDLOG_JOURNAL) {
g_mutex_unlock(item->lock);
bluesky_cloudlog_unref(item);
+ g_atomic_int_add(&item->data_lock_count, -1);
continue;
}
+ bluesky_cloudlog_stats_update(item, -1);
item->pending_write |= CLOUDLOG_JOURNAL;
+ bluesky_cloudlog_stats_update(item, 1);
struct log_header header;
struct log_footer footer;
offset += sizeof(header) + sizeof(footer) + item->data->len;
log->committed = g_slist_prepend(log->committed, item);
+ g_atomic_int_add(&item->data_lock_count, -1);
g_mutex_unlock(item->lock);
/* Force a flush if there are no other log items currently waiting to be
/* Hand a cloud-log item to the journal writer thread.
 *
 * Takes a reference on the item and atomically increments its
 * data_lock_count before pushing it onto the writer's async queue.
 * NOTE(review): the matching unref/decrement appear to happen on the
 * writer side once the item has been journaled — confirm against the
 * writer loop. */
void bluesky_log_item_submit(BlueSkyCloudLog *item, BlueSkyLog *log)
{
    /* Ref first so the item cannot be freed while queued. */
    bluesky_cloudlog_ref(item);
    /* Pin the item's data buffer until the writer is done with it. */
    g_atomic_int_add(&item->data_lock_count, 1);
    g_async_queue_push(log->queue, item);
}
g_hash_table_insert(log->mmap_cache, GINT_TO_POINTER(log_seq), map);
+ g_print("Mapped log segment %d...\n", log_seq);
+
close(fd);
}