summary |
shortlog |
log |
commit | commitdiff |
tree
raw |
patch |
inline | side by side (from parent 1: abc0e90)
Previously we didn't have timers set up and simply called the writeback
code after each operation (which could mean writeback wasn't invoked for
long stretches of time).
g_print("Inode %"PRIu64":\n", inode->inum);
g_print("Inode %"PRIu64":\n", inode->inum);
- gboolean locked = FALSE;
+ gboolean locked = TRUE;
if (g_mutex_trylock(inode->lock)) {
if (g_mutex_trylock(inode->lock)) {
g_mutex_unlock(inode->lock);
}
g_print(" Locked: %c Refcount: %d\n",
g_mutex_unlock(inode->lock);
}
g_print(" Locked: %c Refcount: %d\n",
i->type = type;
i->fs = fs;
i->inum = inum;
i->type = type;
i->fs = fs;
i->inum = inum;
switch (type) {
case BLUESKY_REGULAR:
switch (type) {
case BLUESKY_REGULAR:
req->xdr_result = _xdr_result;
result = (*local)((char *)req->args, req);
req->xdr_result = _xdr_result;
result = (*local)((char *)req->args, req);
- bluesky_flushd_invoke(fs);
bluesky_debug_dump(fs);
return;
bluesky_debug_dump(fs);
return;
static GMainContext *main_context;
static GMainLoop *main_loop;
static GMainContext *main_context;
static GMainLoop *main_loop;
+static gboolean async_flushd(gpointer data)
+{
+ bluesky_flushd_invoke(fs);
+ return TRUE;
+}
+
static async_rpc_init()
{
main_context = g_main_context_new();
main_loop = g_main_loop_new(main_context, FALSE);
static async_rpc_init()
{
main_context = g_main_context_new();
main_loop = g_main_loop_new(main_context, FALSE);
+
+ /* Arrange to have the cache writeback code run every five seconds. */
+ GSource *source = g_timeout_source_new_seconds(5);
+ g_source_set_callback(source, async_flushd, NULL, NULL);
+ g_source_attach(source, main_context);
+ g_source_unref(source);
}
struct rpc_call_header {
}
struct rpc_call_header {