Add fetching of blocks from S3.
[bluesky.git] / inode.c
diff --git a/inode.c b/inode.c
index b1071a6..df3883b 100644 (file)
--- a/inode.c
+++ b/inode.c
@@ -300,9 +300,8 @@ void bluesky_file_read(BlueSkyInode *inode, uint64_t offset,
             memset(buf, 0, bytes);
             break;
         case BLUESKY_BLOCK_REF:
-            /* TODO: Pull in data first */
-            memset(buf, 0, bytes);
-            break;
+            bluesky_block_fetch(inode->fs, b);
+            /* Fall through */
         case BLUESKY_BLOCK_CACHED:
         case BLUESKY_BLOCK_DIRTY:
             memcpy(buf, &b->data[block_offset], bytes);
@@ -315,6 +314,22 @@ void bluesky_file_read(BlueSkyInode *inode, uint64_t offset,
     }
 }
 
+/* Read the given block from cloud-backed storage if the data is not already
+ * cached.  On a failed fetch the block is left untouched (type stays REF). */
+void bluesky_block_fetch(BlueSkyFS *fs, BlueSkyBlock *block)
+{
+    if (block->type != BLUESKY_BLOCK_REF)
+        return;
+
+    g_print("Fetching block from %s\n", block->ref);
+    BlueSkyRCStr *string = s3store_get(fs->store, block->ref);
+    g_return_if_fail(string != NULL);   /* S3 fetch failed; don't crash */
+    g_free(block->data);
+    block->data = g_memdup(string->data, BLUESKY_BLOCK_SIZE);
+    block->type = BLUESKY_BLOCK_CACHED;
+    bluesky_string_unref(string);
+}
+
 /* Write the given block to cloud-backed storage and mark it clean. */
 void bluesky_block_flush(BlueSkyFS *fs, BlueSkyBlock *block)
 {
@@ -333,7 +348,11 @@ void bluesky_block_flush(BlueSkyFS *fs, BlueSkyBlock *block)
     s3store_put(fs->store, name, data);
     g_free(block->ref);
     block->ref = g_strdup(name);
-    block->type = BLUESKY_BLOCK_CACHED;
+
+    /* Evict the in-memory copy after a successful flush; readers will
+     * refetch it on demand via bluesky_block_fetch(). */
+    g_free(block->data);
+    block->data = NULL;
+    block->type = BLUESKY_BLOCK_REF;
 
     g_checksum_free(csum);