From: Michael Vrable
Date: Tue, 1 Sep 2009 18:24:50 +0000 (-0700)
Subject: CMake reorganization.
X-Git-Url: http://git.vrable.net/?p=bluesky.git;a=commitdiff_plain;h=70fdd2326239a9a5e02b3c3699d2588d5fee48fa

CMake reorganization.
---

diff --git a/CMakeLists.txt b/CMakeLists.txt
index b3696a2..5bdb01c 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -4,25 +4,6 @@ project(bluesky)
 include(FindPkgConfig)
 pkg_check_modules(GLIB REQUIRED glib-2.0 gthread-2.0)
 
-link_directories(/home/mvrable/scratch/libs3-1.4/build/lib)
-
-add_library(bluesky SHARED dir.c inode.c store.c s3store.cc)
-add_executable(bluesky-test main.c)
-add_executable(nfsproxy
-    nfs3/nfsd.c nfs3/rpc.c nfs3/mount.c nfs3/nfs3.c
-    nfs3/mount_prot_xdr.c nfs3/nfs3_prot_xdr.c)
-
-#set_property(TARGET bluesky bluesky-test
-#             APPEND PROPERTY COMPILE_FLAGS ${GLIB_CFLAGS})
-#set_property(TARGET bluesky bluesky-test
-#             APPEND PROPERTY LINK_FLAGS ${GLIB_LDLAGS})
-
-set(CMAKE_C_FLAGS "-std=gnu99 ${CMAKE_C_FLAGS}")
-set(INSTALL_RPATH_USE_LINK_PATH 1)
-include_directories(${GLIB_INCLUDE_DIRS} ".")
-target_link_libraries(bluesky ${GLIB_LIBRARIES} s3)
-target_link_libraries(bluesky-test bluesky ${GLIB_LIBRARIES})
-target_link_libraries(nfsproxy bluesky ${GLIB_LIBRARIES})
-
-#message("GLIB CFLAGS:" ${GLIB_CFLAGS})
-#message("GLIB CFLAGS:" (get_property TARGET bluesky PROPERTY COMPILE_FLAGS))
+add_definitions(-D_FILE_OFFSET_BITS=64)
+add_subdirectory(bluesky)
+add_subdirectory(nfs3)
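One effect of the reorganized top level worth noting: add_definitions(-D_FILE_OFFSET_BITS=64) now applies large-file support to every target in both subdirectories. The following standalone program is an illustrative sketch (not part of this patch) for checking that the define is taking effect: compiled with -D_FILE_OFFSET_BITS=64 on a 32-bit glibc system it should print 8, and 4 without.

/* check-lfs.c: illustrative only, not part of this commit. */
#include <stdio.h>
#include <sys/types.h>

int main(void)
{
    /* With -D_FILE_OFFSET_BITS=64, off_t is 64 bits even on 32-bit glibc. */
    printf("sizeof(off_t) = %zu\n", sizeof(off_t));
    return 0;
}
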
diff --git a/bluesky.h b/bluesky.h
deleted file mode 100644
index 856dd75..0000000
--- a/bluesky.h
+++ /dev/null
@@ -1,162 +0,0 @@
-/* Blue Sky: File Systems in the Cloud
- *
- * Copyright (C) 2009 The Regents of the University of California
- * Written by Michael Vrable
- *
- * TODO: Licensing
- */
-
-#ifndef _BLUESKY_H
-#define _BLUESKY_H
-
-#include <stdint.h>
-#include <glib.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct S3Store;
-
-/* Reference-counted blocks of memory, used for passing data in and out of
- * storage backends and in other places. */
-typedef struct {
-    gint refcount;
-    gchar *data;
-    gsize len;
-} BlueSkyRCStr;
-
-BlueSkyRCStr *bluesky_string_new(gpointer data, gsize len);
-void bluesky_string_ref(BlueSkyRCStr *string);
-void bluesky_string_unref(BlueSkyRCStr *string);
-BlueSkyRCStr *bluesky_string_dup(BlueSkyRCStr *string);
-
-/* File types.  The numeric values are chosen to match with those used in
- * NFSv3. */
-typedef enum {
-    BLUESKY_REGULAR = 1,
-    BLUESKY_DIRECTORY = 2,
-    BLUESKY_BLOCK = 3,
-    BLUESKY_CHARACTER = 4,
-    BLUESKY_SYMLINK = 5,
-    BLUESKY_SOCKET = 6,
-    BLUESKY_FIFO = 7,
-} BlueSkyFileType;
-
-/* Filesystem state.  Each filesystem which is exported is represented by a
- * single bluesky_fs structure in memory. */
-typedef struct {
-    GMutex *lock;
-
-    gchar *name;            /* Descriptive name for the filesystem */
-    GHashTable *inodes;     /* Cached inodes */
-    uint64_t next_inum;     /* Next available inode for allocation */
-
-    struct S3Store *store;
-} BlueSkyFS;
-
-/* Inode number of the root directory. */
-#define BLUESKY_ROOT_INUM 1
-
-/* Timestamp, measured in microseconds since the Unix epoch. */
-typedef int64_t bluesky_time;
-
-/* In-memory representation of an inode within a Blue Sky server.  This
- * corresponds roughly with information that is committed to persistent
- * storage. */
-typedef struct {
-    gint refcnt;            /* May be accessed atomically without lock */
-    GMutex *lock;
-
-    BlueSkyFS *fs;
-
-    BlueSkyFileType type;
-    uint32_t mode;
-    uint32_t uid, gid;
-    uint32_t nlink;
-
-    /* Rather than track an inode number and generation number, we will simply
-     * never re-use a fileid after a file is deleted.  64 bits should be enough
-     * that we don't exhaust the identifier space. */
-    uint64_t inum;
-
-    uint64_t change_count;  /* Incremented with each change made */
-    int64_t atime;          /* Microseconds since the Unix epoch */
-    int64_t ctime;
-    int64_t mtime;
-    int64_t ntime;          /* "new time": time object was created */
-
-    /* File-specific fields */
-    uint64_t size;
-    GArray *blocks;
-
-    /* Directory-specific fields */
-    GSequence *dirents;     /* List of entries for READDIR */
-    GHashTable *dirhash;    /* Hash table by name for LOOKUP */
-    uint64_t parent_inum;   /* inode for ".."; 0 if the root directory */
-} BlueSkyInode;
-
-/* A directory entry.  The name is UTF-8 and is a freshly-allocated string.
- * Each directory entry is listed in two indices: dirents is indexed by cookie
- * and dirhash by name.  The cookie is a randomly-assigned 32-bit value, unique
- * within the directory, that remains unchanged until the entry is deleted.  It
- * is used to provide a stable key for restarting a READDIR call. */
-typedef struct {
-    gchar *name;
-    uint32_t cookie;
-    uint64_t inum;
-} BlueSkyDirent;
-
-/* File data is divided into fixed-size blocks (except the last block which may
- * be short?).  These blocks are backed by storage in a key/value store, but
- * may also be dirty if modifications have been made in-memory that have not
- * been committed. */
-#define BLUESKY_BLOCK_SIZE 32768ULL
-#define BLUESKY_MAX_FILE_SIZE (BLUESKY_BLOCK_SIZE << 24)
-typedef enum {
-    BLUESKY_BLOCK_ZERO = 0,     /* Data is all zeroes, not explicitly stored */
-    BLUESKY_BLOCK_REF = 1,      /* Reference to key/value store, not cached */
-    BLUESKY_BLOCK_CACHED = 2,   /* Data is cached in memory, clean */
-    BLUESKY_BLOCK_DIRTY = 3,    /* Data needs to be committed to store */
-} BlueSkyBlockType;
-
-typedef struct {
-    BlueSkyBlockType type;
-    gchar *ref;                 /* Name of data block in the backing store */
-    BlueSkyRCStr *data;         /* Pointer to data in memory if cached */
-} BlueSkyBlock;
-
-BlueSkyFS *bluesky_new_fs(gchar *name);
-int64_t bluesky_get_current_time();
-void bluesky_inode_update_ctime(BlueSkyInode *inode, gboolean update_mtime);
-uint64_t bluesky_fs_alloc_inode(BlueSkyFS *fs);
-BlueSkyInode *bluesky_new_inode(uint64_t inum, BlueSkyFS *fs, BlueSkyFileType type);
-
-BlueSkyInode *bluesky_get_inode(BlueSkyFS *fs, uint64_t inum);
-void bluesky_insert_inode(BlueSkyFS *fs, BlueSkyInode *inode);
-
-void bluesky_dirent_destroy(gpointer dirent);
-uint64_t bluesky_directory_hash(gchar *name);
-uint64_t bluesky_directory_lookup(BlueSkyInode *inode, gchar *name);
-gboolean bluesky_directory_insert(BlueSkyInode *dir, gchar *name,
-                                  uint64_t inum);
-void bluesky_directory_dump(BlueSkyInode *dir);
-
-void bluesky_block_touch(BlueSkyInode *inode, uint64_t i);
-void bluesky_block_fetch(BlueSkyFS *fs, BlueSkyBlock *block);
-void bluesky_block_flush(BlueSkyFS *fs, BlueSkyBlock *block);
-void bluesky_file_truncate(BlueSkyInode *inode, uint64_t size);
-void bluesky_file_write(BlueSkyInode *inode, uint64_t offset,
-                        const char *data, gint len);
-void bluesky_file_read(BlueSkyInode *inode, uint64_t offset,
-                       char *buf, gint len);
-
-struct S3Store *s3store_new();
-BlueSkyRCStr *s3store_get(struct S3Store *store, const gchar *key);
-void s3store_put(struct S3Store *store, const gchar *key, BlueSkyRCStr *val);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/bluesky/CMakeLists.txt b/bluesky/CMakeLists.txt
new file mode 100644
index 0000000..f09f362
--- /dev/null
+++ b/bluesky/CMakeLists.txt
@@ -0,0 +1,11 @@
+link_directories(/home/mvrable/scratch/libs3-1.4/build/lib)
+
+add_library(bluesky SHARED dir.c inode.c store.c s3store.c)
+add_executable(bluesky-test main.c)
+
+set(CMAKE_C_FLAGS "-std=gnu99 ${CMAKE_C_FLAGS}")
+set(INSTALL_RPATH_USE_LINK_PATH 1)
+
+include_directories(${GLIB_INCLUDE_DIRS})
+target_link_libraries(bluesky ${GLIB_LIBRARIES} s3)
+target_link_libraries(bluesky-test bluesky ${GLIB_LIBRARIES})
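With the library now built as a shared object in its own subdirectory, an out-of-tree consumer would link against it roughly as sketched below. This is a hypothetical example, not part of the patch; the include/library paths and the pkg-config invocation are assumptions about a typical build layout.

/* hello-bluesky.c: hypothetical consumer of the relocated libbluesky.
 * Build sketch (paths and flags assumed, not from this commit):
 *   cc -std=gnu99 $(pkg-config --cflags glib-2.0 gthread-2.0) \
 *      -Ibluesky -Lbuild/bluesky hello-bluesky.c -lbluesky \
 *      $(pkg-config --libs glib-2.0 gthread-2.0) */
#include <stdio.h>
#include <glib.h>
#include "bluesky.h"

int main(void)
{
    g_thread_init(NULL);                     /* glib threading, as in main.c */
    BlueSkyFS *fs = bluesky_new_fs("demo");  /* one exported filesystem */
    printf("fs \"%s\": next inode %llu\n", fs->name,
           (unsigned long long)fs->next_inum);
    return 0;
}
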
diff --git a/bluesky/bluesky.h b/bluesky/bluesky.h
new file mode 100644
index 0000000..856dd75
--- /dev/null
+++ b/bluesky/bluesky.h
@@ -0,0 +1,162 @@
+/* Blue Sky: File Systems in the Cloud
+ *
+ * Copyright (C) 2009 The Regents of the University of California
+ * Written by Michael Vrable
+ *
+ * TODO: Licensing
+ */
+
+#ifndef _BLUESKY_H
+#define _BLUESKY_H
+
+#include <stdint.h>
+#include <glib.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct S3Store;
+
+/* Reference-counted blocks of memory, used for passing data in and out of
+ * storage backends and in other places. */
+typedef struct {
+    gint refcount;
+    gchar *data;
+    gsize len;
+} BlueSkyRCStr;
+
+BlueSkyRCStr *bluesky_string_new(gpointer data, gsize len);
+void bluesky_string_ref(BlueSkyRCStr *string);
+void bluesky_string_unref(BlueSkyRCStr *string);
+BlueSkyRCStr *bluesky_string_dup(BlueSkyRCStr *string);
+
+/* File types.  The numeric values are chosen to match with those used in
+ * NFSv3. */
+typedef enum {
+    BLUESKY_REGULAR = 1,
+    BLUESKY_DIRECTORY = 2,
+    BLUESKY_BLOCK = 3,
+    BLUESKY_CHARACTER = 4,
+    BLUESKY_SYMLINK = 5,
+    BLUESKY_SOCKET = 6,
+    BLUESKY_FIFO = 7,
+} BlueSkyFileType;
+
+/* Filesystem state.  Each filesystem which is exported is represented by a
+ * single bluesky_fs structure in memory. */
+typedef struct {
+    GMutex *lock;
+
+    gchar *name;            /* Descriptive name for the filesystem */
+    GHashTable *inodes;     /* Cached inodes */
+    uint64_t next_inum;     /* Next available inode for allocation */
+
+    struct S3Store *store;
+} BlueSkyFS;
+
+/* Inode number of the root directory. */
+#define BLUESKY_ROOT_INUM 1
+
+/* Timestamp, measured in microseconds since the Unix epoch. */
+typedef int64_t bluesky_time;
+
+/* In-memory representation of an inode within a Blue Sky server.  This
+ * corresponds roughly with information that is committed to persistent
+ * storage. */
+typedef struct {
+    gint refcnt;            /* May be accessed atomically without lock */
+    GMutex *lock;
+
+    BlueSkyFS *fs;
+
+    BlueSkyFileType type;
+    uint32_t mode;
+    uint32_t uid, gid;
+    uint32_t nlink;
+
+    /* Rather than track an inode number and generation number, we will simply
+     * never re-use a fileid after a file is deleted.  64 bits should be enough
+     * that we don't exhaust the identifier space. */
+    uint64_t inum;
+
+    uint64_t change_count;  /* Incremented with each change made */
+    int64_t atime;          /* Microseconds since the Unix epoch */
+    int64_t ctime;
+    int64_t mtime;
+    int64_t ntime;          /* "new time": time object was created */
+
+    /* File-specific fields */
+    uint64_t size;
+    GArray *blocks;
+
+    /* Directory-specific fields */
+    GSequence *dirents;     /* List of entries for READDIR */
+    GHashTable *dirhash;    /* Hash table by name for LOOKUP */
+    uint64_t parent_inum;   /* inode for ".."; 0 if the root directory */
+} BlueSkyInode;
+
+/* A directory entry.  The name is UTF-8 and is a freshly-allocated string.
+ * Each directory entry is listed in two indices: dirents is indexed by cookie
+ * and dirhash by name.  The cookie is a randomly-assigned 32-bit value, unique
+ * within the directory, that remains unchanged until the entry is deleted.  It
+ * is used to provide a stable key for restarting a READDIR call. */
+typedef struct {
+    gchar *name;
+    uint32_t cookie;
+    uint64_t inum;
+} BlueSkyDirent;
+
+/* File data is divided into fixed-size blocks (except the last block which may
+ * be short?).  These blocks are backed by storage in a key/value store, but
+ * may also be dirty if modifications have been made in-memory that have not
+ * been committed. */
+#define BLUESKY_BLOCK_SIZE 32768ULL
+#define BLUESKY_MAX_FILE_SIZE (BLUESKY_BLOCK_SIZE << 24)
+typedef enum {
+    BLUESKY_BLOCK_ZERO = 0,     /* Data is all zeroes, not explicitly stored */
+    BLUESKY_BLOCK_REF = 1,      /* Reference to key/value store, not cached */
+    BLUESKY_BLOCK_CACHED = 2,   /* Data is cached in memory, clean */
+    BLUESKY_BLOCK_DIRTY = 3,    /* Data needs to be committed to store */
+} BlueSkyBlockType;
+
+typedef struct {
+    BlueSkyBlockType type;
+    gchar *ref;                 /* Name of data block in the backing store */
+    BlueSkyRCStr *data;         /* Pointer to data in memory if cached */
+} BlueSkyBlock;
+
+BlueSkyFS *bluesky_new_fs(gchar *name);
+int64_t bluesky_get_current_time();
+void bluesky_inode_update_ctime(BlueSkyInode *inode, gboolean update_mtime);
+uint64_t bluesky_fs_alloc_inode(BlueSkyFS *fs);
+BlueSkyInode *bluesky_new_inode(uint64_t inum, BlueSkyFS *fs, BlueSkyFileType type);
+
+BlueSkyInode *bluesky_get_inode(BlueSkyFS *fs, uint64_t inum);
+void bluesky_insert_inode(BlueSkyFS *fs, BlueSkyInode *inode);
+
+void bluesky_dirent_destroy(gpointer dirent);
+uint64_t bluesky_directory_hash(gchar *name);
+uint64_t bluesky_directory_lookup(BlueSkyInode *inode, gchar *name);
+gboolean bluesky_directory_insert(BlueSkyInode *dir, gchar *name,
+                                  uint64_t inum);
+void bluesky_directory_dump(BlueSkyInode *dir);
+
+void bluesky_block_touch(BlueSkyInode *inode, uint64_t i);
+void bluesky_block_fetch(BlueSkyFS *fs, BlueSkyBlock *block);
+void bluesky_block_flush(BlueSkyFS *fs, BlueSkyBlock *block);
+void bluesky_file_truncate(BlueSkyInode *inode, uint64_t size);
+void bluesky_file_write(BlueSkyInode *inode, uint64_t offset,
+                        const char *data, gint len);
+void bluesky_file_read(BlueSkyInode *inode, uint64_t offset,
+                       char *buf, gint len);
+
+struct S3Store *s3store_new();
+BlueSkyRCStr *s3store_get(struct S3Store *store, const gchar *key);
+void s3store_put(struct S3Store *store, const gchar *key, BlueSkyRCStr *val);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
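One subtlety of the BlueSkyRCStr API that follows from the implementation in store.c: bluesky_string_dup() consumes one reference to its argument and always hands back a string with a reference count of one, which is what makes it usable as a copy-on-write primitive in bluesky_block_touch(). A minimal sketch of the lifecycle (illustrative, not part of this patch):

/* rcstr-demo.c: illustrative sketch of the BlueSkyRCStr lifecycle. */
#include <glib.h>
#include "bluesky.h"

int main(void)
{
    /* The string takes ownership of the g_malloc'd buffer. */
    BlueSkyRCStr *s = bluesky_string_new(g_strdup("hello"), 5);

    bluesky_string_ref(s);                       /* refcount 2: a second holder */
    BlueSkyRCStr *copy = bluesky_string_dup(s);  /* consumes one reference to s;
                                                  * returns a private copy since
                                                  * s was shared */
    copy->data[0] = 'H';                         /* safe: does not affect s */

    bluesky_string_unref(copy);
    bluesky_string_unref(s);                     /* drops the remaining reference */
    return 0;
}
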
diff --git a/bluesky/dir.c b/bluesky/dir.c
new file mode 100644
index 0000000..204439e
--- /dev/null
+++ b/bluesky/dir.c
@@ -0,0 +1,143 @@
+/* Blue Sky: File Systems in the Cloud
+ *
+ * Copyright (C) 2009 The Regents of the University of California
+ * Written by Michael Vrable
+ *
+ * TODO: Licensing
+ */
+
+#include <stdint.h>
+#include <glib.h>
+
+#include "bluesky.h"
+
+/* Core filesystem: handling of directories. */
+
+void bluesky_dirent_destroy(gpointer data)
+{
+    BlueSkyDirent *dirent = (BlueSkyDirent *)data;
+    g_free(dirent->name);
+    g_free(dirent);
+}
+
+gint bluesky_dirent_compare(gconstpointer a, gconstpointer b,
+                            gpointer unused)
+{
+    uint32_t hash1 = ((const BlueSkyDirent *)a)->cookie;
+    uint32_t hash2 = ((const BlueSkyDirent *)b)->cookie;
+
+    if (hash1 < hash2)
+        return -1;
+    else if (hash1 > hash2)
+        return 1;
+    else
+        return 0;
+}
+
+/* Perform a lookup for a file name within a directory.  Returns the inode
+ * number if found, or 0 if not (0 is never a valid inode number).  Should be
+ * called with the inode lock already held. */
+uint64_t bluesky_directory_lookup(BlueSkyInode *inode, gchar *name)
+{
+    g_return_val_if_fail(inode->type == BLUESKY_DIRECTORY, 0);
+    g_return_val_if_fail(inode->dirhash != NULL, 0);
+
+    BlueSkyDirent *d = g_hash_table_lookup(inode->dirhash, name);
+    if (d == NULL)
+        return 0;
+    else
+        return d->inum;
+}
+
+/* Insert a new entry into a directory.  Should be called with the inode lock
+ * already held. */
+gboolean bluesky_directory_insert(BlueSkyInode *dir, gchar *name, uint64_t inum)
+{
+    g_return_val_if_fail(dir->type == BLUESKY_DIRECTORY, FALSE);
+
+    /* Check that no entry with the given name already exists. */
+    if (g_hash_table_lookup(dir->dirhash, name) != NULL)
+        return FALSE;
+
+    BlueSkyDirent *d = g_new(BlueSkyDirent, 1);
+    d->name = g_strdup(name);
+    d->inum = inum;
+
+    GSequence *dirents = dir->dirents;
+
+    /* Pick an unused cookie value for the directory at random.  Restrict
+     * ourselves to positive 32-bit values (even if treated as signed), and
+     * keep the first four slots free. */
+    while (1) {
+        do {
+            d->cookie = g_random_int() & 0x7fffffff;
+        } while (d->cookie < 4);
+
+        /* If the directory is empty, we can't possibly have a collision, so
+         * just go with the first key chosen. */
+        if (g_sequence_get_length(dirents) == 0)
+            break;
+
+        /* Otherwise, try looking up the generated cookie.  If we do not find a
+         * match, we can use this cookie value, otherwise we need to generate a
+         * new one and try again.  Because of the check above for an empty
+         * directory, we know that the lookup will return something so no need
+         * to worry about NULL. */
+        GSequenceIter *i = g_sequence_search(dir->dirents, d,
+                                             bluesky_dirent_compare, NULL);
+        i = g_sequence_iter_prev(i);
+        if (((BlueSkyDirent *)g_sequence_get(i))->cookie != d->cookie)
+            break;
+    }
+
+    /* Add the directory entry to both indices. */
+    g_sequence_insert_sorted(dirents, d, bluesky_dirent_compare, NULL);
+    g_hash_table_insert(dir->dirhash, d->name, d);
+
+    bluesky_inode_update_ctime(dir, 1);
+
+    return TRUE;
+}
+
+/* Remove an entry from a directory.  Should be called with the inode lock
+ * already held. */
+gboolean bluesky_directory_remove(BlueSkyInode *dir, gchar *name)
+{
+    g_return_val_if_fail(dir->type == BLUESKY_DIRECTORY, FALSE);
+
+    BlueSkyDirent *d = g_hash_table_lookup(dir->dirhash, name);
+    if (d == NULL) {
+        return FALSE;
+    }
+
+    g_hash_table_remove(dir->dirhash, name);
+
+    GSequenceIter *i = g_sequence_search(dir->dirents, d,
+                                         bluesky_dirent_compare, NULL);
+    i = g_sequence_iter_prev(i);
+
+    /* Assertion check, this ought to succeed */
+    g_return_val_if_fail(g_sequence_get(i) == d, FALSE);
+
+    g_sequence_remove(i);
+
+    bluesky_dirent_destroy(d);
+
+    bluesky_inode_update_ctime(dir, 1);
+
+    return TRUE;
+}
+
+/* Dump the contents of a directory to stdout.  Debugging only. */
+void bluesky_directory_dump(BlueSkyInode *dir)
+{
+    g_print("Directory dump:\n");
+
+    GSequenceIter *i = g_sequence_get_begin_iter(dir->dirents);
+
+    while (!g_sequence_iter_is_end(i)) {
+        BlueSkyDirent *d = g_sequence_get(i);
+        g_print("  0x%08x [inum=%llu] %s\n", d->cookie,
+                (unsigned long long)d->inum, d->name);
+        i = g_sequence_iter_next(i);
+    }
+}
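The header describes the cookie as a stable key for restarting a READDIR call, but no code above actually resumes an iteration. A hedged sketch of how that could look if added to dir.c (the helper name and signature are hypothetical, not part of this patch): g_sequence_search() returns the position after any entries comparing equal, so iteration naturally begins at the first cookie strictly greater than the client-supplied one.

/* Hypothetical helper: list up to `count` entries whose cookie is greater
 * than `start_cookie`, as an NFS READDIR continuation would.  Caller holds
 * the inode lock. */
static void readdir_resume(BlueSkyInode *dir, uint32_t start_cookie, int count)
{
    BlueSkyDirent key;
    key.cookie = start_cookie;   /* only the cookie field is compared */

    GSequenceIter *i = g_sequence_search(dir->dirents, &key,
                                         bluesky_dirent_compare, NULL);
    while (count-- > 0 && !g_sequence_iter_is_end(i)) {
        BlueSkyDirent *d = g_sequence_get(i);
        g_print("cookie=0x%08x name=%s\n", d->cookie, d->name);
        i = g_sequence_iter_next(i);
    }
}
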
diff --git a/bluesky/inode.c b/bluesky/inode.c
new file mode 100644
index 0000000..7ea8dbf
--- /dev/null
+++ b/bluesky/inode.c
@@ -0,0 +1,355 @@
+/* Blue Sky: File Systems in the Cloud
+ *
+ * Copyright (C) 2009 The Regents of the University of California
+ * Written by Michael Vrable
+ *
+ * TODO: Licensing
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <glib.h>
+
+#include "bluesky.h"
+
+/* Core filesystem.  Different proxies, such as the NFSv3 one, interface to
+ * this, but the core actually tracks the data which is stored.  So far we just
+ * implement an in-memory filesystem, but eventually this will be state which
+ * is persisted to the cloud. */
+
+/* Return the current time, in microseconds since the epoch. */
+int64_t bluesky_get_current_time()
+{
+    GTimeVal t;
+    g_get_current_time(&t);
+    return (int64_t)t.tv_sec * 1000000 + t.tv_usec;
+}
+
+/* Update an inode to indicate that a modification was made.  This increases
+ * the change counter, updates the ctime to the current time, and optionally
+ * updates the mtime. */
+void bluesky_inode_update_ctime(BlueSkyInode *inode, gboolean update_mtime)
+{
+    int64_t now = bluesky_get_current_time();
+    inode->change_count++;
+    inode->ctime = now;
+    if (update_mtime)
+        inode->mtime = now;
+}
+
+/* Compute the HMAC keyed-hash function using the given hash algorithm, data,
+ * and key. */
+void compute_hmac(GChecksumType algo,
+                  const guchar *data, gsize data_len,
+                  const guchar *key, gsize key_len,
+                  guint8 *buffer, gsize *digest_len)
+{
+    int block_size;
+
+    switch (algo) {
+    case G_CHECKSUM_MD5:
+    case G_CHECKSUM_SHA1:
+    case G_CHECKSUM_SHA256:
+        block_size = 64;
+        break;
+    default:
+        g_error("Unknown hash algorithm for HMAC: %d\n", algo);
+    }
+
+    gsize digest_size = g_checksum_type_get_length(algo);
+
+    guchar keybuf[block_size];
+    memset(keybuf, 0, block_size);
+    memcpy(keybuf, key, MIN(block_size, key_len));
+    for (int i = 0; i < block_size; i++)
+        keybuf[i] ^= 0x36;
+
+    GChecksum *csum1 = g_checksum_new(algo);
+    g_checksum_update(csum1, keybuf, block_size);
+    g_checksum_update(csum1, data, data_len);
+    guint8 digest[digest_size];
+    g_checksum_get_digest(csum1, digest, &digest_size);
+
+    memset(keybuf, 0, block_size);
+    memcpy(keybuf, key, MIN(block_size, key_len));
+    for (int i = 0; i < block_size; i++)
+        keybuf[i] ^= 0x5c;
+
+    GChecksum *csum2 = g_checksum_new(algo);
+    g_checksum_update(csum2, keybuf, block_size);
+    g_checksum_update(csum2, digest, digest_size);
+
+    g_checksum_get_digest(csum2, buffer, digest_len);
+
+    g_checksum_free(csum1);
+    g_checksum_free(csum2);
+}
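One point about the calling convention that is easy to get wrong: g_checksum_get_digest() treats *digest_len as the buffer capacity on input and writes the actual length back, so callers must initialize it before calling compute_hmac(). A short illustrative sketch (hypothetical, not part of the patch) that could sit below compute_hmac() in this file:

static void hmac_demo(void)
{
    guint8 mac[32];               /* SHA-256 produces a 32-byte digest */
    gsize mac_len = sizeof(mac);  /* capacity in, actual length out */

    compute_hmac(G_CHECKSUM_SHA256,
                 (const guchar *)"hello world", 11,
                 (const guchar *)"secret key", 10,
                 mac, &mac_len);

    for (gsize i = 0; i < mac_len; i++)
        g_print("%02x", mac[i]);
    g_print("\n");
}
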
+/* Unfortunately a glib hash table is only guaranteed to be able to store
+ * 32-bit keys if we use the key directly.  If we want 64-bit inode numbers,
+ * we'll have to allocate memory to store the 64-bit inumber, and use a pointer
+ * to it.  Rather than allocate the memory for the key, we'll just include a
+ * pointer to the 64-bit inum stored in the inode itself, so that we don't need
+ * to do any more memory management. */
+static guint bluesky_fs_key_hash_func(gconstpointer key)
+{
+    uint64_t inum = *(const uint64_t *)key;
+    return (guint)inum;
+}
+
+static gboolean bluesky_fs_key_equal_func(gconstpointer a, gconstpointer b)
+{
+    uint64_t i1 = *(const uint64_t *)a;
+    uint64_t i2 = *(const uint64_t *)b;
+    return i1 == i2;
+}
+
+/* Filesystem-level operations.  A filesystem is like a directory tree that we
+ * are willing to export. */
+BlueSkyFS *bluesky_new_fs(gchar *name)
+{
+    BlueSkyFS *fs = g_new0(BlueSkyFS, 1);
+    fs->lock = g_mutex_new();
+    fs->name = g_strdup(name);
+    fs->inodes = g_hash_table_new(bluesky_fs_key_hash_func,
+                                  bluesky_fs_key_equal_func);
+    fs->next_inum = BLUESKY_ROOT_INUM + 1;
+    fs->store = s3store_new();
+
+    return fs;
+}
+
+/* Allocate a fresh inode number which has not been used before within a
+ * filesystem. */
+uint64_t bluesky_fs_alloc_inode(BlueSkyFS *fs)
+{
+    uint64_t inum;
+
+    g_mutex_lock(fs->lock);
+    inum = fs->next_inum;
+    fs->next_inum++;
+    g_mutex_unlock(fs->lock);
+
+    return inum;
+}
+
+BlueSkyInode *bluesky_new_inode(uint64_t inum, BlueSkyFS *fs,
+                                BlueSkyFileType type)
+{
+    BlueSkyInode *i = g_new0(BlueSkyInode, 1);
+
+    i->lock = g_mutex_new();
+    i->type = type;
+    i->fs = fs;
+    i->inum = inum;
+
+    switch (type) {
+    case BLUESKY_REGULAR:
+        i->blocks = g_array_new(FALSE, TRUE, sizeof(BlueSkyBlock));
+        break;
+    case BLUESKY_DIRECTORY:
+        i->dirents = g_sequence_new(bluesky_dirent_destroy);
+        i->dirhash = g_hash_table_new(g_str_hash, g_str_equal);
+        break;
+    case BLUESKY_BLOCK:
+    case BLUESKY_CHARACTER:
+    case BLUESKY_SYMLINK:
+    case BLUESKY_SOCKET:
+    case BLUESKY_FIFO:
+        break;
+    }
+
+    return i;
+}
+
+/* Retrieve an inode from the filesystem.  Eventually this will be a cache and
+ * so we might need to go fetch the inode from elsewhere; for now all
+ * filesystem state is stored here. */
+BlueSkyInode *bluesky_get_inode(BlueSkyFS *fs, uint64_t inum)
+{
+    BlueSkyInode *inode = NULL;
+
+    g_mutex_lock(fs->lock);
+    inode = (BlueSkyInode *)g_hash_table_lookup(fs->inodes, &inum);
+    g_mutex_unlock(fs->lock);
+
+    return inode;
+}
+
+/* Insert an inode into the filesystem inode cache. */
+void bluesky_insert_inode(BlueSkyFS *fs, BlueSkyInode *inode)
+{
+    g_mutex_lock(fs->lock);
+    g_hash_table_insert(fs->inodes, &inode->inum, inode);
+    g_mutex_unlock(fs->lock);
+}
+
+/* Mark a given block dirty and make sure that data is faulted in so that it
+ * can be written to. */
+void bluesky_block_touch(BlueSkyInode *inode, uint64_t i)
+{
+    g_return_if_fail(i < inode->blocks->len);
+    BlueSkyBlock *block = &g_array_index(inode->blocks, BlueSkyBlock, i);
+
+    switch (block->type) {
+    case BLUESKY_BLOCK_ZERO:
+        block->data = bluesky_string_new(g_malloc0(BLUESKY_BLOCK_SIZE),
+                                         BLUESKY_BLOCK_SIZE);
+        break;
+    case BLUESKY_BLOCK_REF:
+        bluesky_block_fetch(inode->fs, block);
+        g_assert(block->type == BLUESKY_BLOCK_CACHED);
+        /* Fall through */
+    case BLUESKY_BLOCK_CACHED:
+    case BLUESKY_BLOCK_DIRTY:
+        block->data = bluesky_string_dup(block->data);
+        break;
+    }
+
+    block->type = BLUESKY_BLOCK_DIRTY;
+}
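The pointer-keyed table above works because the key pointer aims at the inum field inside the inode itself, so key and value share one allocation and one lifetime. A self-contained sketch of the same trick (illustrative only, not part of the patch):

/* u64-key-demo.c: the 64-bit-key pattern used by fs->inodes. */
#include <stdint.h>
#include <glib.h>

static guint u64_hash(gconstpointer key)
{
    return (guint)*(const uint64_t *)key;
}

static gboolean u64_equal(gconstpointer a, gconstpointer b)
{
    return *(const uint64_t *)a == *(const uint64_t *)b;
}

typedef struct { uint64_t id; const char *payload; } Item;

int main(void)
{
    GHashTable *t = g_hash_table_new(u64_hash, u64_equal);

    Item item = { 0x100000001ULL, "demo" };   /* id wider than 32 bits */
    g_hash_table_insert(t, &item.id, &item);  /* key lives inside the value */

    uint64_t probe = 0x100000001ULL;
    Item *found = g_hash_table_lookup(t, &probe);
    g_print("found: %s\n", found ? found->payload : "(none)");
    return 0;
}
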
+/* Set the size of a file.  This will truncate or extend the file as needed.
+ * Newly-allocated bytes are zeroed. */
+void bluesky_file_truncate(BlueSkyInode *inode, uint64_t size)
+{
+    g_return_if_fail(size <= BLUESKY_MAX_FILE_SIZE);
+
+    if (size == inode->size)
+        return;
+
+    uint64_t blocks = (size + BLUESKY_BLOCK_SIZE - 1) / BLUESKY_BLOCK_SIZE;
+
+    if (blocks > inode->blocks->len) {
+        /* Need to add new blocks to the end of a file.  New block structures
+         * are automatically zeroed, which initializes them to be pointers to
+         * zero blocks so we don't need to do any more work. */
+        g_array_set_size(inode->blocks, blocks);
+    } else if (blocks < inode->blocks->len) {
+        /* Delete blocks from a file.  Must reclaim memory. */
+        for (guint i = blocks; i < inode->blocks->len; i++) {
+            BlueSkyBlock *b = &g_array_index(inode->blocks, BlueSkyBlock, i);
+            g_free(b->ref);
+            bluesky_string_unref(b->data);
+        }
+        g_array_set_size(inode->blocks, blocks);
+    }
+
+    /* If the file size is being decreased, ensure that any trailing data in
+     * the last block is zeroed. */
+    if (size < inode->size) {
+        BlueSkyBlock *b = &g_array_index(inode->blocks, BlueSkyBlock,
+                                         blocks - 1);
+        if (b->type != BLUESKY_BLOCK_ZERO) {
+            bluesky_block_touch(inode, blocks - 1);
+            int end_offset = size % BLUESKY_BLOCK_SIZE;
+            if (end_offset > 0) {
+                memset(&b->data->data[end_offset], 0,
+                       BLUESKY_BLOCK_SIZE - end_offset);
+            }
+        }
+    }
+
+    inode->size = size;
+    bluesky_inode_update_ctime(inode, 1);
+}
+
+void bluesky_file_write(BlueSkyInode *inode, uint64_t offset,
+                        const char *data, gint len)
+{
+    g_return_if_fail(inode->type == BLUESKY_REGULAR);
+    g_return_if_fail(offset < inode->size);
+    g_return_if_fail(len <= inode->size - offset);
+
+    if (len == 0)
+        return;
+
+    while (len > 0) {
+        uint64_t block_num = offset / BLUESKY_BLOCK_SIZE;
+        gint block_offset = offset % BLUESKY_BLOCK_SIZE;
+        gint bytes = MIN(BLUESKY_BLOCK_SIZE - block_offset, len);
+
+        bluesky_block_touch(inode, block_num);
+        BlueSkyBlock *b = &g_array_index(inode->blocks, BlueSkyBlock,
+                                         block_num);
+        memcpy(&b->data->data[block_offset], data, bytes);
+        bluesky_block_flush(inode->fs, b);
+
+        offset += bytes;
+        data += bytes;
+        len -= bytes;
+    }
+
+    bluesky_inode_update_ctime(inode, 1);
+}
+
+void bluesky_file_read(BlueSkyInode *inode, uint64_t offset,
+                       char *buf, gint len)
+{
+    g_return_if_fail(inode->type == BLUESKY_REGULAR);
+    g_return_if_fail(offset < inode->size);
+    g_return_if_fail(len <= inode->size - offset);
+
+    while (len > 0) {
+        uint64_t block_num = offset / BLUESKY_BLOCK_SIZE;
+        gint block_offset = offset % BLUESKY_BLOCK_SIZE;
+        gint bytes = MIN(BLUESKY_BLOCK_SIZE - block_offset, len);
+
+        BlueSkyBlock *b = &g_array_index(inode->blocks, BlueSkyBlock,
+                                         block_num);
+        switch (b->type) {
+        case BLUESKY_BLOCK_ZERO:
+            memset(buf, 0, bytes);
+            break;
+        case BLUESKY_BLOCK_REF:
+            bluesky_block_fetch(inode->fs, b);
+            /* Fall through */
+        case BLUESKY_BLOCK_CACHED:
+        case BLUESKY_BLOCK_DIRTY:
+            memcpy(buf, &b->data->data[block_offset], bytes);
+            break;
+        }
+
+        offset += bytes;
+        buf += bytes;
+        len -= bytes;
+    }
+}
+
+/* Read the given block from cloud-backed storage if the data is not already
+ * cached. */
+void bluesky_block_fetch(BlueSkyFS *fs, BlueSkyBlock *block)
+{
+    if (block->type != BLUESKY_BLOCK_REF)
+        return;
+
+    g_print("Fetching block from %s\n", block->ref);
+    BlueSkyRCStr *string = s3store_get(fs->store, block->ref);
+
+    bluesky_string_unref(block->data);
+    block->data = string;
+    block->type = BLUESKY_BLOCK_CACHED;
+}
+
+/* Write the given block to cloud-backed storage and mark it clean. */
+void bluesky_block_flush(BlueSkyFS *fs, BlueSkyBlock *block)
+{
+    if (block->type != BLUESKY_BLOCK_DIRTY)
+        return;
+
+    GChecksum *csum = g_checksum_new(G_CHECKSUM_SHA256);
+    g_checksum_update(csum, block->data->data, block->data->len);
+    gchar *name = g_strdup(g_checksum_get_string(csum));
+
+    g_print("Flushing block as %s\n", name);
+    s3store_put(fs->store, name, block->data);
+    g_free(block->ref);
+    block->ref = name;
+
+    /* block->type = BLUESKY_BLOCK_CACHED; */
+    bluesky_string_unref(block->data);
+    block->data = NULL;
+    block->type = BLUESKY_BLOCK_REF;
+
+    g_checksum_free(csum);
+}
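Pulling the pieces together: since bluesky_file_write() requires the write to fall within the current size, callers must truncate (extend) first. A hedged usage sketch (hypothetical function, not in this patch); note that the write path flushes each touched block with bluesky_block_flush(), so running this against the code above really does PUT to S3 using the credentials from the environment, and the read back then triggers a GET because flushed blocks revert to BLUESKY_BLOCK_REF.

/* write-read-demo.c: illustrative sketch, not part of this commit. */
#include <string.h>
#include <glib.h>
#include "bluesky.h"

void file_io_demo(BlueSkyFS *fs)
{
    BlueSkyInode *f = bluesky_new_inode(bluesky_fs_alloc_inode(fs), fs,
                                        BLUESKY_REGULAR);
    bluesky_insert_inode(fs, f);

    /* Writes must stay within the current size, so set the size first. */
    bluesky_file_truncate(f, 11);
    bluesky_file_write(f, 0, "hello world", 11);

    char buf[12];
    bluesky_file_read(f, 0, buf, 11);   /* refetches the flushed block */
    buf[11] = '\0';
    g_print("read back: %s\n", buf);
}
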
diff --git a/bluesky/main.c b/bluesky/main.c
new file mode 100644
index 0000000..c02bc22
--- /dev/null
+++ b/bluesky/main.c
@@ -0,0 +1,59 @@
+/* Blue Sky: File Systems in the Cloud
+ *
+ * Copyright (C) 2009 The Regents of the University of California
+ * Written by Michael Vrable
+ *
+ * TODO: Licensing
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <glib.h>
+
+#include "bluesky.h"
+
+/* Small test program for BlueSkyFS.  Doesn't do much useful. */
+
+int main(int argc, char *argv[])
+{
+    g_thread_init(NULL);
+
+    printf("BlueSkyFS starting...\n");
+
+    printf("  time = %lld\n", (long long)bluesky_get_current_time());
+
+    BlueSkyFS *fs = bluesky_new_fs("export");
+
+    BlueSkyInode *root;
+    root = bluesky_new_inode(BLUESKY_ROOT_INUM, fs, BLUESKY_DIRECTORY);
+    root->nlink = 1;
+    root->mode = 0755;
+    bluesky_insert_inode(fs, root);
+
+    BlueSkyInode *file;
+    file = bluesky_new_inode(bluesky_fs_alloc_inode(fs), fs, BLUESKY_REGULAR);
+    file->nlink = 1;
+    file->mode = 0755;
+    bluesky_insert_inode(fs, file);
+    bluesky_directory_insert(root, "foo", file->inum);
+
+    file = bluesky_new_inode(bluesky_fs_alloc_inode(fs), fs, BLUESKY_REGULAR);
+    file->nlink = 1;
+    file->mode = 0755;
+    bluesky_insert_inode(fs, file);
+    bluesky_directory_insert(root, "bar", file->inum);
+
+    file = bluesky_new_inode(bluesky_fs_alloc_inode(fs), fs, BLUESKY_REGULAR);
+    file->nlink = 1;
+    file->mode = 0755;
+    bluesky_insert_inode(fs, file);
+    bluesky_directory_insert(root, "baz", file->inum);
+
+    bluesky_directory_dump(root);
+    bluesky_directory_lookup(root, "foo");
+    bluesky_directory_lookup(root, "bar");
+    bluesky_directory_lookup(root, "baz");
+
+    return 0;
+}
diff --git a/bluesky/s3store.c b/bluesky/s3store.c
new file mode 100644
index 0000000..47a8f60
--- /dev/null
+++ b/bluesky/s3store.c
@@ -0,0 +1,119 @@
+/* Blue Sky: File Systems in the Cloud
+ *
+ * Copyright (C) 2009 The Regents of the University of California
+ * Written by Michael Vrable
+ *
+ * TODO: Licensing
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <glib.h>
+
+#include "bluesky.h"
+#include "libs3.h"
+
+/* Interface to Amazon S3 storage. */
+
+/* Context describing the S3 bucket which serves as the backing store. */
+struct S3Store {
+    S3BucketContext bucket;
+};
+
+struct S3Store *s3store_new()
+{
+    struct S3Store *store = g_new(struct S3Store, 1);
+    store->bucket.bucketName = "mvrable-bluesky";
+    store->bucket.protocol = S3ProtocolHTTP;
+    store->bucket.uriStyle = S3UriStylePath;
+    store->bucket.accessKeyId = getenv("AWS_ACCESS_KEY_ID");
+    store->bucket.secretAccessKey = getenv("AWS_SECRET_ACCESS_KEY");
+
+    g_print("Initializing S3 with bucket %s, access key %s\n",
+            store->bucket.bucketName, store->bucket.accessKeyId);
+
+    return store;
+}
+
+struct get_info {
+    gchar *buf;
+    gint offset;
+};
+
+static S3Status s3store_get_handler(int bufferSize, const char *buffer,
+                                    void *callbackData)
+{
+    struct get_info *info = (struct get_info *)callbackData;
+    gint bytes = MIN(bufferSize, (int)(BLUESKY_BLOCK_SIZE - info->offset));
+    memcpy(info->buf + info->offset, buffer, bytes);
+    info->offset += bytes;
+    return S3StatusOK;
+}
+
+struct put_info {
+    BlueSkyRCStr *val;
+    gint offset;
+};
+
+static int s3store_put_handler(int bufferSize, char *buffer,
+                               void *callbackData)
+{
+    struct put_info *info = (struct put_info *)callbackData;
+    gint bytes = MIN(bufferSize, (int)(info->val->len - info->offset));
+    memcpy(buffer, (char *)info->val->data + info->offset, bytes);
+    info->offset += bytes;
+    return bytes;
+}
+
+static S3Status s3store_properties_callback(const S3ResponseProperties *properties,
+                                            void *callbackData)
+{
+    g_print("(Properties callback)\n");
+    return S3StatusOK;
+}
+
+void s3store_response_callback(S3Status status,
+                               const S3ErrorDetails *errorDetails,
+                               void *callbackData)
+{
+    g_print("S3 operation complete, status=%s\n",
+            S3_get_status_name(status));
+    if (errorDetails != NULL) {
+        g_print("  Error message: %s\n", errorDetails->message);
+    }
+}
+
+BlueSkyRCStr *s3store_get(struct S3Store *store, const gchar *key)
+{
+    struct get_info info;
+    info.buf = (char *)g_malloc0(BLUESKY_BLOCK_SIZE);
+    info.offset = 0;
+
+    struct S3GetObjectHandler handler;
+    handler.responseHandler.propertiesCallback = s3store_properties_callback;
+    handler.responseHandler.completeCallback = s3store_response_callback;
+    handler.getObjectDataCallback = s3store_get_handler;
+
+    g_print("Starting fetch of %s from S3...\n", key);
+    S3_get_object(&store->bucket, key, NULL, 0, 0, NULL,
+                  &handler, &info);
+
+    return bluesky_string_new(info.buf, BLUESKY_BLOCK_SIZE);
+}
+
+void s3store_put(struct S3Store *store, const gchar *key, BlueSkyRCStr *val)
+{
+    struct put_info info;
+    info.val = val;
+    info.offset = 0;
+
+    struct S3PutObjectHandler handler;
+    handler.responseHandler.propertiesCallback = s3store_properties_callback;
+    handler.responseHandler.completeCallback = s3store_response_callback;
+    handler.putObjectDataCallback = s3store_put_handler;
+
+    g_print("Starting store of %s to S3...\n", key);
+    S3_put_object(&store->bucket, key, val->len, NULL, NULL,
+                  &handler, &info);
+}
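A design point worth noticing in the store above: s3store_put() sends exactly val->len bytes, but s3store_get() always allocates and returns a full BLUESKY_BLOCK_SIZE buffer and its data callback clamps at that bound, so the store as written only round-trips fixed-size blocks. A minimal usage sketch (hypothetical, not part of the patch; it assumes libs3 has already been initialized by the enclosing program and that AWS credentials are set in the environment):

/* s3-demo.c: illustrative sketch, not part of this commit. */
#include <glib.h>
#include "bluesky.h"

void s3_demo(void)
{
    struct S3Store *store = s3store_new();

    /* Stores exactly val->len bytes under the given key... */
    BlueSkyRCStr *val = bluesky_string_new(g_malloc0(BLUESKY_BLOCK_SIZE),
                                           BLUESKY_BLOCK_SIZE);
    s3store_put(store, "demo-block", val);

    /* ...but reads always come back padded to BLUESKY_BLOCK_SIZE. */
    BlueSkyRCStr *copy = s3store_get(store, "demo-block");
    g_print("fetched %" G_GSIZE_FORMAT " bytes\n", copy->len);

    bluesky_string_unref(copy);
    bluesky_string_unref(val);
}
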
diff --git a/bluesky/store.c b/bluesky/store.c
new file mode 100644
index 0000000..9b509e6
--- /dev/null
+++ b/bluesky/store.c
@@ -0,0 +1,103 @@
+/* Blue Sky: File Systems in the Cloud
+ *
+ * Copyright (C) 2009 The Regents of the University of California
+ * Written by Michael Vrable
+ *
+ * TODO: Licensing
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <glib.h>
+
+#include "bluesky.h"
+
+/* Interaction with cloud storage.  We expose a very simple GET/PUT-style
+ * interface, which different backends can implement.  Available backends
+ * (will) include Amazon S3 and a simple local store for testing purposes. */
+
+/* Create and return a new reference-counted string.
The reference count is + * initially one. The newly-returned string takes ownership of the memory + * pointed at by data, and will call g_free on it when the reference count + * drops to zero. */ +BlueSkyRCStr *bluesky_string_new(gpointer data, gsize len) +{ + BlueSkyRCStr *string = g_new(BlueSkyRCStr, 1); + string->data = data; + string->len = len; + g_atomic_int_set(&string->refcount, 1); + return string; +} + +void bluesky_string_ref(BlueSkyRCStr *string) +{ + if (string == NULL) + return; + + g_atomic_int_inc(&string->refcount); +} + +void bluesky_string_unref(BlueSkyRCStr *string) +{ + if (string == NULL) + return; + + if (g_atomic_int_dec_and_test(&string->refcount)) { + g_free(string->data); + g_free(string); + } +} + +/* Duplicate and return a new reference-counted string, containing a copy of + * the original data, with a reference count of 1. As an optimization, if the + * passed-in string already has a reference count of 1, the original is + * returned. Can be used to make a mutable copy of a shared string. For this + * to truly be safe, it is probably needed that there be some type of lock + * protecting access to the string. */ +BlueSkyRCStr *bluesky_string_dup(BlueSkyRCStr *string) +{ + if (string == NULL) + return NULL; + + if (g_atomic_int_dec_and_test(&string->refcount)) { + /* There are no other shared copies, so return this one. */ + g_atomic_int_inc(&string->refcount); + return string; + } else { + return bluesky_string_new(g_memdup(string->data, string->len), + string->len); + } +} + +/* Simple in-memory data store for test purposes. */ +typedef struct { + GMutex *lock; + + /* TODO: A hashtable isn't optimal for list queries... */ + GHashTable *store; +} MemStore; + +MemStore *memstore_new() +{ + MemStore *store = g_new(MemStore, 1); + store->lock = g_mutex_new(); + store->store = g_hash_table_new_full(g_str_hash, g_str_equal, + g_free, + (GDestroyNotify)bluesky_string_unref); + + return store; +} + +BlueSkyRCStr *memstore_get(MemStore *store, const gchar *key) +{ + BlueSkyRCStr *s = g_hash_table_lookup(store->store, key); + if (s != NULL) + bluesky_string_ref(s); + return s; +} + +void memstore_put(MemStore *store, const gchar *key, BlueSkyRCStr *val) +{ + bluesky_string_ref(val); + g_hash_table_insert(store->store, g_strdup(key), val); +} diff --git a/dir.c b/dir.c deleted file mode 100644 index 204439e..0000000 --- a/dir.c +++ /dev/null @@ -1,143 +0,0 @@ -/* Blue Sky: File Systems in the Cloud - * - * Copyright (C) 2009 The Regents of the University of California - * Written by Michael Vrable - * - * TODO: Licensing - */ - -#include -#include - -#include "bluesky.h" - -/* Core filesystem: handling of directories. */ - -void bluesky_dirent_destroy(gpointer data) -{ - BlueSkyDirent *dirent = (BlueSkyDirent *)data; - g_free(dirent->name); - g_free(dirent); -} - -gint bluesky_dirent_compare(gconstpointer a, gconstpointer b, - gpointer unused) -{ - uint32_t hash1 = ((const BlueSkyDirent *)a)->cookie; - uint32_t hash2 = ((const BlueSkyDirent *)b)->cookie; - - if (hash1 < hash2) - return -1; - else if (hash1 > hash2) - return 1; - else - return 0; -} - -/* Perform a lookup for a file name within a directory. Returns the inode - * number if found, or 0 if not (0 is never a valid inode number). Should be - * called with the inode lock already held. 
*/ -uint64_t bluesky_directory_lookup(BlueSkyInode *inode, gchar *name) -{ - g_return_val_if_fail(inode->type == BLUESKY_DIRECTORY, 0); - g_return_val_if_fail(inode->dirhash != NULL, 0); - - BlueSkyDirent *d = g_hash_table_lookup(inode->dirhash, name); - if (d == NULL) - return 0; - else - return d->inum; -} - -/* Insert a new entry into a directory. Should be called with the inode lock - * already held. */ -gboolean bluesky_directory_insert(BlueSkyInode *dir, gchar *name, uint64_t inum) -{ - g_return_val_if_fail(dir->type == BLUESKY_DIRECTORY, FALSE); - - /* Check that no entry with the given name already exists. */ - if (g_hash_table_lookup(dir->dirhash, name) != NULL) - return FALSE; - - BlueSkyDirent *d = g_new(BlueSkyDirent, 1); - d->name = g_strdup(name); - d->inum = inum; - - GSequence *dirents = dir->dirents; - - /* Pick an unused cookie value for the directory at random. Restrict - * ourselves to positive 32-bit values (even if treated as signed), and - * keep the first four slots free. */ - while (1) { - do { - d->cookie = g_random_int() & 0x7fffffff; - } while (d->cookie < 4); - - /* If the directory is empty, we can't possibly have a collision, so - * just go with the first key chosen. */ - if (g_sequence_get_length(dirents) == 0) - break; - - /* Otherwise, try looking up the generated cookie. If we do not find a - * match, we can use this cookie value, otherwise we need to generate a - * new one and try again. Because of the check above for an empty - * directory, we know that the lookup will return something so no need - * to worry about NULL. */ - GSequenceIter *i = g_sequence_search(dir->dirents, d, - bluesky_dirent_compare, NULL); - i = g_sequence_iter_prev(i); - if (((BlueSkyDirent *)g_sequence_get(i))->cookie != d->cookie) - break; - } - - /* Add the directory entry to both indices. */ - g_sequence_insert_sorted(dirents, d, bluesky_dirent_compare, NULL); - g_hash_table_insert(dir->dirhash, d->name, d); - - bluesky_inode_update_ctime(dir, 1); - - return TRUE; -} - -/* Remove an from a directory. Should be called with the inode lock already - * held. */ -gboolean bluesky_directory_remove(BlueSkyInode *dir, gchar *name) -{ - g_return_val_if_fail(dir->type == BLUESKY_DIRECTORY, FALSE); - - BlueSkyDirent *d = g_hash_table_lookup(dir->dirhash, name); - if (d == NULL) { - return FALSE; - } - - g_hash_table_remove(dir->dirhash, name); - - GSequenceIter *i = g_sequence_search(dir->dirents, d, - bluesky_dirent_compare, NULL); - i = g_sequence_iter_prev(i); - - /* Assertion check, this ought to succeed */ - g_return_val_if_fail(g_sequence_get(i) == d, FALSE); - - g_sequence_remove(i); - - bluesky_dirent_destroy(d); - - bluesky_inode_update_ctime(dir, 1); - - return TRUE; -} - -/* Dump the contents of a directory to stdout. Debugging only. */ -void bluesky_directory_dump(BlueSkyInode *dir) -{ - g_print("Directory dump:\n"); - - GSequenceIter *i = g_sequence_get_begin_iter(dir->dirents); - - while (!g_sequence_iter_is_end(i)) { - BlueSkyDirent *d = g_sequence_get(i); - g_print(" 0x%08x [inum=%lld] %s\n", d->cookie, d->inum, d->name); - i = g_sequence_iter_next(i); - } -} diff --git a/inode.c b/inode.c deleted file mode 100644 index 7ea8dbf..0000000 --- a/inode.c +++ /dev/null @@ -1,355 +0,0 @@ -/* Blue Sky: File Systems in the Cloud - * - * Copyright (C) 2009 The Regents of the University of California - * Written by Michael Vrable - * - * TODO: Licensing - */ - -#include -#include -#include - -#include "bluesky.h" - -/* Core filesystem. 
Different proxies, such as the NFSv3 one, interface to - * this, but the core actually tracks the data which is stored. So far we just - * implement an in-memory filesystem, but eventually this will be state which - * is persisted to the cloud. */ - -/* Return the current time, in microseconds since the epoch. */ -int64_t bluesky_get_current_time() -{ - GTimeVal t; - g_get_current_time(&t); - return (int64_t)t.tv_sec * 1000000 + t.tv_usec; -} - -/* Update an inode to indicate that a modification was made. This increases - * the change counter, updates the ctime to the current time, and optionally - * updates the mtime. */ -void bluesky_inode_update_ctime(BlueSkyInode *inode, gboolean update_mtime) -{ - int64_t now = bluesky_get_current_time(); - inode->change_count++; - inode->ctime = now; - if (update_mtime) - inode->mtime = now; -} - -/* Compute the HMAC keyed-hash function using the given hash algorithm, data, - * and key. */ -void compute_hmac(GChecksumType algo, - const guchar *data, gsize data_len, - const guchar *key, gsize key_len, - guint8 *buffer, gsize *digest_len) -{ - int block_size; - - switch (algo) { - case G_CHECKSUM_MD5: - case G_CHECKSUM_SHA1: - case G_CHECKSUM_SHA256: - block_size = 64; - break; - default: - g_error("Unknown hash algorithm for HMAC: %d\n", algo); - } - - gsize digest_size = g_checksum_type_get_length(algo); - - guchar keybuf[block_size]; - memset(keybuf, 0, block_size); - memcpy(keybuf, key, MIN(block_size, key_len)); - for (int i = 0; i < block_size; i++) - keybuf[i] ^= 0x36; - - GChecksum *csum1 = g_checksum_new(algo); - g_checksum_update(csum1, keybuf, block_size); - g_checksum_update(csum1, data, data_len); - guint8 digest[digest_size]; - g_checksum_get_digest(csum1, digest, &digest_size); - - memset(keybuf, 0, block_size); - memcpy(keybuf, key, MIN(block_size, key_len)); - for (int i = 0; i < block_size; i++) - keybuf[i] ^= 0x5c; - - GChecksum *csum2 = g_checksum_new(algo); - g_checksum_update(csum2, keybuf, block_size); - g_checksum_update(csum2, digest, digest_size); - - g_checksum_get_digest(csum2, buffer, digest_len); - - g_checksum_free(csum1); - g_checksum_free(csum2); -} - -/* Unfortunately a glib hash table is only guaranteed to be able to store - * 32-bit keys if we use the key directly. If we want 64-bit inode numbers, - * we'll have to allocate memory to store the 64-bit inumber, and use a pointer - * to it. Rather than allocate the memory for the key, we'll just include a - * pointer to the 64-bit inum stored in the inode itself, so that we don't need - * to do any more memory management. */ -static guint bluesky_fs_key_hash_func(gconstpointer key) -{ - uint64_t inum = *(const uint64_t *)key; - return (guint)inum; -} - -static gboolean bluesky_fs_key_equal_func(gconstpointer a, gconstpointer b) -{ - uint64_t i1 = *(const uint64_t *)a; - uint64_t i2 = *(const uint64_t *)b; - return i1 == i2; -} - -/* Filesystem-level operations. A filesystem is like a directory tree that we - * are willing to export. */ -BlueSkyFS *bluesky_new_fs(gchar *name) -{ - BlueSkyFS *fs = g_new0(BlueSkyFS, 1); - fs->lock = g_mutex_new(); - fs->name = g_strdup(name); - fs->inodes = g_hash_table_new(bluesky_fs_key_hash_func, - bluesky_fs_key_equal_func); - fs->next_inum = BLUESKY_ROOT_INUM + 1; - fs->store = s3store_new(); - - return fs; -} - -/* Allocate a fresh inode number which has not been used before within a - * filesystem. 
*/ -uint64_t bluesky_fs_alloc_inode(BlueSkyFS *fs) -{ - uint64_t inum; - - g_mutex_lock(fs->lock); - inum = fs->next_inum; - fs->next_inum++; - g_mutex_unlock(fs->lock); - - return inum; -} - -BlueSkyInode *bluesky_new_inode(uint64_t inum, BlueSkyFS *fs, - BlueSkyFileType type) -{ - BlueSkyInode *i = g_new0(BlueSkyInode, 1); - - i->lock = g_mutex_new(); - i->type = type; - i->fs = fs; - i->inum = inum; - - switch (type) { - case BLUESKY_REGULAR: - i->blocks = g_array_new(FALSE, TRUE, sizeof(BlueSkyBlock)); - break; - case BLUESKY_DIRECTORY: - i->dirents = g_sequence_new(bluesky_dirent_destroy); - i->dirhash = g_hash_table_new(g_str_hash, g_str_equal); - break; - case BLUESKY_BLOCK: - case BLUESKY_CHARACTER: - case BLUESKY_SYMLINK: - case BLUESKY_SOCKET: - case BLUESKY_FIFO: - break; - } - - return i; -} - -/* Retrieve an inode from the filesystem. Eventually this will be a cache and - * so we might need to go fetch the inode from elsewhere; for now all - * filesystem state is stored here. */ -BlueSkyInode *bluesky_get_inode(BlueSkyFS *fs, uint64_t inum) -{ - BlueSkyInode *inode = NULL; - - g_mutex_lock(fs->lock); - inode = (BlueSkyInode *)g_hash_table_lookup(fs->inodes, &inum); - g_mutex_unlock(fs->lock); - - return inode; -} - -/* Insert an inode into the filesystem inode cache. */ -void bluesky_insert_inode(BlueSkyFS *fs, BlueSkyInode *inode) -{ - g_mutex_lock(fs->lock); - g_hash_table_insert(fs->inodes, &inode->inum, inode); - g_mutex_unlock(fs->lock); -} - -/* Mark a given block dirty and make sure that data is faulted in so that it - * can be written to. */ -void bluesky_block_touch(BlueSkyInode *inode, uint64_t i) -{ - g_return_if_fail(i < inode->blocks->len); - BlueSkyBlock *block = &g_array_index(inode->blocks, BlueSkyBlock, i); - - switch (block->type) { - case BLUESKY_BLOCK_ZERO: - block->data = bluesky_string_new(g_malloc0(BLUESKY_BLOCK_SIZE), - BLUESKY_BLOCK_SIZE); - break; - case BLUESKY_BLOCK_REF: - bluesky_block_fetch(inode->fs, block); - g_assert(block->type == BLUESKY_BLOCK_CACHED); - /* Fall through */ - case BLUESKY_BLOCK_CACHED: - case BLUESKY_BLOCK_DIRTY: - block->data = bluesky_string_dup(block->data); - break; - } - - block->type = BLUESKY_BLOCK_DIRTY; -} - -/* Set the size of a file. This will truncate or extend the file as needed. - * Newly-allocated bytes are zeroed. */ -void bluesky_file_truncate(BlueSkyInode *inode, uint64_t size) -{ - g_return_if_fail(size <= BLUESKY_MAX_FILE_SIZE); - - if (size == inode->size) - return; - - uint64_t blocks = (size + BLUESKY_BLOCK_SIZE - 1) / BLUESKY_BLOCK_SIZE; - - if (blocks > inode->blocks->len) { - /* Need to add new blocks to the end of a file. New block structures - * are automatically zeroed, which initializes them to be pointers to - * zero blocks so we don't need to do any more work. */ - g_array_set_size(inode->blocks, blocks); - } else if (blocks < inode->blocks->len) { - /* Delete blocks from a file. Must reclaim memory. */ - for (guint i = inode->blocks->len; i < blocks; i++) { - BlueSkyBlock *b = &g_array_index(inode->blocks, BlueSkyBlock, i); - g_free(b->ref); - bluesky_string_unref(b->data); - } - g_array_set_size(inode->blocks, blocks); - } - - /* If the file size is being decreased, ensure that any trailing data in - * the last block is zeroed. 
*/ - if (size < inode->size) { - BlueSkyBlock *b = &g_array_index(inode->blocks, BlueSkyBlock, - blocks - 1); - if (b->type != BLUESKY_BLOCK_ZERO) { - bluesky_block_touch(inode, blocks - 1); - int end_offset = size % BLUESKY_BLOCK_SIZE; - if (end_offset > 0) { - memset(&b->data->data[end_offset], 0, - BLUESKY_BLOCK_SIZE - end_offset); - } - } - } - - inode->size = size; - bluesky_inode_update_ctime(inode, 1); -} - -void bluesky_file_write(BlueSkyInode *inode, uint64_t offset, - const char *data, gint len) -{ - g_return_if_fail(inode->type == BLUESKY_REGULAR); - g_return_if_fail(offset < inode->size); - g_return_if_fail(len <= inode->size - offset); - - if (len == 0) - return; - - while (len > 0) { - uint64_t block_num = offset / BLUESKY_BLOCK_SIZE; - gint block_offset = offset % BLUESKY_BLOCK_SIZE; - gint bytes = MIN(BLUESKY_BLOCK_SIZE - block_offset, len); - - bluesky_block_touch(inode, block_num); - BlueSkyBlock *b = &g_array_index(inode->blocks, BlueSkyBlock, - block_num); - memcpy(&b->data->data[block_offset], data, bytes); - bluesky_block_flush(inode->fs, b); - - offset += bytes; - data += bytes; - len -= bytes; - } - - bluesky_inode_update_ctime(inode, 1); -} - -void bluesky_file_read(BlueSkyInode *inode, uint64_t offset, - char *buf, gint len) -{ - g_return_if_fail(inode->type == BLUESKY_REGULAR); - g_return_if_fail(offset < inode->size); - g_return_if_fail(len <= inode->size - offset); - - while (len > 0) { - uint64_t block_num = offset / BLUESKY_BLOCK_SIZE; - gint block_offset = offset % BLUESKY_BLOCK_SIZE; - gint bytes = MIN(BLUESKY_BLOCK_SIZE - block_offset, len); - - BlueSkyBlock *b = &g_array_index(inode->blocks, BlueSkyBlock, - block_num); - switch (b->type) { - case BLUESKY_BLOCK_ZERO: - memset(buf, 0, bytes); - break; - case BLUESKY_BLOCK_REF: - bluesky_block_fetch(inode->fs, b); - /* Fall through */ - case BLUESKY_BLOCK_CACHED: - case BLUESKY_BLOCK_DIRTY: - memcpy(buf, &b->data->data[block_offset], bytes); - break; - } - - offset += bytes; - buf += bytes; - len -= bytes; - } -} - -/* Read the given block from cloud-backed storage if the data is not already - * cached. */ -void bluesky_block_fetch(BlueSkyFS *fs, BlueSkyBlock *block) -{ - if (block->type != BLUESKY_BLOCK_REF) - return; - - g_print("Fetching block from %s\n", block->ref); - BlueSkyRCStr *string = s3store_get(fs->store, block->ref); - - bluesky_string_unref(block->data); - block->data = string; - block->type = BLUESKY_BLOCK_CACHED; -} - -/* Write the given block to cloud-backed storage and mark it clean. */ -void bluesky_block_flush(BlueSkyFS *fs, BlueSkyBlock *block) -{ - if (block->type != BLUESKY_BLOCK_DIRTY) - return; - - GChecksum *csum = g_checksum_new(G_CHECKSUM_SHA256); - g_checksum_update(csum, block->data->data, block->data->len); - gchar *name = g_strdup(g_checksum_get_string(csum)); - - g_print("Flushing block as %s\n", name); - s3store_put(fs->store, name, block->data); - g_free(block->ref); - block->ref = name; - - /* block->type = BLUESKY_BLOCK_CACHED; */ - bluesky_string_unref(block->data); - block->data = NULL; - block->type = BLUESKY_BLOCK_REF; - - g_checksum_free(csum); -} diff --git a/libs3.h b/libs3.h deleted file mode 100644 index 8f06e31..0000000 --- a/libs3.h +++ /dev/null @@ -1,1865 +0,0 @@ -/** ************************************************************************** - * libs3.h - * - * Copyright 2008 Bryan Ischo - * - * This file is part of libs3. 
- * - * libs3 is free software: you can redistribute it and/or modify it under the - * terms of the GNU General Public License as published by the Free Software - * Foundation, version 3 of the License. - * - * In addition, as a special exception, the copyright holders give - * permission to link the code of this library and its programs with the - * OpenSSL library, and distribute linked combinations including the two. - * - * libs3 is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License version 3 - * along with libs3, in a file named COPYING. If not, see - * . - * - ************************************************************************** **/ - -#ifndef LIBS3_H -#define LIBS3_H - -#include -#include - - -#ifdef __cplusplus -extern "C" { -#endif - - -/** ************************************************************************** - * Overview - * -------- - * - * This library provides an API for using Amazon's S3 service (see - * http://s3.amazonaws.com). Its design goals are: - * - * - To provide a simple and straightforward API for accessing all of S3's - * functionality - * - To not require the developer using libs3 to need to know anything about: - * - HTTP - * - XML - * - SSL - * In other words, this API is meant to stand on its own, without requiring - * any implicit knowledge of how S3 services are accessed using HTTP - * protocols. - * - To be usable from multithreaded code - * - To be usable by code which wants to process multiple S3 requests - * simultaneously from a single thread - * - To be usable in the simple, straightforward way using sequentialized - * blocking requests - * - * The general usage pattern of libs3 is: - * - * - Initialize libs3 once per program by calling S3_initialize() at program - * start up time - * - Make any number of requests to S3 for getting, putting, or listing - * S3 buckets or objects, or modifying the ACLs associated with buckets - * or objects, using one of three general approaches: - * 1. Simple blocking requests, one at a time - * 2. Multiple threads each making simple blocking requests - * 3. From a single thread, managing multiple S3 requests simultaneously - * using file descriptors and a select()/poll() loop - * - Shut down libs3 at program exit time by calling S3_deinitialize() - * - * All functions which send requests to S3 return their results via a set of - * callback functions which must be supplied to libs3 at the time that the - * request is initiated. libs3 will call these functions back in the thread - * calling the libs3 function if blocking requests are made (i.e., if the - * S3RequestContext for the function invocation is passed in as NULL). - * If an S3RequestContext is used to drive multiple S3 requests - * simultaneously, then the callbacks will be made from the thread which - * calls S3_runall_request_context() or S3_runonce_request_context(), or - * possibly from the thread which calls S3_destroy_request_context(), if - * S3 requests are in progress at the time that this function is called. - * - * NOTE: Response headers from Amazon S3 are limited to 4K (2K of metas is all - * that Amazon supports, and libs3 allows Amazon an additional 2K of headers). 
- * - * NOTE: Because HTTP and the S3 REST protocol are highly under-specified, - * libs3 must make some assumptions about the maximum length of certain HTTP - * elements (such as headers) that it will accept. While efforts have been - * made to enforce maximums which are beyond that expected to be needed by any - * user of S3, it is always possible that these maximums may be too low in - * some rare circumstances. Bug reports should this unlikely situation occur - * would be most appreciated. - * - * Threading Rules - * --------------- - * - * 1. All arguments passed to any function must not be modified directly until - * the function returns. - * 2. All S3RequestContext and S3Request arguments passed to all functions may - * not be passed to any other libs3 function by any other thread until the - * function returns. - * 3. All functions may be called simultaneously by multiple threads as long - * as (1) and (2) are observed, EXCEPT for S3_initialize(), which must be - * called from one thread at a time only. - * 4. All callbacks will be made in the thread of the caller of the function - * which invoked them, so the caller of all libs3 functions should not hold - * locks that it would try to re-acquire in a callback, as this may - * deadlock. - ************************************************************************** **/ - - -/** ************************************************************************** - * Constants - ************************************************************************** **/ - -/** - * This is the hostname that all S3 requests will go through; virtual-host - * style requests will prepend the bucket name to this host name, and - * path-style requests will use this hostname directly - **/ -#define S3_HOSTNAME "s3.amazonaws.com" - - -/** - * S3_MAX_BUCKET_NAME_SIZE is the maximum size of a bucket name. - **/ - -#define S3_MAX_BUCKET_NAME_SIZE 255 - -/** - * S3_MAX_KEY_SIZE is the maximum size of keys that Amazon S3 supports. - **/ -#define S3_MAX_KEY_SIZE 1024 - - -/** - * S3_MAX_METADATA_SIZE is the maximum number of bytes allowed for - * x-amz-meta header names and values in any request passed to Amazon S3 - **/ -#define S3_MAX_METADATA_SIZE 2048 - - -/** - * S3_METADATA_HEADER_NAME_PREFIX is the prefix of an S3 "meta header" - **/ -#define S3_METADATA_HEADER_NAME_PREFIX "x-amz-meta-" - - -/** - * S3_MAX_METADATA_COUNT is the maximum number of x-amz-meta- headers that - * could be included in a request to S3. The smallest meta header is - * "x-amz-meta-n: v". Since S3 doesn't count the ": " against the total, the - * smallest amount of data to count for a header would be the length of - * "x-amz-meta-nv". - **/ -#define S3_MAX_METADATA_COUNT \ - (S3_MAX_METADATA_SIZE / (sizeof(S3_METADATA_HEADER_NAME_PREFIX "nv") - 1)) - - -/** - * S3_MAX_ACL_GRANT_COUNT is the maximum number of ACL grants that may be - * set on a bucket or object at one time. It is also the maximum number of - * ACL grants that the XML ACL parsing routine will parse. - **/ -#define S3_MAX_ACL_GRANT_COUNT 100 - - -/** - * This is the maximum number of characters (including terminating \0) that - * libs3 supports in an ACL grantee email address. - **/ -#define S3_MAX_GRANTEE_EMAIL_ADDRESS_SIZE 128 - - -/** - * This is the maximum number of characters (including terminating \0) that - * libs3 supports in an ACL grantee user id. 
- **/
-#define S3_MAX_GRANTEE_USER_ID_SIZE 128
-
-
-/**
- * This is the maximum number of characters (including terminating \0) that
- * libs3 supports in an ACL grantee user display name.
- **/
-#define S3_MAX_GRANTEE_DISPLAY_NAME_SIZE 128
-
-
-/**
- * This is the maximum number of characters that will be stored in the
- * return buffer for the utility function which computes an HTTP authenticated
- * query string
- **/
-#define S3_MAX_AUTHENTICATED_QUERY_STRING_SIZE \
-    (sizeof("https://" S3_HOSTNAME "/") + (S3_MAX_KEY_SIZE * 3) + \
-     sizeof("?AWSAccessKeyId=") + 32 + sizeof("&Expires=") + 32 + \
-     sizeof("&Signature=") + 28 + 1)
-
-
-/**
- * This constant is used by the S3_initialize() function, to specify that
- * the winsock library should be initialized by libs3; only relevant on
- * Microsoft Windows platforms.
- **/
-#define S3_INIT_WINSOCK 1
-
-
-/**
- * This convenience constant is used by the S3_initialize() function to
- * indicate that all libraries required by libs3 should be initialized.
- **/
-#define S3_INIT_ALL (S3_INIT_WINSOCK)
-
-
-/** **************************************************************************
- * Enumerations
- ************************************************************************** **/
-
-/**
- * S3Status is a status code as returned by a libs3 function.  The meaning of
- * each status code is defined in the comments for each function which returns
- * that status.
- **/
-typedef enum
-{
-    S3StatusOK,
-
-    /**
-     * Errors that prevent the S3 request from being issued or response from
-     * being read
-     **/
-    S3StatusInternalError,
-    S3StatusOutOfMemory,
-    S3StatusInterrupted,
-    S3StatusInvalidBucketNameTooLong,
-    S3StatusInvalidBucketNameFirstCharacter,
-    S3StatusInvalidBucketNameCharacter,
-    S3StatusInvalidBucketNameCharacterSequence,
-    S3StatusInvalidBucketNameTooShort,
-    S3StatusInvalidBucketNameDotQuadNotation,
-    S3StatusQueryParamsTooLong,
-    S3StatusFailedToInitializeRequest,
-    S3StatusMetaDataHeadersTooLong,
-    S3StatusBadMetaData,
-    S3StatusBadContentType,
-    S3StatusContentTypeTooLong,
-    S3StatusBadMD5,
-    S3StatusMD5TooLong,
-    S3StatusBadCacheControl,
-    S3StatusCacheControlTooLong,
-    S3StatusBadContentDispositionFilename,
-    S3StatusContentDispositionFilenameTooLong,
-    S3StatusBadContentEncoding,
-    S3StatusContentEncodingTooLong,
-    S3StatusBadIfMatchETag,
-    S3StatusIfMatchETagTooLong,
-    S3StatusBadIfNotMatchETag,
-    S3StatusIfNotMatchETagTooLong,
-    S3StatusHeadersTooLong,
-    S3StatusKeyTooLong,
-    S3StatusUriTooLong,
-    S3StatusXmlParseFailure,
-    S3StatusEmailAddressTooLong,
-    S3StatusUserIdTooLong,
-    S3StatusUserDisplayNameTooLong,
-    S3StatusGroupUriTooLong,
-    S3StatusPermissionTooLong,
-    S3StatusTargetBucketTooLong,
-    S3StatusTargetPrefixTooLong,
-    S3StatusTooManyGrants,
-    S3StatusBadGrantee,
-    S3StatusBadPermission,
-    S3StatusXmlDocumentTooLarge,
-    S3StatusNameLookupError,
-    S3StatusFailedToConnect,
-    S3StatusServerFailedVerification,
-    S3StatusConnectionFailed,
-    S3StatusAbortedByCallback,
-
-    /**
-     * Errors from the S3 service
-     **/
-    S3StatusErrorAccessDenied,
-    S3StatusErrorAccountProblem,
-    S3StatusErrorAmbiguousGrantByEmailAddress,
-    S3StatusErrorBadDigest,
-    S3StatusErrorBucketAlreadyExists,
-    S3StatusErrorBucketAlreadyOwnedByYou,
-    S3StatusErrorBucketNotEmpty,
-    S3StatusErrorCredentialsNotSupported,
-    S3StatusErrorCrossLocationLoggingProhibited,
-    S3StatusErrorEntityTooSmall,
-    S3StatusErrorEntityTooLarge,
-    S3StatusErrorExpiredToken,
-    S3StatusErrorIncompleteBody,
-    S3StatusErrorIncorrectNumberOfFilesInPostRequest,
-    S3StatusErrorInlineDataTooLarge,
-    S3StatusErrorInternalError,
-    S3StatusErrorInvalidAccessKeyId,
-    S3StatusErrorInvalidAddressingHeader,
-    S3StatusErrorInvalidArgument,
-    S3StatusErrorInvalidBucketName,
-    S3StatusErrorInvalidDigest,
-    S3StatusErrorInvalidLocationConstraint,
-    S3StatusErrorInvalidPayer,
-    S3StatusErrorInvalidPolicyDocument,
-    S3StatusErrorInvalidRange,
-    S3StatusErrorInvalidSecurity,
-    S3StatusErrorInvalidSOAPRequest,
-    S3StatusErrorInvalidStorageClass,
-    S3StatusErrorInvalidTargetBucketForLogging,
-    S3StatusErrorInvalidToken,
-    S3StatusErrorInvalidURI,
-    S3StatusErrorKeyTooLong,
-    S3StatusErrorMalformedACLError,
-    S3StatusErrorMalformedXML,
-    S3StatusErrorMaxMessageLengthExceeded,
-    S3StatusErrorMaxPostPreDataLengthExceededError,
-    S3StatusErrorMetadataTooLarge,
-    S3StatusErrorMethodNotAllowed,
-    S3StatusErrorMissingAttachment,
-    S3StatusErrorMissingContentLength,
-    S3StatusErrorMissingSecurityElement,
-    S3StatusErrorMissingSecurityHeader,
-    S3StatusErrorNoLoggingStatusForKey,
-    S3StatusErrorNoSuchBucket,
-    S3StatusErrorNoSuchKey,
-    S3StatusErrorNotImplemented,
-    S3StatusErrorNotSignedUp,
-    S3StatusErrorOperationAborted,
-    S3StatusErrorPermanentRedirect,
-    S3StatusErrorPreconditionFailed,
-    S3StatusErrorRedirect,
-    S3StatusErrorRequestIsNotMultiPartContent,
-    S3StatusErrorRequestTimeout,
-    S3StatusErrorRequestTimeTooSkewed,
-    S3StatusErrorRequestTorrentOfBucketError,
-    S3StatusErrorSignatureDoesNotMatch,
-    S3StatusErrorSlowDown,
-    S3StatusErrorTemporaryRedirect,
-    S3StatusErrorTokenRefreshRequired,
-    S3StatusErrorTooManyBuckets,
-    S3StatusErrorUnexpectedContent,
-    S3StatusErrorUnresolvableGrantByEmailAddress,
-    S3StatusErrorUserKeyMustBeSpecified,
-    S3StatusErrorUnknown,
-
-    /**
-     * The following are HTTP errors returned by S3 without enough detail to
-     * distinguish any of the above S3StatusError conditions
-     **/
-    S3StatusHttpErrorMovedTemporarily,
-    S3StatusHttpErrorBadRequest,
-    S3StatusHttpErrorForbidden,
-    S3StatusHttpErrorNotFound,
-    S3StatusHttpErrorConflict,
-    S3StatusHttpErrorUnknown
-} S3Status;
-
-
-/**
- * S3Protocol represents a protocol that may be used for communicating a
- * request to the Amazon S3 service.
- *
- * In general, HTTPS is greatly preferred (and should be the default of any
- * application using libs3) because it protects any data being sent to or
- * from S3 using strong encryption.  However, HTTPS is much more CPU intensive
- * than HTTP, and if the caller is absolutely certain that it is OK for the
- * data to be viewable by anyone in transit, then HTTP can be used.
- **/
-typedef enum
-{
-    S3ProtocolHTTPS = 0,
-    S3ProtocolHTTP = 1
-} S3Protocol;
-
-
-/**
- * S3UriStyle defines the form that an Amazon S3 URI identifying a bucket or
- * object can take.  They are of these forms:
- *
- * Virtual Host: ${protocol}://${bucket}.s3.amazonaws.com/[${key}]
- * Path: ${protocol}://s3.amazonaws.com/${bucket}/[${key}]
- *
- * It is generally better to use the Virtual Host URI form, because it ensures
- * that the bucket name used is compatible with normal HTTP GETs and POSTs of
- * data to/from the bucket.  However, if DNS lookups for the bucket are too
- * slow or unreliable for some reason, Path URI form may be used.
- **/
-typedef enum
-{
-    S3UriStyleVirtualHost = 0,
-    S3UriStylePath = 1
-} S3UriStyle;
-
-
-/**
- * S3GranteeType defines the type of Grantee used in an S3 ACL Grant.
- * Amazon Customer By Email - identifies the Grantee using their Amazon S3 - * account email address - * Canonical User - identifies the Grantee by S3 User ID and Display Name, - * which can only be obtained by making requests to S3, for example, by - * listing owned buckets - * All AWS Users - identifies all authenticated AWS users - * All Users - identifies all users - * Log Delivery - identifies the Amazon group responsible for writing - * server access logs into buckets - **/ -typedef enum -{ - S3GranteeTypeAmazonCustomerByEmail = 0, - S3GranteeTypeCanonicalUser = 1, - S3GranteeTypeAllAwsUsers = 2, - S3GranteeTypeAllUsers = 3, - S3GranteeTypeLogDelivery = 4 -} S3GranteeType; - - -/** - * This is an individual permission granted to a grantee in an S3 ACL Grant. - * Read permission gives the Grantee the permission to list the bucket, or - * read the object or its metadata - * Write permission gives the Grantee the permission to create, overwrite, or - * delete any object in the bucket, and is not supported for objects - * ReadACP permission gives the Grantee the permission to read the ACP for - * the bucket or object; the owner of the bucket or object always has - * this permission implicitly - * WriteACP permission gives the Grantee the permission to overwrite the ACP - * for the bucket or object; the owner of the bucket or object always has - * this permission implicitly - * FullControl permission gives the Grantee all permissions specified by the - * Read, Write, ReadACP, and WriteACP permissions - **/ -typedef enum -{ - S3PermissionRead = 0, - S3PermissionWrite = 1, - S3PermissionReadACP = 2, - S3PermissionWriteACP = 3, - S3PermissionFullControl = 4 -} S3Permission; - - -/** - * S3CannedAcl is an ACL that can be specified when an object is created or - * updated. Each canned ACL has a predefined value when expanded to a full - * set of S3 ACL Grants. - * Private canned ACL gives the owner FULL_CONTROL and no other permissions - * are issued - * Public Read canned ACL gives the owner FULL_CONTROL and all users Read - * permission - * Public Read Write canned ACL gives the owner FULL_CONTROL and all users - * Read and Write permission - * AuthenticatedRead canned ACL gives the owner FULL_CONTROL and authenticated - * S3 users Read permission - **/ -typedef enum -{ - S3CannedAclPrivate = 0, /* private */ - S3CannedAclPublicRead = 1, /* public-read */ - S3CannedAclPublicReadWrite = 2, /* public-read-write */ - S3CannedAclAuthenticatedRead = 3 /* authenticated-read */ -} S3CannedAcl; - - -/** ************************************************************************** - * Data Types - ************************************************************************** **/ - -/** - * An S3RequestContext manages multiple S3 requests simultaneously; see the - * S3_XXX_request_context functions below for details - **/ -typedef struct S3RequestContext S3RequestContext; - - -/** - * S3NameValue represents a single Name - Value pair, used to represent either - * S3 metadata associated with a key, or S3 error details. - **/ -typedef struct S3NameValue -{ - /** - * The name part of the Name - Value pair - **/ - const char *name; - - /** - * The value part of the Name - Value pair - **/ - const char *value; -} S3NameValue; - - -/** - * S3ResponseProperties is passed to the properties callback function which is - * called when the complete response properties have been received. 
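As an illustration, a properties callback might consume this structure as follows. This is a sketch only; the function name is arbitrary, and it matches the S3ResponsePropertiesCallback signature defined later in this header.

    #include <stdio.h>
    #include "libs3.h"

    static S3Status show_properties(const S3ResponseProperties *properties,
                                    void *callbackData)
    {
        (void) callbackData;
        if (properties->contentType)
            printf("Content-Type: %s\n", properties->contentType);
        if (properties->eTag)
            printf("ETag: %s\n", properties->eTag);
        /* The x-amz-meta- prefix has already been stripped from names. */
        for (int i = 0; i < properties->metaDataCount; i++)
            printf("meta %s = %s\n", properties->metaData[i].name,
                   properties->metaData[i].value);
        /* Returning anything other than S3StatusOK aborts the request. */
        return S3StatusOK;
    }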
- * Some of the fields of this structure are optional and may not be provided
- * in the response, and some will always be provided in the response.
- **/
-typedef struct S3ResponseProperties
-{
-    /**
-     * This optional field identifies the request ID and may be used when
-     * reporting problems to Amazon.
-     **/
-    const char *requestId;
-
-    /**
-     * This optional field identifies the request ID and may be used when
-     * reporting problems to Amazon.
-     **/
-    const char *requestId2;
-
-    /**
-     * This optional field is the content type of the data which is returned
-     * by the request.  If not provided, the default can be assumed to be
-     * "binary/octet-stream".
-     **/
-    const char *contentType;
-
-    /**
-     * This optional field is the content length of the data which is returned
-     * in the response.  A negative value means that this value was not
-     * provided in the response.  A value of 0 means that there is no content
-     * provided.  A positive value gives the number of bytes in the content of
-     * the response.
-     **/
-    uint64_t contentLength;
-
-    /**
-     * This optional field names the server which serviced the request.
-     **/
-    const char *server;
-
-    /**
-     * This optional field provides a string identifying the unique contents
-     * of the resource identified by the request, such that the contents can
-     * be assumed not to be changed if the same eTag is returned at a later
-     * time describing the same resource.  This is an MD5 sum of the contents.
-     **/
-    const char *eTag;
-
-    /**
-     * This optional field provides the last modified time, relative to the
-     * Unix epoch, of the contents.  If this value is < 0, then the last
-     * modified time was not provided in the response.  If this value is >= 0,
-     * then the last modified date of the contents is available as a number
-     * of seconds since the UNIX epoch.
-     **/
-    int64_t lastModified;
-
-    /**
-     * This is the number of user-provided meta data items associated with the
-     * resource.
-     **/
-    int metaDataCount;
-
-    /**
-     * These are the meta data associated with the resource.  In each case,
-     * the name will not include any S3-specific header prefixes
-     * (i.e. x-amz-meta- will have been removed from the beginning), and
-     * leading and trailing whitespace will have been stripped from the value.
-     **/
-    const S3NameValue *metaData;
-} S3ResponseProperties;
-
-
-/**
- * S3AclGrant identifies a single grant in the ACL for a bucket or object.  An
- * ACL is composed of any number of grants, which specify a grantee and the
- * permissions given to that grantee.  S3 does not normalize ACLs in any way,
- * so a redundant ACL specification will lead to a redundant ACL stored in S3.
- **/
-typedef struct S3AclGrant
-{
-    /**
-     * The granteeType gives the type of grantee specified by this grant.
-     **/
-    S3GranteeType granteeType;
-    /**
-     * The identifier of the grantee that is set is determined by the
-     * granteeType:
-     *
-     * S3GranteeTypeAmazonCustomerByEmail - amazonCustomerByEmail.emailAddress
-     * S3GranteeTypeCanonicalUser - canonicalUser.id, canonicalUser.displayName
-     * S3GranteeTypeAllAwsUsers - none
-     * S3GranteeTypeAllUsers - none
-     **/
-    union
-    {
-        /**
-         * This structure is used iff the granteeType is
-         * S3GranteeTypeAmazonCustomerByEmail.
-         **/
-        struct
-        {
-            /**
-             * This is the email address of the Amazon Customer being granted
-             * permissions by this S3AclGrant.
-             **/
-            char emailAddress[S3_MAX_GRANTEE_EMAIL_ADDRESS_SIZE];
-        } amazonCustomerByEmail;
-        /**
-         * This structure is used iff the granteeType is
-         * S3GranteeTypeCanonicalUser.
- **/
-        struct
-        {
-            /**
-             * This is the CanonicalUser ID of the grantee
-             **/
-            char id[S3_MAX_GRANTEE_USER_ID_SIZE];
-            /**
-             * This is the display name of the grantee
-             **/
-            char displayName[S3_MAX_GRANTEE_DISPLAY_NAME_SIZE];
-        } canonicalUser;
-    } grantee;
-    /**
-     * This is the S3Permission to be granted to the grantee
-     **/
-    S3Permission permission;
-} S3AclGrant;
-
-
-/**
- * A context for working with objects within a bucket.  A bucket context holds
- * all information necessary for working with a bucket, and may be used
- * repeatedly over many consecutive (or simultaneous) calls into libs3 bucket
- * operation functions.
- **/
-typedef struct S3BucketContext
-{
-    /**
-     * The name of the bucket to use in the bucket context
-     **/
-    const char *bucketName;
-
-    /**
-     * The protocol to use when accessing the bucket
-     **/
-    S3Protocol protocol;
-
-    /**
-     * The URI style to use for all URIs sent to Amazon S3 while working with
-     * this bucket context
-     **/
-    S3UriStyle uriStyle;
-
-    /**
-     * The Amazon Access Key ID to use for access to the bucket
-     **/
-    const char *accessKeyId;
-
-    /**
-     * The Amazon Secret Access Key to use for access to the bucket
-     **/
-    const char *secretAccessKey;
-} S3BucketContext;
-
-
-/**
- * This is a single entry supplied to the list bucket callback by a call to
- * S3_list_bucket.  It identifies a single matching key from the list
- * operation.
- **/
-typedef struct S3ListBucketContent
-{
-    /**
-     * This is the next key in the list bucket results.
-     **/
-    const char *key;
-
-    /**
-     * This is the number of seconds since UNIX epoch of the last modified
-     * date of the object identified by the key.
-     **/
-    int64_t lastModified;
-
-    /**
-     * This gives an eTag, which is a signature of the contents of the object;
-     * this is the MD5 of the contents of the object.
-     **/
-    const char *eTag;
-
-    /**
-     * This is the size of the object in bytes.
-     **/
-    uint64_t size;
-
-    /**
-     * This is the ID of the owner of the key; it is present only if access
-     * permissions allow it to be viewed.
-     **/
-    const char *ownerId;
-
-    /**
-     * This is the display name of the owner of the key; it is present only if
-     * access permissions allow it to be viewed.
-     **/
-    const char *ownerDisplayName;
-} S3ListBucketContent;
-
-
-/**
- * S3PutProperties is the set of properties that may optionally be set by the
- * user when putting objects to S3.  Each field of this structure is optional
- * and may or may not be present.
- **/
-typedef struct S3PutProperties
-{
-    /**
-     * If present, this is the Content-Type that should be associated with the
-     * object.  If not provided, S3 defaults to "binary/octet-stream".
-     **/
-    const char *contentType;
-
-    /**
-     * If present, this provides the MD5 signature of the contents, and is
-     * used to validate the contents.  This is highly recommended by Amazon
-     * but not required.  Its format is as a base64-encoded MD5 sum.
-     **/
-    const char *md5;
-
-    /**
-     * If present, this gives a Cache-Control header string to be supplied to
-     * HTTP clients which download this
-     **/
-    const char *cacheControl;
-
-    /**
-     * If present, this gives the filename to save the downloaded file to,
-     * whenever the object is downloaded via a web browser.  This is only
-     * relevant for objects which are intended to be shared to users via web
-     * browsers and which are additionally intended to be downloaded rather
-     * than viewed.
-     **/
-    const char *contentDispositionFilename;
-
-    /**
-     * If present, this identifies the content encoding of the object.  This
-     * is only applicable to encoded (usually, compressed) content, and only
-     * relevant if the object is intended to be downloaded via a browser.
-     **/
-    const char *contentEncoding;
-
-    /**
-     * If >= 0, this gives an expiration date for the content.  This
-     * information is typically only delivered to users who download the
-     * content via a web browser.
-     **/
-    int64_t expires;
-
-    /**
-     * This identifies the "canned ACL" that should be used for this object.
-     * The default (0) gives only the owner of the object access to it.
-     **/
-    S3CannedAcl cannedAcl;
-
-    /**
-     * This is the number of values in the metaData field.
-     **/
-    int metaDataCount;
-
-    /**
-     * These are the meta data to pass to S3.  In each case, the name part of
-     * the Name - Value pair should not include any special S3 HTTP header
-     * prefix (i.e., should be of the form 'foo', NOT 'x-amz-meta-foo').
-     **/
-    const S3NameValue *metaData;
-} S3PutProperties;
-
-
-/**
- * S3GetConditions is used for the get_object operation, and specifies
- * conditions which the object must meet in order to be successfully returned.
- **/
-typedef struct S3GetConditions
-{
-    /**
-     * The request will be processed if the Last-Modification header of the
-     * object is greater than or equal to this value, specified as a number of
-     * seconds since Unix epoch.  If this value is less than zero, it will not
-     * be used in the conditional.
-     **/
-    int64_t ifModifiedSince;
-
-    /**
-     * The request will be processed if the Last-Modification header of the
-     * object is less than this value, specified as a number of seconds since
-     * Unix epoch.  If this value is less than zero, it will not be used in
-     * the conditional.
-     **/
-    int64_t ifNotModifiedSince;
-
-    /**
-     * If non-NULL, this gives an eTag header value which the object must
-     * match in order to be returned.  Note that although the eTag is simply
-     * an MD5, this must be presented in the S3 eTag form, which typically
-     * includes double-quotes.
-     **/
-    const char *ifMatchETag;
-
-    /**
-     * If non-NULL, this gives an eTag header value which the object must not
-     * match in order to be returned.  Note that although the eTag is simply
-     * an MD5, this must be presented in the S3 eTag form, which typically
-     * includes double-quotes.
-     **/
-    const char *ifNotMatchETag;
-} S3GetConditions;
-
-
-/**
- * S3ErrorDetails provides detailed information describing an S3 error.  This
- * is only presented when the error is an S3-generated error (i.e. one of the
- * S3StatusErrorXXX values).
- **/
-typedef struct S3ErrorDetails
-{
-    /**
-     * This is the human-readable message that Amazon supplied describing the
-     * error
-     **/
-    const char *message;
-
-    /**
-     * This identifies the resource for which the error occurred
-     **/
-    const char *resource;
-
-    /**
-     * This gives human-readable further details describing the specifics of
-     * this error
-     **/
-    const char *furtherDetails;
-
-    /**
-     * This gives the number of S3NameValue pairs present in the extraDetails
-     * array
-     **/
-    int extraDetailsCount;
-
-    /**
-     * S3 can provide extra details in a freeform Name - Value pair format.
-     * Each error can have any number of these, and this array provides these
-     * additional extra details.
- **/ - S3NameValue *extraDetails; -} S3ErrorDetails; - - -/** ************************************************************************** - * Callback Signatures - ************************************************************************** **/ - -/** - * This callback is made whenever the response properties become available for - * any request. - * - * @param properties are the properties that are available from the response - * @param callbackData is the callback data as specified when the request - * was issued. - * @return S3StatusOK to continue processing the request, anything else to - * immediately abort the request with a status which will be - * passed to the S3ResponseCompleteCallback for this request. - * Typically, this will return either S3StatusOK or - * S3StatusAbortedByCallback. - **/ -typedef S3Status (S3ResponsePropertiesCallback) - (const S3ResponseProperties *properties, void *callbackData); - - -/** - * This callback is made when the response has been completely received, or an - * error has occurred which has prematurely aborted the request, or one of the - * other user-supplied callbacks returned a value intended to abort the - * request. This callback is always made for every request, as the very last - * callback made for that request. - * - * @param status gives the overall status of the response, indicating success - * or failure; use S3_status_is_retryable() as a simple way to detect - * whether or not the status indicates that the request failed but may - * be retried. - * @param errorDetails if non-NULL, gives details as returned by the S3 - * service, describing the error - * @param callbackData is the callback data as specified when the request - * was issued. - **/ -typedef void (S3ResponseCompleteCallback)(S3Status status, - const S3ErrorDetails *errorDetails, - void *callbackData); - - -/** - * This callback is made for each bucket resulting from a list service - * operation. - * - * @param ownerId is the ID of the owner of the bucket - * @param ownerDisplayName is the owner display name of the owner of the bucket - * @param bucketName is the name of the bucket - * @param creationDateSeconds if < 0 indicates that no creation date was - * supplied for the bucket; if >= 0 indicates the number of seconds - * since UNIX Epoch of the creation date of the bucket - * @param callbackData is the callback data as specified when the request - * was issued. - * @return S3StatusOK to continue processing the request, anything else to - * immediately abort the request with a status which will be - * passed to the S3ResponseCompleteCallback for this request. - * Typically, this will return either S3StatusOK or - * S3StatusAbortedByCallback. - **/ -typedef S3Status (S3ListServiceCallback)(const char *ownerId, - const char *ownerDisplayName, - const char *bucketName, - int64_t creationDateSeconds, - void *callbackData); - - -/** - * This callback is made repeatedly as a list bucket operation progresses. - * The contents reported via this callback are only reported once per list - * bucket operation, but multiple calls to this callback may be necessary to - * report all items resulting from the list bucket operation. 
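For example, a callback of this type that simply prints each reported key might look like the following sketch. The function name is arbitrary; such a callback would be wired to S3_list_bucket, declared later in this header, through an S3ListBucketHandler.

    #include <stdio.h>
    #include "libs3.h"

    static S3Status print_keys(int isTruncated, const char *nextMarker,
                               int contentsCount,
                               const S3ListBucketContent *contents,
                               int commonPrefixesCount,
                               const char **commonPrefixes,
                               void *callbackData)
    {
        (void) nextMarker; (void) commonPrefixesCount;
        (void) commonPrefixes; (void) callbackData;
        for (int i = 0; i < contentsCount; i++)
            printf("%s (%llu bytes)\n", contents[i].key,
                   (unsigned long long) contents[i].size);
        if (isTruncated)
            printf("(truncated; list again with the marker parameter "
                   "set to continue)\n");
        return S3StatusOK;
    }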
- *
- * @param isTruncated is true if the list bucket request was truncated by the
- *        S3 service, in which case the remainder of the list may be obtained
- *        by querying again using the Marker parameter to start the query
- *        after this set of results
- * @param nextMarker if present, gives the largest (alphabetically) key
- *        returned in the response, which, if isTruncated is true, may be used
- *        as the marker in a subsequent list buckets operation to continue
- *        listing
- * @param contentsCount is the number of ListBucketContent structures in the
- *        contents parameter
- * @param contents is an array of ListBucketContent structures, each one
- *        describing an object in the bucket
- * @param commonPrefixesCount is the number of common prefixes strings in the
- *        commonPrefixes parameter
- * @param commonPrefixes is an array of strings, each specifying one of the
- *        common prefixes as returned by S3
- * @param callbackData is the callback data as specified when the request
- *        was issued.
- * @return S3StatusOK to continue processing the request, anything else to
- *         immediately abort the request with a status which will be
- *         passed to the S3ResponseCompleteCallback for this request.
- *         Typically, this will return either S3StatusOK or
- *         S3StatusAbortedByCallback.
- **/
-typedef S3Status (S3ListBucketCallback)(int isTruncated,
-                                        const char *nextMarker,
-                                        int contentsCount,
-                                        const S3ListBucketContent *contents,
-                                        int commonPrefixesCount,
-                                        const char **commonPrefixes,
-                                        void *callbackData);
-
-
-/**
- * This callback is made during a put object operation, to obtain the next
- * chunk of data to put to the S3 service as the contents of the object.  This
- * callback is made repeatedly, each time acquiring the next chunk of data to
- * write to the service, until a negative or 0 value is returned.
- *
- * @param bufferSize gives the maximum number of bytes that may be written
- *        into the buffer parameter by this callback
- * @param buffer gives the buffer to fill with at most bufferSize bytes of
- *        data as the next chunk of data to send to S3 as the contents of this
- *        object
- * @param callbackData is the callback data as specified when the request
- *        was issued.
- * @return < 0 to abort the request with the S3StatusAbortedByCallback, which
- *         will be passed to the response complete callback for this request,
- *         or 0 to indicate the end of data, or > 0 to identify the number of
- *         bytes that were written into the buffer by this callback
- **/
-typedef int (S3PutObjectDataCallback)(int bufferSize, char *buffer,
-                                      void *callbackData);
-
-
-/**
- * This callback is made during a get object operation, to provide the next
- * chunk of data available from the S3 service constituting the contents of
- * the object being fetched.  This callback is made repeatedly, each time
- * providing the next chunk of data read, until the complete object contents
- * have been passed through the callback in this way, or the callback
- * returns an error status.
- *
- * @param bufferSize gives the number of bytes in buffer
- * @param buffer is the data being passed into the callback
- * @param callbackData is the callback data as specified when the request
- *        was issued.
- * @return S3StatusOK to continue processing the request, anything else to
- *         immediately abort the request with a status which will be
- *         passed to the S3ResponseCompleteCallback for this request.
- *         Typically, this will return either S3StatusOK or
- *         S3StatusAbortedByCallback.
- **/ -typedef S3Status (S3GetObjectDataCallback)(int bufferSize, const char *buffer, - void *callbackData); - - -/** ************************************************************************** - * Callback Structures - ************************************************************************** **/ - - -/** - * An S3ResponseHandler defines the callbacks which are made for any - * request. - **/ -typedef struct S3ResponseHandler -{ - /** - * The propertiesCallback is made when the response properties have - * successfully been returned from S3. This function may not be called - * if the response properties were not successfully returned from S3. - **/ - S3ResponsePropertiesCallback *propertiesCallback; - - /** - * The completeCallback is always called for every request made to S3, - * regardless of the outcome of the request. It provides the status of - * the request upon its completion, as well as extra error details in the - * event of an S3 error. - **/ - S3ResponseCompleteCallback *completeCallback; -} S3ResponseHandler; - - -/** - * An S3ListServiceHandler defines the callbacks which are made for - * list_service requests. - **/ -typedef struct S3ListServiceHandler -{ - /** - * responseHandler provides the properties and complete callback - **/ - S3ResponseHandler responseHandler; - - /** - * The listServiceCallback is called as items are reported back from S3 as - * responses to the request - **/ - S3ListServiceCallback *listServiceCallback; -} S3ListServiceHandler; - - -/** - * An S3ListBucketHandler defines the callbacks which are made for - * list_bucket requests. - **/ -typedef struct S3ListBucketHandler -{ - /** - * responseHandler provides the properties and complete callback - **/ - S3ResponseHandler responseHandler; - - /** - * The listBucketCallback is called as items are reported back from S3 as - * responses to the request. This may be called more than one time per - * list bucket request, each time providing more items from the list - * operation. - **/ - S3ListBucketCallback *listBucketCallback; -} S3ListBucketHandler; - - -/** - * An S3PutObjectHandler defines the callbacks which are made for - * put_object requests. - **/ -typedef struct S3PutObjectHandler -{ - /** - * responseHandler provides the properties and complete callback - **/ - S3ResponseHandler responseHandler; - - /** - * The putObjectDataCallback is called to acquire data to send to S3 as - * the contents of the put_object request. It is made repeatedly until it - * returns a negative number (indicating that the request should be - * aborted), or 0 (indicating that all data has been supplied). - **/ - S3PutObjectDataCallback *putObjectDataCallback; -} S3PutObjectHandler; - - -/** - * An S3GetObjectHandler defines the callbacks which are made for - * get_object requests. - **/ -typedef struct S3GetObjectHandler -{ - /** - * responseHandler provides the properties and complete callback - **/ - S3ResponseHandler responseHandler; - - /** - * The getObjectDataCallback is called as data is read from S3 as the - * contents of the object being read in the get_object request. It is - * called repeatedly until there is no more data provided in the request, - * or until the callback returns an error status indicating that the - * request should be aborted. 
- **/
-    S3GetObjectDataCallback *getObjectDataCallback;
-} S3GetObjectHandler;
-
-
-/** **************************************************************************
- * General Library Functions
- ************************************************************************** **/
-
-/**
- * Initializes libs3 for use.  This function must be called before any other
- * libs3 function is called.  It may be called multiple times, with the same
- * effect as calling it once, as long as S3_deinitialize() is called an
- * equal number of times when the program has finished.  This function is NOT
- * thread-safe and must only be called by one thread at a time.
- *
- * @param userAgentInfo is a string that will be included in the User-Agent
- *        header of every request made to the S3 service.  You may provide
- *        NULL or the empty string if you don't care about this.  The value
- *        will not be copied by this function and must remain unaltered by the
- *        caller until S3_deinitialize() is called.
- * @param flags is a bitmask of some combination of S3_INIT_XXX flags, or
- *        S3_INIT_ALL, indicating which of the libraries that libs3 depends
- *        upon should be initialized by S3_initialize().  Only if your program
- *        initializes one of these dependency libraries itself should anything
- *        other than S3_INIT_ALL be passed in for this bitmask.
- *
- *        You should pass S3_INIT_WINSOCK if and only if your application does
- *        not initialize winsock elsewhere.  On non-Microsoft Windows
- *        platforms it has no effect.
- *
- *        As a convenience, the macro S3_INIT_ALL is provided, which will do
- *        all necessary initialization; however, be warned that things may
- *        break if your application re-initializes the dependent libraries
- *        later.
- * @return One of:
- *         S3StatusOK on success
- *         S3StatusInternalError if dependent libraries could not be
- *             initialized
- *         S3StatusOutOfMemory on failure due to out of memory
- **/
-S3Status S3_initialize(const char *userAgentInfo, int flags);
-
-
-/**
- * Must be called once per program for each call to S3_initialize().  After
- * this call is complete, no libs3 function may be called except
- * S3_initialize().
- **/
-void S3_deinitialize();
-
-
-/**
- * Returns a string with the textual name of an S3Status code
- *
- * @param status is the S3Status code for which the textual name will be
- *        returned
- * @return a string with the textual name of an S3Status code
- **/
-const char *S3_get_status_name(S3Status status);
-
-
-/**
- * This function may be used to validate an S3 bucket name as being in the
- * correct form for use with the S3 service.  Amazon S3 limits the allowed
- * characters in S3 bucket names, as well as imposing some additional rules on
- * the length of bucket names and their structure.  There are actually two
- * limits; one for bucket names used only in path-style URIs, and a more
- * strict limit used for bucket names used in virtual-host-style URIs.  It is
- * advisable to use only bucket names which meet the more strict requirements
- * regardless of how the bucket is expected to be used.
- *
- * This method does NOT validate that the bucket is available for use in the
- * S3 service, so the return value of this function cannot be used to decide
- * whether or not a bucket with the given name already exists in Amazon S3 or
- * is accessible by the caller.  It merely validates that the bucket name is
- * valid for use with S3.
- *
- * @param bucketName is the bucket name to validate
- * @param uriStyle gives the URI style to validate the bucket name against.
- *        It is advisable to always use S3UriStyleVirtualHost.
- * @return One of:
- *         S3StatusOK if the bucket name was validated successfully
- *         S3StatusInvalidBucketNameTooLong if the bucket name exceeded the
- *             length limitation for the URI style, which is 255 bytes for
- *             path style URIs and 63 bytes for virtual host type URIs
- *         S3StatusInvalidBucketNameTooShort if the bucket name is less than
- *             3 characters
- *         S3StatusInvalidBucketNameFirstCharacter if the bucket name has an
- *             invalid first character, which is anything other than
- *             an alphanumeric character
- *         S3StatusInvalidBucketNameCharacterSequence if the bucket name
- *             includes an invalid character sequence, which for virtual host
- *             style buckets is ".-" or "-."
- *         S3StatusInvalidBucketNameCharacter if the bucket name includes an
- *             invalid character, which is anything other than alphanumeric,
- *             '-', '.', or for path style URIs only, '_'.
- *         S3StatusInvalidBucketNameDotQuadNotation if the bucket name is in
- *             dot-quad notation, i.e. the form of an IP address, which is
- *             not allowed by Amazon S3.
- **/
-S3Status S3_validate_bucket_name(const char *bucketName, S3UriStyle uriStyle);
-
-
-/**
- * Converts an XML representation of an ACL to a libs3 structured
- * representation.  This method is not strictly necessary for working with
- * ACLs using libs3, but may be convenient for users of the library who read
- * ACLs from elsewhere in XML format and need to use these ACLs with libs3.
- *
- * @param aclXml is the XML representation of the ACL.  This must be a
- *        zero-terminated character string.
- * @param ownerId will be filled in with the Owner ID specified in the XML.
- *        At most S3_MAX_GRANTEE_USER_ID_SIZE bytes will be stored at this
- *        location.
- * @param ownerDisplayName will be filled in with the Owner Display Name
- *        specified in the XML.  At most S3_MAX_GRANTEE_DISPLAY_NAME_SIZE
- *        bytes will be stored at this location.
- * @param aclGrantCountReturn returns the number of S3AclGrant structures
- *        returned in the aclGrants array
- * @param aclGrants must be passed in as an array of at least
- *        S3_MAX_ACL_GRANT_COUNT structures, and on return from this function,
- *        the first aclGrantCountReturn structures will be filled in with the
- *        ACLs represented by the input XML.
- * @return One of:
- *         S3StatusOK on successful conversion of the ACL
- *         S3StatusInternalError on internal error representing a bug in the
- *             libs3 library
- *         S3StatusXmlParseFailure if the XML document was malformed
- **/
-S3Status S3_convert_acl(char *aclXml, char *ownerId, char *ownerDisplayName,
-                        int *aclGrantCountReturn, S3AclGrant *aclGrants);
-
-
-/**
- * Returns nonzero if the status indicates that the request should be
- * immediately retried, because the status indicates an error of a nature that
- * is likely due to transient conditions on the local system or S3, such as
- * network failures, or internal retryable errors reported by S3.  Returns
- * zero otherwise.
- *
- * @param status is the status to evaluate
- * @return nonzero if the status indicates a retryable error, 0 otherwise
- **/
-int S3_status_is_retryable(S3Status status);
-
-
-/** **************************************************************************
- * Request Context Management Functions
- ************************************************************************** **/
-
-/**
- * An S3RequestContext allows multiple requests to be serviced by the same
- * thread simultaneously.
- * It is an optional parameter to all libs3 request functions, and if
- * provided, the request is managed by the S3RequestContext; if not, the
- * request is handled synchronously and is complete when the libs3 request
- * function has returned.
- *
- * @param requestContextReturn returns the newly-created S3RequestContext
- *        structure, which if successfully returned, must be destroyed via a
- *        call to S3_destroy_request_context when it is no longer needed.  If
- *        an error status is returned from this function, then
- *        requestContextReturn will not have been filled in, and
- *        S3_destroy_request_context should not be called on it
- * @return One of:
- *         S3StatusOK if the request context was successfully created
- *         S3StatusOutOfMemory if the request context could not be created due
- *             to an out of memory error
- **/
-S3Status S3_create_request_context(S3RequestContext **requestContextReturn);
-
-
-/**
- * Destroys an S3RequestContext which was created with
- * S3_create_request_context.  Any requests which are currently being
- * processed by the S3RequestContext will immediately be aborted and their
- * request completed callbacks made with the status S3StatusInterrupted.
- *
- * @param requestContext is the S3RequestContext to destroy
- **/
-void S3_destroy_request_context(S3RequestContext *requestContext);
-
-
-/**
- * Runs the S3RequestContext until all requests within it have completed,
- * or until an error occurs.
- *
- * @param requestContext is the S3RequestContext to run until all requests
- *        within it have completed or until an error occurs
- * @return One of:
- *         S3StatusOK if all requests were successfully run to completion
- *         S3StatusInternalError if an internal error prevented the
- *             S3RequestContext from running one or more requests
- *         S3StatusOutOfMemory if requests could not be run to completion
- *             due to an out of memory error
- **/
-S3Status S3_runall_request_context(S3RequestContext *requestContext);
-
-
-/**
- * Does some processing of requests within the S3RequestContext.  One or more
- * requests may have callbacks made on them and may complete.  This function
- * processes any requests which have immediately available I/O, and will not
- * block waiting for I/O on any request.  This function would normally be used
- * with S3_get_request_context_fdsets.
- *
- * @param requestContext is the S3RequestContext to process
- * @param requestsRemainingReturn returns the number of requests remaining
- *        and not yet completed within the S3RequestContext after this
- *        function returns.
- * @return One of:
- *         S3StatusOK if request processing proceeded without error
- *         S3StatusInternalError if an internal error prevented the
- *             S3RequestContext from running one or more requests
- *         S3StatusOutOfMemory if requests could not be processed due to
- *             an out of memory error
- **/
-S3Status S3_runonce_request_context(S3RequestContext *requestContext,
-                                    int *requestsRemainingReturn);
-
-
-/**
- * This function, in conjunction with S3_runonce_request_context, allows
- * callers to manually manage a set of requests using an S3RequestContext.
- * This function returns the set of file descriptors which the caller can
- * watch (typically using select()), along with any other file descriptors of
- * interest to the caller, and using whatever timeout (if any) the caller
- * wishes, until one or more file descriptors in the returned sets become
- * ready for I/O, at which point S3_runonce_request_context can be called to
- * process requests with available I/O.
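A sketch of the select() loop this implies (not part of libs3; it assumes requests have already been added to the context, and it ignores select() errors for brevity):

    #include <sys/select.h>
    #include "libs3.h"

    static void drive_requests(S3RequestContext *ctx)
    {
        int remaining = 1;
        while (remaining > 0) {
            fd_set readfds, writefds, exceptfds;
            int maxfd;
            FD_ZERO(&readfds);
            FD_ZERO(&writefds);
            FD_ZERO(&exceptfds);
            if (S3_get_request_context_fdsets(ctx, &readfds, &writefds,
                                              &exceptfds, &maxfd)
                != S3StatusOK)
                break;
            if (maxfd >= 0) {
                /* Wait no longer than libs3's own internal timeout. */
                int64_t ms = S3_get_request_context_timeout(ctx);
                struct timeval tv;
                tv.tv_sec = ms / 1000;
                tv.tv_usec = (ms % 1000) * 1000;
                select(maxfd + 1, &readfds, &writefds, &exceptfds, &tv);
            }
            /* Process whatever I/O is now ready; this never blocks. */
            if (S3_runonce_request_context(ctx, &remaining) != S3StatusOK)
                break;
        }
    }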
- *
- * @param requestContext is the S3RequestContext to get fd_sets from
- * @param readFdSet is a pointer to an fd_set which will have all file
- *        descriptors to watch for read events for the requests in the
- *        S3RequestContext set into it upon return.  Should be zero'd out
- *        (using FD_ZERO) before being passed into this function.
- * @param writeFdSet is a pointer to an fd_set which will have all file
- *        descriptors to watch for write events for the requests in the
- *        S3RequestContext set into it upon return.  Should be zero'd out
- *        (using FD_ZERO) before being passed into this function.
- * @param exceptFdSet is a pointer to an fd_set which will have all file
- *        descriptors to watch for exception events for the requests in the
- *        S3RequestContext set into it upon return.  Should be zero'd out
- *        (using FD_ZERO) before being passed into this function.
- * @param maxFd returns the highest file descriptor set into any of the
- *        fd_sets, or -1 if no file descriptors were set
- * @return One of:
- *         S3StatusOK if all fd_sets were successfully set
- *         S3StatusInternalError if an internal error prevented this function
- *             from completing successfully
- **/
-S3Status S3_get_request_context_fdsets(S3RequestContext *requestContext,
-                                       fd_set *readFdSet, fd_set *writeFdSet,
-                                       fd_set *exceptFdSet, int *maxFd);
-
-
-/**
- * This function returns the maximum number of milliseconds that the caller of
- * S3_runonce_request_context should wait on the fdsets obtained via a call to
- * S3_get_request_context_fdsets.  In other words, this is essentially the
- * select() timeout that needs to be used (shorter values are OK, but no
- * longer than this) to ensure that internal timeout code of libs3 can work
- * properly.  This function should be called right before select() each time
- * select() on the request_context fdsets is to be performed by the libs3
- * user.
- *
- * @param requestContext is the S3RequestContext to get the timeout from
- * @return the maximum number of milliseconds to select() on fdsets.  Callers
- *         could wait a shorter time if they wish, but not longer.
- **/
-int64_t S3_get_request_context_timeout(S3RequestContext *requestContext);
-
-
-/** **************************************************************************
- * S3 Utility Functions
- ************************************************************************** **/
-
-/**
- * Generates an HTTP authenticated query string, which may then be used by
- * a browser (or other web client) to issue the request.  The request is
- * implicitly a GET request; Amazon S3 is documented to only support this type
- * of authenticated query string request.
- *
- * @param buffer is the output buffer for the authenticated query string.
- *        It must be at least S3_MAX_AUTHENTICATED_QUERY_STRING_SIZE bytes in
- *        length.
- * @param bucketContext gives the bucket and associated parameters for the
- *        request to generate.
- * @param key gives the key which the authenticated request will GET.
- * @param expires gives the number of seconds since Unix epoch for the
- *        expiration date of the request; after this time, the request will
- *        no longer be valid.  If this value is negative, the largest
- *        expiration date possible is used (currently, Jan 19, 2038).
- * @param resource gives a sub-resource to be fetched for the request, or NULL
- *        for none.  This should be of the form "?<resource>", i.e.
- *        "?torrent".
- * @return One of:
- *         S3StatusUriTooLong if, due to an internal error, the generated URI
- *             is longer than S3_MAX_AUTHENTICATED_QUERY_STRING_SIZE bytes in
- *             length and thus will not fit into the supplied buffer
- *         S3StatusOK on success
- **/
-S3Status S3_generate_authenticated_query_string
-    (char *buffer, const S3BucketContext *bucketContext,
-     const char *key, int64_t expires, const char *resource);
-
-
-/** **************************************************************************
- * Service Functions
- ************************************************************************** **/
-
-/**
- * Lists all S3 buckets belonging to the access key id.
- *
- * @param protocol gives the protocol to use for this request
- * @param accessKeyId gives the Amazon Access Key ID for which to list owned
- *        buckets
- * @param secretAccessKey gives the Amazon Secret Access Key for which to list
- *        owned buckets
- * @param requestContext if non-NULL, gives the S3RequestContext to add this
- *        request to, and does not perform the request immediately.  If NULL,
- *        performs the request immediately and synchronously.
- * @param handler gives the callbacks to call as the request is processed and
- *        completed
- * @param callbackData will be passed in as the callbackData parameter to
- *        all callbacks for this request
- **/
-void S3_list_service(S3Protocol protocol, const char *accessKeyId,
-                     const char *secretAccessKey,
-                     S3RequestContext *requestContext,
-                     const S3ListServiceHandler *handler,
-                     void *callbackData);
-
-
-/** **************************************************************************
- * Bucket Functions
- ************************************************************************** **/
-
-/**
- * Tests the existence of an S3 bucket, additionally returning the bucket's
- * location if it exists and is accessible.
- *
- * @param protocol gives the protocol to use for this request
- * @param uriStyle gives the URI style to use for this request
- * @param accessKeyId gives the Amazon Access Key ID for which to list owned
- *        buckets
- * @param secretAccessKey gives the Amazon Secret Access Key for which to list
- *        owned buckets
- * @param bucketName is the bucket name to test
- * @param locationConstraintReturnSize gives the number of bytes in the
- *        locationConstraintReturn parameter
- * @param locationConstraintReturn provides the location into which to write
- *        the name of the location constraint naming the geographic location
- *        of the S3 bucket.  This must have at least as many characters in it
- *        as specified by locationConstraintReturnSize, and should start out
- *        NULL-terminated.  On successful completion of this request, this
- *        will be set to the name of the geographic location of the S3 bucket,
- *        or will be left as a zero-length string if no location was
- *        available.
- * @param requestContext if non-NULL, gives the S3RequestContext to add this
- *        request to, and does not perform the request immediately.  If NULL,
- *        performs the request immediately and synchronously.
- * @param handler gives the callbacks to call as the request is processed and
- *        completed
- * @param callbackData will be passed in as the callbackData parameter to
- *        all callbacks for this request
- **/
-void S3_test_bucket(S3Protocol protocol, S3UriStyle uriStyle,
-                    const char *accessKeyId, const char *secretAccessKey,
-                    const char *bucketName, int locationConstraintReturnSize,
-                    char *locationConstraintReturn,
-                    S3RequestContext *requestContext,
-                    const S3ResponseHandler *handler, void *callbackData);
-
-
-/**
- * Creates a new bucket.
- *
- * @param protocol gives the protocol to use for this request
- * @param accessKeyId gives the Amazon Access Key ID for which to list owned
- *        buckets
- * @param secretAccessKey gives the Amazon Secret Access Key for which to list
- *        owned buckets
- * @param bucketName is the name of the bucket to be created
- * @param cannedAcl gives the "REST canned ACL" to use for the created bucket
- * @param locationConstraint if non-NULL, gives the geographic location for
- *        the bucket to create.
- * @param requestContext if non-NULL, gives the S3RequestContext to add this
- *        request to, and does not perform the request immediately.  If NULL,
- *        performs the request immediately and synchronously.
- * @param handler gives the callbacks to call as the request is processed and
- *        completed
- * @param callbackData will be passed in as the callbackData parameter to
- *        all callbacks for this request
- **/
-void S3_create_bucket(S3Protocol protocol, const char *accessKeyId,
-                      const char *secretAccessKey, const char *bucketName,
-                      S3CannedAcl cannedAcl, const char *locationConstraint,
-                      S3RequestContext *requestContext,
-                      const S3ResponseHandler *handler, void *callbackData);
-
-
-/**
- * Deletes a bucket.  The bucket must be empty, or the status
- * S3StatusErrorBucketNotEmpty will result.
- *
- * @param protocol gives the protocol to use for this request
- * @param uriStyle gives the URI style to use for this request
- * @param accessKeyId gives the Amazon Access Key ID for which to list owned
- *        buckets
- * @param secretAccessKey gives the Amazon Secret Access Key for which to list
- *        owned buckets
- * @param bucketName is the name of the bucket to be deleted
- * @param requestContext if non-NULL, gives the S3RequestContext to add this
- *        request to, and does not perform the request immediately.  If NULL,
- *        performs the request immediately and synchronously.
- * @param handler gives the callbacks to call as the request is processed and
- *        completed
- * @param callbackData will be passed in as the callbackData parameter to
- *        all callbacks for this request
- **/
-void S3_delete_bucket(S3Protocol protocol, S3UriStyle uriStyle,
-                      const char *accessKeyId, const char *secretAccessKey,
-                      const char *bucketName, S3RequestContext *requestContext,
-                      const S3ResponseHandler *handler, void *callbackData);
-
-
-/**
- * Lists keys within a bucket.
- *
- * @param bucketContext gives the bucket and associated parameters for this
- *        request
- * @param prefix if present, gives a prefix for matching keys
- * @param marker if present, only keys occurring after this value will be
- *        listed
- * @param delimiter if present, causes keys that contain the same string
- *        between the prefix and the first occurrence of the delimiter to be
- *        rolled up into a single result element
- * @param maxkeys is the maximum number of keys to return
- * @param requestContext if non-NULL, gives the S3RequestContext to add this
- *        request to, and does not perform the request immediately.
- *        If NULL, performs the request immediately and synchronously.
- * @param handler gives the callbacks to call as the request is processed and
- *        completed
- * @param callbackData will be passed in as the callbackData parameter to
- *        all callbacks for this request
- **/
-void S3_list_bucket(const S3BucketContext *bucketContext,
-                    const char *prefix, const char *marker,
-                    const char *delimiter, int maxkeys,
-                    S3RequestContext *requestContext,
-                    const S3ListBucketHandler *handler, void *callbackData);
-
-
-/** **************************************************************************
- * Object Functions
- ************************************************************************** **/
-
-/**
- * Puts object data to S3.  This overwrites any existing object at that key;
- * note that S3 currently only supports full-object upload.  The data to
- * upload will be acquired by calling the handler's putObjectDataCallback.
- *
- * @param bucketContext gives the bucket and associated parameters for this
- *        request
- * @param key is the key of the object to put to
- * @param contentLength is required and gives the total number of bytes that
- *        will be put
- * @param putProperties optionally provides additional properties to apply to
- *        the object that is being put to
- * @param requestContext if non-NULL, gives the S3RequestContext to add this
- *        request to, and does not perform the request immediately.  If NULL,
- *        performs the request immediately and synchronously.
- * @param handler gives the callbacks to call as the request is processed and
- *        completed
- * @param callbackData will be passed in as the callbackData parameter to
- *        all callbacks for this request
- **/
-void S3_put_object(const S3BucketContext *bucketContext, const char *key,
-                   uint64_t contentLength,
-                   const S3PutProperties *putProperties,
-                   S3RequestContext *requestContext,
-                   const S3PutObjectHandler *handler, void *callbackData);
-
-
-/**
- * Copies an object from one location to another.  The object may be copied
- * back to itself, which is useful for replacing metadata without changing
- * the object.
- *
- * @param bucketContext gives the source bucket and associated parameters for
- *        this request
- * @param key is the source key
- * @param destinationBucket gives the destination bucket into which to copy
- *        the object.  If NULL, the source bucket will be used.
- * @param destinationKey gives the destination key into which to copy the
- *        object.  If NULL, the source key will be used.
- * @param putProperties optionally provides properties to apply to the object
- *        that is being put to.  If not supplied (i.e. NULL is passed in),
- *        then the copied object will retain the metadata of the source
- *        object.
- * @param lastModifiedReturn returns the last modified date of the copied
- *        object
- * @param eTagReturnSize specifies the number of bytes provided in the
- *        eTagReturn buffer
- * @param eTagReturn is a buffer into which the resulting eTag of the copied
- *        object will be written
- * @param requestContext if non-NULL, gives the S3RequestContext to add this
- *        request to, and does not perform the request immediately.  If NULL,
- *        performs the request immediately and synchronously.
- * @param handler gives the callbacks to call as the request is processed and - * completed - * @param callbackData will be passed in as the callbackData parameter to - * all callbacks for this request - **/ -void S3_copy_object(const S3BucketContext *bucketContext, - const char *key, const char *destinationBucket, - const char *destinationKey, - const S3PutProperties *putProperties, - int64_t *lastModifiedReturn, int eTagReturnSize, - char *eTagReturn, S3RequestContext *requestContext, - const S3ResponseHandler *handler, void *callbackData); - - -/** - * Gets an object from S3. The contents of the object are returned in the - * handler's getObjectDataCallback. - * - * @param bucketContext gives the bucket and associated parameters for this - * request - * @param key is the key of the object to get - * @param getConditions if non-NULL, gives a set of conditions which must be - * met in order for the request to succeed - * @param startByte gives the start byte for the byte range of the contents - * to be returned - * @param byteCount gives the number of bytes to return; a value of 0 - * indicates that the contents up to the end should be returned - * @param requestContext if non-NULL, gives the S3RequestContext to add this - * request to, and does not perform the request immediately. If NULL, - * performs the request immediately and synchronously. - * @param handler gives the callbacks to call as the request is processed and - * completed - * @param callbackData will be passed in as the callbackData parameter to - * all callbacks for this request - **/ -void S3_get_object(const S3BucketContext *bucketContext, const char *key, - const S3GetConditions *getConditions, - uint64_t startByte, uint64_t byteCount, - S3RequestContext *requestContext, - const S3GetObjectHandler *handler, void *callbackData); - - -/** - * Gets the response properties for the object, but not the object contents. - * - * @param bucketContext gives the bucket and associated parameters for this - * request - * @param key is the key of the object to get the properties of - * @param requestContext if non-NULL, gives the S3RequestContext to add this - * request to, and does not perform the request immediately. If NULL, - * performs the request immediately and synchronously. - * @param handler gives the callbacks to call as the request is processed and - * completed - * @param callbackData will be passed in as the callbackData parameter to - * all callbacks for this request - **/ -void S3_head_object(const S3BucketContext *bucketContext, const char *key, - S3RequestContext *requestContext, - const S3ResponseHandler *handler, void *callbackData); - -/** - * Deletes an object from S3. - * - * @param bucketContext gives the bucket and associated parameters for this - * request - * @param key is the key of the object to delete - * @param requestContext if non-NULL, gives the S3RequestContext to add this - * request to, and does not perform the request immediately. If NULL, - * performs the request immediately and synchronously. 
- * @param handler gives the callbacks to call as the request is processed and
- *        completed
- * @param callbackData will be passed in as the callbackData parameter to
- *        all callbacks for this request
- **/
-void S3_delete_object(const S3BucketContext *bucketContext, const char *key,
-                      S3RequestContext *requestContext,
-                      const S3ResponseHandler *handler, void *callbackData);
-
-
-/** **************************************************************************
- * Access Control List Functions
- ************************************************************************** **/
-
-/**
- * Gets the ACL for the given bucket or object.
- *
- * @param bucketContext gives the bucket and associated parameters for this
- *        request
- * @param key is the key of the object to get the ACL of; or NULL to get the
- *        ACL of the bucket
- * @param ownerId must be supplied as a buffer of at least
- *        S3_MAX_GRANTEE_USER_ID_SIZE bytes, and will be filled in with the
- *        owner ID of the object/bucket
- * @param ownerDisplayName must be supplied as a buffer of at least
- *        S3_MAX_GRANTEE_DISPLAY_NAME_SIZE bytes, and will be filled in with
- *        the display name of the object/bucket
- * @param aclGrantCountReturn returns the number of S3AclGrant structures
- *        returned in the aclGrants parameter
- * @param aclGrants must be passed in as an array of at least
- *        S3_MAX_ACL_GRANT_COUNT S3AclGrant structures, which will be filled
- *        in with the grant information for the ACL
- * @param requestContext if non-NULL, gives the S3RequestContext to add this
- *        request to, and does not perform the request immediately. If NULL,
- *        performs the request immediately and synchronously.
- * @param handler gives the callbacks to call as the request is processed and
- *        completed
- * @param callbackData will be passed in as the callbackData parameter to
- *        all callbacks for this request
- **/
-void S3_get_acl(const S3BucketContext *bucketContext, const char *key,
-                char *ownerId, char *ownerDisplayName,
-                int *aclGrantCountReturn, S3AclGrant *aclGrants,
-                S3RequestContext *requestContext,
-                const S3ResponseHandler *handler, void *callbackData);
-
-
-/**
- * Sets the ACL for the given bucket or object.
- *
- * @param bucketContext gives the bucket and associated parameters for this
- *        request
- * @param key is the key of the object to set the ACL for; or NULL to set the
- *        ACL for the bucket
- * @param ownerId is the owner ID of the object/bucket. Unfortunately, S3
- *        requires this to be valid and thus it must have been fetched by a
- *        previous S3 request, such as a list_buckets request.
- * @param ownerDisplayName is the owner display name of the object/bucket.
- *        Unfortunately, S3 requires this to be valid and thus it must have
- *        been fetched by a previous S3 request, such as a list_buckets
- *        request.
- * @param aclGrantCount is the number of ACL grants to set for the
- *        object/bucket
- * @param aclGrants are the ACL grants to set for the object/bucket
- * @param requestContext if non-NULL, gives the S3RequestContext to add this
- *        request to, and does not perform the request immediately. If NULL,
- *        performs the request immediately and synchronously.
- * @param handler gives the callbacks to call as the request is processed and
- *        completed
- * @param callbackData will be passed in as the callbackData parameter to
- *        all callbacks for this request
- **/
-void S3_set_acl(const S3BucketContext *bucketContext, const char *key,
-                const char *ownerId, const char *ownerDisplayName,
-                int aclGrantCount, const S3AclGrant *aclGrants,
-                S3RequestContext *requestContext,
-                const S3ResponseHandler *handler, void *callbackData);
-
-
-/** **************************************************************************
- * Server Access Log Functions
- ************************************************************************** **/
-
-/**
- * Gets the service access logging settings for a bucket. The service access
- * logging settings specify whether or not the S3 service will write service
- * access logs for requests made for the given bucket, and if so, several
- * settings controlling how these logs will be written.
- *
- * @param bucketContext gives the bucket and associated parameters for this
- *        request; this is the bucket for which service access logging is
- *        being requested
- * @param targetBucketReturn must be passed in as a buffer of at least
- *        (S3_MAX_BUCKET_NAME_SIZE + 1) bytes in length, and will be filled
- *        in with the target bucket name for access logging for the given
- *        bucket, which is the bucket into which access logs for the specified
- *        bucket will be written. This is returned as an empty string if
- *        service access logging is not enabled for the given bucket.
- * @param targetPrefixReturn must be passed in as a buffer of at least
- *        (S3_MAX_KEY_SIZE + 1) bytes in length, and will be filled in
- *        with the key prefix for server access logs for the given bucket,
- *        or the empty string if no such prefix is specified.
- * @param aclGrantCountReturn returns the number of ACL grants that are
- *        associated with the server access logging for the given bucket.
- * @param aclGrants must be passed in as an array of at least
- *        S3_MAX_ACL_GRANT_COUNT S3AclGrant structures, and these will be
- *        filled in with the target grants associated with the server access
- *        logging for the given bucket, whose number is returned in the
- *        aclGrantCountReturn parameter. These grants will be applied to the
- *        ACL of any server access logging log files generated by the S3
- *        service for the given bucket.
- * @param requestContext if non-NULL, gives the S3RequestContext to add this
- *        request to, and does not perform the request immediately. If NULL,
- *        performs the request immediately and synchronously.
- * @param handler gives the callbacks to call as the request is processed and
- *        completed
- * @param callbackData will be passed in as the callbackData parameter to
- *        all callbacks for this request
- **/
-void S3_get_server_access_logging(const S3BucketContext *bucketContext,
-                                  char *targetBucketReturn,
-                                  char *targetPrefixReturn,
-                                  int *aclGrantCountReturn,
-                                  S3AclGrant *aclGrants,
-                                  S3RequestContext *requestContext,
-                                  const S3ResponseHandler *handler,
-                                  void *callbackData);
-
-
-/**
- * Sets the service access logging settings for a bucket. The service access
- * logging settings specify whether or not the S3 service will write service
- * access logs for requests made for the given bucket, and if so, several
- * settings controlling how these logs will be written.
- *
- * @param bucketContext gives the bucket and associated parameters for this
- *        request; this is the bucket for which service access logging is
- *        being set
- * @param targetBucket gives the target bucket name for access logging for the
- *        given bucket, which is the bucket into which access logs for the
- *        specified bucket will be written.
- * @param targetPrefix is an optional parameter which specifies the key prefix
- *        for server access logs for the given bucket, or NULL if no such
- *        prefix is to be used.
- * @param aclGrantCount specifies the number of ACL grants that are to be
- *        associated with the server access logging for the given bucket.
- * @param aclGrants is an array of S3AclGrant structures, whose number is
- *        given by the aclGrantCount parameter. These grants will be applied
- *        to the ACL of any server access logging log files generated by the
- *        S3 service for the given bucket.
- * @param requestContext if non-NULL, gives the S3RequestContext to add this
- *        request to, and does not perform the request immediately. If NULL,
- *        performs the request immediately and synchronously.
- * @param handler gives the callbacks to call as the request is processed and
- *        completed
- * @param callbackData will be passed in as the callbackData parameter to
- *        all callbacks for this request
- **/
-void S3_set_server_access_logging(const S3BucketContext *bucketContext,
-                                  const char *targetBucket,
-                                  const char *targetPrefix, int aclGrantCount,
-                                  const S3AclGrant *aclGrants,
-                                  S3RequestContext *requestContext,
-                                  const S3ResponseHandler *handler,
-                                  void *callbackData);
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* LIBS3_H */
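A usage sketch for the object functions above, not part of the original header: fetching a whole object synchronously by passing a NULL S3RequestContext. The helper names, the GString accumulator, and the key argument are mine, not libs3's; error handling and library initialization are omitted, and the handler is filled in field by field the same way s3store.cc below does.

    #include <glib.h>
    #include "libs3.h"

    /* Collect each chunk of returned object data into a GString. */
    static S3Status append_chunk(int bufferSize, const char *buffer,
                                 void *callbackData)
    {
        g_string_append_len((GString *)callbackData, buffer, bufferSize);
        return S3StatusOK;
    }

    static S3Status ignore_properties(const S3ResponseProperties *properties,
                                      void *callbackData)
    {
        return S3StatusOK;
    }

    static void request_complete(S3Status status,
                                 const S3ErrorDetails *errorDetails,
                                 void *callbackData)
    {
        if (status != S3StatusOK)
            g_print("get failed: %s\n", S3_get_status_name(status));
    }

    /* Read all of an object; per the documentation above, startByte = 0 and
     * byteCount = 0 request the contents up to the end. */
    static GString *fetch_object(const S3BucketContext *bucket, const char *key)
    {
        GString *out = g_string_new(NULL);

        S3GetObjectHandler handler;
        handler.responseHandler.propertiesCallback = ignore_properties;
        handler.responseHandler.completeCallback = request_complete;
        handler.getObjectDataCallback = append_chunk;

        S3_get_object(bucket, key, NULL, 0, 0, NULL, &handler, out);
        return out;
    }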
diff --git a/main.c b/main.c
deleted file mode 100644
index c02bc22..0000000
--- a/main.c
+++ /dev/null
@@ -1,59 +0,0 @@
-/* Blue Sky: File Systems in the Cloud
- *
- * Copyright (C) 2009 The Regents of the University of California
- * Written by Michael Vrable
- *
- * TODO: Licensing
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <glib.h>
-
-#include "bluesky.h"
-
-/* Small test program for BlueSkyFS.  Doesn't do much useful. */
-
-int main(int argc, char *argv[])
-{
-    g_thread_init(NULL);
-
-    printf("BlueSkyFS starting...\n");
-
-    printf("  time = %lld\n", bluesky_get_current_time());
-
-    BlueSkyFS *fs = bluesky_new_fs("export");
-
-    BlueSkyInode *root;
-    root = bluesky_new_inode(BLUESKY_ROOT_INUM, fs, BLUESKY_DIRECTORY);
-    root->nlink = 1;
-    root->mode = 0755;
-    bluesky_insert_inode(fs, root);
-
-    BlueSkyInode *file;
-    file = bluesky_new_inode(bluesky_fs_alloc_inode(fs), fs, BLUESKY_REGULAR);
-    file->nlink = 1;
-    file->mode = 0755;
-    bluesky_insert_inode(fs, file);
-    bluesky_directory_insert(root, "foo", file->inum);
-
-    file = bluesky_new_inode(bluesky_fs_alloc_inode(fs), fs, BLUESKY_REGULAR);
-    file->nlink = 1;
-    file->mode = 0755;
-    bluesky_insert_inode(fs, file);
-    bluesky_directory_insert(root, "bar", file->inum);
-
-    file = bluesky_new_inode(bluesky_fs_alloc_inode(fs), fs, BLUESKY_REGULAR);
-    file->nlink = 1;
-    file->mode = 0755;
-    bluesky_insert_inode(fs, file);
-    bluesky_directory_insert(root, "baz", file->inum);
-
-    bluesky_directory_dump(root);
-    bluesky_directory_lookup(root, "foo");
-    bluesky_directory_lookup(root, "bar");
-    bluesky_directory_lookup(root, "baz");
-
-    return 0;
-}
diff --git a/nfs3/CMakeLists.txt b/nfs3/CMakeLists.txt
new file mode 100644
index 0000000..86edeb2
--- /dev/null
+++ b/nfs3/CMakeLists.txt
@@ -0,0 +1,7 @@
+link_directories(/home/mvrable/scratch/libs3-1.4/build/lib)
+
+add_executable(nfsproxy
+    nfsd.c rpc.c mount.c nfs3.c mount_prot_xdr.c nfs3_prot_xdr.c)
+
+include_directories(${GLIB_INCLUDE_DIRS} "../bluesky")
+target_link_libraries(nfsproxy bluesky ${GLIB_LIBRARIES})
diff --git a/nfs3/Makefile b/nfs3/Makefile
index 9905682..633793d 100644
--- a/nfs3/Makefile
+++ b/nfs3/Makefile
@@ -1,23 +1,296 @@
-LIBS3_PATH=$(HOME)/scratch/libs3-1.4/build
-PACKAGES=glib-2.0 gthread-2.0
-DEBUG=-g
-CFLAGS=-Wall -D_FILE_OFFSET_BITS=64 $(DEBUG) -I.. \
-	$(shell pkg-config --cflags $(PACKAGES))
-LDFLAGS=$(DEBUG) $(shell pkg-config --libs $(PACKAGES))
+# CMAKE generated file: DO NOT EDIT!
+# Generated by "Unix Makefiles" Generator, CMake Version 2.6
 
-SRCS=nfsd.c rpc.c mount.c nfs3.c mount_prot_xdr.c nfs3_prot_xdr.c
-OBJS=$(SRCS:.c=.o)
+# Default target executed when no arguments are given to make.
+default_target: all
+.PHONY : default_target
 
-nfsproxy : $(OBJS)
-	$(CXX) $(LDFLAGS) -o $@ $^ ../bluesky.a -Wl,-rpath=$(LIBS3_PATH)/lib -L$(LIBS3_PATH)/lib -ls3
+#=============================================================================
+# Special targets provided by cmake.
 
-clean :
-	rm -f $(OBJS) nfsproxy
+# Disable implicit rules so canonical targets will work.
+.SUFFIXES:
 
-dep :
-	touch Makefile.dep
-	makedepend -fMakefile.dep $(SRCS)
+# Remove some rules from gmake that .SUFFIXES does not remove.
+SUFFIXES =
 
-.PHONY : clean dep
+.SUFFIXES: .hpux_make_needs_suffix_list
+
+# Suppress display of executed commands.
+$(VERBOSE).SILENT:
+
+# A target that is always out of date.
+cmake_force:
+.PHONY : cmake_force
+
+#=============================================================================
+# Set environment variables for the build.
+
+# The shell in which to execute make rules.
+SHELL = /bin/sh
+
+# The CMake executable.
+CMAKE_COMMAND = /usr/bin/cmake
+
+# The command to remove a file.
+RM = /usr/bin/cmake -E remove -f
+
+# The top-level source directory on which CMake was run.
+CMAKE_SOURCE_DIR = /home/mvrable/local/bluesky.git
+
+# The top-level build directory on which CMake was run.
+CMAKE_BINARY_DIR = /home/mvrable/local/bluesky.git
+
+#=============================================================================
+# Targets provided globally by CMake.
+
+# Special rule for the target edit_cache
+edit_cache:
+	@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan "Running interactive CMake command-line interface..."
+	/usr/bin/cmake -i .
+.PHONY : edit_cache
+
+# Special rule for the target edit_cache
+edit_cache/fast: edit_cache
+.PHONY : edit_cache/fast
+
+# Special rule for the target rebuild_cache
+rebuild_cache:
+	@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan "Running CMake to regenerate build system..."
+	/usr/bin/cmake -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR)
+.PHONY : rebuild_cache
+
+# Special rule for the target rebuild_cache
+rebuild_cache/fast: rebuild_cache
+.PHONY : rebuild_cache/fast
+
+# The main all target
+all: cmake_check_build_system
+	cd /home/mvrable/local/bluesky.git && $(CMAKE_COMMAND) -E cmake_progress_start /home/mvrable/local/bluesky.git/CMakeFiles /home/mvrable/local/bluesky.git/nfs3/CMakeFiles/progress.make
+	cd /home/mvrable/local/bluesky.git && $(MAKE) -f CMakeFiles/Makefile2 nfs3/all
+	$(CMAKE_COMMAND) -E cmake_progress_start /home/mvrable/local/bluesky.git/CMakeFiles 0
+.PHONY : all
+
+# The main clean target
+clean:
+	cd /home/mvrable/local/bluesky.git && $(MAKE) -f CMakeFiles/Makefile2 nfs3/clean
+.PHONY : clean
+
+# The main clean target
+clean/fast: clean
+.PHONY : clean/fast
+
+# Prepare targets for installation.
+preinstall: all
+	cd /home/mvrable/local/bluesky.git && $(MAKE) -f CMakeFiles/Makefile2 nfs3/preinstall
+.PHONY : preinstall
+
+# Prepare targets for installation.
+preinstall/fast:
+	cd /home/mvrable/local/bluesky.git && $(MAKE) -f CMakeFiles/Makefile2 nfs3/preinstall
+.PHONY : preinstall/fast
+
+# clear depends
+depend:
+	cd /home/mvrable/local/bluesky.git && $(CMAKE_COMMAND) -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 1
+.PHONY : depend
+
+# Convenience name for target.
+nfs3/CMakeFiles/nfsproxy.dir/rule:
+	cd /home/mvrable/local/bluesky.git && $(MAKE) -f CMakeFiles/Makefile2 nfs3/CMakeFiles/nfsproxy.dir/rule
+.PHONY : nfs3/CMakeFiles/nfsproxy.dir/rule
+
+# Convenience name for target.
+nfsproxy: nfs3/CMakeFiles/nfsproxy.dir/rule
+.PHONY : nfsproxy
+
+# fast build rule for target.
+nfsproxy/fast:
+	cd /home/mvrable/local/bluesky.git && $(MAKE) -f nfs3/CMakeFiles/nfsproxy.dir/build.make nfs3/CMakeFiles/nfsproxy.dir/build
+.PHONY : nfsproxy/fast
+
+mount.o: mount.c.o
+.PHONY : mount.o
+
+# target to build an object file
+mount.c.o:
+	cd /home/mvrable/local/bluesky.git && $(MAKE) -f nfs3/CMakeFiles/nfsproxy.dir/build.make nfs3/CMakeFiles/nfsproxy.dir/mount.c.o
+.PHONY : mount.c.o
+
+mount.i: mount.c.i
+.PHONY : mount.i
+
+# target to preprocess a source file
+mount.c.i:
+	cd /home/mvrable/local/bluesky.git && $(MAKE) -f nfs3/CMakeFiles/nfsproxy.dir/build.make nfs3/CMakeFiles/nfsproxy.dir/mount.c.i
+.PHONY : mount.c.i
+
+mount.s: mount.c.s
+.PHONY : mount.s
+
+# target to generate assembly for a file
+mount.c.s:
+	cd /home/mvrable/local/bluesky.git && $(MAKE) -f nfs3/CMakeFiles/nfsproxy.dir/build.make nfs3/CMakeFiles/nfsproxy.dir/mount.c.s
+.PHONY : mount.c.s
+
+mount_prot_xdr.o: mount_prot_xdr.c.o
+.PHONY : mount_prot_xdr.o
+
+# target to build an object file
+mount_prot_xdr.c.o:
+	cd /home/mvrable/local/bluesky.git && $(MAKE) -f nfs3/CMakeFiles/nfsproxy.dir/build.make nfs3/CMakeFiles/nfsproxy.dir/mount_prot_xdr.c.o
+.PHONY : mount_prot_xdr.c.o
+
+mount_prot_xdr.i: mount_prot_xdr.c.i
+.PHONY : mount_prot_xdr.i
+
+# target to preprocess a source file
+mount_prot_xdr.c.i:
+	cd /home/mvrable/local/bluesky.git && $(MAKE) -f nfs3/CMakeFiles/nfsproxy.dir/build.make nfs3/CMakeFiles/nfsproxy.dir/mount_prot_xdr.c.i
+.PHONY : mount_prot_xdr.c.i
+
+mount_prot_xdr.s: mount_prot_xdr.c.s
+.PHONY : mount_prot_xdr.s
+
+# target to generate assembly for a file
+mount_prot_xdr.c.s:
+	cd /home/mvrable/local/bluesky.git && $(MAKE) -f nfs3/CMakeFiles/nfsproxy.dir/build.make nfs3/CMakeFiles/nfsproxy.dir/mount_prot_xdr.c.s
+.PHONY : mount_prot_xdr.c.s
+
+nfs3.o: nfs3.c.o
+.PHONY : nfs3.o
+
+# target to build an object file
+nfs3.c.o:
+	cd /home/mvrable/local/bluesky.git && $(MAKE) -f nfs3/CMakeFiles/nfsproxy.dir/build.make nfs3/CMakeFiles/nfsproxy.dir/nfs3.c.o
+.PHONY : nfs3.c.o
+
+nfs3.i: nfs3.c.i
+.PHONY : nfs3.i
+
+# target to preprocess a source file
+nfs3.c.i:
+	cd /home/mvrable/local/bluesky.git && $(MAKE) -f nfs3/CMakeFiles/nfsproxy.dir/build.make nfs3/CMakeFiles/nfsproxy.dir/nfs3.c.i
+.PHONY : nfs3.c.i
+
+nfs3.s: nfs3.c.s
+.PHONY : nfs3.s
+
+# target to generate assembly for a file
+nfs3.c.s:
+	cd /home/mvrable/local/bluesky.git && $(MAKE) -f nfs3/CMakeFiles/nfsproxy.dir/build.make nfs3/CMakeFiles/nfsproxy.dir/nfs3.c.s
+.PHONY : nfs3.c.s
+
+nfs3_prot_xdr.o: nfs3_prot_xdr.c.o
+.PHONY : nfs3_prot_xdr.o
+
+# target to build an object file
+nfs3_prot_xdr.c.o:
+	cd /home/mvrable/local/bluesky.git && $(MAKE) -f nfs3/CMakeFiles/nfsproxy.dir/build.make nfs3/CMakeFiles/nfsproxy.dir/nfs3_prot_xdr.c.o
+.PHONY : nfs3_prot_xdr.c.o
+
+nfs3_prot_xdr.i: nfs3_prot_xdr.c.i
+.PHONY : nfs3_prot_xdr.i
+
+# target to preprocess a source file
+nfs3_prot_xdr.c.i:
+	cd /home/mvrable/local/bluesky.git && $(MAKE) -f nfs3/CMakeFiles/nfsproxy.dir/build.make nfs3/CMakeFiles/nfsproxy.dir/nfs3_prot_xdr.c.i
+.PHONY : nfs3_prot_xdr.c.i
+
+nfs3_prot_xdr.s: nfs3_prot_xdr.c.s
+.PHONY : nfs3_prot_xdr.s
+
+# target to generate assembly for a file
+nfs3_prot_xdr.c.s:
+	cd /home/mvrable/local/bluesky.git && $(MAKE) -f nfs3/CMakeFiles/nfsproxy.dir/build.make nfs3/CMakeFiles/nfsproxy.dir/nfs3_prot_xdr.c.s
+.PHONY : nfs3_prot_xdr.c.s
+
+nfsd.o: nfsd.c.o
+.PHONY : nfsd.o
+
+# target to build an object file
+nfsd.c.o:
+	cd /home/mvrable/local/bluesky.git && $(MAKE) -f nfs3/CMakeFiles/nfsproxy.dir/build.make nfs3/CMakeFiles/nfsproxy.dir/nfsd.c.o
+.PHONY : nfsd.c.o
+
+nfsd.i: nfsd.c.i
+.PHONY : nfsd.i
+
+# target to preprocess a source file
+nfsd.c.i:
+	cd /home/mvrable/local/bluesky.git && $(MAKE) -f nfs3/CMakeFiles/nfsproxy.dir/build.make nfs3/CMakeFiles/nfsproxy.dir/nfsd.c.i
+.PHONY : nfsd.c.i
+
+nfsd.s: nfsd.c.s
+.PHONY : nfsd.s
+
+# target to generate assembly for a file
+nfsd.c.s:
+	cd /home/mvrable/local/bluesky.git && $(MAKE) -f nfs3/CMakeFiles/nfsproxy.dir/build.make nfs3/CMakeFiles/nfsproxy.dir/nfsd.c.s
+.PHONY : nfsd.c.s
+
+rpc.o: rpc.c.o
+.PHONY : rpc.o
+
+# target to build an object file
+rpc.c.o:
+	cd /home/mvrable/local/bluesky.git && $(MAKE) -f nfs3/CMakeFiles/nfsproxy.dir/build.make nfs3/CMakeFiles/nfsproxy.dir/rpc.c.o
+.PHONY : rpc.c.o
+
+rpc.i: rpc.c.i
+.PHONY : rpc.i
+
+# target to preprocess a source file
+rpc.c.i:
+	cd /home/mvrable/local/bluesky.git && $(MAKE) -f nfs3/CMakeFiles/nfsproxy.dir/build.make nfs3/CMakeFiles/nfsproxy.dir/rpc.c.i
+.PHONY : rpc.c.i
+
+rpc.s: rpc.c.s
+.PHONY : rpc.s
+
+# target to generate assembly for a file
+rpc.c.s:
+	cd /home/mvrable/local/bluesky.git && $(MAKE) -f nfs3/CMakeFiles/nfsproxy.dir/build.make nfs3/CMakeFiles/nfsproxy.dir/rpc.c.s
+.PHONY : rpc.c.s
+
+# Help Target
+help:
+	@echo "The following are some of the valid targets for this Makefile:"
+	@echo "... all (the default if no target is provided)"
+	@echo "... clean"
+	@echo "... depend"
+	@echo "... edit_cache"
+	@echo "... nfsproxy"
+	@echo "... rebuild_cache"
+	@echo "... mount.o"
+	@echo "... mount.i"
+	@echo "... mount.s"
+	@echo "... mount_prot_xdr.o"
+	@echo "... mount_prot_xdr.i"
+	@echo "... mount_prot_xdr.s"
+	@echo "... nfs3.o"
+	@echo "... nfs3.i"
+	@echo "... nfs3.s"
+	@echo "... nfs3_prot_xdr.o"
+	@echo "... nfs3_prot_xdr.i"
+	@echo "... nfs3_prot_xdr.s"
+	@echo "... nfsd.o"
+	@echo "... nfsd.i"
+	@echo "... nfsd.s"
+	@echo "... rpc.o"
+	@echo "... rpc.i"
+	@echo "... rpc.s"
.PHONY : help
+
+
+
+#=============================================================================
+# Special targets to cleanup operation of make.
+
+# Special rule to run CMake to check the build system integrity.
+# No rule that depends on this can have commands that come from listfiles
+# because they might be regenerated.
+cmake_check_build_system:
+	cd /home/mvrable/local/bluesky.git && $(CMAKE_COMMAND) -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 0
+.PHONY : cmake_check_build_system
--include *.dep
diff --git a/s3store.cc b/s3store.cc
deleted file mode 100644
index ac748d1..0000000
--- a/s3store.cc
+++ /dev/null
@@ -1,119 +0,0 @@
-/* Blue Sky: File Systems in the Cloud
- *
- * Copyright (C) 2009 The Regents of the University of California
- * Written by Michael Vrable
- *
- * TODO: Licensing
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <glib.h>
-
-#include "bluesky.h"
-#include "libs3.h"
-
-/* Interface to Amazon S3 storage. */
-
-/* State for an S3 store: the bucket context used for all requests. */
-struct S3Store {
-    S3BucketContext bucket;
-};
-
-S3Store *s3store_new()
-{
-    S3Store *store = g_new(S3Store, 1);
-    store->bucket.bucketName = "mvrable-bluesky";
-    store->bucket.protocol = S3ProtocolHTTP;
-    store->bucket.uriStyle = S3UriStylePath;
-    store->bucket.accessKeyId = getenv("AWS_ACCESS_KEY_ID");
-    store->bucket.secretAccessKey = getenv("AWS_SECRET_ACCESS_KEY");
-
-    g_print("Initializing S3 with bucket %s, access key %s\n",
-            store->bucket.bucketName, store->bucket.accessKeyId);
-
-    return store;
-}
-
-struct get_info {
-    gchar *buf;
-    gint offset;
-};
-
-static S3Status s3store_get_handler(int bufferSize, const char *buffer,
-                                    void *callbackData)
-{
-    struct get_info *info = (struct get_info *)callbackData;
-    gint bytes = MIN(bufferSize, (int)(BLUESKY_BLOCK_SIZE - info->offset));
-    memcpy(info->buf + info->offset, buffer, bytes);
-    info->offset += bytes;
-    return S3StatusOK;
-}
-
-struct put_info {
-    BlueSkyRCStr *val;
-    gint offset;
-};
-
-static int s3store_put_handler(int bufferSize, char *buffer,
-                               void *callbackData)
-{
-    struct put_info *info = (struct put_info *)callbackData;
-    gint bytes = MIN(bufferSize, (int)(info->val->len - info->offset));
-    memcpy(buffer, (char *)info->val->data + info->offset, bytes);
-    info->offset += bytes;
-    return bytes;
-}
-
-static S3Status s3store_properties_callback(const S3ResponseProperties *properties,
-                                            void *callbackData)
-{
-    g_print("(Properties callback)\n");
-    return S3StatusOK;
-}
-
-void s3store_response_callback(S3Status status,
-                               const S3ErrorDetails *errorDetails,
-                               void *callbackData)
-{
-    g_print("S3 operation complete, status=%s\n",
-            S3_get_status_name(status));
-    if (errorDetails != NULL) {
-        g_print("  Error message: %s\n", errorDetails->message);
-    }
-}
-
-BlueSkyRCStr *s3store_get(S3Store *store, const gchar *key)
-{
-    struct get_info info;
-    info.buf = (char *)g_malloc0(BLUESKY_BLOCK_SIZE);
-    info.offset = 0;
-
-    struct S3GetObjectHandler handler;
-    handler.responseHandler.propertiesCallback = s3store_properties_callback;
-    handler.responseHandler.completeCallback = s3store_response_callback;
-    handler.getObjectDataCallback = s3store_get_handler;
-
-    g_print("Starting fetch of %s from S3...\n", key);
-    S3_get_object(&store->bucket, key, NULL, 0, 0, NULL,
-                  &handler, &info);
-
-    return bluesky_string_new(info.buf, BLUESKY_BLOCK_SIZE);
-}
-
-void s3store_put(S3Store *store, const gchar *key, BlueSkyRCStr *val)
-{
-    struct put_info info;
-    info.val = val;
-    info.offset = 0;
-
-    struct S3PutObjectHandler handler;
-    handler.responseHandler.propertiesCallback = s3store_properties_callback;
-    handler.responseHandler.completeCallback = s3store_response_callback;
-    handler.putObjectDataCallback = s3store_put_handler;
-
-    g_print("Starting store of %s to S3...\n", key);
-    S3_put_object(&store->bucket, key, val->len, NULL, NULL,
-                  &handler, &info);
-}
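For reference, a sketch of how this store would be driven, using only the functions defined above. The key name and payload are placeholders, and AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY must be set in the environment, since s3store_new reads them.

    static void s3store_smoke_test(void)
    {
        S3Store *store = s3store_new();

        /* bluesky_string_new takes ownership of the g_strdup'd buffer. */
        BlueSkyRCStr *data = bluesky_string_new(g_strdup("hello world"), 11);
        s3store_put(store, "test-object", data);
        bluesky_string_unref(data);

        /* Note that s3store_get always returns a BLUESKY_BLOCK_SIZE buffer,
         * regardless of the object's true length; callers must track the
         * stored size themselves. */
        BlueSkyRCStr *fetched = s3store_get(store, "test-object");
        bluesky_string_unref(fetched);
    }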
diff --git a/store.c b/store.c
deleted file mode 100644
index 9b509e6..0000000
--- a/store.c
+++ /dev/null
@@ -1,103 +0,0 @@
-/* Blue Sky: File Systems in the Cloud
- *
- * Copyright (C) 2009 The Regents of the University of California
- * Written by Michael Vrable
- *
- * TODO: Licensing
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <glib.h>
-
-#include "bluesky.h"
-
-/* Interaction with cloud storage.  We expose a very simple GET/PUT-style
- * interface, which different backends can implement.  Available backends
- * (will) include Amazon S3 and a simple local store for testing purposes. */
-
-/* Create and return a new reference-counted string.  The reference count is
- * initially one.  The newly-returned string takes ownership of the memory
- * pointed at by data, and will call g_free on it when the reference count
- * drops to zero. */
-BlueSkyRCStr *bluesky_string_new(gpointer data, gsize len)
-{
-    BlueSkyRCStr *string = g_new(BlueSkyRCStr, 1);
-    string->data = data;
-    string->len = len;
-    g_atomic_int_set(&string->refcount, 1);
-    return string;
-}
-
-void bluesky_string_ref(BlueSkyRCStr *string)
-{
-    if (string == NULL)
-        return;
-
-    g_atomic_int_inc(&string->refcount);
-}
-
-void bluesky_string_unref(BlueSkyRCStr *string)
-{
-    if (string == NULL)
-        return;
-
-    if (g_atomic_int_dec_and_test(&string->refcount)) {
-        g_free(string->data);
-        g_free(string);
-    }
-}
-
-/* Duplicate and return a new reference-counted string, containing a copy of
- * the original data, with a reference count of 1.  As an optimization, if the
- * passed-in string already has a reference count of 1, the original is
- * returned.  Note that this consumes the caller's reference to the passed-in
- * string.  Can be used to make a mutable copy of a shared string.  For this
- * to truly be safe, it is probably needed that there be some type of lock
- * protecting access to the string. */
-BlueSkyRCStr *bluesky_string_dup(BlueSkyRCStr *string)
-{
-    if (string == NULL)
-        return NULL;
-
-    if (g_atomic_int_dec_and_test(&string->refcount)) {
-        /* There are no other shared copies, so return this one. */
-        g_atomic_int_inc(&string->refcount);
-        return string;
-    } else {
-        return bluesky_string_new(g_memdup(string->data, string->len),
-                                  string->len);
-    }
-}
-
-/* Simple in-memory data store for test purposes. */
-typedef struct {
-    GMutex *lock;
-
-    /* TODO: A hashtable isn't optimal for list queries... */
-    GHashTable *store;
-} MemStore;
-
-MemStore *memstore_new()
-{
-    MemStore *store = g_new(MemStore, 1);
-    store->lock = g_mutex_new();
-    store->store = g_hash_table_new_full(g_str_hash, g_str_equal,
-                                         g_free,
-                                         (GDestroyNotify)bluesky_string_unref);
-
-    return store;
-}
-
-BlueSkyRCStr *memstore_get(MemStore *store, const gchar *key)
-{
-    BlueSkyRCStr *s = g_hash_table_lookup(store->store, key);
-    if (s != NULL)
-        bluesky_string_ref(s);
-    return s;
-}
-
-void memstore_put(MemStore *store, const gchar *key, BlueSkyRCStr *val)
-{
-    bluesky_string_ref(val);
-    g_hash_table_insert(store->store, g_strdup(key), val);
-}
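A closing usage sketch for the reference-counting and memstore rules above. It is illustrative, not from the original source: the key name is a placeholder, and it assumes the MemStore declarations are visible (e.g. compiled alongside store.c).

    static void memstore_example(void)
    {
        MemStore *store = memstore_new();

        /* The string takes ownership of the heap buffer, refcount = 1. */
        BlueSkyRCStr *s = bluesky_string_new(g_strdup("payload"), 7);

        memstore_put(store, "inode-1", s);  /* the table takes its own ref */
        bluesky_string_unref(s);            /* drop ours; stored copy lives */

        BlueSkyRCStr *t = memstore_get(store, "inode-1");  /* a fresh ref */
        if (t != NULL) {
            /* bluesky_string_dup consumes this reference and hands back a
             * string safe to modify: either the sole remaining copy, or a
             * private duplicate of the shared data. */
            BlueSkyRCStr *mine = bluesky_string_dup(t);
            bluesky_string_unref(mine);
        }
    }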